| id (string, 3–8 chars) | content (string, 100–981k chars) |
|---|---|
192956
|
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
import yfinance as yf
yf.pdr_override()
import datetime as dt
symbol = 'AMD'
market = 'SPY'
num_of_years = 1
start = dt.date.today() - dt.timedelta(days=365*num_of_years)
end = dt.date.today()
dataset = yf.download(symbol,start,end)
benchmark = yf.download(market,start,end)
dataset['Returns'] = dataset['Adj Close'].pct_change()  # first row is NaN by construction
PP = pd.Series((dataset['High'] + dataset['Low'] + dataset['Close']) / 3)
R1 = pd.Series(2 * PP - dataset['Low'])
S1 = pd.Series(2 * PP - dataset['High'])
R2 = pd.Series(PP + dataset['High'] - dataset['Low'])
S2 = pd.Series(PP - dataset['High'] + dataset['Low'])
R3 = pd.Series(dataset['High'] + 2 * (PP - dataset['Low']))
S3 = pd.Series(dataset['Low'] - 2 * (dataset['High'] - PP))
R4 = pd.Series(dataset['High'] + 3 * (PP - dataset['Low']))
S4 = pd.Series(dataset['Low'] - 3 * (dataset['High'] - PP))
R5 = pd.Series(dataset['High'] + 4 * (PP - dataset['Low']))
S5 = pd.Series(dataset['Low'] - 4 * (dataset['High'] - PP))
P = pd.Series((dataset['Open'] + (dataset['High'] + dataset['Low'] + dataset['Close'])) / 4) # Opening Price Formula
psr = {'P':P, 'R1':R1, 'S1':S1, 'R2':R2, 'S2':S2, 'R3':R3, 'S3':S3,'R4':R4, 'S4':S4,'R5':R5, 'S5':S5}
PSR = pd.DataFrame(psr)
dataset = dataset.join(PSR)
print(dataset.head())
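# Hedged sanity check of the classic pivot formulas above, using a hypothetical bar
# (High=110, Low=90, Close=100) rather than the downloaded data:
_pp = (110 + 90 + 100) / 3      # = 100.0
assert 2 * _pp - 90 == 110      # R1
assert 2 * _pp - 110 == 90      # S1
assert _pp + (110 - 90) == 120  # R2
assert _pp - (110 - 90) == 80   # S2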
pivot_point = pd.concat([dataset['Adj Close'],P,R1,S1,R2,S2,R3,S3],axis=1).plot(figsize=(18,12),grid=True)
plt.title('Stock Pivot Point')
plt.legend(['Price','P','R1','S1','R2','S2','R3','S3'], loc=0)
plt.show()
# Pick a date range to inspect (note: these dates must fall inside the downloaded window)
date_range = dataset[['Adj Close','P','R1','S1','R2','S2','R3','S3']]['2018-05-01':'2018-06-01']
# Woodie pivot points: P = (High + Low + 2*Close) / 4
P = pd.Series((dataset['High'] + dataset['Low'] + 2*dataset['Close']) / 4)
R1 = pd.Series(2 * P - dataset['Low'])
S1 = pd.Series(2 * P - dataset['High'])
R2 = pd.Series(P + dataset['High'] - dataset['Low'])
S2 = pd.Series(P - dataset['High'] + dataset['Low'])
wpp = {'P':P, 'R1':R1, 'S1':S1, 'R2':R2, 'S2':S2}
WPP = pd.DataFrame(wpp)
print(WPP.head())
# Camarilla pivot points: R_n = Close + (High - Low) * 1.1 / k and S_n = Close - (High - Low) * 1.1 / k
# with k = 12, 6, 4, 2 for levels 1 through 4
R1 = pd.Series(dataset['Close'] + (dataset['High'] - dataset['Low']) * 1.1 / 12)
R2 = pd.Series(dataset['Close'] + (dataset['High'] - dataset['Low']) * 1.1 / 6)
R3 = pd.Series(dataset['Close'] + (dataset['High'] - dataset['Low']) * 1.1 / 4)
R4 = pd.Series(dataset['Close'] + (dataset['High'] - dataset['Low']) * 1.1 / 2)
S1 = pd.Series(dataset['Close'] - (dataset['High'] - dataset['Low']) * 1.1 / 12)
S2 = pd.Series(dataset['Close'] - (dataset['High'] - dataset['Low']) * 1.1 / 6)
S3 = pd.Series(dataset['Close'] - (dataset['High'] - dataset['Low']) * 1.1 / 4)
S4 = pd.Series(dataset['Close'] - (dataset['High'] - dataset['Low']) * 1.1 / 2)
cpp = {'R1':R1, 'S1':S1, 'R2':R2, 'S2':S2, 'R3':R3, 'S3':S3,'R4':R4, 'S4':S4}
CPP = pd.DataFrame(cpp)
print(CPP.head())
dataset = yf.download(symbol,start,end)
# Tom DeMark pivot points: choose X based on whether the bar closed down, up, or flat
h_l_c = dataset['Close'] < dataset['Open']   # closed below the open
h_lc = dataset['Close'] > dataset['Open']    # closed above the open
hl_c = dataset['Close'] == dataset['Open']   # closed at the open
P = np.zeros(len(dataset['Close']))
P[h_l_c] = dataset['High'][h_l_c] + 2.0 * dataset['Low'][h_l_c] + dataset['Close'][h_l_c]
P[h_lc] = 2.0 * dataset['High'][h_lc] + dataset['Low'][h_lc] + dataset['Close'][h_lc]
P[hl_c] = dataset['High'][hl_c] + dataset['Low'][hl_c] + 2.0 * dataset['Close'][hl_c]
S1 = P / 2.0 - dataset['High']
R1 = P / 2.0 - dataset['Low']
P = P / 4.0
tdm = {'P': P, 'S1': S1, 'R1': R1}
TDM = pd.DataFrame(tdm)
print(TDM.head())
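# Hedged illustration of the DeMark selection above with a hypothetical down-close bar
# (Open=100, High=110, Low=90, Close=95, so Close < Open): X = High + 2*Low + Close = 385,
# giving P = X/4 = 96.25, S1 = X/2 - High = 82.5 and R1 = X/2 - Low = 102.5.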
# Fibonacci pivot points
PP = pd.Series((dataset['High'] + dataset['Low'] + dataset['Close']) / 3)
R1 = pd.Series((PP + (dataset['High'] - dataset['Low']) * 0.382))
R2 = pd.Series((PP + (dataset['High'] - dataset['Low']) * 0.618))
R3 = pd.Series((PP + (dataset['High'] - dataset['Low']) * 1.000))
S1 = pd.Series((PP - (dataset['High'] - dataset['Low']) * 0.382))
S2 = pd.Series((PP - (dataset['High'] - dataset['Low']) * 0.618))
S3 = pd.Series((PP - (dataset['High'] - dataset['Low']) * 1.000))
fpp = {'PP':PP, 'R1':R1, 'S1':S1, 'R2':R2, 'S2':S2, 'R3':R3, 'S3':S3}
FPP = pd.DataFrame(fpp)
print(FPP.head())
|
192958
|
import unittest
import numpy as np
import tensorflow as tf
import tensorflow_probability
import mvg_distributions.covariance_representations as cov_rep
from mvg_distributions.sqrt_gamma_gaussian import SqrtGammaGaussian, SparseSqrtGammaGaussian
from mvg_distributions.test.test_losses_base import LossesTestBase
tfd = tensorflow_probability.distributions
tfb = tensorflow_probability.bijectors
class TestSqrtGammaGaussian(LossesTestBase):
def setUp(self):
super().setUp()
self.x, self.x_cov_obj, self.sqrt_w_tfd, self.sqrt_gamma_gaussian = self._create_single_sqrt_wishart_pair()
def _create_single_sqrt_wishart_pair(self, add_sparse_gamma=False):
# Create a random scale matrix for the Wishart distribution
diag_precision_prior = np.abs(np.random.normal(size=(self.batch_size, self.features_size)))
diag_precision_prior = diag_precision_prior.astype(self.dtype.as_numpy_dtype)
precision_prior = np.zeros(shape=(self.batch_size, self.features_size, self.features_size),
dtype=self.dtype.as_numpy_dtype)
for i in range(self.batch_size):
precision_prior[i][np.diag_indices_from(precision_prior[i])] = diag_precision_prior[i]
log_diag_precision_prior = np.log(diag_precision_prior)
# Create a random vector of degrees of freedom, whose values must be larger than features_size
df = np.random.uniform(low=self.features_size, high=self.features_size * 10, size=self.batch_size)
df = df.astype(self.dtype.as_numpy_dtype)
# Create a square root Wishart distribution using bijectors
wishart = tfd.Wishart(scale=precision_prior, df=df)
cholesky_bijector = tfb.Invert(tfb.CholeskyOuterProduct())
sqrt_wishart_tfd = tfd.TransformedDistribution(distribution=wishart, bijector=cholesky_bijector)
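# Note: CholeskyOuterProduct maps a lower-triangular L to L @ L^T, so its inverse maps a
# Wishart sample W back to its Cholesky factor; sqrt_wishart_tfd is therefore the
# distribution of Cholesky factors of Wishart draws (a "square-root" Wishart).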
# Create our custom square root Wishart distribution with the same parameters
sqrt_gamma_gaussian = SqrtGammaGaussian(df=df, log_diag_scale=log_diag_precision_prior)
if add_sparse_gamma:
sparse_sqrt_gamma_gaussian = SparseSqrtGammaGaussian(df=df, log_diag_scale=log_diag_precision_prior)
# Create a random Cholesky matrix to test the probability density functions
_, __, x_covariance, x_weights, x_basis, log_diag = self._random_normal_params(cov_rep.PrecisionConvCholFilters)
x = np.linalg.cholesky(np.linalg.inv(x_covariance))
# Our custom square root Wishart is optimized to work with PrecisionConvCholFilters; it measures
# the pdf of the Cholesky factor of the precision matrix
img_w = int(np.sqrt(self.features_size))
sample_shape = tf.TensorShape((self.batch_size, img_w, img_w, 1))
x_cov_obj = cov_rep.PrecisionConvCholFilters(weights_precision=tf.constant(x_weights),
filters_precision=tf.constant(x_basis),
sample_shape=sample_shape)
x_cov_obj.log_diag_chol_precision = log_diag
if add_sparse_gamma:
return x, x_cov_obj, sqrt_wishart_tfd, sqrt_gamma_gaussian, sparse_sqrt_gamma_gaussian
else:
return x, x_cov_obj, sqrt_wishart_tfd, sqrt_gamma_gaussian
def test_log_prob(self):
# Test that square root Gamma Gaussian is the same as a Cholesky Wishart
log_prob1 = self.sqrt_w_tfd.log_prob(self.x)
x_with_log_diag = tf.matrix_set_diag(self.x, self.x_cov_obj.log_diag_chol_precision)
log_prob2 = self.sqrt_gamma_gaussian.log_prob(x_with_log_diag)
x_with_log_diag = tf.matrix_set_diag(self.x_cov_obj.chol_precision, self.x_cov_obj.log_diag_chol_precision)
log_prob4 = self.sqrt_gamma_gaussian.log_prob(x_with_log_diag)
self._asset_allclose_tf_feed(log_prob1, log_prob2)
self._asset_allclose_tf_feed(log_prob1, log_prob4)
def test_samples(self):
# Test that square root Gamma Gaussian is the same as a Cholesky Wishart
sample1 = self.sqrt_w_tfd.sample(seed=0)
sample2 = self.sqrt_gamma_gaussian.sample(seed=0)
sample2 = tf.matrix_set_diag(sample2, tf.exp(tf.matrix_diag_part(sample2)))
self._asset_allclose_tf_feed(sample1, sample2)
class TestSparseSqrtGammaGaussian(TestSqrtGammaGaussian):
def setUp(self):
LossesTestBase.setUp(self)
outputs = self._create_single_sqrt_wishart_pair(add_sparse_gamma=True)
self.x, self.x_cov_obj, self.sqrt_w_tfd, self.sqrt_gamma_gaussian_dense, self.sqrt_gamma_gaussian = outputs
def test_log_prob(self):
# Test that square root Gamma Gaussian with dense matrices is the same as a Cholesky Wishart
log_prob1 = self.sqrt_w_tfd.log_prob(self.x)
log_prob2 = self.sqrt_gamma_gaussian.log_prob(self.x)
log_prob4 = self.sqrt_gamma_gaussian.log_prob(self.x_cov_obj.chol_precision)
self._asset_allclose_tf_feed(log_prob1, log_prob2)
self._asset_allclose_tf_feed(log_prob1, log_prob4)
def test_log_prob_sparse(self):
# Test that square root Gamma Gaussian with sparse matrices is the same as the dense version,
# when the sparse elements are removed afterwards
x_with_log_diag = tf.matrix_set_diag(self.x, self.x_cov_obj.log_diag_chol_precision)
log_prob1_gamma = self.sqrt_gamma_gaussian_dense._log_prob_sqrt_gamma(x_with_log_diag)
log_prob1_normal = self.sqrt_gamma_gaussian_dense.normal_dist.log_prob(self.x)
off_diag_mask = self.x_cov_obj.np_off_diag_mask() # Zero out off-diagonal terms
log_prob1_normal = tf.reduce_sum(log_prob1_normal * off_diag_mask, axis=[1, 2])
log_prob1 = log_prob1_gamma + log_prob1_normal
log_prob2 = self.sqrt_gamma_gaussian.log_prob(self.x_cov_obj)
self._asset_allclose_tf_feed(log_prob1, log_prob2)
@unittest.skip
def test_samples(self):
pass
if __name__ == '__main__':
unittest.main()
|
193002
|
from delphi.GrFN.networks import GroundedFunctionNetwork
# -----------------------------------------------------------------------------
#
# -----------------------------------------------------------------------------
print('Running demo_generate_grfn.py')
source_fortran_file = 'DiscreteSIR-noarrays.f'
print(f' source_fortran_file: {source_fortran_file}')
grfn = GroundedFunctionNetwork.from_fortran_file(source_fortran_file)
agraph = grfn.to_AGraph()
agraph.draw('graph.pdf', prog='dot')
# -----------------------------------------------------------------------------
|
193006
|
import requests
import pickle
from os import path
from pkg_resources import resource_filename
codechefs_languages_map = dict()
# leetcodes_supported_languages = ["cpp", "java", "python", "python3", "c", "csharp", "javascript", "ruby", "swift", "kotlin", "scala", "bash", "go"]
leetcodes_languages_map = dict()
# geeksforgeeks_supported_languages = ["Python", "Python3", "Cpp", "Cpp14", "Java", "Csharp", "C", "Php", "Scala", "Perl"]
geeksforgeeks_languages_map = dict()
supported_languages = dict()
supported_languages_extension = dict()
def set_codechefs_languages_mapping(id, lang_code):
"""
codechef uses lang_code for languages to identify which interpreter/compiler to use
"""
# for Python, when --lang is not set, detection falls back to the .py extension, which cannot distinguish python2/python3/pypy/pypy_3, so it maps to pypy_3 (id = 48)
codechefs_languages_map[id] = lang_code
def set_geeksforgeeks_language_mapping(id, geekslang):
geeksforgeeks_languages_map[id] = geekslang
def set_leetcodes_language_mapping(id, leetslang):
leetcodes_languages_map[id] = leetslang
def load_supported_languages():
"""
CodeChef has an extensive range of languages, so it is used as the base for all languages supported by ccr
"""
response = requests.get('https://www.codechef.com/api/ide/undefined/languages/all').json()
id = 1
for lang_code, payload in response['languages'].items():
lang = "_".join(payload['full_name'].lower().split())
# print(lang)
supported_languages[lang] = id
supported_languages_extension[payload['extension']] = id
geekslang = leetslang = None
# map codechef's supported languages to other OJ clients
if lang == 'c++14':
geekslang = 'Cpp14'
leetslang = 'cpp'
elif lang == 'java':
geekslang = 'Java'
leetslang = 'java'
elif lang == 'python' or lang =="pypy":
geekslang = "Python"
leetslang = "python"
elif lang == 'python3' or lang == 'pypy_3':
geekslang = "Python3"
leetslang = "python3"
elif lang == 'c':
geekslang = 'C'
leetslang = 'c'
elif lang == 'c#':
geekslang = 'Csharp'
leetslang = 'csharp'
elif lang == 'scala':
geekslang = 'Scala'
leetslang = 'scala'
elif lang == 'php':
geekslang = 'Php'
elif lang == 'perl':
geekslang = 'Perl'
elif lang == 'go':
leetslang = 'go'
elif lang == 'swift':
leetslang = 'swift'
elif lang == 'ruby':
leetslang = 'ruby'
elif lang == 'kotlin':
leetslang = 'kotlin'
elif lang == 'bash':
leetslang = 'bash'
if geekslang:
set_geeksforgeeks_language_mapping(id, geekslang)
if leetslang:
set_leetcodes_language_mapping(id, leetslang)
set_codechefs_languages_mapping(id, payload['id'])
id += 1
def dump_pickle(path,data):
try:
with open(path,"wb") as f:
pickle.dump(data,f)
except Exception as e:
raise e
def load_pickle(path):
try:
with open(path,"rb") as f:
data = pickle.load(f)
return data
except Exception as e:
raise e
# delete pickle if new language added in codechef api
try:
# during development use this
# sl_path = path.join(path.dirname(path.realpath(__file__)),"pickle/supported_languages.pickle")
# sle_path = path.join(path.dirname(path.realpath(__file__)),"pickle/supported_languages_extension.pickle")
# clm_path = path.join(path.dirname(path.realpath(__file__)),"pickle/codechefs_languages_map.pickle")
# glm_path = path.join(path.dirname(path.realpath(__file__)),"pickle/geeksforgeeks_languages_map.pickle")
#for packaging use below
sl_path = resource_filename("ccr","pickle/supported_languages.pickle")
sle_path = resource_filename("ccr","pickle/supported_languages_extension.pickle")
clm_path = resource_filename("ccr","pickle/codechefs_languages_map.pickle")
glm_path = resource_filename("ccr","pickle/geeksforgeeks_languages_map.pickle")
supported_languages = load_pickle(sl_path)
supported_languages_extension = load_pickle(sle_path)
codechefs_languages_map = load_pickle(clm_path)
geeksforgeeks_languages_map = load_pickle(glm_path)
except Exception:
load_supported_languages()
try:
dump_pickle(sl_path,supported_languages)
dump_pickle(sle_path,supported_languages_extension)
dump_pickle(clm_path,codechefs_languages_map)
dump_pickle(glm_path,geeksforgeeks_languages_map)
except Exception:
print("unable to dump pickle data..")
def supported_language_cli_output_maker():
"""
Generate the supported-languages listing; copy-paste the output into cli.py.
"""
count = 0
for i in supported_languages:
print("- "+i, end="\t")
count+=1
if(count == 3):
print()
count = 0
# print(supported_languages)
# print(supported_languages_extension)
# print(codechefs_languages_map)
# print(geeksforgeeks_languages_map)
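# Hedged usage sketch (requires network access to the CodeChef languages endpoint above;
# the 'python3' key is hypothetical, actual keys depend on the live API's full_name values):
# after load_supported_languages() runs, an id from supported_languages can be mapped to
# the CodeChef-internal language id and, when available, to the LeetCode/GfG names.
#   lang_id = supported_languages.get('python3')
#   codechefs_languages_map.get(lang_id)      # CodeChef interpreter/compiler id
#   leetcodes_languages_map.get(lang_id)      # e.g. 'python3'
#   geeksforgeeks_languages_map.get(lang_id)  # e.g. 'Python3'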
|
193019
|
import torch
import torch.nn as nn
from models.baseModule import FuseBlock, BaseNet, Interp
class FuseNet(BaseNet):
def __init__(self):
super(FuseNet, self).__init__()
self.ConvIn = nn.Conv2d(in_channels=512, out_channels=512, kernel_size=1, stride=1, bias=False)
self.AADBlk1 = FuseBlock(cin=512, cout=256, c_ef=512)
self.AADBlk2 = FuseBlock(cin=256, cout=128, c_ef=256)
self.AADBlk3 = FuseBlock(cin=128, cout=64, c_ef=128)
self.AADBlk4 = FuseBlock(cin=64, cout=32, c_ef=64)
self.Up2x = Interp(scale=2)
self.ItStage1 = nn.Sequential(nn.ReplicationPad2d([1, 1, 1, 1]),
nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3),
nn.LeakyReLU(negative_slope=0.1, inplace=True),
nn.Conv2d(in_channels=32, out_channels=3, kernel_size=1),
)
self.randomInitNet()
def forward(self, z_e, z_f):
ST16x = self.ConvIn(torch.cat([z_e[0], z_f[0]], dim=1)) # 64
ST8x = self.AADBlk1(self.Up2x(ST16x), z_e[1], z_f[1]) # 32
ST4x = self.AADBlk2(self.Up2x(ST8x), z_e[2], z_f[2]) # 16
ST2x = self.AADBlk3(self.Up2x(ST4x), z_e[3], z_f[3]) # 8
ST1x = self.AADBlk4(self.Up2x(ST2x), z_e[4], z_f[4]) # 4
ItStage1 = self.ItStage1(ST1x)
return ItStage1, ST4x, ST2x, ST1x
|
193028
|
import numpy as np
from tqdm.autonotebook import tqdm
import gc
import warnings
import sklearn.utils
_remove_cache = {}
def remove_retrain(nmask, X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model, random_state):
""" The model is retrained for each test sample with the important features set to a constant.
If you want to know how important a set of features is you can ask how the model would be
different if those features had never existed. To determine this we can mask those features
across the entire training and test datasets, then retrain the model. If we compare the
output of this retrained model to the original model we can see the effect produced by knowing
the features we masked. Since for individualized explanation methods each test sample has a
different set of most important features we need to retrain the model for every test sample
to get the change in model performance when a specified fraction of the most important features
are withheld.
"""
warnings.warn("The retrain based measures can incorrectly evaluate models in some cases!")
# see if we match the last cached call
global _remove_cache
args = (X_train, y_train, X_test, y_test, model_generator, metric)
cache_match = False
if "args" in _remove_cache:
if all(a is b for a,b in zip(_remove_cache["args"], args)) and np.all(_remove_cache["attr_test"] == attr_test):
cache_match = True
X_train, X_test = to_array(X_train, X_test)
# how many features to mask
assert X_train.shape[1] == X_test.shape[1]
# this is the model we will retrain many times
model_masked = model_generator()
# mask nmask top features and re-train the model for each test explanation
X_train_tmp = np.zeros(X_train.shape)
X_test_tmp = np.zeros(X_test.shape)
yp_masked_test = np.zeros(y_test.shape)
tie_breaking_noise = const_rand(X_train.shape[1]) * 1e-6
last_nmask = _remove_cache.get("nmask", None)
last_yp_masked_test = _remove_cache.get("yp_masked_test", None)
for i in tqdm(range(len(y_test)), "Retraining for the 'remove' metric"):
if cache_match and last_nmask[i] == nmask[i]:
yp_masked_test[i] = last_yp_masked_test[i]
elif nmask[i] == 0:
yp_masked_test[i] = trained_model.predict(X_test[i:i+1])[0]
else:
# mask out the most important features for this test instance
X_train_tmp[:] = X_train
X_test_tmp[:] = X_test
ordering = np.argsort(-attr_test[i,:] + tie_breaking_noise)
X_train_tmp[:,ordering[:nmask[i]]] = X_train[:,ordering[:nmask[i]]].mean()
X_test_tmp[i,ordering[:nmask[i]]] = X_train[:,ordering[:nmask[i]]].mean()
# retrain the model and make a prediction
model_masked.fit(X_train_tmp, y_train)
yp_masked_test[i] = model_masked.predict(X_test_tmp[i:i+1])[0]
# save our results so the next call to us can be faster when there is redundancy
_remove_cache["nmask"] = nmask
_remove_cache["yp_masked_test"] = yp_masked_test
_remove_cache["attr_test"] = attr_test
_remove_cache["args"] = args
return metric(y_test, yp_masked_test)
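# Hedged illustration of the masking step used above (standalone, hypothetical numbers):
# argsort of the negated attributions orders features from most to least important, and the
# first nmask of them are overwritten with training-set means before refitting the model.
# _attr = np.array([0.1, 0.7, 0.2])   # attributions for one test sample
# _order = np.argsort(-_attr)         # -> array([1, 2, 0]); feature 1 is most important
# X_tmp[:, _order[:2]] = X_train[:, _order[:2]].mean()   # mask the two most important features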
def remove_mask(nmask, X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model, random_state):
""" Each test sample is masked by setting the important features to a constant.
"""
X_train, X_test = to_array(X_train, X_test)
# how many features to mask
assert X_train.shape[1] == X_test.shape[1]
# mask nmask top features for each test explanation
X_test_tmp = X_test.copy()
tie_breaking_noise = const_rand(X_train.shape[1], random_state) * 1e-6
mean_vals = X_train.mean(0)
for i in range(len(y_test)):
if nmask[i] > 0:
ordering = np.argsort(-attr_test[i,:] + tie_breaking_noise)
X_test_tmp[i,ordering[:nmask[i]]] = mean_vals[ordering[:nmask[i]]]
yp_masked_test = trained_model.predict(X_test_tmp)
return metric(y_test, yp_masked_test)
def remove_impute(nmask, X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model, random_state):
""" The model is revaluated for each test sample with the important features set to an imputed value.
Note that the imputation is done using a multivariate normality assumption on the dataset. This depends on
being able to estimate the full data covariance matrix (and inverse) accuractly. So X_train.shape[0] should
be significantly bigger than X_train.shape[1].
"""
X_train, X_test = to_array(X_train, X_test)
# how many features to mask
assert X_train.shape[1] == X_test.shape[1]
# keep nkeep top features for each test explanation
C = np.cov(X_train.T)
C += np.eye(C.shape[0]) * 1e-6
X_test_tmp = X_test.copy()
yp_masked_test = np.zeros(y_test.shape)
tie_breaking_noise = const_rand(X_train.shape[1], random_state) * 1e-6
mean_vals = X_train.mean(0)
for i in range(len(y_test)):
if nmask[i] > 0:
ordering = np.argsort(-attr_test[i,:] + tie_breaking_noise)
observe_inds = ordering[nmask[i]:]
impute_inds = ordering[:nmask[i]]
# impute missing data assuming it follows a multivariate normal distribution
Coo_inv = np.linalg.inv(C[observe_inds,:][:,observe_inds])
Cio = C[impute_inds,:][:,observe_inds]
impute = mean_vals[impute_inds] + Cio @ Coo_inv @ (X_test[i, observe_inds] - mean_vals[observe_inds])
X_test_tmp[i, impute_inds] = impute
yp_masked_test = trained_model.predict(X_test_tmp)
return metric(y_test, yp_masked_test)
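# Hedged standalone sketch of the conditional-Gaussian imputation above: for a multivariate
# normal, E[x_impute | x_observe] = mu_i + C_io @ inv(C_oo) @ (x_o - mu_o). Hypothetical
# 3-feature example in which feature 0 is imputed from features 1 and 2:
# rng = np.random.RandomState(0)
# X = rng.multivariate_normal([0., 0., 0.], [[1., .8, .2], [.8, 1., .1], [.2, .1, 1.]], size=1000)
# C, mu = np.cov(X.T), X.mean(0)
# x_o = X[0, [1, 2]]
# x0_hat = mu[0] + C[0, [1, 2]] @ np.linalg.inv(C[np.ix_([1, 2], [1, 2])]) @ (x_o - mu[[1, 2]])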
def remove_resample(nmask, X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model, random_state):
""" The model is revaluated for each test sample with the important features set to resample background values.
"""
X_train, X_test = to_array(X_train, X_test)
# how many features to mask
assert X_train.shape[1] == X_test.shape[1]
# how many samples to take
nsamples = 100
# keep nkeep top features for each test explanation
N,M = X_test.shape
X_test_tmp = np.tile(X_test, [1, nsamples]).reshape(nsamples * N, M)
tie_breaking_noise = const_rand(M) * 1e-6
inds = sklearn.utils.resample(np.arange(N), n_samples=nsamples, random_state=random_state)
for i in range(N):
if nmask[i] > 0:
ordering = np.argsort(-attr_test[i,:] + tie_breaking_noise)
X_test_tmp[i*nsamples:(i+1)*nsamples, ordering[:nmask[i]]] = X_train[inds, :][:, ordering[:nmask[i]]]
yp_masked_test = trained_model.predict(X_test_tmp)
yp_masked_test = np.reshape(yp_masked_test, (N, nsamples)).mean(1) # take the mean output over all samples
return metric(y_test, yp_masked_test)
def batch_remove_retrain(nmask_train, nmask_test, X_train, y_train, X_test, y_test, attr_train, attr_test, model_generator, metric):
""" An approximation of holdout that only retraines the model once.
This is alse called ROAR (RemOve And Retrain) in work by Google. It is much more computationally
efficient that the holdout method because it masks the most important features in every sample
and then retrains the model once, instead of retraining the model for every test sample like
the holdout metric.
"""
warnings.warn("The retrain based measures can incorrectly evaluate models in some cases!")
X_train, X_test = to_array(X_train, X_test)
# how many features to mask
assert X_train.shape[1] == X_test.shape[1]
# mask nmask top features for each explanation
X_train_tmp = X_train.copy()
X_train_mean = X_train.mean(0)
tie_breaking_noise = const_rand(X_train.shape[1]) * 1e-6
for i in range(len(y_train)):
if nmask_train[i] > 0:
ordering = np.argsort(-attr_train[i, :] + tie_breaking_noise)
X_train_tmp[i, ordering[:nmask_train[i]]] = X_train_mean[ordering[:nmask_train[i]]]
X_test_tmp = X_test.copy()
for i in range(len(y_test)):
if nmask_test[i] > 0:
ordering = np.argsort(-attr_test[i, :] + tie_breaking_noise)
X_test_tmp[i, ordering[:nmask_test[i]]] = X_train_mean[ordering[:nmask_test[i]]]
# train the model with all the given features masked
model_masked = model_generator()
model_masked.fit(X_train_tmp, y_train)
yp_test_masked = model_masked.predict(X_test_tmp)
return metric(y_test, yp_test_masked)
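# Hedged usage sketch (hypothetical data, model and attributions; Ridge and mean_squared_error
# are extra imports, not part of this module):
# from sklearn.linear_model import Ridge
# from sklearn.metrics import mean_squared_error
# nmask_train = np.full(len(y_train), 2)   # remove the 2 most important features per sample
# nmask_test = np.full(len(y_test), 2)
# score = batch_remove_retrain(nmask_train, nmask_test, X_train, y_train, X_test, y_test,
#                              attr_train, attr_test, lambda: Ridge(), mean_squared_error)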
_keep_cache = {}
def keep_retrain(nkeep, X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model, random_state):
""" The model is retrained for each test sample with the non-important features set to a constant.
If you want to know how important a set of features is you can ask how the model would be
different if only those features had existed. To determine this we can mask the other features
across the entire training and test datasets, then retrain the model. If we compare the
output of this retrained model to the original model we can see the effect produced by only
knowing the important features. Since for individualized explanation methods each test sample
has a different set of most important features we need to retrain the model for every test sample
to get the change in model performance when a specified fraction of the most important features
are retained.
"""
warnings.warn("The retrain based measures can incorrectly evaluate models in some cases!")
# see if we match the last cached call
global _keep_cache
args = (X_train, y_train, X_test, y_test, model_generator, metric)
cache_match = False
if "args" in _keep_cache:
if all(a is b for a,b in zip(_keep_cache["args"], args)) and np.all(_keep_cache["attr_test"] == attr_test):
cache_match = True
X_train, X_test = to_array(X_train, X_test)
# how many features to mask
assert X_train.shape[1] == X_test.shape[1]
# this is the model we will retrain many times
model_masked = model_generator()
# keep nkeep top features and re-train the model for each test explanation
X_train_tmp = np.zeros(X_train.shape)
X_test_tmp = np.zeros(X_test.shape)
yp_masked_test = np.zeros(y_test.shape)
tie_breaking_noise = const_rand(X_train.shape[1]) * 1e-6
last_nkeep = _keep_cache.get("nkeep", None)
last_yp_masked_test = _keep_cache.get("yp_masked_test", None)
for i in tqdm(range(len(y_test)), "Retraining for the 'keep' metric"):
if cache_match and last_nkeep[i] == nkeep[i]:
yp_masked_test[i] = last_yp_masked_test[i]
elif nkeep[i] == attr_test.shape[1]:
yp_masked_test[i] = trained_model.predict(X_test[i:i+1])[0]
else:
# mask out the most important features for this test instance
X_train_tmp[:] = X_train
X_test_tmp[:] = X_test
ordering = np.argsort(-attr_test[i,:] + tie_breaking_noise)
X_train_tmp[:,ordering[nkeep[i]:]] = X_train[:,ordering[nkeep[i]:]].mean()
X_test_tmp[i,ordering[nkeep[i]:]] = X_train[:,ordering[nkeep[i]:]].mean()
# retrain the model and make a prediction
model_masked.fit(X_train_tmp, y_train)
yp_masked_test[i] = model_masked.predict(X_test_tmp[i:i+1])[0]
# save our results so the next call to us can be faster when there is redundancy
_keep_cache["nkeep"] = nkeep
_keep_cache["yp_masked_test"] = yp_masked_test
_keep_cache["attr_test"] = attr_test
_keep_cache["args"] = args
return metric(y_test, yp_masked_test)
def keep_mask(nkeep, X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model, random_state):
""" The model is revaluated for each test sample with the non-important features set to their mean.
"""
X_train, X_test = to_array(X_train, X_test)
# how many features to mask
assert X_train.shape[1] == X_test.shape[1]
# keep nkeep top features for each test explanation
X_test_tmp = X_test.copy()
yp_masked_test = np.zeros(y_test.shape)
tie_breaking_noise = const_rand(X_train.shape[1], random_state) * 1e-6
mean_vals = X_train.mean(0)
for i in range(len(y_test)):
if nkeep[i] < X_test.shape[1]:
ordering = np.argsort(-attr_test[i,:] + tie_breaking_noise)
X_test_tmp[i,ordering[nkeep[i]:]] = mean_vals[ordering[nkeep[i]:]]
yp_masked_test = trained_model.predict(X_test_tmp)
return metric(y_test, yp_masked_test)
def keep_impute(nkeep, X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model, random_state):
""" The model is revaluated for each test sample with the non-important features set to an imputed value.
Note that the imputation is done using a multivariate normality assumption on the dataset. This depends on
being able to estimate the full data covariance matrix (and inverse) accuractly. So X_train.shape[0] should
be significantly bigger than X_train.shape[1].
"""
X_train, X_test = to_array(X_train, X_test)
# how many features to mask
assert X_train.shape[1] == X_test.shape[1]
# keep nkeep top features for each test explanation
C = np.cov(X_train.T)
C += np.eye(C.shape[0]) * 1e-6
X_test_tmp = X_test.copy()
yp_masked_test = np.zeros(y_test.shape)
tie_breaking_noise = const_rand(X_train.shape[1], random_state) * 1e-6
mean_vals = X_train.mean(0)
for i in range(len(y_test)):
if nkeep[i] < X_test.shape[1]:
ordering = np.argsort(-attr_test[i,:] + tie_breaking_noise)
observe_inds = ordering[:nkeep[i]]
impute_inds = ordering[nkeep[i]:]
# impute missing data assuming it follows a multivariate normal distribution
Coo_inv = np.linalg.inv(C[observe_inds,:][:,observe_inds])
Cio = C[impute_inds,:][:,observe_inds]
impute = mean_vals[impute_inds] + Cio @ Coo_inv @ (X_test[i, observe_inds] - mean_vals[observe_inds])
X_test_tmp[i, impute_inds] = impute
yp_masked_test = trained_model.predict(X_test_tmp)
return metric(y_test, yp_masked_test)
def keep_resample(nkeep, X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model, random_state):
""" The model is revaluated for each test sample with the non-important features set to resample background values.
""" # why broken? overwriting?
X_train, X_test = to_array(X_train, X_test)
# how many features to mask
assert X_train.shape[1] == X_test.shape[1]
# how many samples to take
nsamples = 100
# keep nkeep top features for each test explanation
N,M = X_test.shape
X_test_tmp = np.tile(X_test, [1, nsamples]).reshape(nsamples * N, M)
tie_breaking_noise = const_rand(M) * 1e-6
inds = sklearn.utils.resample(np.arange(N), n_samples=nsamples, random_state=random_state)
for i in range(N):
if nkeep[i] < M:
ordering = np.argsort(-attr_test[i,:] + tie_breaking_noise)
X_test_tmp[i*nsamples:(i+1)*nsamples, ordering[nkeep[i]:]] = X_train[inds, :][:, ordering[nkeep[i]:]]
yp_masked_test = trained_model.predict(X_test_tmp)
yp_masked_test = np.reshape(yp_masked_test, (N, nsamples)).mean(1) # take the mean output over all samples
return metric(y_test, yp_masked_test)
def batch_keep_retrain(nkeep_train, nkeep_test, X_train, y_train, X_test, y_test, attr_train, attr_test, model_generator, metric):
""" An approximation of keep that only retraines the model once.
This is alse called KAR (Keep And Retrain) in work by Google. It is much more computationally
efficient that the keep method because it masks the unimportant features in every sample
and then retrains the model once, instead of retraining the model for every test sample like
the keep metric.
"""
warnings.warn("The retrain based measures can incorrectly evaluate models in some cases!")
X_train, X_test = to_array(X_train, X_test)
# how many features to mask
assert X_train.shape[1] == X_test.shape[1]
# mask nkeep top features for each explanation
X_train_tmp = X_train.copy()
X_train_mean = X_train.mean(0)
tie_breaking_noise = const_rand(X_train.shape[1]) * 1e-6
for i in range(len(y_train)):
if nkeep_train[i] < X_train.shape[1]:
ordering = np.argsort(-attr_train[i, :] + tie_breaking_noise)
X_train_tmp[i, ordering[nkeep_train[i]:]] = X_train_mean[ordering[nkeep_train[i]:]]
X_test_tmp = X_test.copy()
for i in range(len(y_test)):
if nkeep_test[i] < X_test.shape[1]:
ordering = np.argsort(-attr_test[i, :] + tie_breaking_noise)
X_test_tmp[i, ordering[nkeep_test[i]:]] = X_train_mean[ordering[nkeep_test[i]:]]
# train the model with all the features not given masked
model_masked = model_generator()
model_masked.fit(X_train_tmp, y_train)
yp_test_masked = model_masked.predict(X_test_tmp)
return metric(y_test, yp_test_masked)
def local_accuracy(X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model):
""" The how well do the features plus a constant base rate sum up to the model output.
"""
X_train, X_test = to_array(X_train, X_test)
# how many features to mask
assert X_train.shape[1] == X_test.shape[1]
# keep nkeep top features and re-train the model for each test explanation
yp_test = trained_model.predict(X_test)
return metric(yp_test, strip_list(attr_test).sum(1))
def to_array(*args):
return [a.values if str(type(a)).endswith("'pandas.core.frame.DataFrame'>") else a for a in args]
def const_rand(size, seed=23980):
""" Generate a random array with a fixed seed.
"""
state = np.random.get_state()  # np.random.seed() returns None, so save the full RNG state instead
np.random.seed(seed)
out = np.random.rand(size)
np.random.set_state(state)
return out
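# Illustrative note: the fixed seed makes the tie-breaking noise reproducible across calls,
# e.g. np.allclose(const_rand(5), const_rand(5)) holds, while restoring the saved RNG state
# above keeps callers' global numpy randomness unaffected.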
def const_shuffle(arr, seed=23980):
""" Shuffle an array in-place with a fixed seed.
"""
state = np.random.get_state()  # save the full RNG state rather than the (None) return of seed()
np.random.seed(seed)
np.random.shuffle(arr)
np.random.set_state(state)
def strip_list(attrs):
""" This assumes that if you have a list of outputs you just want the second one (the second class is the '1' class).
"""
if isinstance(attrs, list):
return attrs[1]
else:
return attrs
|
193068
|
from __future__ import print_function
from __future__ import with_statement
from os.path import exists
from twisted.python import log, failure
from twisted.trial import unittest
from twisted.test import proto_helpers
from twisted.internet import defer, error
from txtorcon import TorControlProtocol, TorProtocolFactory, TorState
from txtorcon import ITorControlProtocol
from txtorcon.torcontrolprotocol import parse_keywords, DEFAULT_VALUE
from txtorcon.util import hmac_sha256
import functools
import tempfile
import base64
from binascii import b2a_hex, a2b_hex
class CallbackChecker:
def __init__(self, expected):
self.expected_value = expected
self.called_back = False
def __call__(self, *args, **kwargs):
v = args[0]
if v != self.expected_value:
print("WRONG")
raise RuntimeError(
'Expected "%s" but got "%s"' % (self.expected_value, v)
)
self.called_back = True
return v
class InterfaceTests(unittest.TestCase):
def test_implements(self):
self.assertTrue(ITorControlProtocol.implementedBy(TorControlProtocol))
def test_object_implements(self):
self.assertTrue(ITorControlProtocol.providedBy(TorControlProtocol()))
class LogicTests(unittest.TestCase):
def setUp(self):
self.protocol = TorControlProtocol()
self.protocol.connectionMade = lambda: None
self.transport = proto_helpers.StringTransport()
self.protocol.makeConnection(self.transport)
def test_set_conf_wrong_args(self):
ctl = TorControlProtocol()
d = ctl.set_conf('a')
self.assertTrue(d.called)
self.assertTrue(d.result)
self.assertTrue('even number' in d.result.getErrorMessage())
# ignore the error so trial doesn't get unhappy
d.addErrback(lambda foo: True)
return d
class FactoryTests(unittest.TestCase):
def test_create(self):
TorProtocolFactory().buildProtocol(None)
class AuthenticationTests(unittest.TestCase):
def setUp(self):
self.protocol = TorControlProtocol()
self.transport = proto_helpers.StringTransport()
def send(self, line):
assert type(line) == bytes
self.protocol.dataReceived(line.strip() + b"\r\n")
def test_authenticate_cookie(self):
self.protocol.makeConnection(self.transport)
self.assertEqual(self.transport.value(), b'PROTOCOLINFO 1\r\n')
self.transport.clear()
cookie_data = b'cookiedata!cookiedata!cookiedata'
with open('authcookie', 'wb') as f:
f.write(cookie_data)
self.send(b'250-PROTOCOLINFO 1')
self.send(b'250-AUTH METHODS=COOKIE,HASHEDPASSWORD COOKIEFILE="authcookie"')
self.send(b'250-VERSION Tor="0.2.2.34"')
self.send(b'250 OK')
self.assertEqual(
self.transport.value(),
b'AUTHENTICATE ' + b2a_hex(cookie_data) + b'\r\n',
)
def test_authenticate_password(self):
self.protocol.password_function = lambda: '<PASSWORD>'
self.protocol.makeConnection(self.transport)
self.assertEqual(self.transport.value(), b'PROTOCOLINFO 1\r\n')
self.transport.clear()
self.send(b'250-PROTOCOLINFO 1')
self.send(b'250-AUTH METHODS=HASHEDPASSWORD')
self.send(b'250-VERSION Tor="0.2.2.34"')
self.send(b'250 OK')
self.assertEqual(
self.transport.value(),
b'AUTHENTICATE ' + b2a_hex(b'foo') + b'\r\n'
)
def test_authenticate_password_not_bytes(self):
self.protocol.password_function = lambda: u'<PASSWORD>'
self.protocol.makeConnection(self.transport)
self.assertEqual(self.transport.value(), b'PROTOCOLINFO 1\r\n')
self.transport.clear()
self.send(b'250-PROTOCOLINFO 1')
self.send(b'250-AUTH METHODS=HASHEDPASSWORD')
self.send(b'250-VERSION Tor="0.2.2.34"')
self.send(b'250 OK')
self.assertEqual(
self.transport.value(),
b'AUTHENTICATE ' + b2a_hex(b'foo') + b'\r\n'
)
def test_authenticate_null(self):
self.protocol.makeConnection(self.transport)
self.assertEqual(self.transport.value(), b'PROTOCOLINFO 1\r\n')
self.transport.clear()
self.send(b'250-PROTOCOLINFO 1')
self.send(b'250-AUTH METHODS=NULL')
self.send(b'250-VERSION Tor="0.2.2.34"')
self.send(b'250 OK')
self.assertEqual(self.transport.value(), b'AUTHENTICATE\r\n')
def test_authenticate_password_deferred(self):
d = defer.Deferred()
self.protocol.password_function = lambda: d
self.protocol.makeConnection(self.transport)
self.assertEqual(self.transport.value(), b'PROTOCOLINFO 1\r\n')
self.transport.clear()
self.send(b'250-PROTOCOLINFO 1')
self.send(b'250-AUTH METHODS=HASHEDPASSWORD')
self.send(b'250-VERSION Tor="0.2.2.34"')
self.send(b'250 OK')
# make sure we haven't tried to authenticate before getting
# the password callback
self.assertEqual(self.transport.value(), b'')
d.callback('foo')
# now make sure we DID try to authenticate
self.assertEqual(
self.transport.value(),
b'AUTHENTICATE ' + b2a_hex(b"foo") + b'\r\n'
)
def test_authenticate_password_deferred_but_no_password(self):
d = defer.Deferred()
self.protocol.password_function = lambda: d
self.protocol.makeConnection(self.transport)
self.assertEqual(self.transport.value(), b'PROTOCOLINFO 1\r\n')
self.transport.clear()
self.send(b'250-PROTOCOLINFO 1')
self.send(b'250-AUTH METHODS=HASHEDPASSWORD')
self.send(b'250-VERSION Tor="0.2.2.34"')
self.send(b'250 OK')
d.callback(None)
return self.assertFailure(self.protocol.post_bootstrap, RuntimeError)
def confirmAuthFailed(self, *args):
self.auth_failed = True
def test_authenticate_no_password(self):
self.protocol.post_bootstrap.addErrback(self.confirmAuthFailed)
self.auth_failed = False
self.protocol.makeConnection(self.transport)
self.assertEqual(self.transport.value(), b'PROTOCOLINFO 1\r\n')
self.send(b'250-PROTOCOLINFO 1')
self.send(b'250-AUTH METHODS=HASHEDPASSWORD')
self.send(b'250-VERSION Tor="0.2.2.34"')
self.send(b'250 OK')
self.assertTrue(self.auth_failed)
class DisconnectionTests(unittest.TestCase):
def setUp(self):
self.protocol = TorControlProtocol()
self.protocol.connectionMade = lambda: None
self.transport = proto_helpers.StringTransportWithDisconnection()
self.protocol.makeConnection(self.transport)
# why doesn't makeConnection do this?
self.transport.protocol = self.protocol
def tearDown(self):
self.protocol = None
def test_disconnect_callback(self):
"""
see that we get our callback on_disconnect if the transport
goes away
"""
def it_was_called(*args):
it_was_called.yes = True
return None
it_was_called.yes = False
self.protocol.on_disconnect.addCallback(it_was_called)
self.protocol.on_disconnect.addErrback(it_was_called)
f = failure.Failure(error.ConnectionDone("It's all over"))
self.protocol.connectionLost(f)
self.assertTrue(it_was_called.yes)
def test_when_disconnect(self):
"""
see that we get our callback for when_disconnected if the
transport goes away
"""
def it_was_called(arg):
it_was_called.yes = True
return None
it_was_called.yes = False
d = self.protocol.when_disconnected()
d.addCallback(it_was_called)
f = failure.Failure(error.ConnectionDone("It's all over"))
self.protocol.connectionLost(f)
self.assertTrue(it_was_called.yes)
def test_when_disconnect_error(self):
"""
see that we get our errback for when_disconnected if the
transport goes away
"""
def it_was_called(arg):
it_was_called.yes = True
return None
it_was_called.yes = False
d = self.protocol.when_disconnected()
d.addErrback(it_was_called)
f = failure.Failure(RuntimeError("sadness"))
self.protocol.connectionLost(f)
self.assertTrue(it_was_called.yes)
def test_disconnect_errback(self):
"""
see that we get our callback on_disconnect if the transport
goes away
"""
def it_was_called(*args):
it_was_called.yes = True
return None
it_was_called.yes = False
self.protocol.on_disconnect.addCallback(it_was_called)
self.protocol.on_disconnect.addErrback(it_was_called)
f = failure.Failure(RuntimeError("The thing didn't do the stuff."))
self.protocol.connectionLost(f)
self.assertTrue(it_was_called.yes)
def test_disconnect_outstanding_commands(self):
"""
outstanding commands should errback on disconnect
"""
def it_was_called(f):
str(f)
it_was_called.count += 1
return None
it_was_called.count = 0
# we want to make sure outstanding commands get errbacks
d0 = self.protocol.queue_command("some command0")
d1 = self.protocol.queue_command("some command1")
d0.addErrback(it_was_called)
d1.addErrback(it_was_called)
self.protocol.on_disconnect.addErrback(lambda _: None)
f = failure.Failure(RuntimeError("The thing didn't do the stuff."))
self.protocol.connectionLost(f)
self.assertEqual(it_was_called.count, 2)
class ProtocolTests(unittest.TestCase):
def setUp(self):
self.protocol = TorControlProtocol()
self.protocol.connectionMade = lambda: None
self.transport = proto_helpers.StringTransport()
self.protocol.makeConnection(self.transport)
def tearDown(self):
self.protocol = None
def send(self, line):
assert type(line) == bytes
self.protocol.dataReceived(line.strip() + b"\r\n")
def test_statemachine_broadcast_no_code(self):
try:
self.protocol._broadcast_response("foo")
self.fail()
except RuntimeError as e:
self.assertTrue('No code set yet' in str(e))
def test_statemachine_broadcast_unknown_code(self):
try:
self.protocol.code = 999
self.protocol._broadcast_response("foo")
self.fail()
except RuntimeError as e:
self.assertTrue('Unknown code' in str(e))
def test_statemachine_is_finish(self):
self.assertTrue(not self.protocol._is_finish_line(''))
self.assertTrue(self.protocol._is_finish_line('.'))
self.assertTrue(self.protocol._is_finish_line('300 '))
self.assertTrue(not self.protocol._is_finish_line('250-'))
def test_statemachine_singleline(self):
self.assertTrue(not self.protocol._is_single_line_response('foo'))
def test_statemachine_continuation(self):
try:
self.protocol.code = 250
self.protocol._is_continuation_line("123 ")
self.fail()
except RuntimeError as e:
self.assertTrue('Unexpected code' in str(e))
def test_statemachine_multiline(self):
try:
self.protocol.code = 250
self.protocol._is_multi_line("123 ")
self.fail()
except RuntimeError as e:
self.assertTrue('Unexpected code' in str(e))
def test_response_with_no_request(self):
with self.assertRaises(RuntimeError) as ctx:
self.protocol.code = 200
self.protocol._broadcast_response('200 OK')
self.assertTrue(
"didn't issue a command" in str(ctx.exception)
)
def auth_failed(self, msg):
self.assertEqual(str(msg.value), '551 go away')
self.got_auth_failed = True
def test_authenticate_fail(self):
self.got_auth_failed = False
self.protocol._auth_failed = self.auth_failed
self.protocol.password_function = lambda: '<PASSWORD>'
self.protocol._do_authenticate('''PROTOCOLINFO 1
AUTH METHODS=HASHEDPASSWORD
VERSION Tor="0.2.2.35"
OK''')
self.send(b'551 go away\r\n')
self.assertTrue(self.got_auth_failed)
def test_authenticate_no_auth_line(self):
try:
self.protocol._do_authenticate('''PROTOCOLINFO 1
FOOAUTH METHODS=COOKIE,SAFECOOKIE COOKIEFILE="/dev/null"
VERSION Tor="0.2.2.35"
OK''')
self.assertTrue(False)
except RuntimeError as e:
self.assertTrue('find AUTH line' in str(e))
def test_authenticate_not_enough_cookie_data(self):
with tempfile.NamedTemporaryFile() as cookietmp:
cookietmp.write(b'x' * 35) # too much data
cookietmp.flush()
try:
self.protocol._do_authenticate('''PROTOCOLINFO 1
AUTH METHODS=COOKIE COOKIEFILE="%s"
VERSION Tor="0.2.2.35"
OK''' % cookietmp.name)
self.assertTrue(False)
except RuntimeError as e:
self.assertTrue('cookie to be 32' in str(e))
def test_authenticate_not_enough_safecookie_data(self):
with tempfile.NamedTemporaryFile() as cookietmp:
cookietmp.write(b'x' * 35) # too much data
cookietmp.flush()
try:
self.protocol._do_authenticate('''PROTOCOLINFO 1
AUTH METHODS=SAFECOOKIE COOKIEFILE="%s"
VERSION Tor="0.2.2.35"
OK''' % cookietmp.name)
self.assertTrue(False)
except RuntimeError as e:
self.assertTrue('cookie to be 32' in str(e))
def test_authenticate_safecookie(self):
with tempfile.NamedTemporaryFile() as cookietmp:
cookiedata = bytes(bytearray([0] * 32))
cookietmp.write(cookiedata)
cookietmp.flush()
self.protocol._do_authenticate('''PROTOCOLINFO 1
AUTH METHODS=SAFECOOKIE COOKIEFILE="{}"
VERSION Tor="0.2.2.35"
OK'''.format(cookietmp.name))
self.assertTrue(
b'AUTHCHALLENGE SAFECOOKIE ' in self.transport.value()
)
x = self.transport.value().split()[-1]
client_nonce = a2b_hex(x)
self.transport.clear()
server_nonce = bytes(bytearray([0] * 32))
server_hash = hmac_sha256(
b"Tor safe cookie authentication server-to-controller hash",
cookiedata + client_nonce + server_nonce,
)
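# Per the Tor control-spec, ServerHash = HMAC-SHA256(
#   "Tor safe cookie authentication server-to-controller hash",
#   CookieString | ClientNonce | ServerNonce);
# the test computes this server-side hash itself and hands it to the protocol via the
# simulated 250 AUTHCHALLENGE reply below.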
self.send(
b'250 AUTHCHALLENGE SERVERHASH=' +
base64.b16encode(server_hash) + b' SERVERNONCE=' +
base64.b16encode(server_nonce) + b'\r\n'
)
self.assertTrue(b'AUTHENTICATE ' in self.transport.value())
def test_authenticate_cookie_without_reading(self):
server_nonce = bytes(bytearray([0] * 32))
server_hash = bytes(bytearray([0] * 32))
try:
self.protocol._safecookie_authchallenge(
'250 AUTHCHALLENGE SERVERHASH=%s SERVERNONCE=%s' %
(base64.b16encode(server_hash), base64.b16encode(server_nonce))
)
self.assertTrue(False)
except RuntimeError as e:
self.assertTrue('not read' in str(e))
def test_authenticate_unexisting_cookie_file(self):
unexisting_file = __file__ + "-unexisting"
try:
self.protocol._do_authenticate('''PROTOCOLINFO 1
AUTH METHODS=COOKIE COOKIEFILE="%s"
VERSION Tor="0.2.2.35"
OK''' % unexisting_file)
self.assertTrue(False)
except RuntimeError:
pass
def test_authenticate_unexisting_safecookie_file(self):
unexisting_file = __file__ + "-unexisting"
try:
self.protocol._do_authenticate('''PROTOCOLINFO 1
AUTH METHODS=SAFECOOKIE COOKIEFILE="{}"
VERSION Tor="0.2.2.35"
OK'''.format(unexisting_file))
self.assertTrue(False)
except RuntimeError:
pass
def test_authenticate_dont_send_cookiefile(self):
try:
self.protocol._do_authenticate('''PROTOCOLINFO 1
AUTH METHODS=SAFECOOKIE
VERSION Tor="0.2.2.35"
OK''')
self.assertTrue(False)
except RuntimeError:
pass
def test_authenticate_password_when_cookie_unavailable(self):
unexisting_file = __file__ + "-unexisting"
self.protocol.password_function = lambda: '<PASSWORD>'
self.protocol._do_authenticate('''PROTOCOLINFO 1
AUTH METHODS=COOKIE,HASHEDPASSWORD COOKIEFILE="{}"
VERSION Tor="0.2.2.35"
OK'''.format(unexisting_file))
self.assertEqual(
self.transport.value(),
b'AUTHENTICATE ' + b2a_hex(b'foo') + b'\r\n',
)
def test_authenticate_password_when_safecookie_unavailable(self):
unexisting_file = __file__ + "-unexisting"
self.protocol.password_function = lambda: '<PASSWORD>'
self.protocol._do_authenticate('''PROTOCOLINFO 1
AUTH METHODS=SAFECOOKIE,HASHEDPASSWORD COOKIEFILE="{}"
VERSION Tor="0.2.2.35"
OK'''.format(unexisting_file))
self.assertEqual(
self.transport.value(),
b'AUTHENTICATE ' + b2a_hex(b'foo') + b'\r\n',
)
def test_authenticate_safecookie_wrong_hash(self):
cookiedata = bytes(bytearray([0] * 32))
server_nonce = bytes(bytearray([0] * 32))
server_hash = bytes(bytearray([0] * 32))
# pretend we already did PROTOCOLINFO and read the cookie
# file
self.protocol._cookie_data = cookiedata
self.protocol.client_nonce = server_nonce # all 0's anyway
try:
self.protocol._safecookie_authchallenge(
'250 AUTHCHALLENGE SERVERHASH={} SERVERNONCE={}'.format(
b2a_hex(server_hash).decode('ascii'),
b2a_hex(server_nonce).decode('ascii'),
)
)
self.assertTrue(False)
except RuntimeError as e:
self.assertTrue('hash not expected' in str(e))
def confirm_version_events(self, arg):
self.assertEqual(self.protocol.version, 'foo')
events = 'GUARD STREAM CIRC NS NEWCONSENSUS ORCONN NEWDESC ADDRMAP STATUS_GENERAL'.split()
self.assertEqual(len(self.protocol.valid_events), len(events))
self.assertTrue(all(x in self.protocol.valid_events for x in events))
def test_bootstrap_callback(self):
d = self.protocol.post_bootstrap
d.addCallback(CallbackChecker(self.protocol))
d.addCallback(self.confirm_version_events)
events = b'GUARD STREAM CIRC NS NEWCONSENSUS ORCONN NEWDESC ADDRMAP STATUS_GENERAL'
self.protocol._bootstrap()
# answer all the requests generated by bootstrapping etc.
self.send(b"250-signal/names=")
self.send(b"250 OK")
self.send(b"250-version=foo")
self.send(b"250 OK")
self.send(b"250-events/names=" + events)
self.send(b"250 OK")
self.send(b"250 OK") # for USEFEATURE
return d
def test_bootstrap_tor_does_not_support_signal_names(self):
self.protocol._bootstrap()
self.send(b'552 Unrecognized key "signal/names"')
valid_signals = ["RELOAD", "DUMP", "DEBUG", "NEWNYM", "CLEARDNSCACHE"]
self.assertEqual(self.protocol.valid_signals, valid_signals)
def test_async(self):
"""
test the example from control-spec.txt to see that we
handle interleaved async notifications properly.
"""
self.protocol._set_valid_events('CIRC')
self.protocol.add_event_listener('CIRC', lambda _: None)
self.send(b"250 OK")
d = self.protocol.get_conf("SOCKSPORT ORPORT")
self.send(b"650 CIRC 1000 EXTENDED moria1,moria2")
self.send(b"250-SOCKSPORT=9050")
self.send(b"250 ORPORT=0")
return d
def test_async_multiline(self):
# same as above, but I think the 650's can be multiline,
# too. Like:
# 650-CIRC 1000 EXTENDED moria1,moria2 0xBEEF
# 650-EXTRAMAGIC=99
# 650 ANONYMITY=high
self.protocol._set_valid_events('CIRC')
self.protocol.add_event_listener(
'CIRC',
CallbackChecker(
"1000 EXTENDED moria1,moria2\nEXTRAMAGIC=99\nANONYMITY=high"
)
)
self.send(b"250 OK")
d = self.protocol.get_conf("SOCKSPORT ORPORT")
d.addCallback(CallbackChecker({"ORPORT": "0", "SOCKSPORT": "9050"}))
self.send(b"650-CIRC 1000 EXTENDED moria1,moria2")
self.send(b"650-EXTRAMAGIC=99")
self.send(b"650 ANONYMITY=high")
self.send(b"250-SOCKSPORT=9050")
self.send(b"250 ORPORT=0")
return d
def test_multiline_plus(self):
"""
"""
d = self.protocol.get_info("FOO")
d.addCallback(CallbackChecker({"FOO": "\na\nb\nc"}))
self.send(b"250+FOO=")
self.send(b"a")
self.send(b"b")
self.send(b"c")
self.send(b".")
self.send(b"250 OK")
return d
def test_multiline_plus_embedded_equals(self):
"""
"""
d = self.protocol.get_info("FOO")
d.addCallback(CallbackChecker({"FOO": "\na="}))
self.send(b"250+FOO=")
self.send(b"a=")
self.send(b".")
self.send(b"250 OK")
return d
def incremental_check(self, expected, actual):
if '=' in actual:
return
self.assertEqual(expected, actual)
def test_getinfo_incremental(self):
d = self.protocol.get_info_incremental(
"FOO",
functools.partial(self.incremental_check, "bar")
)
self.send(b"250+FOO=")
self.send(b"bar")
self.send(b"bar")
self.send(b".")
self.send(b"250 OK")
return d
def test_getinfo_incremental_continuation(self):
d = self.protocol.get_info_incremental(
"FOO",
functools.partial(self.incremental_check, "bar")
)
self.send(b"250-FOO=")
self.send(b"250-bar")
self.send(b"250-bar")
self.send(b"250 OK")
return d
def test_getinfo_one_line(self):
d = self.protocol.get_info(
"foo",
)
self.send(b'250 foo=bar')
d.addCallback(lambda _: functools.partial(self.incremental_check, "bar"))
return d
def test_getconf(self):
d = self.protocol.get_conf("SOCKSPORT ORPORT")
d.addCallback(CallbackChecker({'SocksPort': '9050', 'ORPort': '0'}))
self.send(b"250-SocksPort=9050")
self.send(b"250 ORPort=0")
return d
def test_getconf_raw(self):
d = self.protocol.get_conf_raw("SOCKSPORT ORPORT")
d.addCallback(CallbackChecker('SocksPort=9050\nORPort=0'))
self.send(b"250-SocksPort=9050")
self.send(b"250 ORPort=0")
return d
def test_getconf_single(self):
d = self.protocol.get_conf_single("SOCKSPORT")
d.addCallback(CallbackChecker('9050'))
self.send(b"250 SocksPort=9050")
return d
def response_ok(self, v):
self.assertEqual(v, '')
def test_setconf(self):
d = self.protocol.set_conf("foo", "bar").addCallback(
functools.partial(self.response_ok)
)
self.send(b"250 OK")
self._wait(d)
self.assertEqual(self.transport.value(), b"SETCONF foo=bar\r\n")
def test_setconf_with_space(self):
d = self.protocol.set_conf("foo", "a value with a space")
d.addCallback(functools.partial(self.response_ok))
self.send(b"250 OK")
self._wait(d)
self.assertEqual(
self.transport.value(),
b'SETCONF foo="a value with a space"\r\n'
)
def test_setconf_multi(self):
d = self.protocol.set_conf("foo", "bar", "baz", 1)
self.send(b"250 OK")
self._wait(d)
self.assertEqual(
self.transport.value(),
b"SETCONF foo=bar baz=1\r\n",
)
def test_quit(self):
d = self.protocol.quit()
self.send(b"250 OK")
self._wait(d)
self.assertEqual(
self.transport.value(),
b"QUIT\r\n",
)
def test_dot(self):
# just checking we don't explode
self.protocol.graphviz_data()
def test_debug(self):
self.protocol.start_debug()
self.assertTrue(exists('txtorcon-debug.log'))
def error(self, failure):
print("ERROR", failure)
self.assertTrue(False)
def test_twocommands(self):
"Two commands on the wire before first response."
d1 = self.protocol.get_conf("FOO")
ht = {"a": "one", "b": "two"}
d1.addCallback(CallbackChecker(ht)).addErrback(log.err)
d2 = self.protocol.get_info_raw("BAR")
d2.addCallback(CallbackChecker("bar")).addErrback(log.err)
self.send(b"250-a=one")
self.send(b"250-b=two")
self.send(b"250 OK")
self.send(b"250 bar")
return d2
def test_signal_error(self):
try:
self.protocol.signal('FOO')
self.fail()
except Exception as e:
self.assertTrue('Invalid signal' in str(e))
def test_signal(self):
self.protocol.valid_signals = ['NEWNYM']
self.protocol.signal('NEWNYM')
self.assertEqual(
self.transport.value(),
b'SIGNAL NEWNYM\r\n',
)
def test_650_after_authenticate(self):
self.protocol._set_valid_events('CONF_CHANGED')
self.protocol.add_event_listener(
'CONF_CHANGED',
CallbackChecker("Foo=bar")
)
self.send(b"250 OK")
self.send(b"650-CONF_CHANGED")
self.send(b"650-Foo=bar")
def test_notify_after_getinfo(self):
self.protocol._set_valid_events('CIRC')
self.protocol.add_event_listener(
'CIRC',
CallbackChecker("1000 EXTENDED moria1,moria2")
)
self.send(b"250 OK")
d = self.protocol.get_info("a")
d.addCallback(CallbackChecker({'a': 'one'})).addErrback(self.fail)
self.send(b"250-a=one")
self.send(b"250 OK")
self.send(b"650 CIRC 1000 EXTENDED moria1,moria2")
return d
def test_notify_error(self):
self.protocol._set_valid_events('CIRC')
self.send(b"650 CIRC 1000 EXTENDED moria1,moria2")
def test_getinfo(self):
d = self.protocol.get_info("version")
d.addCallback(CallbackChecker({'version': '0.2.2.34'}))
d.addErrback(self.fail)
self.send(b"250-version=0.2.2.34")
self.send(b"250 OK")
self.assertEqual(
self.transport.value(),
b"GETINFO version\r\n",
)
return d
def test_getinfo_single(self):
d = self.protocol.get_info_single("version")
d.addCallback(CallbackChecker('0.2.2.34'))
d.addErrback(self.fail)
self.send(b"250-version=0.2.2.34")
self.send(b"250 OK")
self.assertEqual(
self.transport.value(),
b"GETINFO version\r\n",
)
return d
def test_getinfo_for_descriptor(self):
descriptor_info = b"""250+desc/name/moria1=
router moria1 172.16.31.10 9101 0 9131
platform Tor 0.2.5.0-alpha-dev on Linux
protocols Link 1 2 Circuit 1
published 2013-07-05 23:48:52
fingerprint 9695 DFC3 5FFE B861 329B 9F1A B04C 4639 7020 CE31
uptime 1818933
bandwidth 512000 62914560 1307929
extra-info-digest 17D0142F6EBCDF60160EB1794FA6C9717D581F8C
caches-extra-info
onion-key
-----BEGIN RSA PUBLIC KEY-----
<KEY>
-----END RSA PUBLIC KEY-----
signing-key
-----BEGIN RSA PUBLIC KEY-----
<KEY>
-----END RSA PUBLIC KEY-----
hidden-service-dir
contact 1024D/28988BF5 arma mit edu
ntor-onion-key 9ZVjNkf/iLEnD685SpC5kcDytQ7u5ViiI9JOftdbE0k=
reject *:*
router-signature
-----BEGIN SIGNATURE-----
Y8Tj2e7mPbFJbguulkPEBVYzyO57p4btpWEXvRMD6vxIh/eyn25pehg5dUVBtZlL
iO3EUE0AEYah2W9gdz8t+i3Dtr0zgqLS841GC/TyDKCm+MKmN8d098qnwK0NGF9q
01NZPuSqXM1b6hnl2espFzL7XL8XEGRU+aeg+f/ukw4=
-----END SIGNATURE-----
.
250 OK"""
d = self.protocol.get_info("desc/name/moria1")
d.addCallback(CallbackChecker({'desc/name/moria1': '\n' + '\n'.join(descriptor_info.decode('ascii').split('\n')[1:-2])}))
d.addErrback(self.fail)
for line in descriptor_info.split(b'\n'):
self.send(line)
return d
def test_getinfo_multiline(self):
descriptor_info = b"""250+desc/name/moria1=
router moria1 172.16.31.10 9101 0 9131
platform Tor 0.2.5.0-alpha-dev on Linux
.
250 OK"""
d = self.protocol.get_info("desc/name/moria1")
gold = "\nrouter moria1 172.16.31.10 9101 0 9131\nplatform Tor 0.2.5.0-alpha-dev on Linux"
d.addCallback(CallbackChecker({'desc/name/moria1': gold}))
d.addErrback(self.fail)
for line in descriptor_info.split(b'\n'):
self.send(line)
return d
def test_addevent(self):
self.protocol._set_valid_events('FOO BAR')
self.protocol.add_event_listener('FOO', lambda _: None)
# is it dangerous/ill-advised to depend on internal state of
# class under test?
d = self.protocol.defer
self.send(b"250 OK")
self._wait(d)
self.assertEqual(
self.transport.value().split(b'\r\n')[-2],
b"SETEVENTS FOO"
)
self.transport.clear()
self.protocol.add_event_listener('BAR', lambda _: None)
d = self.protocol.defer
self.send(b"250 OK")
self.assertTrue(
self.transport.value() == b"SETEVENTS FOO BAR\r\n" or
self.transport.value() == b"SETEVENTS BAR FOO\r\n"
)
self._wait(d)
try:
self.protocol.add_event_listener(
'SOMETHING_INVALID', lambda _: None
)
self.assertTrue(False)
except Exception:
pass
def test_eventlistener(self):
self.protocol._set_valid_events('STREAM')
class EventListener(object):
stream_events = 0
def __call__(self, data):
self.stream_events += 1
listener = EventListener()
self.protocol.add_event_listener('STREAM', listener)
d = self.protocol.defer
self.send(b"250 OK")
self._wait(d)
self.send(b"650 STREAM 1234 NEW 4321 1.2.3.4:555 REASON=MISC")
self.send(b"650 STREAM 2345 NEW 4321 2.3.4.5:666 REASON=MISC")
self.assertEqual(listener.stream_events, 2)
def test_eventlistener_error(self):
self.protocol._set_valid_events('STREAM')
class EventListener(object):
stream_events = 0
do_error = False
def __call__(self, data):
self.stream_events += 1
if self.do_error:
raise Exception("the bad thing happened")
# we make sure the first listener has the errors to prove the
# second one still gets called.
listener0 = EventListener()
listener0.do_error = True
listener1 = EventListener()
self.protocol.add_event_listener('STREAM', listener0)
self.protocol.add_event_listener('STREAM', listener1)
d = self.protocol.defer
self.send(b"250 OK")
self._wait(d)
self.send(b"650 STREAM 1234 NEW 4321 1.2.3.4:555 REASON=MISC")
self.send(b"650 STREAM 2345 NEW 4321 2.3.4.5:666 REASON=MISC")
self.assertEqual(listener0.stream_events, 2)
self.assertEqual(listener1.stream_events, 2)
# should have logged the two errors
logged = self.flushLoggedErrors()
self.assertEqual(2, len(logged))
self.assertTrue("the bad thing happened" in str(logged[0]))
self.assertTrue("the bad thing happened" in str(logged[1]))
def test_remove_eventlistener(self):
self.protocol._set_valid_events('STREAM')
class EventListener(object):
stream_events = 0
def __call__(self, data):
self.stream_events += 1
listener = EventListener()
self.protocol.add_event_listener('STREAM', listener)
self.assertEqual(self.transport.value(), b'SETEVENTS STREAM\r\n')
self.protocol.lineReceived(b"250 OK")
self.transport.clear()
self.protocol.remove_event_listener('STREAM', listener)
self.assertEqual(self.transport.value(), b'SETEVENTS \r\n')
def test_remove_eventlistener_multiple(self):
self.protocol._set_valid_events('STREAM')
class EventListener(object):
stream_events = 0
def __call__(self, data):
self.stream_events += 1
listener0 = EventListener()
listener1 = EventListener()
self.protocol.add_event_listener('STREAM', listener0)
self.assertEqual(self.transport.value(), b'SETEVENTS STREAM\r\n')
self.protocol.lineReceived(b"250 OK")
self.transport.clear()
# add another one, shouldn't issue a tor command
self.protocol.add_event_listener('STREAM', listener1)
self.assertEqual(self.transport.value(), b'')
# remove one, should still not issue a tor command
self.protocol.remove_event_listener('STREAM', listener0)
self.assertEqual(self.transport.value(), b'')
# remove the other one, NOW should issue a command
self.protocol.remove_event_listener('STREAM', listener1)
self.assertEqual(self.transport.value(), b'SETEVENTS \r\n')
# try removing invalid event
try:
self.protocol.remove_event_listener('FOO', listener0)
self.fail()
except Exception as e:
self.assertTrue('FOO' in str(e))
def test_continuation_line(self):
d = self.protocol.get_info_raw("key")
def check_continuation(v):
self.assertEqual(v, "key=\nvalue0\nvalue1")
d.addCallback(check_continuation)
self.send(b"250+key=")
self.send(b"value0")
self.send(b"value1")
self.send(b".")
self.send(b"250 OK")
return d
def test_newdesc(self):
"""
FIXME: this test is now maybe a little silly, it's just testing
multiline GETINFO... (Real test is in
TorStateTests.test_newdesc_parse)
"""
self.protocol.get_info_raw('ns/id/624926802351575FF7E4E3D60EFA3BFB56E67E8A')
d = self.protocol.defer
d.addCallback(CallbackChecker("""ns/id/624926802351575FF7E4E3D60EFA3BFB56E67E8A=
r fake YkkmgCNRV1/35OPWDvo7+1bmfoo tanLV/4ZfzpYQW0xtGFqAa46foo 2011-12-12 16:29:16 192.168.127.12 443 80
s Exit Fast Guard HSDir Named Running Stable V2Dir Valid
w Bandwidth=518000
p accept 43,53,79-81,110,143,194,220,443,953,989-990,993,995,1194,1293,1723,1863,2082-2083,2086-2087,2095-2096,3128,4321,5050,5190,5222-5223,6679,6697,7771,8000,8008,8080-8081,8090,8118,8123,8181,8300,8443,8888"""))
self.send(b"250+ns/id/624926802351575FF7E4E3D60EFA3BFB56E67E8A=")
self.send(b"r fake YkkmgCNRV1/35OPWDvo7+1bmfoo tanLV/4ZfzpYQW0xtGFqAa46foo 2011-12-12 16:29:16 192.168.127.12 443 80")
self.send(b"s Exit Fast Guard HSDir Named Running Stable V2Dir Valid")
self.send(b"w Bandwidth=518000")
self.send(b"p accept 43,53,79-81,110,143,194,220,443,953,989-990,993,995,1194,1293,1723,1863,2082-2083,2086-2087,2095-2096,3128,4321,5050,5190,5222-5223,6679,6697,7771,8000,8008,8080-8081,8090,8118,8123,8181,8300,8443,8888")
self.send(b".")
self.send(b"250 OK")
return d
def test_plus_line_no_command(self):
self.protocol.lineReceived(b"650+NS\r\n")
self.protocol.lineReceived(b"r Gabor gFpAHsFOHGATy12ZUswRf0ZrqAU GG6GDp40cQfR3ODvkBT0r+Q09kw 2012-05-12 16:54:56 172.16.58.3 443 80\r\n")
def test_minus_line_no_command(self):
"""
        we haven't seen the 600-series replies use "-" in the wild, but we don't
        see why it isn't possible
"""
self.protocol._set_valid_events('NS')
self.protocol.add_event_listener('NS', lambda _: None)
self.protocol.lineReceived(b"650-NS\r\n")
self.protocol.lineReceived(b"650 OK\r\n")
class ParseTests(unittest.TestCase):
def setUp(self):
self.controller = TorState(TorControlProtocol())
self.controller.connectionMade = lambda _: None
def test_keywords(self):
x = parse_keywords('events/names=CIRC STREAM ORCONN BW DEBUG INFO NOTICE WARN ERR NEWDESC ADDRMAP AUTHDIR_NEWDESCS DESCCHANGED NS STATUS_GENERAL STATUS_CLIENT STATUS_SERVER GUARD STREAM_BW CLIENTS_SEEN NEWCONSENSUS BUILDTIMEOUT_SET')
self.assertTrue('events/names' in x)
self.assertEqual(x['events/names'], 'CIRC STREAM ORCONN BW DEBUG INFO NOTICE WARN ERR NEWDESC ADDRMAP AUTHDIR_NEWDESCS DESCCHANGED NS STATUS_GENERAL STATUS_CLIENT STATUS_SERVER GUARD STREAM_BW CLIENTS_SEEN NEWCONSENSUS BUILDTIMEOUT_SET')
self.assertEqual(len(x.keys()), 1)
    def test_keywords_multi_equals(self):
x = parse_keywords('foo=something subvalue="foo"')
self.assertEqual(len(x), 1)
self.assertTrue('foo' in x)
self.assertEqual(x['foo'], 'something subvalue="foo"')
def test_default_keywords(self):
x = parse_keywords('foo')
self.assertEqual(len(x), 1)
self.assertTrue('foo' in x)
self.assertEqual(x['foo'], DEFAULT_VALUE)
def test_multientry_keywords_2(self):
x = parse_keywords('foo=bar\nfoo=zarimba')
self.assertEqual(len(x), 1)
self.assertTrue(isinstance(x['foo'], list))
self.assertEqual(len(x['foo']), 2)
self.assertEqual(x['foo'][0], 'bar')
self.assertEqual(x['foo'][1], 'zarimba')
def test_multientry_keywords_3(self):
x = parse_keywords('foo=bar\nfoo=baz\nfoo=zarimba')
self.assertEqual(len(x), 1)
self.assertTrue(isinstance(x['foo'], list))
self.assertEqual(len(x['foo']), 3)
self.assertEqual(x['foo'][0], 'bar')
self.assertEqual(x['foo'][1], 'baz')
self.assertEqual(x['foo'][2], 'zarimba')
def test_multientry_keywords_4(self):
x = parse_keywords('foo=bar\nfoo=baz\nfoo=zarimba\nfoo=foo')
self.assertEqual(len(x), 1)
self.assertTrue(isinstance(x['foo'], list))
self.assertEqual(len(x['foo']), 4)
self.assertEqual(x['foo'][0], 'bar')
self.assertEqual(x['foo'][1], 'baz')
self.assertEqual(x['foo'][2], 'zarimba')
self.assertEqual(x['foo'][3], 'foo')
def test_multiline_keywords_with_spaces(self):
x = parse_keywords('''ns/name/foo=
r foo aaaam7E7h1vY5Prk8v9/nSRCydY BBBBOfum4CtAYuOgf/D33Qq5+rk 2013-10-27 06:22:18 1.2.3.4 9001 9030
s Fast Guard HSDir Running Stable V2Dir Valid
w Bandwidth=1234
ns/name/bar=
r bar aaaaHgNYtTVPw5hHTO28J4je5i8 BBBBBUaJaBFSU/HDrTxnSh+D3+fY 2013-10-27 07:48:56 1.2.4.5 9001 9030
s Exit Fast Guard HSDir Named Running Stable V2Dir Valid
w Bandwidth=1234
OK
''')
self.assertEqual(2, len(x))
keys = sorted(x.keys())
self.assertEqual(keys, ['ns/name/bar', 'ns/name/foo'])
def test_multiline_keywords(self):
x = parse_keywords('''Foo=bar\nBar''')
self.assertEqual(x, {'Foo': 'bar\nBar'})
x = parse_keywords('''Foo=bar\nBar''', multiline_values=False)
self.assertEqual(x, {'Foo': 'bar',
'Bar': DEFAULT_VALUE})
def test_unquoted_keywords(self):
x = parse_keywords('''Tor="0.1.2.3.4-rc44"''')
self.assertEqual(x, {'Tor': '0.1.2.3.4-rc44'})
def test_unquoted_keywords_singlequote(self):
x = parse_keywords("Tor='0.1.2.3.4-rc44'")
self.assertEqual(x, {'Tor': '0.1.2.3.4-rc44'})
def test_unquoted_keywords_empty(self):
x = parse_keywords('foo=')
self.assertEqual(x, {'foo': ''})
def test_network_status(self):
self.controller._update_network_status("""ns/all=
r right2privassy3 ADQ6gCT3DiFHKPDFr3rODBUI8HM JehnjB8l4Js47dyjLCEmE8VJqao 2011-12-02 03:36:40 172.16.58.3 9023 0
s Exit Fast Named Running Stable Valid
w Bandwidth=53
p accept 80,1194,1220,1293,1500,1533,1677,1723,1863,2082-2083,2086-2087,2095-2096,2102-2104,3128,3389,3690,4321,4643,5050,5190,5222-5223,5228,5900,6660-6669,6679,6697,8000,8008,8074,8080,8087-8088,8443,8888,9418,9999-10000,19294,19638
r Unnamed AHe2V2pmj4Yfn0H9+Np3lci7htU T/g7ZLzG/ooqCn+gdLd9Jjh+AEI 2011-12-02 15:52:09 192.168.3.11 443 9030
s Exit Fast Running V2Dir Valid
w Bandwidth=33
p reject 25,119,135-139,445,563,1214,4661-4666,6346-6429,6699,6881-6999""")
# the routers list is always keyed with both name and hash
self.assertEqual(len(self.controller.routers_by_name), 2)
self.assertEqual(len(self.controller.routers_by_hash), 2)
self.assertTrue('right2privassy3' in self.controller.routers)
self.assertTrue('Unnamed' in self.controller.routers)
self.controller.routers.clear()
self.controller.routers_by_name.clear()
self.controller.routers_by_hash.clear()
def test_circuit_status(self):
self.controller._update_network_status("""ns/all=
r wildnl f+Ty/+B6lgYr0Ntbf67O/L2M8ZI c1iK/kPPXKGZZvwXRWbvL9eCfSc 2011-12-02 19:07:05 192.168.127.12 9001 0
s Exit Fast Named Running Stable Valid
w Bandwidth=1900
p reject 25,119,135-139,445,563,1214,4661-4666,6346-6429,6699,6881-6999
r l0l wYXUpLBpzVWfzVSMgGO0dThdd38 KIJC+W1SHeaFOj/BVsEAgxbtQNM 2011-12-02 13:43:39 172.16.58.3 443 80
s Fast Named Running Stable V2Dir Valid
w Bandwidth=22800
p reject 1-65535
r Tecumseh /xAD0tFLS50Dkz+O37xGyVLoKlk yJHbad7MFl1VW2/23RxrPKBTOIE 2011-12-02 09:44:10 192.168.127.12 22 9030
s Fast Guard HSDir Named Running Stable V2Dir Valid
w Bandwidth=18700
p reject 1-65535""")
self.controller._circuit_status("""circuit-status=
4472 BUILT $FF1003D2D14B4B9D03933F8EDFBC46C952E82A59=Tecumseh,$C185D4A4B069CD559FCD548C8063B475385D777F=l0l,$7FE4F2FFE07A96062BD0DB5B7FAECEFCBD8CF192=wildnl PURPOSE=GENERAL""")
self.assertEqual(len(self.controller.circuits), 1)
self.assertTrue(4472 in self.controller.circuits)
self.controller.routers.clear()
self.controller.routers_by_name.clear()
self.controller.routers_by_hash.clear()
self.controller.circuits.clear()
|
193154
|
from typing import Dict
from botocore.paginate import Paginator
class ListEndpointsByPlatformApplication(Paginator):
def paginate(self, PlatformApplicationArn: str, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`SNS.Client.list_endpoints_by_platform_application`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/ListEndpointsByPlatformApplication>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
PlatformApplicationArn='string',
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'Endpoints': [
{
'EndpointArn': 'string',
'Attributes': {
'string': 'string'
}
},
],
}
**Response Structure**
- *(dict) --*
Response for ListEndpointsByPlatformApplication action.
- **Endpoints** *(list) --*
Endpoints returned for ListEndpointsByPlatformApplication action.
- *(dict) --*
Endpoint for mobile app and device.
- **EndpointArn** *(string) --*
EndpointArn for mobile app and device.
- **Attributes** *(dict) --*
Attributes for endpoint.
- *(string) --*
- *(string) --*
:type PlatformApplicationArn: string
:param PlatformApplicationArn: **[REQUIRED]**
PlatformApplicationArn for ListEndpointsByPlatformApplicationInput action.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class ListPhoneNumbersOptedOut(Paginator):
def paginate(self, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`SNS.Client.list_phone_numbers_opted_out`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/ListPhoneNumbersOptedOut>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'phoneNumbers': [
'string',
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
The response from the ``ListPhoneNumbersOptedOut`` action.
- **phoneNumbers** *(list) --*
A list of phone numbers that are opted out of receiving SMS messages. The list is paginated, and each page can contain up to 100 phone numbers.
- *(string) --*
- **NextToken** *(string) --*
A token to resume pagination.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class ListPlatformApplications(Paginator):
def paginate(self, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`SNS.Client.list_platform_applications`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/ListPlatformApplications>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'PlatformApplications': [
{
'PlatformApplicationArn': 'string',
'Attributes': {
'string': 'string'
}
},
],
}
**Response Structure**
- *(dict) --*
Response for ListPlatformApplications action.
- **PlatformApplications** *(list) --*
Platform applications returned when calling ListPlatformApplications action.
- *(dict) --*
Platform application object.
- **PlatformApplicationArn** *(string) --*
PlatformApplicationArn for platform application object.
- **Attributes** *(dict) --*
Attributes for platform application object.
- *(string) --*
- *(string) --*
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class ListSubscriptions(Paginator):
def paginate(self, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`SNS.Client.list_subscriptions`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/ListSubscriptions>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'Subscriptions': [
{
'SubscriptionArn': 'string',
'Owner': 'string',
'Protocol': 'string',
'Endpoint': 'string',
'TopicArn': 'string'
},
],
}
**Response Structure**
- *(dict) --*
Response for ListSubscriptions action
- **Subscriptions** *(list) --*
A list of subscriptions.
- *(dict) --*
A wrapper type for the attributes of an Amazon SNS subscription.
- **SubscriptionArn** *(string) --*
The subscription's ARN.
- **Owner** *(string) --*
The subscription's owner.
- **Protocol** *(string) --*
The subscription's protocol.
- **Endpoint** *(string) --*
The subscription's endpoint (format depends on the protocol).
- **TopicArn** *(string) --*
The ARN of the subscription's topic.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class ListSubscriptionsByTopic(Paginator):
def paginate(self, TopicArn: str, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`SNS.Client.list_subscriptions_by_topic`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/ListSubscriptionsByTopic>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
TopicArn='string',
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'Subscriptions': [
{
'SubscriptionArn': 'string',
'Owner': 'string',
'Protocol': 'string',
'Endpoint': 'string',
'TopicArn': 'string'
},
],
}
**Response Structure**
- *(dict) --*
Response for ListSubscriptionsByTopic action.
- **Subscriptions** *(list) --*
A list of subscriptions.
- *(dict) --*
A wrapper type for the attributes of an Amazon SNS subscription.
- **SubscriptionArn** *(string) --*
The subscription's ARN.
- **Owner** *(string) --*
The subscription's owner.
- **Protocol** *(string) --*
The subscription's protocol.
- **Endpoint** *(string) --*
The subscription's endpoint (format depends on the protocol).
- **TopicArn** *(string) --*
The ARN of the subscription's topic.
:type TopicArn: string
:param TopicArn: **[REQUIRED]**
The ARN of the topic for which you wish to find subscriptions.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class ListTopics(Paginator):
def paginate(self, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`SNS.Client.list_topics`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/ListTopics>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'Topics': [
{
'TopicArn': 'string'
},
],
}
**Response Structure**
- *(dict) --*
Response for ListTopics action.
- **Topics** *(list) --*
A list of topic ARNs.
- *(dict) --*
A wrapper type for the topic's Amazon Resource Name (ARN). To retrieve a topic's attributes, use ``GetTopicAttributes`` .
- **TopicArn** *(string) --*
The topic's ARN.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
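# --- Illustrative usage sketch (not part of the generated stubs) ---
# Shows how these paginators are typically obtained and driven through a boto3 SNS
# client. The region, PageSize, and topic ARN below are arbitrary example values and
# assume AWS credentials are already configured.
def _example_sns_pagination():
    import boto3
    sns = boto3.client("sns", region_name="us-east-1")
    # ListTopics paginator: iterate over every page of topic ARNs.
    for page in sns.get_paginator("list_topics").paginate(PaginationConfig={"PageSize": 100}):
        for topic in page["Topics"]:
            print(topic["TopicArn"])
    # ListSubscriptionsByTopic paginator: requires the TopicArn argument.
    paginator = sns.get_paginator("list_subscriptions_by_topic")
    for page in paginator.paginate(TopicArn="arn:aws:sns:us-east-1:123456789012:example-topic"):
        for sub in page["Subscriptions"]:
            print(sub["Endpoint"], sub["Protocol"])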
|
193160
|
from os import listdir, stat
from os.path import isfile, join
ignored=['websetup.html', 'jquery.js', 'selfarchive.py', 'boot.py', 'hal.py', 'run.sh']
for file in listdir('./'):
if file!='version.py' and file[0] != '.' and file not in ignored:
if not isfile(file):
continue
else:
            print('file:{}:{}'.format(stat(file).st_size, file))
# Do version as last
for file in listdir('./'):
if file=='version.py':
if not isfile(file):
continue
else:
            print('file:{}:{}'.format(stat(file).st_size, file))
|
193185
|
import logging
from surrortg.game_io import ConfigType
from .aruco_filter import ArucoFilter
from .aruco_source import ArucoDetector
DEFAULT_CAMERA = "/dev/video21"
CUSTOM_KEY = "custom"
NUM_MARKERS_KEY = "Number of aruco markers to find"
MIN_DISTANCE_KEY = "Minimum distance for detection (0 for any distance)"
IN_ORDER_KEY = "Markers must be detected in order"
IN_ORDER_DEF_VAL = False
NUM_LAPS_KEY = "Number of laps"
class ArucoFinder:
@classmethod
async def create(
cls,
io,
source=DEFAULT_CAMERA,
num_markers=5,
min_distance=100,
in_order=False,
num_laps=1,
bot_specific=False,
seat=0,
):
"""Class for creating treasure hunt and racing games with aruco markers
This class is an easy way to incorporate aruco markers into a game. The
class creates its own ArucoDetector instance and has built-in game
logic, so only a few lines of code are required to use the class.
The class sends configuration options to the dashboard Game Engine
settings, where the configuration parameters can be changed. For a
racing type game, set in_order to True.
Short example of how to integrate the class into game logic:
.. code-block:: python
from surrortg.image_recognition.aruco import ArucoFinder
YourGame(Game):
async def on_init(self):
self.finder = await ArucoFinder.create(self.io)
async def on_config(self):
self.finder.on_config(self.configs)
async def on_start(self):
self.finder.on_start()
:param io: GameIO instance, used to register configs and send scores
to the game engine
:type io: GameIO
:param source: Video capture device used for detecting aruco markers.
By default uses a loopback device from the main camera used by the
streamer ("/dev/video21"). (No action necessary when using the
Surrogate image)
:type source: string, optional
:param num_markers: number of aruco IDs to use. IDs will be generated
from 0 to num_markers-1. Defaults to 5.
:type num_markers: int
:param min_distance: Distance threshold for detecting an aruco
marker. This class uses an arbitrary unit of distance between 0 and
500. Defaults to 100.
:type min_distance: float between 0 and 500, optional
:param in_order: Whether markers must be detected in ascending
numerical order. Useful for racing games. Defaults to False.
:type in_order: bool, optional
:param num_laps: Number of laps in one game. Defaults to 1.
:type num_laps: int, optional
:param bot_specific: Whether the configuration options in the web
interface are in effect for all bots, or just this bot. Defaults
to False.
:type bot_specific: bool, optional
:param seat: Seat number of the bot. Defaults to 0.
:type seat: int, optional
"""
self = cls()
self.io = io
self.seat = seat
self.io.register_config(
NUM_MARKERS_KEY,
ConfigType.INTEGER,
num_markers,
bot_specific,
minimum=1,
maximum=50,
)
self.io.register_config(
MIN_DISTANCE_KEY,
ConfigType.INTEGER,
min_distance,
bot_specific,
minimum=0,
maximum=500,
)
self.io.register_config(
NUM_LAPS_KEY,
ConfigType.INTEGER,
num_laps,
bot_specific,
minimum=1,
maximum=100,
)
self.io.register_config(
IN_ORDER_KEY, ConfigType.BOOLEAN, in_order, bot_specific
)
self.aruco_source = await ArucoDetector.create(source)
self.filter = ArucoFilter(
self._score_logic,
self.aruco_source,
self._gen_id_list(num_markers),
min_distance,
)
return self
def on_config(self, configs):
"""Parses the configs object for configs from web interface.
MUST BE CALLED every time at the on_config part of the game loop.
:param configs: self.configs from the Game object which owns this
instance
:type configs: dict
"""
self.filter.stop()
self.num_markers = configs[CUSTOM_KEY][NUM_MARKERS_KEY]
if configs[CUSTOM_KEY][MIN_DISTANCE_KEY] == 0:
self.filter.min_dist = 0
else:
self.filter.min_dist = 0.5 / configs[CUSTOM_KEY][MIN_DISTANCE_KEY]
self.num_laps = configs[CUSTOM_KEY][NUM_LAPS_KEY]
self.in_order = configs[CUSTOM_KEY][IN_ORDER_KEY]
self.filter.ids = self._gen_id_list(self.num_markers)
logging.info(
f"ArucoFinder configured with markers: {self.filter.ids}"
f" and minimum marker size: {self.filter.min_dist}"
)
def on_start(self):
"""Starts the aruco detection and begins counting scores. MUST BE
CALLED every time at the on_start part of the game loop.
"""
self.score = 0
self.cur_lap = 1
self.filter.start()
def stop(self):
"""Call this at the on_exit part of the game loop.
This ensures that the program exits correctly.
"""
self.filter.stop()
def _score_logic(self, marker):
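        # A marker counts towards the score when order does not matter, or when it
        # is the lowest id still missing while in_order is set. Once every id in the
        # current lap has been seen, either finish the game (last lap) or reset the
        # id list and start the next lap. The running score is sent to the game
        # engine after every detection.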
is_final = False
if not self.in_order or (
self.in_order and marker.id == min(self.filter.ids)
):
self.filter.ids.remove(marker.id)
self.score += 1
if len(self.filter.ids) == 0:
if self.cur_lap == self.num_laps:
self.filter.stop()
is_final = True
else:
self.cur_lap += 1
self.filter.ids = self._gen_id_list(self.num_markers)
self.io.send_score(self.score, seat=self.seat, final_score=is_final)
def _gen_id_list(self, num_ids):
        return set(range(num_ids))
|
193201
|
import torch
import logging
from utils.io import KeyphraseDataset
from torch.utils.data import DataLoader
def load_vocab(opt):
# load vocab
logging.info("Loading vocab from disk: %s" % (opt.vocab))
if not opt.custom_vocab_filename_suffix:
        word2idx, idx2word, vocab = torch.load(opt.vocab + '/vocab.pt')
    else:
        word2idx, idx2word, vocab = torch.load(opt.vocab + '/vocab.%s.pt' % opt.vocab_filename_suffix)
# assign vocab to opt
opt.word2idx = word2idx
opt.idx2word = idx2word
opt.vocab = vocab
logging.info('#(vocab)=%d' % len(vocab))
logging.info('#(vocab used)=%d' % opt.vocab_size)
return word2idx, idx2word, vocab
def load_data_and_vocab(opt, load_train=True):
# load vocab
word2idx, idx2word, vocab = load_vocab(opt)
    # construct data loaders
logging.info("Loading train and validate data from '%s'" % opt.data)
if load_train: # load training dataset
if not opt.one2many: # load one2one dataset
if not opt.custom_data_filename_suffix:
                train_one2one = torch.load(opt.data + '/train.one2one.pt')
            else:
                train_one2one = torch.load(opt.data + '/train.one2one.%s.pt' % opt.data_filename_suffix)
train_one2one_dataset = KeyphraseDataset(train_one2one, word2idx=word2idx, idx2word=idx2word, type='one2one', load_train=load_train, remove_src_eos=opt.remove_src_eos, title_guided=opt.title_guided)
train_loader = DataLoader(dataset=train_one2one_dataset,
collate_fn=train_one2one_dataset.collate_fn_one2one,
num_workers=opt.batch_workers, batch_size=opt.batch_size, pin_memory=True,
shuffle=True)
logging.info('#(train data size: #(batch)=%d' % (len(train_loader)))
if not opt.custom_data_filename_suffix:
                valid_one2one = torch.load(opt.data + '/valid.one2one.pt')
            else:
                valid_one2one = torch.load(opt.data + '/valid.one2one.%s.pt' % opt.data_filename_suffix)
valid_one2one_dataset = KeyphraseDataset(valid_one2one, word2idx=word2idx, idx2word=idx2word,
type='one2one', load_train=load_train, remove_src_eos=opt.remove_src_eos, title_guided=opt.title_guided)
valid_loader = DataLoader(dataset=valid_one2one_dataset,
collate_fn=valid_one2one_dataset.collate_fn_one2one,
num_workers=opt.batch_workers, batch_size=opt.batch_size, pin_memory=True,
shuffle=False)
logging.info('#(valid data size: #(batch)=%d' % (len(valid_loader)))
else: # load one2many dataset
if not opt.custom_data_filename_suffix:
                train_one2many = torch.load(opt.data + '/train.one2many.pt')
            else:
                train_one2many = torch.load(opt.data + '/train.one2many.%s.pt' % opt.data_filename_suffix)
train_one2many_dataset = KeyphraseDataset(train_one2many, word2idx=word2idx, idx2word=idx2word, type='one2many', delimiter_type=opt.delimiter_type, load_train=load_train, remove_src_eos=opt.remove_src_eos, title_guided=opt.title_guided)
train_loader = DataLoader(dataset=train_one2many_dataset,
collate_fn=train_one2many_dataset.collate_fn_one2many,
num_workers=opt.batch_workers, batch_size=opt.batch_size, pin_memory=True,
shuffle=True)
logging.info('#(train data size: #(batch)=%d' % (len(train_loader)))
if not opt.custom_data_filename_suffix:
                valid_one2many = torch.load(opt.data + '/valid.one2many.pt')
            else:
                valid_one2many = torch.load(opt.data + '/valid.one2many.%s.pt' % opt.data_filename_suffix)
#valid_one2many = valid_one2many[:2000]
valid_one2many_dataset = KeyphraseDataset(valid_one2many, word2idx=word2idx, idx2word=idx2word,
type='one2many', delimiter_type=opt.delimiter_type, load_train=load_train, remove_src_eos=opt.remove_src_eos, title_guided=opt.title_guided)
valid_loader = DataLoader(dataset=valid_one2many_dataset,
collate_fn=valid_one2many_dataset.collate_fn_one2many,
num_workers=opt.batch_workers, batch_size=opt.batch_size, pin_memory=True,
shuffle=False)
logging.info('#(valid data size: #(batch)=%d' % (len(valid_loader)))
return train_loader, valid_loader, word2idx, idx2word, vocab
else:
if not opt.custom_data_filename_suffix:
            test_one2many = torch.load(opt.data + '/test.one2many.pt')
        else:
            test_one2many = torch.load(opt.data + '/test.one2many.%s.pt' % opt.data_filename_suffix)
test_one2many_dataset = KeyphraseDataset(test_one2many, word2idx=word2idx, idx2word=idx2word,
type='one2many', delimiter_type=opt.delimiter_type, load_train=load_train, remove_src_eos=opt.remove_src_eos, title_guided=opt.title_guided)
test_loader = DataLoader(dataset=test_one2many_dataset,
collate_fn=test_one2many_dataset.collate_fn_one2many,
num_workers=opt.batch_workers, batch_size=opt.batch_size, pin_memory=True,
shuffle=False)
logging.info('#(test data size: #(batch)=%d' % (len(test_loader)))
return test_loader, word2idx, idx2word, vocab
|
193212
|
import gym
import portfolio_management
import portfolio_management.paths as p
from portfolio_management.io_utilities import pickle_dump
def test_episode(download_and_pickle: bool = True):
database_name = 'test_episode'
if download_and_pickle:
from portfolio_management.data.manager import Manager
from portfolio_management.data.retrieve import get_dataset
from portfolio_management.data.preprocessing import get_pca_preprocessing_function
symbol_list = ['BNBBTC', 'BNBETH']
interval = '30m'
start = "2020-01-01"
end = "2020-02-01"
manager = Manager(
database_name=database_name,
reset_tables=True,
)
manager.insert(
symbol_list=symbol_list,
interval=interval,
start=start,
end=end,
)
preprocessing_function = get_pca_preprocessing_function()
dataset = get_dataset(
database_name=database_name,
interval=interval,
preprocessing=preprocessing_function
)
datasets_folder_path = p.datasets_folder_path
path_dataset = datasets_folder_path.joinpath(database_name).with_suffix('.pkl')
pickle_dump(path_dataset, dataset)
env_kwargs = {
'dataset_name': database_name
}
    env = gym.make('Portfolio-v0', **env_kwargs)  # TODO: instantiate the env class directly instead of gym.make to make debugging easier
env.reset()
done = False
while not done:
state, reward, done, info = env.step(env.action_space.sample())
print(f'State: {state} Reward: {reward} Done: {done}')
if __name__ == '__main__':
test_episode()
|
193234
|
from unittest import mock
import graphene
from django.utils.functional import SimpleLazyObject
from freezegun import freeze_time
from .....shipping.error_codes import ShippingErrorCode
from .....shipping.models import ShippingZone
from .....webhook.event_types import WebhookEventAsyncType
from .....webhook.payloads import generate_meta, generate_requestor
from ....tests.utils import get_graphql_content
CREATE_SHIPPING_ZONE_MUTATION = """
mutation createShipping(
$name: String
$description: String
$default: Boolean
$countries: [String!]
$addWarehouses: [ID!]
$addChannels: [ID!]
) {
shippingZoneCreate(
input: {
name: $name
description: $description
countries: $countries
default: $default
addWarehouses: $addWarehouses
addChannels: $addChannels
}
) {
errors {
field
code
message
channels
}
shippingZone {
id
name
description
countries {
code
}
default
warehouses {
name
}
channels {
id
}
}
}
}
"""
def test_create_shipping_zone(
staff_api_client, warehouse, permission_manage_shipping, channel_PLN
):
warehouse_id = graphene.Node.to_global_id("Warehouse", warehouse.pk)
channel_id = graphene.Node.to_global_id("Channel", channel_PLN.pk)
variables = {
"name": "<NAME>",
"description": "test description",
"countries": ["PL"],
"addWarehouses": [warehouse_id],
"addChannels": [channel_id],
}
response = staff_api_client.post_graphql(
CREATE_SHIPPING_ZONE_MUTATION,
variables,
permissions=[permission_manage_shipping],
)
content = get_graphql_content(response)
data = content["data"]["shippingZoneCreate"]
zone = data["shippingZone"]
assert not data["errors"]
assert zone["name"] == "test shipping"
assert zone["description"] == "test description"
assert zone["countries"] == [{"code": "PL"}]
assert len(zone["warehouses"]) == 1
assert zone["warehouses"][0]["name"] == warehouse.name
assert len(zone["channels"]) == 1
assert zone["channels"][0]["id"] == channel_id
assert zone["default"] is False
@freeze_time("2022-05-12 12:00:00")
@mock.patch("saleor.plugins.webhook.plugin.get_webhooks_for_event")
@mock.patch("saleor.plugins.webhook.plugin.trigger_webhooks_async")
def test_create_shipping_zone_trigger_webhook(
mocked_webhook_trigger,
mocked_get_webhooks_for_event,
any_webhook,
staff_api_client,
permission_manage_shipping,
warehouse,
channel_USD,
settings,
):
# given
mocked_get_webhooks_for_event.return_value = [any_webhook]
settings.PLUGINS = ["saleor.plugins.webhook.plugin.WebhookPlugin"]
warehouse_id = graphene.Node.to_global_id("Warehouse", warehouse.id)
channel_id = graphene.Node.to_global_id("Channel", channel_USD.id)
variables = {
"name": "Shipping Zone Name",
"description": "Shipping Zone Description",
"countries": ["PL"],
"addWarehouses": [warehouse_id],
"addChannels": [channel_id],
}
# when
response = staff_api_client.post_graphql(
CREATE_SHIPPING_ZONE_MUTATION,
variables,
permissions=[permission_manage_shipping],
)
content = get_graphql_content(response)
shipping_zone = ShippingZone.objects.last()
data = content["data"]["shippingZoneCreate"]
# then
assert shipping_zone
assert data["errors"] == []
mocked_webhook_trigger.assert_called_once_with(
{
"id": data["shippingZone"]["id"],
"meta": generate_meta(
requestor_data=generate_requestor(
SimpleLazyObject(lambda: staff_api_client.user)
)
),
},
WebhookEventAsyncType.SHIPPING_ZONE_CREATED,
[any_webhook],
shipping_zone,
SimpleLazyObject(lambda: staff_api_client.user),
)
def test_create_shipping_zone_with_empty_warehouses(
staff_api_client, permission_manage_shipping
):
variables = {
"name": "<NAME>",
"countries": ["PL"],
"addWarehouses": [],
}
response = staff_api_client.post_graphql(
CREATE_SHIPPING_ZONE_MUTATION,
variables,
permissions=[permission_manage_shipping],
)
content = get_graphql_content(response)
data = content["data"]["shippingZoneCreate"]
assert not data["errors"]
zone = data["shippingZone"]
assert zone["name"] == "test shipping"
assert zone["countries"] == [{"code": "PL"}]
assert not zone["warehouses"]
assert zone["default"] is False
def test_create_shipping_zone_without_warehouses_and_channels(
staff_api_client, permission_manage_shipping
):
variables = {
"name": "<NAME>",
"countries": ["PL"],
}
response = staff_api_client.post_graphql(
CREATE_SHIPPING_ZONE_MUTATION,
variables,
permissions=[permission_manage_shipping],
)
content = get_graphql_content(response)
data = content["data"]["shippingZoneCreate"]
assert not data["errors"]
zone = data["shippingZone"]
assert zone["name"] == "test shipping"
assert zone["countries"] == [{"code": "PL"}]
assert not zone["warehouses"]
assert zone["default"] is False
TEST_COUNTRIES_LIST = ["DZ", "AX", "BY"]
@mock.patch(
"saleor.graphql.shipping.mutations.shippings.get_countries_without_shipping_zone",
return_value=TEST_COUNTRIES_LIST,
)
def test_create_default_shipping_zone(
_, staff_api_client, warehouse, permission_manage_shipping
):
unassigned_countries = TEST_COUNTRIES_LIST
warehouse_id = graphene.Node.to_global_id("Warehouse", warehouse.pk)
variables = {
"default": True,
"name": "test shipping",
"countries": ["PL"],
"addWarehouses": [warehouse_id],
}
response = staff_api_client.post_graphql(
CREATE_SHIPPING_ZONE_MUTATION,
variables,
permissions=[permission_manage_shipping],
)
expected_countries = set(unassigned_countries + variables["countries"])
content = get_graphql_content(response)
data = content["data"]["shippingZoneCreate"]
assert not data["errors"]
zone = data["shippingZone"]
assert zone["name"] == "test shipping"
assert zone["warehouses"][0]["name"] == warehouse.name
assert zone["default"] is True
zone_countries = {c["code"] for c in zone["countries"]}
assert zone_countries == expected_countries
def test_create_duplicated_default_shipping_zone(
staff_api_client, shipping_zone, permission_manage_shipping
):
shipping_zone.default = True
shipping_zone.save()
variables = {
"default": True,
"name": "test shipping",
"countries": ["PL"],
"addChannels": [],
}
response = staff_api_client.post_graphql(
CREATE_SHIPPING_ZONE_MUTATION,
variables,
permissions=[permission_manage_shipping],
)
content = get_graphql_content(response)
data = content["data"]["shippingZoneCreate"]
assert data["errors"]
assert data["errors"][0]["field"] == "default"
assert data["errors"][0]["code"] == ShippingErrorCode.ALREADY_EXISTS.name
|
193249
|
from typing import Optional # NOQA
import chainer
import numpy # NOQA
class GraphConvPredictor(chainer.Chain):
"""Wrapper class that combines a graph convolution and MLP."""
def __init__(
self,
graph_conv, # type: chainer.Link
mlp=None, # type: Optional[chainer.Link]
label_scaler=None, # type: Optional[chainer.Link]
postprocess_fn=None, # type: Optional[chainer.FunctionNode]
train_graph_conv=True
):
# type: (...) -> None
"""Initialize the graph convolution predictor.
Args:
graph_conv (chainer.Chain): The graph convolution network
required to obtain molecule feature representation.
mlp (chainer.Chain or None): Multi layer perceptron;
used as the final fully connected layer. Set it to
`None` if no operation is necessary after the
`graph_conv` calculation.
label_scaler (chainer.Link or None): scaler link
postprocess_fn (chainer.FunctionNode or None):
postprocess function for prediction.
"""
super(GraphConvPredictor, self).__init__()
with self.init_scope():
self.graph_conv = graph_conv
if isinstance(mlp, chainer.Link):
self.mlp = mlp
if isinstance(label_scaler, chainer.Link):
self.label_scaler = label_scaler
if not isinstance(mlp, chainer.Link):
self.mlp = mlp
if not isinstance(label_scaler, chainer.Link):
self.label_scaler = label_scaler
self.postprocess_fn = postprocess_fn or chainer.functions.identity
self.train_graph_conv = train_graph_conv
def __call__(self, atoms, adjs):
# type: (numpy.ndarray, numpy.ndarray) -> chainer.Variable
with chainer.using_config(
'enable_backprop', self.train_graph_conv):
x = self.graph_conv(atoms, adjs)
if self.mlp:
x = self.mlp(x)
return x
def predict(self, atoms, adjs):
# type: (numpy.ndarray, numpy.ndarray) -> chainer.Variable
with chainer.no_backprop_mode(), chainer.using_config('train', False):
x = self.__call__(atoms, adjs)
if self.label_scaler is not None:
x = self.label_scaler.inverse_transform(x)
return self.postprocess_fn(x)
def normalize(self, s0=1.):
self.graph_conv.normalize(s0)
def get_report_targets(self):
return self.graph_conv.get_report_targets()
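# --- Illustrative usage sketch (not part of the original module) ---
# A typical pairing is a graph convolution and MLP from chainer_chemistry, e.g.:
#     from chainer_chemistry.models import GGNN, MLP  # assumed import path
#     predictor = GraphConvPredictor(GGNN(out_dim=16), MLP(out_dim=1, hidden_dim=16))
#     y = predictor.predict(atoms, adjs)  # inverse-scaled, postprocessed predictions
# The class and constructor names above are assumptions; any pair of chainer.Link
# objects with compatible output/input shapes can be used.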
|
193274
|
import re
# a subset of PEP 440
_VERSION_REGEX = re.compile(
r"""
^\s*
v?
(?P<major>\d+)
(?:\.(?P<minor>\d+))?
(?:\.(?P<patch>\d+))?
\s*$
""",
re.VERBOSE | re.IGNORECASE,
)
class Version:
"""
Represents a major.minor.patch version string
"""
def __init__(self, major, minor=0, patch=0):
self.major = major
self.minor = minor
self.patch = patch
self.tag = f"v{str(self)}"
def __str__(self):
parts = [str(x) for x in [self.major, self.minor, self.patch]]
return ".".join(parts).lower()
def increment(self, increment_type):
incr = None
if increment_type == "major":
incr = Version(self.major + 1)
elif increment_type == "minor":
incr = Version(self.major, self.minor + 1)
elif increment_type == "patch":
incr = Version(self.major, self.minor, self.patch + 1)
return incr
def parse(version):
match = _VERSION_REGEX.search(version)
if not match:
raise ValueError(f"invalid version: {version}")
return Version(
int(match.group("major") or 0),
int(match.group("minor") or 0),
int(match.group("patch") or 0),
)
def next_version_from_current_version(current_version, increment_type):
return parse(current_version).increment(increment_type)
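# --- Illustrative usage sketch (not part of the original module) ---
# Demonstrates parsing and bumping version strings; the inputs are arbitrary examples.
if __name__ == "__main__":
    v = parse("v1.2.3")
    print(v)                     # 1.2.3
    print(v.tag)                 # v1.2.3
    print(v.increment("minor"))  # 1.3.0
    print(next_version_from_current_version("2.0", "patch"))  # 2.0.1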
|
193296
|
import pickle
'''
This module contains functions for saving and
loading .pkl files
'''
def save(object_, file_name):
'''
Saves an object into a file.
Parameters
----------
object_ : object
The object to save
file_name : str
The name of the file to save the object in.
Raises
------
OSError
If the file cannot be created due to a system-related error.
'''
    with open(file_name, 'wb') as file:
        pickle.dump(object_, file)
def load(file_name):
'''
Loads an object from file.
Parameters
----------
file_name : str
The name of the file to load the object from.
Returns
-------
object
The python object stored in the file.
Raises
------
FileNotFoundError
If the file does not exist.
'''
    with open(file_name, 'rb') as file:
        object_ = pickle.load(file)
return object_
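# --- Illustrative usage sketch (not part of the original module) ---
# Round-trips a small object through a .pkl file; the file name is an arbitrary example.
if __name__ == '__main__':
    payload = {'numbers': [1, 2, 3], 'label': 'example'}
    save(payload, 'example.pkl')
    assert load('example.pkl') == payload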
|
193318
|
from setuptools import find_packages
from setuptools import setup
import pymt5adapter
with open('README.md') as f:
readme = f.read()
with open('LICENSE') as f:
license = f.read()
setup(
name='pymt5adapter',
version=pymt5adapter.__version__.get('pymt5adapter'),
description='A drop in replacement wrapper for the MetaTrader5 package',
long_description_content_type='text/markdown',
long_description=readme,
author='nicholishen',
author_email='<EMAIL>',
url='https://github.com/nicholishen/pymt5adapter',
license='MIT',
packages=find_packages(exclude=('tests', 'docs')),
install_requires=['MetaTrader5==5.0.33'],
setup_requires=['wheel'],
python_requires='>=3.6',
)
|
193342
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^daily_logs$', views.get_daily_logs, name='get_daily_logs'),
url(r'^logs_sample$', views.get_preview_data, name='get_preview_data'),
url(r'^date_range$', views.get_date_range, name='get_date_range'),
url(r'^column_data$', views.get_column_data, name='get_column_data'),
]
|
193428
|
from socketclusterclient import Socketcluster
def onconnect(socket):
    print("on connect got called")
def ondisconnect(socket):
    print("on disconnect got called")
def onConnectError(socket, error):
    print("On connect error got called")
def onSetAuthentication(socket, token):
    print("Token received " + token)
    socket.setAuthtoken(token)
def onAuthentication(socket, isauthenticated):
    print("Authenticated is " + str(isauthenticated))
    socket.subscribeack('yell', suback)
    socket.publishack('yell', 'Hi dudies', puback)
    socket.onchannel('yell', channelmessage)
    socket.unsubscribeack('yell', unsuback)
def suback(channel, error, object):
    if error == '':
        print("Subscribed successfully to channel " + channel)
def puback(channel, error, object):
    if error == '':
        print("Publish sent successfully to channel " + channel)
def channelmessage(key, object):
    print("Got data " + object + " from key " + key)
def unsuback(channel, error, object):
    if error == '':
        print("Unsubscribed to channel " + channel)
if __name__ == "__main__":
socket = Socketcluster.socket("ws://localhost:8000/socketcluster/")
socket.setBasicListener(onconnect, ondisconnect, onConnectError)
socket.setAuthenticationListener(onSetAuthentication, onAuthentication)
socket.connect()
|
193436
|
import rps.robotarium as robotarium
from rps.utilities import *
from rps.utilities.barrier_certificates import *
from rps.utilities.controllers import *
from rps.utilities.transformations import *
from reachGoal import reachGoal
from matplotlib import patches
import numpy as np
import time
N = 1
initial_conditions = np.transpose(np.array([[-1, 0.2, 0]]))
r = robotarium.Robotarium(number_of_robots=N, show_figure=True, initial_conditions=initial_conditions, sim_in_real_time=False)
si_to_uni_dyn = create_si_to_uni_dynamics_with_backwards_motion()
_, uni_to_si_states = create_si_to_uni_mapping()
si_barrier_cert = create_single_integrator_barrier_certificate_with_boundary()
hg = -1
## Visualize goals and obstacles
r.axes.add_patch(patches.Circle((1, 0), 0.2, fill=False, zorder=10)) # Goal region
r.axes.add_patch(patches.Ellipse((0, 0), 0.4, 1.0, 0, fill=False)) # Obstacle
while (hg <= 0):
x = r.get_poses()
x_si = uni_to_si_states(x)
u, hg = reachGoal(x)
# Create safe control inputs (i.e., no collisions)
u = si_barrier_cert(u, x[0:2])
# Transform single integrator velocity commands to unicycle
dxu = si_to_uni_dyn(u, x)
    # Set the velocities by mapping the single-integrator inputs to unicycle inputs
r.set_velocities(np.arange(N), dxu)
# Iterate the simulation
r.step()
r.call_at_scripts_end()
|
193495
|
import gym
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.utils import try_import_torch
from ray.rllib.utils.annotations import override
torch, nn = try_import_torch()
class OnlineLinearRegression(nn.Module):
def __init__(self, feature_dim, alpha=1, lambda_=1):
super(OnlineLinearRegression, self).__init__()
self.d = feature_dim
self.alpha = alpha
self.precision = nn.Parameter(
data=lambda_ * torch.eye(self.d), requires_grad=False)
self.f = nn.Parameter(data=torch.zeros(self.d, ), requires_grad=False)
self.covariance = nn.Parameter(
data=torch.inverse(self.precision), requires_grad=False)
self.theta = nn.Parameter(
data=self.covariance.matmul(self.f), requires_grad=False)
self._init_params()
def _init_params(self):
self.update_schedule = 1
self.delta_f = 0
self.delta_b = 0
self.time = 0
self.covariance.mul_(self.alpha)
self.dist = torch.distributions.multivariate_normal\
.MultivariateNormal(self.theta, self.covariance)
def partial_fit(self, x, y):
# TODO: Handle batch of data rather than individual points
self._check_inputs(x, y)
x = x.squeeze(0)
y = y.item()
self.time += 1
self.delta_f += y * x
self.delta_b += torch.ger(x, x)
        # Can follow an update schedule if not doing Sherman-Morrison updates
if self.time % self.update_schedule == 0:
self.precision += self.delta_b
self.f += self.delta_f
self.delta_b = 0
self.delta_f = 0
torch.inverse(self.precision, out=self.covariance)
torch.matmul(self.covariance, self.f, out=self.theta)
self.covariance.mul_(self.alpha)
def sample_theta(self):
theta = self.dist.sample()
return theta
def get_ucbs(self, x):
""" Calculate upper confidence bounds using covariance matrix according
to algorithm 1: LinUCB
(http://proceedings.mlr.press/v15/chu11a/chu11a.pdf).
Args:
x (torch.Tensor): Input feature tensor of shape
(batch_size, feature_dim)
"""
projections = self.covariance @ x.T
batch_dots = (x * projections.T).sum(dim=1)
return batch_dots.sqrt()
def forward(self, x, sample_theta=False):
""" Predict scores on input batch using the underlying linear model.
Args:
x (torch.Tensor): Input feature tensor of shape
(batch_size, feature_dim)
sample_theta (bool): Whether to sample the weights from its
posterior distribution to perform Thompson Sampling as per
http://proceedings.mlr.press/v28/agrawal13.pdf .
"""
self._check_inputs(x)
theta = self.sample_theta() if sample_theta else self.theta
scores = x @ theta
return scores
def _check_inputs(self, x, y=None):
assert x.ndim in [2, 3], \
"Input context tensor must be 2 or 3 dimensional, where the" \
" first dimension is batch size"
assert x.shape[1] == self.d, \
"Feature dimensions of weights ({}) and context ({}) do not " \
"match!".format(self.d, x.shape[1])
if y:
assert torch.is_tensor(y) and y.numel() == 1,\
"Target should be a tensor;" \
"Only online learning with a batch size of 1 is " \
"supported for now!"
class DiscreteLinearModel(TorchModelV2, nn.Module):
def __init__(self, obs_space, action_space, num_outputs, model_config,
name):
TorchModelV2.__init__(self, obs_space, action_space, num_outputs,
model_config, name)
nn.Module.__init__(self)
alpha = model_config.get("alpha", 1)
lambda_ = model_config.get("lambda_", 1)
self.feature_dim = obs_space.sample().size
self.arms = nn.ModuleList([
OnlineLinearRegression(
feature_dim=self.feature_dim, alpha=alpha, lambda_=lambda_)
for i in range(self.num_outputs)
])
self._cur_value = None
self._cur_ctx = None
@override(ModelV2)
def forward(self, input_dict, state, seq_lens):
x = input_dict["obs"]
scores = self.predict(x)
return scores, state
def predict(self, x, sample_theta=False, use_ucb=False):
self._cur_ctx = x
scores = torch.stack(
[self.arms[i](x, sample_theta) for i in range(self.num_outputs)],
dim=-1)
self._cur_value = scores
if use_ucb:
ucbs = torch.stack(
[self.arms[i].get_ucbs(x) for i in range(self.num_outputs)],
dim=-1)
return scores + ucbs
else:
return scores
def partial_fit(self, x, y, arm):
assert 0 <= arm.item() < len(self.arms), \
"Invalid arm: {}. It should be 0 <= arm < {}".format(
arm.item(), len(self.arms))
self.arms[arm].partial_fit(x, y)
@override(ModelV2)
def value_function(self):
assert self._cur_value is not None, "must call forward() first"
return self._cur_value
def current_obs(self):
assert self._cur_ctx is not None, "must call forward() first"
return self._cur_ctx
class DiscreteLinearModelUCB(DiscreteLinearModel):
def forward(self, input_dict, state, seq_lens):
x = input_dict["obs"]
scores = super(DiscreteLinearModelUCB, self).predict(
x, sample_theta=False, use_ucb=True)
return scores, state
class DiscreteLinearModelThompsonSampling(DiscreteLinearModel):
def forward(self, input_dict, state, seq_lens):
x = input_dict["obs"]
scores = super(DiscreteLinearModelThompsonSampling, self).predict(
x, sample_theta=True, use_ucb=False)
return scores, state
class ParametricLinearModel(TorchModelV2, nn.Module):
def __init__(self, obs_space, action_space, num_outputs, model_config,
name):
TorchModelV2.__init__(self, obs_space, action_space, num_outputs,
model_config, name)
nn.Module.__init__(self)
alpha = model_config.get("alpha", 1)
lambda_ = model_config.get("lambda_", 0.1)
# RLlib preprocessors will flatten the observation space and unflatten
# it later. Accessing the original space here.
original_space = obs_space.original_space
assert isinstance(original_space, gym.spaces.Dict) and \
"item" in original_space.spaces, \
"This model only supports gym.spaces.Dict observation spaces."
self.feature_dim = original_space["item"].shape[-1]
self.arm = OnlineLinearRegression(
feature_dim=self.feature_dim, alpha=alpha, lambda_=lambda_)
self._cur_value = None
self._cur_ctx = None
def _check_inputs(self, x):
if x.ndim == 3:
assert x.size()[
0] == 1, "Only batch size of 1 is supported for now."
@override(ModelV2)
def forward(self, input_dict, state, seq_lens):
x = input_dict["obs"]["item"]
self._check_inputs(x)
x.squeeze_(dim=0) # Remove the batch dimension
scores = self.predict(x)
scores.unsqueeze_(dim=0) # Add the batch dimension
return scores, state
def predict(self, x, sample_theta=False, use_ucb=False):
self._cur_ctx = x
scores = self.arm(x, sample_theta)
self._cur_value = scores
if use_ucb:
ucbs = self.arm.get_ucbs(x)
return scores + 0.3 * ucbs
else:
return scores
def partial_fit(self, x, y, arm):
x = x["item"]
action_id = arm.item()
self.arm.partial_fit(x[:, action_id], y)
@override(ModelV2)
def value_function(self):
assert self._cur_value is not None, "must call forward() first"
return self._cur_value
def current_obs(self):
assert self._cur_ctx is not None, "must call forward() first"
return self._cur_ctx
class ParametricLinearModelUCB(ParametricLinearModel):
def forward(self, input_dict, state, seq_lens):
x = input_dict["obs"]["item"]
self._check_inputs(x)
x.squeeze_(dim=0) # Remove the batch dimension
scores = super(ParametricLinearModelUCB, self).predict(
x, sample_theta=False, use_ucb=True)
scores.unsqueeze_(dim=0) # Add the batch dimension
return scores, state
class ParametricLinearModelThompsonSampling(ParametricLinearModel):
def forward(self, input_dict, state, seq_lens):
x = input_dict["obs"]["item"]
self._check_inputs(x)
x.squeeze_(dim=0) # Remove the batch dimension
scores = super(ParametricLinearModelThompsonSampling, self).predict(
x, sample_theta=True, use_ucb=False)
scores.unsqueeze_(dim=0) # Add the batch dimension
return scores, state
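# --- Illustrative usage sketch (not part of the original module) ---
# Exercises OnlineLinearRegression directly with random data to show the
# partial_fit / forward / get_ucbs cycle. feature_dim and the number of updates are
# arbitrary example values, and PyTorch is assumed to be installed.
def _example_online_linear_regression():
    feature_dim = 4
    model = OnlineLinearRegression(feature_dim=feature_dim, alpha=1, lambda_=1)
    for _ in range(10):
        x = torch.randn(1, feature_dim)  # single context (batch size 1)
        y = torch.randn(1)               # scalar target, as required by _check_inputs
        model.partial_fit(x, y)
    batch = torch.randn(8, feature_dim)
    mean_scores = model(batch)                        # posterior-mean predictions
    sampled_scores = model(batch, sample_theta=True)  # Thompson-sampled predictions
    ucbs = model.get_ucbs(batch)                      # LinUCB exploration bonuses
    return mean_scores, sampled_scores, ucbs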
|
193496
|
from __future__ import division
import os
import numpy as np
import pprint
import tensorflow as tf
import tensorflow.contrib.slim as slim
import pickle, csv
from utils import *
from model import UNet3D, SurvivalVAE
flags = tf.app.flags
flags.DEFINE_integer("epoch", 4, "Epoch to train [4]")
flags.DEFINE_string("train_patch_dir", "patches", "Directory of the training data [patches]")
flags.DEFINE_bool("split_train", False, "Whether to split the train data into train and val [False]")
flags.DEFINE_string("train_data_dir", "../BraTS17TrainingData", "Directory of the train data [../BraTS17TrainingData]")
flags.DEFINE_string("deploy_data_dir", "../BraTS17ValidationData", "Directory of the test data [../BraTS17ValidationData]")
flags.DEFINE_string("deploy_output_dir", "output_validation", "Directory name of the output data [output]")
flags.DEFINE_string("train_csv", "../BraTS17TrainingData/survival_data.csv", "CSV path of the training data")
flags.DEFINE_string("deploy_csv", "../BraTS17ValidationData/survival_evaluation.csv", "CSV path of the validation data")
flags.DEFINE_integer("batch_size", 1, "Batch size [1]")
flags.DEFINE_integer("seg_features_root", 48, "Number of features in the first filter in the seg net [48]")
flags.DEFINE_integer("survival_features", 16, "Number of features in the survival net [16]")
flags.DEFINE_integer("conv_size", 3, "Convolution kernel size in encoding and decoding paths [3]")
flags.DEFINE_integer("layers", 3, "Encoding and deconding layers [3]")
flags.DEFINE_string("loss_type", "cross_entropy", "Loss type in the model [cross_entropy]")
flags.DEFINE_float("dropout", 0.5, "Drop out ratio [0.5]")
flags.DEFINE_string("checkpoint_dir", "checkpoint", "Directory name to save the checkpoints [checkpoint]")
flags.DEFINE_string("log_dir", "logs", "Directory name to save logs [logs]")
flags.DEFINE_boolean("train", False, "True for training, False for deploying [False]")
flags.DEFINE_boolean("run_seg", True, "True if run segmentation [True]")
flags.DEFINE_boolean("run_survival", False, "True if run survival prediction [True]")
FLAGS = flags.FLAGS
def main(_):
pp = pprint.PrettyPrinter()
pp.pprint(flags.FLAGS.__flags)
# Train
all_train_paths = []
for dirpath, dirnames, files in os.walk(FLAGS.train_data_dir):
if os.path.basename(dirpath)[0:7] == 'Brats17':
all_train_paths.append(dirpath)
if FLAGS.split_train:
        if os.path.exists(os.path.join(FLAGS.train_patch_dir, 'files.log')):
            with open(os.path.join(FLAGS.train_patch_dir, 'files.log'), 'rb') as f:
                training_paths, testing_paths = pickle.load(f)
else:
            all_paths = [os.path.join(FLAGS.train_patch_dir, p) for p in sorted(os.listdir(FLAGS.train_patch_dir))]
np.random.shuffle(all_paths)
n_training = int(len(all_paths) * 4 / 5)
training_paths = all_paths[:n_training]
testing_paths = all_paths[n_training:]
# Save the training paths and testing paths
            with open(os.path.join(FLAGS.train_patch_dir, 'files.log'), 'wb') as f:
                pickle.dump([training_paths, testing_paths], f)
training_ids = [os.path.basename(i) for i in training_paths]
testing_ids = [os.path.basename(i) for i in testing_paths]
training_survival_data = {}
testing_survival_data = {}
with open(FLAGS.train_csv, 'r') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
if row[0] in training_ids:
training_survival_data[row[0]] = (row[1], row[2])
elif row[0] in testing_ids:
testing_survival_data[row[0]] = (row[1], row[2])
training_survival_paths = [p for p in all_train_paths if os.path.basename(p) in training_survival_data.keys()]
testing_survival_paths = [p for p in all_train_paths if os.path.basename(p) in testing_survival_data.keys()]
else:
training_paths = [os.path.join(FLAGS.train_patch_dir, name) for name in os.listdir(FLAGS.train_patch_dir)
if '.log' not in name]
testing_paths = None
training_ids = [os.path.basename(i) for i in training_paths]
training_survival_paths = []
testing_survival_paths = None
training_survival_data = {}
testing_survival_data = None
with open(FLAGS.train_csv, 'r') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
if row[0] in training_ids:
training_survival_data[row[0]] = (row[1], row[2])
training_survival_paths = [p for p in all_train_paths if os.path.basename(p) in training_survival_data.keys()]
if not os.path.exists(FLAGS.checkpoint_dir):
os.makedirs(FLAGS.checkpoint_dir)
if not os.path.exists(FLAGS.log_dir):
os.makedirs(FLAGS.log_dir)
# Segmentation net
if FLAGS.run_seg:
run_config = tf.ConfigProto()
with tf.Session(config=run_config) as sess:
unet = UNet3D(sess, checkpoint_dir=FLAGS.checkpoint_dir, log_dir=FLAGS.log_dir, training_paths=training_paths,
testing_paths=testing_paths, batch_size=FLAGS.batch_size, layers=FLAGS.layers,
features_root=FLAGS.seg_features_root, conv_size=FLAGS.conv_size,
dropout=FLAGS.dropout, loss_type=FLAGS.loss_type)
if FLAGS.train:
model_vars = tf.trainable_variables()
slim.model_analyzer.analyze_vars(model_vars, print_info=True)
train_config = {}
train_config['epoch'] = FLAGS.epoch
unet.train(train_config)
else:
# Deploy
if not os.path.exists(FLAGS.deploy_output_dir):
os.makedirs(FLAGS.deploy_output_dir)
unet.deploy(FLAGS.deploy_data_dir, FLAGS.deploy_output_dir)
tf.reset_default_graph()
# Survival net
if FLAGS.run_survival:
run_config = tf.ConfigProto()
with tf.Session(config=run_config) as sess:
survivalvae = SurvivalVAE(sess, checkpoint_dir=FLAGS.checkpoint_dir, log_dir=FLAGS.log_dir,
training_paths=training_survival_paths, testing_paths=testing_survival_paths,
training_survival_data=training_survival_data,
testing_survival_data=testing_survival_data)
if FLAGS.train:
model_vars = tf.trainable_variables()
slim.model_analyzer.analyze_vars(model_vars, print_info=True)
train_config = {}
train_config['epoch'] = FLAGS.epoch * 100
survivalvae.train(train_config)
else:
all_deploy_paths = []
for dirpath, dirnames, files in os.walk(FLAGS.deploy_data_dir):
if os.path.basename(dirpath)[0:7] == 'Brats17':
all_deploy_paths.append(dirpath)
deploy_survival_data = {}
with open(FLAGS.deploy_csv, 'r') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
if row[0] != 'Brats17ID':
deploy_survival_data[row[0]] = row[1]
deploy_survival_paths = [p for p in all_deploy_paths if os.path.basename(p) in deploy_survival_data.keys()]
                survivalvae.deploy(deploy_survival_paths, deploy_survival_data)
if __name__ == '__main__':
tf.app.run()
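# Invocation sketch (the script name and epoch count are assumptions):
#   python main.py --train=True --run_seg=True --epoch=10    # train segmentation
#   python main.py --train=False --run_seg=True              # deploy segmentation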
|
193530
|
from sklearn.linear_model import SGDClassifier
# XA/yA are the training features/labels and XB the held-out features
# (defined elsewhere); 'modified_huber' is a smooth loss that tolerates
# outliers and enables probability estimates via predict_proba
sgd = SGDClassifier(loss='modified_huber', shuffle=True, random_state=101)
sgd.fit(XA, yA)
yP = sgd.predict(XB)
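# A minimal self-contained sketch of the same pipeline, with synthetic data
# standing in for XA/yA/XB (an assumption for illustration only):
#   import numpy as np
#   rng = np.random.RandomState(101)
#   XA, XB = rng.randn(100, 4), rng.randn(10, 4)
#   yA = (XA[:, 0] > 0).astype(int)
#   sgd = SGDClassifier(loss='modified_huber', shuffle=True, random_state=101)
#   sgd.fit(XA, yA)
#   probs = sgd.predict_proba(XB)  # probabilities need loss='modified_huber'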
|
193559
|
from setuptools import find_packages, setup
console_scripts = """
[console_scripts]
mapswipe_workers=mapswipe_workers.mapswipe_workers:cli
ms=mapswipe_workers.mapswipe_workers:cli
"""
with open("requirements.txt") as f:
requirements = f.read().splitlines()
setup(
name="mapswipe-workers",
version="3.0",
description="Install script for the MapSwipe-Python-Workers.",
author="<NAME>, <NAME>, <NAME>",
author_email="",
url="www.mapswipe.org",
packages=find_packages(exclude=("docs", "python_scripts")),
install_requires=requirements,
entry_points=console_scripts,
)
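# Usage sketch (assumed workflow, not part of this file): after `pip install -e .`
# both entry points defined above dispatch to mapswipe_workers.mapswipe_workers:cli,
# e.g. `mapswipe_workers --help` or the short alias `ms --help`.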
|
193615
|
import pytest
from yt.data_objects.static_output import Dataset
from yt.geometry.grid_geometry_handler import GridIndex
from yt.loaders import load, load_simulation
from yt.utilities.exceptions import (
YTAmbiguousDataType,
YTSimulationNotIdentified,
YTUnidentifiedDataType,
)
from yt.utilities.object_registries import output_type_registry
@pytest.fixture
def tmp_data_dir(tmp_path):
from yt.config import ytcfg
pre_test_data_dir = ytcfg["yt", "test_data_dir"]
ytcfg.set("yt", "test_data_dir", str(tmp_path))
yield tmp_path
ytcfg.set("yt", "test_data_dir", pre_test_data_dir)
@pytest.mark.usefixtures("tmp_data_dir")
def test_load_not_a_file(tmp_path):
with pytest.raises(FileNotFoundError):
load(tmp_path / "not_a_file")
@pytest.mark.parametrize("simtype", ["Enzo", "unregistered_simulation_type"])
@pytest.mark.usefixtures("tmp_data_dir")
def test_load_simulation_not_a_file(simtype, tmp_path):
# it is preferable to report the most important problem in an error message
    # (missing data is worse than a typo in simulation_type)
# so we make sure the error raised is not YTSimulationNotIdentified,
# even with an absurd simulation type
with pytest.raises(FileNotFoundError):
load_simulation(tmp_path / "not_a_file", simtype)
@pytest.fixture()
def tmp_path_with_empty_file(tmp_path):
empty_file_path = tmp_path / "empty_file"
empty_file_path.touch()
return tmp_path, empty_file_path
def test_load_unidentified_data_dir(tmp_path_with_empty_file):
tmp_path, empty_file_path = tmp_path_with_empty_file
with pytest.raises(YTUnidentifiedDataType):
load(tmp_path)
def test_load_unidentified_data_file(tmp_path_with_empty_file):
tmp_path, empty_file_path = tmp_path_with_empty_file
with pytest.raises(YTUnidentifiedDataType):
load(empty_file_path)
def test_load_simulation_unidentified_data_dir(tmp_path_with_empty_file):
tmp_path, empty_file_path = tmp_path_with_empty_file
with pytest.raises(YTSimulationNotIdentified):
load_simulation(tmp_path, "unregistered_simulation_type")
def test_load_simulation_unidentified_data_file(tmp_path_with_empty_file):
tmp_path, empty_file_path = tmp_path_with_empty_file
with pytest.raises(YTSimulationNotIdentified):
load_simulation(
empty_file_path,
"unregistered_simulation_type",
)
@pytest.fixture()
def ambiguous_dataset_classes():
    # We deliberately set up a situation where two unrelated Dataset
    # subclasses are both considered valid.
# We implement the bare minimum for these classes to be actually
# loadable in order to test hints.
class MockHierarchy(GridIndex):
pass
class MockDataset(Dataset):
_index_class = MockHierarchy
def _parse_parameter_file(self, *args, **kwargs):
self.current_time = -1.0
self.cosmological_simulation = 0
def _set_code_unit_attributes(self, *args, **kwargs):
self.length_unit = self.quan(1, "m")
self.mass_unit = self.quan(1, "kg")
self.time_unit = self.quan(1, "s")
class AlphaDataset(MockDataset):
@classmethod
def _is_valid(cls, *args, **kwargs):
return True
class BetaDataset(MockDataset):
@classmethod
def _is_valid(cls, *args, **kwargs):
return True
yield
# teardown to avoid possible breakage in following tests
output_type_registry.pop("MockDataset")
output_type_registry.pop("AlphaDataset")
output_type_registry.pop("BetaDataset")
@pytest.mark.usefixtures("ambiguous_dataset_classes")
def test_load_ambiguous_data(tmp_path):
with pytest.raises(YTAmbiguousDataType):
load(tmp_path)
file = tmp_path / "fake_datafile0011.dump"
file.touch()
pattern = str(tmp_path / "fake_datafile00??.dump")
# loading a DatasetSeries should not crash until an item is retrieved
ts = load(pattern)
with pytest.raises(YTAmbiguousDataType):
ts[0]
@pytest.mark.parametrize(
"hint, expected_type",
[
("alpha", "AlphaDataset"),
("al", "AlphaDataset"),
("ph", "AlphaDataset"),
("beta", "BetaDataset"),
("BeTA", "BetaDataset"),
("b", "BetaDataset"),
],
)
@pytest.mark.usefixtures("ambiguous_dataset_classes")
def test_load_ambiguous_data_with_hint(hint, expected_type, tmp_path):
ds = load(tmp_path, hint=hint)
assert type(ds).__name__ == expected_type
file1 = tmp_path / "fake_datafile0011.dump"
file2 = tmp_path / "fake_datafile0022.dump"
file1.touch()
file2.touch()
pattern = str(tmp_path / "fake_datafile00??.dump")
ts = load(pattern, hint=hint)
ds = ts[0]
assert type(ds).__name__ == expected_type
ds = ts[1]
assert type(ds).__name__ == expected_type
|
193650
|
import os
import glob
import h5py
import tensorflow as tf
from utils.common import Notify
from .base_dataset import BaseDataset
class Imw2020(BaseDataset):
default_config = {
'num_parallel_calls': 10, 'truncate': None
}
def _init_dataset(self, **config):
print(Notify.INFO, "Initializing dataset:", config['data_name'], Notify.ENDC)
if config['data_split'] == 'val':
proj_paths = ['reichstag', 'sacre_coeur', 'st_peters_square']
seq_paths = [os.path.join(i, 'set_100', 'images') for i in proj_paths]
elif config['data_split'] == 'test':
proj_paths = ['british_museum', 'lincoln_memorial_statue', 'milan_cathedral',
'piazza_san_marco', 'st_pauls_cathedral', 'florence_cathedral_side',
'london_bridge', 'mount_rushmore', 'sagrada_familia', 'united_states_capitol']
seq_paths = proj_paths
else:
raise NotImplementedError
base_path = config['data_root']
image_paths = []
for idx, val in enumerate(seq_paths):
dump_folder = os.path.join(config['dump_root'], proj_paths[idx])
if not os.path.exists(dump_folder):
os.makedirs(dump_folder)
seq_path = os.path.join(base_path, val)
image_paths.extend(glob.glob(os.path.join(seq_path, '*.jpg')))
if config['truncate'] is not None:
print(Notify.WARNING, "Truncate from",
config['truncate'][0], "to", config['truncate'][1], Notify.ENDC)
image_paths = image_paths[config['truncate'][0]:config['truncate'][1]]
if config['data_split'] == 'val':
seq_names = [i.split('/')[-4] for i in image_paths]
elif config['data_split'] == 'test':
seq_names = [i.split('/')[-2] for i in image_paths]
else:
raise NotImplementedError
image_names = [os.path.splitext(os.path.basename(i))[0] for i in image_paths]
dump_paths = [os.path.join(seq_names[i], image_names[i]) for i in range(len(image_paths))]
print(Notify.INFO, "Found images:", len(image_paths), Notify.ENDC)
self.data_length = len(image_paths)
tf.data.Dataset.map_parallel = lambda self, fn: self.map(
fn, num_parallel_calls=config['num_parallel_calls'])
files = {'image_paths': image_paths, 'dump_paths': dump_paths}
return files
def _format_data(self, data):
dump_path = data['dump_path'].decode('utf-8')
seq_name = dump_path.split('/')[-2]
basename = os.path.basename(dump_path)
seq_folder = os.path.join(self.config['dump_root'], seq_name)
h5_kpt = os.path.join(seq_folder, 'keypoints.h5')
h5_desc = os.path.join(seq_folder, 'descriptors.h5')
h5_score = os.path.join(seq_folder, 'scores.h5')
        # 'a' mode opens an existing file or creates it, covering both cases
        gen_kpt_f = h5py.File(h5_kpt, 'a')
        gen_desc_f = h5py.File(h5_desc, 'a')
        gen_score_f = h5py.File(h5_score, 'a')
        if basename not in gen_kpt_f and basename not in gen_desc_f:
            feat = data['dump_data'][0]
            kpt = data['dump_data'][1]
            score = data['dump_data'][2]
            gen_kpt_f.create_dataset(basename, data=kpt)
            gen_desc_f.create_dataset(basename, data=feat)
            gen_score_f.create_dataset(basename, data=score)
        # Close the handles so the datasets are flushed to disk
        gen_kpt_f.close()
        gen_desc_f.close()
        gen_score_f.close()
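# Read-back sketch (assumes a dump for `basename` exists under `seq_folder`,
# as written by _format_data above):
#   with h5py.File(os.path.join(seq_folder, 'keypoints.h5'), 'r') as f:
#       kpt = f[basename][()]
#   with h5py.File(os.path.join(seq_folder, 'descriptors.h5'), 'r') as f:
#       desc = f[basename][()]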
|
193658
|
from collections import defaultdict
class Graph:
    def __init__(self):
        self.graph = defaultdict(list)
    def add_edge(self, u, v):
        self.graph[u].append(v)
    def dfsUtil(self, s, vis):
        # Iterative DFS: a node is marked and printed when popped, so every
        # reachable node (including sinks) is reported exactly once
        stack = [s]
        while stack:
            node = stack.pop()
            if not vis[node]:
                vis[node] = True
                print(node)
                for i in self.graph[node]:
                    if not vis[i]:
                        stack.append(i)
    def dfs(self):
        # Collect every vertex, including sinks that only appear as edge
        # targets; len(self.graph) would miss them
        vertices = set(self.graph)
        for targets in list(self.graph.values()):
            vertices.update(targets)
        vis = {v: False for v in vertices}
        for v in sorted(vertices):
            if not vis[v]:
                self.dfsUtil(v, vis)
if __name__ == '__main__':
    graph = Graph()
    graph.add_edge(1, 0)
    graph.add_edge(1, 4)
    graph.add_edge(2, 1)
    graph.add_edge(3, 4)
    graph.add_edge(4, 0)
    graph.dfs()  # prints 0 1 4 2 3
|
193690
|
from os.path import dirname, join, realpath
import pytest
from numpy import array, dtype, nan
from numpy.testing import assert_array_equal, assert_equal
from pandas_plink import example_file_prefix, read_plink, read_plink1_bin
def test_read_plink():
datafiles = join(dirname(realpath(__file__)), "data_files")
file_prefix = join(datafiles, "data")
(bim, fam, bed) = read_plink(file_prefix, verbose=False)
assert_equal(bed.dtype, dtype("float32"))
assert_array_equal(bim.query("chrom=='1' and pos==72515")["snp"], ["rs4030300"])
assert_array_equal(bim.query("chrom=='1'").shape, [10, 7])
assert_array_equal(
fam.query("fid=='Sample_2' and iid=='Sample_2'")["trait"], ["-9"]
)
assert_array_equal(
bed,
array(
[
[2, 2, 1],
[2, 1, 2],
[nan, nan, nan],
[nan, nan, 1],
[2, 2, 2],
[2, 2, 2],
[2, 1, 0],
[2, 2, 2],
[1, 2, 2],
[2, 1, 2],
]
),
)
def test_read_plink_prefix_dot():
with pytest.raises(IOError):
read_plink("/home/joao/84757.genotypes.norm.renamed")
def test_read_plink_wildcard():
datafiles = join(dirname(realpath(__file__)), "data_files")
file_prefix = join(datafiles, "chr*")
(bim, fam, bed) = read_plink(file_prefix, verbose=False)
assert_array_equal(bim[bim["chrom"] == "11"]["i"].values[:2], [0, 1])
assert_array_equal(bim[bim["chrom"] == "12"]["i"].values[:2], [779, 780])
def test_read_plink1_bin():
datafiles = join(dirname(realpath(__file__)), "data_files")
file_prefix = join(datafiles, "data")
bim = file_prefix + ".bim"
bed = file_prefix + ".bed"
fam = file_prefix + ".fam"
G = read_plink1_bin(bed, bim, fam, verbose=False)
assert_equal(G.data.dtype, dtype("float32"))
snp = G.where((G.chrom == "1") & (G.pos == 72515), drop=True)["snp"].values
assert_array_equal(snp, ["rs4030300"])
shape = G.where(G.chrom == "1", drop=True).shape
assert_array_equal(shape, [3, 10])
shape = G.where(G.chrom == "2", drop=True).shape
assert_array_equal(shape, [3, 0])
g = G.where((G.fid == "Sample_2") & (G.iid == "Sample_2"), drop=True)
assert_array_equal(g["trait"].values, ["-9"])
arr = [
[2.0, 2.0, nan, nan, 2.0, 2.0, 2.0, 2.0, 1.0, 2.0],
[2.0, 1.0, nan, nan, 2.0, 2.0, 1.0, 2.0, 2.0, 1.0],
[1.0, 2.0, nan, 1.0, 2.0, 2.0, 0.0, 2.0, 2.0, 2.0],
]
assert_array_equal(G, arr)
def test_read_plink1_bin_a0():
datafiles = join(dirname(realpath(__file__)), "data_files")
file_prefix = join(datafiles, "data")
bim = file_prefix + ".bim"
bed = file_prefix + ".bed"
fam = file_prefix + ".fam"
G = read_plink1_bin(bed, bim, fam, verbose=False, ref="a0")
assert_equal(G.data.dtype, dtype("float32"))
snp = G.where((G.chrom == "1") & (G.pos == 72515), drop=True)["snp"].values
assert_array_equal(snp, ["rs4030300"])
shape = G.where(G.chrom == "1", drop=True).shape
assert_array_equal(shape, [3, 10])
shape = G.where(G.chrom == "2", drop=True).shape
assert_array_equal(shape, [3, 0])
g = G.where((G.fid == "Sample_2") & (G.iid == "Sample_2"), drop=True)
assert_array_equal(g["trait"].values, ["-9"])
arr = [
[0.0, 0.0, nan, nan, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 1.0, nan, nan, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0],
[1.0, 0.0, nan, 1.0, 0.0, 0.0, 2.0, 0.0, 0.0, 0.0],
]
assert_array_equal(G, arr)
def test_read_plink1_bin_wildcard_not_found():
datafiles = join(dirname(realpath(__file__)), "data_files")
bed_files = join(datafiles, "chrr*.bed")
with pytest.raises(ValueError):
read_plink1_bin(bed_files, verbose=False)
bed_files = join(datafiles, "chr*.bed")
with pytest.raises(ValueError):
read_plink1_bin(bed_files, "chr11.bim", verbose=False)
bed_files = join(datafiles, "chr*.bed")
bim_files = join(datafiles, "chrr*.bim")
with pytest.raises(ValueError):
read_plink1_bin(bed_files, bim_files, verbose=False)
bed_files = join(datafiles, "chr*.bed")
bim_files = join(datafiles, "chr*.bim")
fam_files = join(datafiles, "chr*.fam")
with pytest.warns(UserWarning):
read_plink1_bin(bed_files, bim_files, fam_files, verbose=True)
def test_read_plink1_bin_wildcard():
datafiles = join(dirname(realpath(__file__)), "data_files")
bed_files = join(datafiles, "chr*.bed")
G = read_plink1_bin(bed_files, verbose=False)
G.where(G.chrom == "11", drop=True).values
assert_equal(G.where(G.chrom == "11", drop=True).shape, (14, 779))
assert_equal(G.where(G.chrom == "12", drop=True).shape, (14, 473))
x = [[0.00, 0.00], [0.00, 1.00]]
assert_equal(G.where(G.chrom == "11", drop=True).values[:2, :2], x)
def test_example_file_prefix():
with pytest.warns(DeprecationWarning):
example_file_prefix()
|
193705
|
import six
import re
import logging
try:
import urllib.request as urllib2
except ImportError:
import urllib2
try:
import urllib.parse as urllib
except ImportError:
import urllib
try:
from http.cookiejar import CookieJar
except ImportError:
from cookielib import CookieJar
class Downloader(object):
"""
Provides an object to easily log into Google.
This code is modified from the original snippet by <NAME> at
https://gist.github.com/gregroberts/11001277, discovered through the useful
reddit post here http://www.reddit.com/r/Python/comments/233a0c/trying_to_download_google_trends_data/.
"""
def __init__(self, username, password):
"""
Sets various object parameters.
"""
self.login_params = {
"continue": 'http://www.google.com/trends',
"PersistentCookie": "yes",
"Email": username,
"Passwd": password,
}
self.headers = [("Referrer", "https://www.google.com/accounts/ServiceLoginBoxAuth"),
("Content-type", "application/x-www-form-urlencoded"),
('User-Agent', 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.21 (KHTML, like Gecko) Chrome/19.0.1042.0 Safari/535.21'),
("Accept", "text/plain")]
self.url_ServiceLoginBoxAuth = 'https://accounts.google.com/ServiceLoginBoxAuth'
self.url_Export = 'http://www.google.com/accounts/ServiceLoginBoxAuth'
self.url_CookieCheck = 'https://www.google.com/accounts/CheckCookie?chtml=LoginDoneHtml'
self.url_PrefCookie = 'http://www.google.com'
self.header_dictionary = {}
self._connect()
def _connect(self):
"""
Connects to Google Trends.
"""
self.cj = CookieJar()
cook = urllib2.HTTPCookieProcessor(self.cj)
self.opener = urllib2.build_opener(cook)
self.opener.addheaders = self.headers
        # Raw string avoids invalid-escape warnings in the regex
        galx = re.compile(r'<input type="hidden"[\s]+name="GALX"[\s]+value="(?P<galx>[a-zA-Z0-9_-]+)">')
        resp = str(self.opener.open(self.url_ServiceLoginBoxAuth).read())
        resp = re.sub(r'\s\s+', ' ', resp)
        m = galx.search(resp)
        if m is None:
            raise ValueError('Could not find the GALX token on the login page')
        self.login_params['GALX'] = m.group('galx')
params = urllib.urlencode(self.login_params).encode("utf-8")
self.opener.open(self.url_ServiceLoginBoxAuth, params)
self.opener.open(self.url_CookieCheck)
self.opener.open(self.url_PrefCookie)
def downloadReport(self, query):
"""
        Returns the original raw CSV file as one large string.
"""
data = self.opener.open(query).read()
        # In Py3 the response arrives as bytes and must be decoded to str;
        # in Py2 it is already a str, which has no decode method, so guard the call.
        if not isinstance(data, six.string_types):
            data = data.decode()
        if data == 'You must be signed in to export data from Google Trends':
logging.error('You must be signed in to export data from Google Trends')
raise Exception(data)
return data
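# Illustrative flow only (this legacy Google login endpoint no longer works,
# and the report URL below is hypothetical):
#   d = Downloader('user@example.com', 'password')
#   csv_text = d.downloadReport('http://www.google.com/trends/trendsReport?...&export=1')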
|
193706
|
import time
class FailUntilSucceeds:
ROBOT_LIBRARY_SCOPE = 'TESTCASE'
def __init__(self, times_to_fail=0):
self.times_to_fail = int(times_to_fail)
def set_times_to_fail(self, times_to_fail):
self.__init__(times_to_fail)
def fail_until_retried_often_enough(self, message="Hello", sleep=0):
self.times_to_fail -= 1
time.sleep(sleep)
if self.times_to_fail >= 0:
raise Exception('Still %d times to fail!' % self.times_to_fail)
return message
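# Standalone sketch of the retry behaviour (outside Robot Framework):
if __name__ == '__main__':
    lib = FailUntilSucceeds(times_to_fail=2)
    for attempt in range(3):
        try:
            print(lib.fail_until_retried_often_enough())  # succeeds on the 3rd call
            break
        except Exception as error:
            print(error)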
|
193722
|
import datetime
import os
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from tqdm import tqdm, trange
from common.evaluators.bow_evaluator import BagOfWordsEvaluator
from datasets.bow_processors.abstract_processor import StreamingSparseDataset
class BagOfWordsTrainer(object):
def __init__(self, model, vectorizer, optimizer, processor, args):
self.args = args
self.model = model
self.processor = processor
self.optimizer = optimizer
self.vectorizer = vectorizer
train_examples = self.processor.get_train_examples(args.data_dir, args.training_file)
self.train_features = vectorizer.fit_transform([x.text for x in train_examples])
self.train_labels = [[float(x) for x in document.label] for document in train_examples]
timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
self.snapshot_path = os.path.join(self.args.save_path, self.processor.NAME, '%s.pt' % timestamp)
self.log_header = 'Epoch Iteration Progress Dev/Acc. Dev/Pr. Dev/Re. Dev/F1 Dev/Loss'
self.log_template = ' '.join('{:>5.0f},{:>9.0f},{:>6.0f}/{:<5.0f} {:>6.4f},{:>8.4f},{:8.4f},{:8.4f},{:10.4f}'.split(','))
self.train_loss = 0
self.best_dev_f1 = 0
self.nb_train_steps = 0
self.unimproved_iters = 0
self.early_stop = False
def train_epoch(self, train_dataloader):
for step, batch in enumerate(tqdm(train_dataloader, desc="Training")):
self.model.train()
self.optimizer.zero_grad()
features, labels = tuple(t.to(self.args.device) for t in batch)
logits = self.model(features)
if self.args.n_gpu > 1:
logits = logits.view(labels.size())
if self.args.is_multilabel:
loss = F.binary_cross_entropy_with_logits(logits, labels.float())
else:
loss = F.cross_entropy(logits, torch.argmax(labels, dim=1))
if self.args.n_gpu > 1:
loss = loss.mean()
loss.backward()
self.optimizer.step()
self.train_loss += loss.item()
self.nb_train_steps += 1
def train(self):
train_data = StreamingSparseDataset(self.train_features, self.train_labels)
train_dataloader = DataLoader(train_data, shuffle=True, batch_size=self.args.batch_size)
print("Number of examples: ", len(self.train_labels))
print("Batch size:", self.args.batch_size)
for epoch in trange(int(self.args.epochs), desc="Epoch"):
self.train_epoch(train_dataloader)
dev_evaluator = BagOfWordsEvaluator(self.model, self.vectorizer, self.processor, self.args, split='dev')
dev_acc, dev_precision, dev_recall, dev_f1, dev_loss = dev_evaluator.get_scores()[0]
# Print validation results
tqdm.write(self.log_header)
tqdm.write(self.log_template.format(epoch + 1, self.nb_train_steps, epoch + 1, self.args.epochs,
dev_acc, dev_precision, dev_recall, dev_f1, dev_loss))
# Update validation results
if dev_f1 > self.best_dev_f1:
self.unimproved_iters = 0
self.best_dev_f1 = dev_f1
torch.save(self.model, self.snapshot_path)
else:
self.unimproved_iters += 1
if self.unimproved_iters >= self.args.patience:
self.early_stop = True
tqdm.write("Early Stopping. Epoch: {}, Best Dev F1: {}".format(epoch, self.best_dev_f1))
break
|
193739
|
from dlcliche.image import *
from lib_fat2019 import *
APPNAME = 'final'
conf.DURATION = 1
conf.AUG_LEVEL = 1
conf.CV = 0
conf.RELABEL = 'COOC_PROB'
conf.DATA = Path('/mnt/dataset/freesound-audio-tagging-2019')
conf.ROOT = Path('/mnt/dataset/fat2019_files')
conf.WORK = Path('/mnt/dataset/work/fat2019')
conf.MODEL = 'specgram'
conf.PRETRAINED_MODEL = get_pretrained_model(conf)
conf.RESAMPLE = False
conf.RESAMPLE_SIZES = None
conf.USE_NOISY = True
conf.NOISY_DATA = 'NOISY_SINGLE'
conf.NOISY_SAMPLE_SIZES = None
conf.NOISY_REMOVE_NG = ['Bus', 'Run',
'Male_speech_and_man_speaking',
'Cricket',
'Race_car_and_auto_racing',
'Printer',
'Church_bell',
'Crowd',
'Gong',
'Mechanical_fan',
'Traffic_noise_and_roadway_noise',
'Waves_and_surf']
conf.noisy_OKs = ['Accelerating_and_revving_and_vroom', 'Accordion', 'Acoustic_guitar', 'Applause', 'Bark',
'Bass_drum', 'Bass_guitar', 'Bathtub_(filling_or_washing)', 'Bicycle_bell', 'Burping_and_eructation',
'Buzz', 'Car_passing_by', 'Cheering', 'Chewing_and_mastication', 'Child_speech_and_kid_speaking',
'Chink_and_clink', 'Chirp_and_tweet', 'Clapping', 'Computer_keyboard', 'Crackle',
'Cupboard_open_or_close', 'Cutlery_and_silverware','Dishes_and_pots_and_pans', 'Drawer_open_or_close', 'Drip',
'Electric_guitar', 'Fart', 'Female_singing', 'Female_speech_and_woman_speaking', 'Fill_(with_liquid)',
'Finger_snapping', 'Frying_(food)', 'Gasp', 'Glockenspiel', 'Gurgling',
'Harmonica', 'Hi-hat', 'Hiss', 'Keys_jangling', 'Knock',
'Male_singing', 'Marimba_and_xylophone', 'Meow', 'Microwave_oven', 'Motorcycle',
'Purr', 'Raindrop', 'Scissors', 'Screaming', 'Shatter',
'Sigh', 'Sink_(filling_or_washing)', 'Skateboard', 'Slam', 'Sneeze',
'Squeak', 'Stream', 'Strum', 'Tap', 'Tick-tock',
'Toilet_flush', 'Trickle_and_dribble', 'Walk_and_footsteps', 'Water_tap_and_faucet', 'Whispering',
'Writing', 'Yell', 'Zipper_(clothing)']
conf.FORCE_SINGLE_LABEL = True
conf.SUPER_MIXUP = True
update_conf(conf)
set_fastai_random_seed()
best_weight = f'{conf.NAME}_{APPNAME}'
df, X_train, learn, data = fat2019_initialize_training(conf)
learn.fit_one_cycle(8, 1e-1)
learn.fit_one_cycle(10, 1e-2)
learn.unfreeze()
learn.fit_one_cycle(20, 1e-2)
learn.fit_one_cycle(20, 1e-2)
learn.fit_one_cycle(20, 1e-2)
learn.fit_one_cycle(30, slice(1e-3, 1e-2))
learn.fit_one_cycle(30, slice(1e-3, 1e-2))
learn.fit_one_cycle(30, slice(1e-3, 1e-2))
learn.fit_one_cycle(300, slice(1e-4, 1e-2), callbacks=get_saver_callbacks(conf, learn, best_weight))
print(f'Writing {best_weight}.csv ...')
df = evaluate_weight(conf, f'{best_weight}', for_what='train')
df.to_csv(f'work/models/{best_weight}.csv')
print('lwlrap', df_lwlrap_sum(df))
print(df[:10])
|
193789
|
import numpy as np
import unittest
import os
from openmdao.api import Problem, Group
from openmdao.utils.assert_utils import assert_near_equal, assert_check_partials
from pycycle.elements.shaft import Shaft
fpath = os.path.dirname(os.path.realpath(__file__))
ref_data = np.loadtxt(fpath + "/reg_data/shaft.csv",
delimiter=",", skiprows=1)
header = [
'trqLoad1',
'trqLoad2',
'trqLoad3',
'Nmech',
'HPX',
'fracLoss',
'trqIn',
'trqOut',
'trqNet',
'pwrIn',
'pwrOut',
'pwrNet']
h_map = dict(((v_name, i) for i, v_name in enumerate(header)))
class ShaftTestCase(unittest.TestCase):
def setUp(self):
self.prob = Problem()
self.prob.model = Group()
self.prob.model.add_subsystem("shaft", Shaft(num_ports=3), promotes=["*"])
self.prob.model.set_input_defaults('trq_0', 17., units='ft*lbf')
self.prob.model.set_input_defaults('trq_1', 17., units='ft*lbf')
self.prob.model.set_input_defaults('trq_2', 17., units='ft*lbf')
self.prob.model.set_input_defaults('Nmech', 17., units='rpm')
self.prob.model.set_input_defaults('HPX', 17., units='hp')
self.prob.model.set_input_defaults('fracLoss', 17.)
self.prob.setup(check=False, force_alloc_complex=True)
def test_case1(self):
# 6 cases to check against
for i, data in enumerate(ref_data):
# input torques
self.prob['trq_0'] = data[h_map['trqLoad1']]
self.prob['trq_1'] = data[h_map['trqLoad2']]
self.prob['trq_2'] = data[h_map['trqLoad3']]
# shaft inputs
self.prob['Nmech'] = data[h_map['Nmech']]
self.prob['HPX'] = data[h_map['HPX']]
self.prob['fracLoss'] = data[h_map['fracLoss']]
self.prob.run_model()
# check outputs
trqIn, trqOut, trqNet = data[
h_map['trqIn']], data[
h_map['trqOut']], data[
h_map['trqNet']]
pwrIn, pwrOut, pwrNet = data[
h_map['pwrIn']], data[
h_map['pwrOut']], data[
h_map['pwrNet']]
trqIn_comp = self.prob['trq_in']
trqOut_comp = self.prob['trq_out']
trqNet_comp = self.prob['trq_net']
pwrIn_comp = self.prob['pwr_in']
pwrOut_comp = self.prob['pwr_out']
pwrNet_comp = self.prob['pwr_net']
tol = 1.0e-4
assert_near_equal(trqIn_comp, trqIn, tol)
assert_near_equal(trqOut_comp, trqOut, tol)
assert_near_equal(trqNet_comp, trqNet, tol)
assert_near_equal(pwrIn_comp, pwrIn, tol)
assert_near_equal(pwrOut_comp, pwrOut, tol)
assert_near_equal(pwrNet_comp, pwrNet, tol)
partial_data = self.prob.check_partials(out_stream=None, method='cs')
assert_check_partials(partial_data, atol=1e-8, rtol=1e-8)
if __name__ == "__main__":
unittest.main()
|
193798
|
from torch.nn import CrossEntropyLoss
from torchkit.loss.dist_softmax import DistCrossEntropy
from torchkit.loss.focal import FocalLoss
from torchkit.loss.ddl import DDL
_loss_dict = {
'Softmax': CrossEntropyLoss(),
'DistCrossEntropy': DistCrossEntropy(),
'FocalLoss': FocalLoss(),
'DDL': DDL()
}
def get_loss(key):
""" Get different training loss functions by key,
support Softmax(distfc = False), DistCrossEntropy (distfc = True), FocalLoss, and DDL.
"""
if key in _loss_dict.keys():
return _loss_dict[key]
else:
raise KeyError("not support loss {}".format(key))
|
193841
|
from copy import deepcopy
import torch
from torch import nn, optim
import torch.nn.functional as F
from transformers import AdamW, BertConfig, BertForSequenceClassification
from model.scapt import SCAPT
class LabelSmoothLoss(nn.Module):
def __init__(self, smoothing=0.0):
super(LabelSmoothLoss, self).__init__()
self.smoothing = smoothing
def forward(self, input, target):
log_prob = F.log_softmax(input, dim=-1)
weight = input.new_ones(input.size()) * self.smoothing / (input.size(-1) - 1.)
weight.scatter_(-1, target.unsqueeze(-1), (1. - self.smoothing))
loss = (-weight * log_prob).sum(dim=-1).mean()
return loss
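# Sanity-check sketch: with smoothing=0.0 the loss reduces to standard
# cross entropy on the same logits/targets, e.g.
#   logits = torch.randn(4, 3); target = torch.randint(0, 3, (4,))
#   assert torch.isclose(LabelSmoothLoss(0.0)(logits, target),
#                        F.cross_entropy(logits, target))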
class SupConLoss(nn.Module):
"""https://github.com/HobbitLong/SupContrast/blob/master/losses.py
Supervised Contrastive Learning: https://arxiv.org/pdf/2004.11362.pdf.
It also supports the unsupervised contrastive loss in SimCLR"""
def __init__(self, temperature=0.07, contrast_mode='all',
base_temperature=0.07):
super(SupConLoss, self).__init__()
self.temperature = temperature
self.contrast_mode = contrast_mode
self.base_temperature = base_temperature
def forward(self, features, labels=None, mask=None):
"""Compute loss for model. If both `labels` and `mask` are None,
it degenerates to SimCLR unsupervised loss:
https://arxiv.org/pdf/2002.05709.pdf
Args:
features: hidden vector of shape [bsz, n_views, ...].
labels: ground truth of shape [bsz].
mask: contrastive mask of shape [bsz, bsz], mask_{i,j}=1 if sample j
has the same class as sample i. Can be asymmetric.
Returns:
A loss scalar.
"""
device = (torch.device('cuda')
if features.is_cuda
else torch.device('cpu'))
if len(features.shape) < 3:
raise ValueError('`features` needs to be [bsz, n_views, ...],'
'at least 3 dimensions are required')
if len(features.shape) > 3:
features = features.view(features.shape[0], features.shape[1], -1)
batch_size = features.shape[0]
if labels is not None and mask is not None:
raise ValueError('Cannot define both `labels` and `mask`')
elif labels is None and mask is None:
mask = torch.eye(batch_size, dtype=torch.float32).to(device)
elif labels is not None:
labels = labels.contiguous().view(-1, 1)
if labels.shape[0] != batch_size:
raise ValueError('Num of labels does not match num of features')
mask = torch.eq(labels, labels.T).float().to(device)
else:
mask = mask.float().to(device)
contrast_count = features.shape[1]
contrast_feature = torch.cat(torch.unbind(features, dim=1), dim=0)
if self.contrast_mode == 'one':
anchor_feature = features[:, 0]
anchor_count = 1
elif self.contrast_mode == 'all':
anchor_feature = contrast_feature
anchor_count = contrast_count
else:
raise ValueError('Unknown mode: {}'.format(self.contrast_mode))
# compute logits
anchor_dot_contrast = torch.div(
torch.matmul(anchor_feature, contrast_feature.T),
self.temperature)
# for numerical stability
logits_max, _ = torch.max(anchor_dot_contrast, dim=1, keepdim=True)
logits = anchor_dot_contrast - logits_max.detach()
# tile mask
mask = mask.repeat(anchor_count, contrast_count)
# mask-out self-contrast cases
logits_mask = torch.scatter(
torch.ones_like(mask),
1,
torch.arange(batch_size * anchor_count).view(-1, 1).to(device),
0
)
mask = mask * logits_mask
# compute log_prob
exp_logits = torch.exp(logits) * logits_mask
log_prob = logits - torch.log(exp_logits.sum(1, keepdim=True) + 1e-30)
# compute mean of log-likelihood over positive
mean_log_prob_pos = (mask * log_prob).sum(1) / mask.sum(1)
# loss
loss = - (self.temperature / self.base_temperature) * mean_log_prob_pos
loss = loss.view(anchor_count, batch_size).mean()
return loss
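# Shape sketch: `features` stacks n_views per sample and is typically
# L2-normalised in the SupCon recipe, e.g. two views of 8 samples:
#   feats = F.normalize(torch.randn(8, 2, 128), dim=-1)
#   labels = torch.randint(0, 3, (8,))
#   loss = SupConLoss(temperature=0.07)(feats, labels)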
def build_absa_model(config, embedding_layer=None):
bert_config = BertConfig.from_pretrained("bert-base-uncased")
bert_config.num_labels = 3
bert_config.hidden_dropout_prob = config['dropout']
bert_config.id2label = {
0: 'positive',
1: 'negative',
2: 'neutral'
}
bert_config.label2id = {
'positive': 0,
'negative': 1,
'neutral': 2,
}
bert_config.model = config['model']
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
if config['model'] == 'TransEnc':
bert_config.num_attention_heads = config['head_size']
bert_config.num_hidden_layers = config['layers']
bert_config.hidden_size = config['hidden_size']
bert_config.intermediate_size = config['feedforward']
        # Move to device in this branch too, matching the pretrained path below
        bert_for_facts_absa = SCAPT(bert_config, hidden_size=config['dense_hidden_size']).to(device)
else:
bert_for_facts_absa = SCAPT.from_pretrained('bert-base-uncased', config=bert_config).to(device)
return bert_for_facts_absa
def build_optimizer(config, model):
lr = config['learning_rate']
weight_decay = config['weight_decay']
opt = {
'sgd': optim.SGD,
'adam': optim.Adam,
'adamw': AdamW,
'adagrad': optim.Adagrad,
}
if 'momentum' in config:
optimizer = opt[config['optimizer']](
model.parameters(),
lr=lr,
weight_decay=weight_decay,
momentum=config['momentum']
)
else:
optimizer = opt[config['optimizer']](
model.parameters(),
lr=lr,
weight_decay=weight_decay,
)
return optimizer
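# Usage sketch with a toy module; the config keys mirror the ones read above:
if __name__ == '__main__':
    toy_config = {'optimizer': 'adamw', 'learning_rate': 2e-5, 'weight_decay': 0.01}
    optimizer = build_optimizer(toy_config, nn.Linear(4, 3))
    print(optimizer)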
|
193844
|
import keras.layers
import tensorflow as tf
import runai.mp
import runai.utils
from . import coordinator
class Parallelised(keras.layers.Layer):
""" A helper class for MP-supported Keras layers
"""
def add_weights(self, name, shape, **kwargs):
""" Declare parallelised weights
# Arguments:
name: The base name of the weights
shape: The (split) shape of each weight
# Returns:
            A list of weight tensors, one per GPU, named after the base name
"""
runai.utils.log.debug('Declaring %d weights (%s) of shape %s for \'%s\' layer "%s"' % (runai.mp.splits, name, shape, self.__class__.__name__, getattr(self, 'name', 'N/A')))
def add_weight(gpu):
with tf.device('/device:GPU:%d' % gpu):
return self.add_weight(
name='%s_%d' % (name, gpu),
shape=shape,
**kwargs)
return [add_weight(gpu) for gpu in range(runai.mp.splits)]
def calculate_cs(self, cin, cout):
""" Calculate the 'C' dimensions
Using the total (i.e. original) cin and cout dimension sizes,
we calculate the split sizes of those dimensions. In addition,
we provide an extra dimension size (c) which is the split size
of the output dimension.
# Arguments:
cin: the total cin size
cout: the total cout size
# Returns:
A tuple (cin, cout, c) of the per-GPU sizes
"""
# TODO(levosos): support uneven division
c = cout // runai.mp.splits # output dimension is always split
if runai.mp.method == runai.mp.Method.Cin:
cin = cin // runai.mp.splits
elif runai.mp.method == runai.mp.Method.Cout:
cout = c
else:
raise ValueError('Unrecognized MP method: %s' % runai.mp.method)
return (cin, cout, c)
def inputs(self, input, channel_axis):
""" Get the respective split (per-GPU) inputs for an input tensor
Returns the split inputs of a merged tensor in the correct way.
This varies between the MP methods.
"""
assert isinstance(input, tf.Tensor) # and not a list/tuple of tensors
if runai.mp.method == runai.mp.Method.Cin:
if coordinator.registered(input):
runai.utils.log.info('Using parallelised input for \'%s\' layer "%s"', self.__class__.__name__, getattr(self, 'name', 'N/A'))
return coordinator.resolve(input)
else:
runai.utils.log.warning('Splitting non-parallelised input (%s) for \'%s\' layer "%s"', input.name, self.__class__.__name__, getattr(self, 'name', 'N/A'))
return tf.split(input, runai.mp.splits, axis=channel_axis)
elif runai.mp.method == runai.mp.Method.Cout:
if coordinator.registered(input):
runai.utils.log.info('Gathering parallelised input for \'%s\' layer "%s"', self.__class__.__name__, getattr(self, 'name', 'N/A'))
def gather(gpu):
with tf.device('/device:GPU:%d' % gpu):
return keras.layers.Concatenate(axis=channel_axis)(coordinator.resolve(input))
return [gather(gpu) for gpu in range(runai.mp.splits)]
else:
return [input] * runai.mp.splits
else:
raise ValueError('Unrecognized MP method: %s' % runai.mp.method)
def merge(self, outputs, channel_axis):
""" Merge and register the split outputs of the layer
        Merges a parallelised layer's output tensors and registers
        them in 'coordinator' for later use.
        The merge is done by concatenating the tensors on the specified
channel axis.
# Returns:
A tensor representing the merged outputs of the layer
"""
merged = keras.layers.Concatenate(axis=channel_axis)(outputs)
coordinator.register(merged, outputs)
return merged
def parallelise(self, l, *iterables):
""" Evaluate a lambda on each GPU with device placement
"""
def wrap(gpu, *args):
with tf.device('/device:GPU:%d' % gpu):
return l(*args)
return [wrap(gpu, *args) for gpu, args in enumerate(zip(*iterables))]
def reduce_split(self, outputs, channel_axis):
""" Reduce-split some tensors on a specified axis
"""
def split(gpu):
with tf.device('/device:GPU:%d' % gpu):
return tf.split(outputs[gpu], runai.mp.splits, axis=channel_axis)
splits = [split(gpu) for gpu in range(runai.mp.splits)]
def output(gpu):
with tf.device('/device:GPU:%d' % gpu):
return keras.layers.Add()([split[gpu] for split in splits])
return [output(gpu) for gpu in range(runai.mp.splits)]
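# Worked example for calculate_cs (assuming runai.mp.splits == 4):
#   Cin  method: cin=256, cout=512 -> (cin=64,  cout=512, c=128)
#   Cout method: cin=256, cout=512 -> (cin=256, cout=128, c=128)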
|
193869
|
a=int(input("Enter a number"))
if a%2==0:
print(a,"is an Even Number")
else:
print(a,"is an Odd Number")
|
193874
|
import logging
from dbnd._core.constants import TaskType
from dbnd._core.errors.friendly_error.task_execution import (
failed_to_assign_result,
failed_to_process_non_empty_result,
)
from dbnd._core.task.pipeline_task import PipelineTask
from dbnd._core.task.python_task import PythonTask
from dbnd._core.task.task import Task
from dbnd._core.task_build.task_results import FuncResultParameter
logger = logging.getLogger(__name__)
class _DecoratedCallableTask(Task):
_dbnd_decorated_task = True
# default result for any decorated function
result = None
def _invoke_func(self, extra_kwargs=None, force_invoke=False):
        # this function is in charge of invoking the user-defined code (the decorated function);
        # usually it's called from task.run/task.band
extra_kwargs = extra_kwargs or {}
spec = self.task_decorator.get_callable_spec()
invoke_kwargs = {}
for name in spec.args:
# if there is no parameter - it was disabled at TaskDefinition building stage
if self._params.get_param(name) is None:
continue
invoke_kwargs[name] = getattr(self, name)
invoke_kwargs.update(extra_kwargs)
if not self._dbnd_call_state:
from dbnd._core.task_build.task_cls__call_state import TaskCallState
self._dbnd_call_state = TaskCallState()
self._dbnd_call_state.start()
if self.task_decorator.is_class:
# this is the case of
# @task
# class UserClass:
# pass
# now we are in the Task instance, it was created via UserClass() at @pipeline
obj_cls = self.task_decorator.class_or_func
invoke_kwargs["__call_original_cls"] = True
self.task_user_obj = obj_cls(**invoke_kwargs)
result = self.task_user_obj.run()
else:
# we are going to run user function
func_call = spec.item
result = func_call(**invoke_kwargs)
self._dbnd_call_state.finish(result)
result_param = self.__class__.result
if result_param is None and result:
raise failed_to_process_non_empty_result(self, result)
if isinstance(result_param, FuncResultParameter):
# if we have result that combined from different output params
# assign all returned values to relevant outputs
# so they will be automatically saved
if result is None:
raise failed_to_assign_result(self, result_param)
for r_name, value in result_param.named_results(result):
setattr(self, r_name, value)
else:
self.result = result
return result
def on_kill(self):
task_user_obj = getattr(self, "task_user_obj", None)
if task_user_obj is not None and hasattr(task_user_obj, "on_kill"):
task_user_obj.on_kill()
return
else:
super(_DecoratedCallableTask, self).on_kill()
class DecoratedPythonTask(PythonTask, _DecoratedCallableTask):
_conf__task_type_name = TaskType.python
def run(self):
self._invoke_func(force_invoke=True)
class DecoratedPipelineTask(PipelineTask, _DecoratedCallableTask):
_conf__task_type_name = TaskType.pipeline
def band(self):
return self._invoke_func()
|
193878
|
import pytest
from kgx.graph.nx_graph import NxGraph
from kgx.graph_operations import (
remove_singleton_nodes,
fold_predicate,
unfold_node_property,
remap_edge_property,
remap_node_property,
remap_node_identifier,
)
def get_graphs1():
"""
Returns instances of defined graphs.
"""
g1 = NxGraph()
g1.add_edge("B", "A", **{"predicate": "biolink:sub_class_of"})
g1.add_edge("C", "B", **{"predicate": "biolink:sub_class_of"})
g1.add_edge("D", "C", **{"predicate": "biolink:sub_class_of"})
g1.add_edge("D", "A", **{"predicate": "biolink:related_to"})
g1.add_edge("E", "D", **{"predicate": "biolink:sub_class_of"})
g1.add_edge("F", "D", **{"predicate": "biolink:sub_class_of"})
g2 = NxGraph()
g2.name = "Graph 1"
g2.add_node(
"HGNC:12345",
id="HGNC:12345",
name="Test Gene",
category=["biolink:NamedThing"],
alias="NCBIGene:54321",
same_as="UniProtKB:54321",
)
g2.add_node("B", id="B", name="Node B", category=["biolink:NamedThing"], alias="Z")
g2.add_node("C", id="C", name="Node C", category=["biolink:NamedThing"])
g2.add_edge(
"C",
"B",
edge_key="C-biolink:subclass_of-B",
subject="C",
object="B",
predicate="biolink:subclass_of",
relation="rdfs:subClassOf",
provided_by="Graph 1",
publications=[1],
pubs=["PMID:123456"],
)
g2.add_edge(
"B",
"A",
edge_key="B-biolink:subclass_of-A",
subject="B",
object="A",
predicate="biolink:subclass_of",
relation="rdfs:subClassOf",
provided_by="Graph 1",
)
g2.add_edge(
"C",
"c",
edge_key="C-biolink:exact_match-B",
subject="C",
object="c",
predicate="biolink:exact_match",
relation="skos:exactMatch",
provided_by="Graph 1",
)
return [g1, g2]
def get_graphs2():
"""
Returns instances of defined graphs.
"""
g1 = NxGraph()
g1.name = "Graph 1"
g1.add_node(
"HGNC:12345",
id="HGNC:12345",
name="Test Gene",
category=["biolink:NamedThing"],
alias="NCBIGene:54321",
same_as="UniProtKB:54321",
)
g1.add_node("B", id="B", name="Node B", category=["biolink:NamedThing"], alias="Z")
g1.add_node("C", id="C", name="Node C", category=["biolink:NamedThing"])
g1.add_edge(
"C",
"B",
edge_key="C-biolink:subclass_of-B",
subject="C",
object="B",
predicate="biolink:subclass_of",
relation="rdfs:subClassOf",
provided_by="Graph 1",
publications=[1],
pubs=["PMID:123456"],
)
g1.add_edge(
"B",
"A",
edge_key="B-biolink:subclass_of-A",
subject="B",
object="A",
predicate="biolink:subclass_of",
relation="rdfs:subClassOf",
provided_by="Graph 1",
)
g2 = NxGraph()
g2.name = "Graph 2"
g2.add_node(
"A",
id="A",
name="Node A",
description="Node A in Graph 2",
category=["biolink:Gene"],
xref=["NCBIGene:12345", "HGNC:001033"],
)
g2.add_node(
"B",
id="B",
name="Node B",
description="Node B in Graph 2",
category=["biolink:Gene"],
xref=["NCBIGene:56463", "HGNC:012901"],
)
g2.add_node(
"C",
id="C",
name="Node C",
description="Node C in Graph 2",
category=["biolink:Gene", "biolink:NamedThing"],
xref=["NCBIGene:08239", "HGNC:103431"],
)
g2.add_node(
"D",
id="D",
name="Node D",
description="Node D in Graph 2",
category=["biolink:Gene"],
xref=["HGNC:394233"],
)
g2.add_node(
"E",
id="E",
name="Node E",
description="Node E in Graph 2",
category=["biolink:NamedThing"],
xref=["NCBIGene:X", "HGNC:X"],
)
g2.add_node(
"F",
id="F",
name="Node F",
description="Node F in Graph 2",
category=["biolink:NamedThing"],
xref=["HGNC:Y"],
)
g2.add_edge(
"B",
"A",
edge_key="B-biolink:subclass_of-A",
subject="B",
object="A",
predicate="biolink:subclass_of",
relation="rdfs:subClassOf",
provided_by="Graph 2",
)
g2.add_edge(
"B",
"A",
edge_key="B-biolink:related_to-A",
subject="B",
object="A",
predicate="biolink:related_to",
relation="biolink:related_to",
)
g2.add_edge(
"D",
"A",
edge_key="D-biolink:related_to-A",
subject="D",
object="A",
predicate="biolink:related_to",
relation="biolink:related_to",
)
g2.add_edge(
"E",
"A",
edge_key="E-biolink:related_to-A",
subject="E",
object="A",
predicate="biolink:related_to",
relation="biolink:related_to",
)
g2.add_edge(
"E",
"F",
edge_key="F-biolink:related_to-A",
subject="E",
object="F",
predicate="biolink:related_to",
relation="biolink:related_to",
)
return [g1, g2]
def test_fold_predicate1():
"""
Test the fold_predicate operation.
"""
g = get_graphs1()[1]
fold_predicate(g, "biolink:exact_match")
assert not g.has_edge("C", "c")
n = g.nodes(data=True)["C"]
assert "biolink:exact_match" in n and n["biolink:exact_match"] == "c"
def test_fold_predicate2():
"""
Test the fold predicate operation, where the prefix of
the predicate is removed.
"""
g = get_graphs1()[1]
fold_predicate(g, "biolink:exact_match", remove_prefix=True)
assert not g.has_edge("C", "c")
n = g.nodes(data=True)["C"]
assert "exact_match" in n and n["exact_match"] == "c"
def test_unfold_node_property1():
"""Test the unfold node property operation."""
g = get_graphs1()[1]
unfold_node_property(g, "same_as")
assert "same_as" not in g.nodes()["HGNC:12345"]
assert g.has_edge("HGNC:12345", "UniProtKB:54321")
e = list(dict(g.get_edge("HGNC:12345", "UniProtKB:54321")).values())[0]
assert "subject" in e and e["subject"] == "HGNC:12345"
assert "predicate" in e and e["predicate"] == "same_as"
assert "object" in e and e["object"] == "UniProtKB:54321"
def test_unfold_node_property2():
"""
Test the unfold node property operation, where the prefix of
the predicate is added explicitly.
"""
g = get_graphs1()[1]
unfold_node_property(g, "same_as", prefix="biolink")
assert "same_as" not in g.nodes()["HGNC:12345"]
assert g.has_edge("HGNC:12345", "UniProtKB:54321")
e = list(dict(g.get_edge("HGNC:12345", "UniProtKB:54321")).values())[0]
assert "subject" in e and e["subject"] == "HGNC:12345"
assert "predicate" in e and e["predicate"] == "biolink:same_as"
assert "object" in e and e["object"] == "UniProtKB:54321"
def test_remove_singleton_nodes():
"""
Test the remove singleton nodes operation.
"""
g = NxGraph()
g.add_edge("A", "B")
g.add_edge("B", "C")
g.add_edge("C", "D")
g.add_edge("B", "D")
g.add_node("X")
g.add_node("Y")
assert g.number_of_nodes() == 6
assert g.number_of_edges() == 4
remove_singleton_nodes(g)
assert g.number_of_nodes() == 4
assert g.number_of_edges() == 4
def test_remap_node_identifier_alias():
"""
Test remap node identifier operation.
"""
graphs = get_graphs2()
g = remap_node_identifier(
graphs[0], "biolink:NamedThing", alternative_property="alias"
)
assert g.has_node("NCBIGene:54321")
assert g.has_node("Z")
assert g.has_node("C")
assert g.has_edge("C", "Z")
assert g.has_edge("Z", "A")
assert not g.has_edge("C", "B")
assert not g.has_edge("B", "A")
e1 = list(g.get_edge("C", "Z").values())[0]
assert e1["subject"] == "C" and e1["object"] == "Z"
assert e1["edge_key"] == "C-biolink:subclass_of-Z"
e2 = list(g.get_edge("Z", "A").values())[0]
assert e2["subject"] == "Z" and e2["object"] == "A"
assert e2["edge_key"] == "Z-biolink:subclass_of-A"
def test_remap_node_identifier_xref():
"""
Test remap node identifier operation.
"""
graphs = get_graphs2()
g = remap_node_identifier(
graphs[1], "biolink:Gene", alternative_property="xref", prefix="NCBIGene"
)
assert g.has_node("NCBIGene:12345")
assert g.has_node("NCBIGene:56463")
assert g.has_node("NCBIGene:08239")
assert g.has_node("D")
assert g.has_node("E")
assert g.has_node("F")
assert not g.has_node("A")
assert not g.has_node("B")
assert not g.has_node("C")
e1 = list(g.get_edge("NCBIGene:56463", "NCBIGene:12345").values())[0]
assert e1["subject"] == "NCBIGene:56463" and e1["object"] == "NCBIGene:12345"
e2 = list(g.get_edge("D", "NCBIGene:12345").values())[0]
assert e2["subject"] == "D" and e2["object"] == "NCBIGene:12345"
e3 = list(g.get_edge("E", "NCBIGene:12345").values())[0]
assert e3["subject"] == "E" and e3["object"] == "NCBIGene:12345"
e4 = list(g.get_edge("E", "F").values())[0]
assert e4["subject"] == "E" and e4["object"] == "F"
def test_remap_node_property():
"""
Test remap node property operation.
"""
graphs = get_graphs2()
remap_node_property(
graphs[0],
category="biolink:NamedThing",
old_property="alias",
new_property="same_as",
)
assert graphs[0].nodes()["HGNC:12345"]["alias"] == "UniProtKB:54321"
def test_remap_node_property_fail():
"""
Test remap node property operation, where the test fails due to an attempt
to change a core node property.
"""
graphs = get_graphs2()
with pytest.raises(AttributeError):
remap_node_property(
graphs[0],
category="biolink:NamedThing",
old_property="id",
new_property="alias",
)
@pytest.mark.skip()
def test_remap_edge_property():
"""
Test remap edge property operation.
"""
graphs = get_graphs2()
remap_edge_property(
graphs[0],
edge_predicate="biolink:subclass_of",
old_property="publications",
new_property="pubs",
)
e = list(graphs[0].get_edge("C", "B").values())[0]
assert e["publications"] == ["PMID:123456"]
def test_remap_edge_property_fail():
"""
Test remap edge property operation, where the test fails due to an attempt
to change a core edge property.
"""
graphs = get_graphs2()
with pytest.raises(AttributeError):
remap_edge_property(
graphs[0],
edge_predicate="biolink:subclass_of",
old_property="subject",
new_property="pubs",
)
with pytest.raises(AttributeError):
remap_edge_property(
graphs[0],
edge_predicate="biolink:subclass_of",
old_property="object",
new_property="pubs",
)
with pytest.raises(AttributeError):
remap_edge_property(
graphs[0],
edge_predicate="biolink:subclass_of",
old_property="predicate",
new_property="pubs",
)
|
193881
|
import numpy as np
import torch
import torch.nn as nn
from collections import OrderedDict
def summary(model, x, *args, **kwargs):
"""Summarize the given input model.
Summarized information are 1) output shape, 2) kernel shape,
3) number of the parameters and 4) operations (Mult-Adds)
Args:
model (Module): Model to summarize
x (Tensor): Input tensor of the model with [N, C, H, W] shape
dtype and device have to match to the model
args, kwargs: Other argument used in `model.forward` function
"""
def register_hook(module):
def hook(module, inputs, outputs):
cls_name = str(module.__class__).split(".")[-1].split("'")[0]
module_idx = len(summary)
key = "{}_{}".format(module_idx, cls_name)
info = OrderedDict()
info["id"] = id(module)
if isinstance(outputs, (list, tuple)):
info["out"] = list(outputs[0].size())
else:
info["out"] = list(outputs.size())
info["ksize"] = "-"
info["inner"] = OrderedDict()
info["params"], info["macs"] = 0, 0
for name, param in module.named_parameters():
info["params"] += param.nelement()
if "weight" == name:
ksize = list(param.size())
# to make [in_shape, out_shape, ksize, ksize]
if len(ksize) > 1:
ksize[0], ksize[1] = ksize[1], ksize[0]
info["ksize"] = ksize
# ignore N, C when calculate Mult-Adds in ConvNd
if "Conv" in cls_name:
info["macs"] += int(param.nelement() * np.prod(info["out"][2:]))
else:
info["macs"] += param.nelement()
# RNN modules have inner weights such as weight_ih_l0
elif "weight" in name:
info["inner"][name] = list(param.size())
info["macs"] += param.nelement()
            # if the current module has already been used, mark its params as "(recursive)"
# check if this module has params
if list(module.named_parameters()):
for v in summary.values():
if info["id"] == v["id"]:
info["params"] = "(recursive)"
if info["params"] == 0:
info["params"], info["macs"] = "-", "-"
summary[key] = info
# ignore Sequential and ModuleList
if not module._modules:
hooks.append(module.register_forward_hook(hook))
hooks = []
summary = OrderedDict()
model.apply(register_hook)
model.eval()
with torch.no_grad():
input_params = x if isinstance(x, list) or isinstance(x, tuple) else [x]
model(*input_params) if not (kwargs or args) else model(
*input_params, *args, **kwargs
)
for hook in hooks:
hook.remove()
print("-" * 100)
print(
"{:<15} {:>20} {:>20} {:>20} {:>20}".format(
"Layer", "Kernel Shape", "Output Shape", "# Params (K)", "# Mult-Adds (M)"
)
)
print("=" * 100)
total_params, total_macs = 0, 0
for layer, info in summary.items():
repr_ksize = str(info["ksize"])
repr_out = str(info["out"])
repr_params = info["params"]
repr_macs = info["macs"]
if isinstance(repr_params, (int, float)):
total_params += repr_params
repr_params = "{0:,.2f}".format(repr_params / 1000)
if isinstance(repr_macs, (int, float)):
total_macs += repr_macs
repr_macs = "{0:,.2f}".format(repr_macs / 1000000)
print(
"{:<15} {:>20} {:>20} {:>20} {:>20}".format(
layer, repr_ksize, repr_out, repr_params, repr_macs
)
)
# for RNN, describe inner weights (i.e. w_hh, w_ih)
for inner_name, inner_shape in info["inner"].items():
print(" {:<13} {:>20}".format(inner_name, str(inner_shape)))
print("=" * 100)
print("# Params: {0:,.2f}K".format(total_params / 1000))
print("# Mult-Adds: {0:,.2f}M".format(total_macs / 1000000))
print("-" * 100)
|
193983
|
from shapes.square import Square
# Discover plugins
import offshoot
offshoot.discover("Shape", globals())
|
194062
|
import pytest
import shutil
from common.version import DigitDotVersion
from indy_common.constants import APP_NAME
from indy_common.version import src_version_cls
from indy_node.utils.node_control_utils import (
NodeControlUtil, ShellError, DebianVersion
)
# TODO
# - conditionally skip all tests for non-debian systems
# - test _parse_version_deps_from_pkg_mgr_output deeply
generated_commands = []
@pytest.fixture
def catch_generated_commands(monkeypatch):
generated_commands[:] = []
def _f(command, *args, **kwargs):
generated_commands.append(command)
return ''
monkeypatch.setattr(NodeControlUtil, 'run_shell_script', _f)
monkeypatch.setattr(NodeControlUtil, 'run_shell_script_extended', _f)
monkeypatch.setattr(NodeControlUtil, 'run_shell_command', _f)
def test_generated_cmd_get_curr_info(catch_generated_commands):
pkg_name = 'some_package'
# TODO not an API for now
NodeControlUtil._get_curr_info(pkg_name)
assert len(generated_commands) == 1
assert generated_commands[0] == "dpkg -s {}".format(pkg_name)
def test_generated_cmd_get_latest_pkg_version(catch_generated_commands):
pkg_name = 'some_package'
NodeControlUtil.get_latest_pkg_version(pkg_name)
assert len(generated_commands) == 2
assert generated_commands[0] == "apt update"
assert generated_commands[1] == (
"apt-cache show {} | grep -E '^Version: '"
.format(pkg_name)
)
generated_commands[:] = []
upstream = src_version_cls(pkg_name)('1.2.3')
NodeControlUtil.get_latest_pkg_version(
pkg_name, upstream=upstream, update_cache=False)
assert len(generated_commands) == 1
assert generated_commands[0] == (
"apt-cache show {} | grep -E '^Version: '"
.format(pkg_name)
)
def test_generated_cmd_get_info_from_package_manager(catch_generated_commands):
packages = ['package1', 'package2']
# TODO not an API for now
NodeControlUtil._get_info_from_package_manager(*packages)
assert len(generated_commands) == 1
assert generated_commands[0] == "apt-cache show {}".format(" ".join(packages))
def test_generated_cmd_update_package_cache(catch_generated_commands):
NodeControlUtil.update_package_cache()
assert len(generated_commands) == 1
assert generated_commands[0] == "apt update"
def test_generated_cmd_get_sys_holds(monkeypatch, catch_generated_commands):
monkeypatch.setattr(shutil, 'which', lambda *_: 'path')
NodeControlUtil.get_sys_holds()
assert len(generated_commands) == 1
assert generated_commands[0] == "apt-mark showhold"
def test_generated_cmd_hold_packages(monkeypatch, catch_generated_commands):
packages = ['package1', 'package2']
monkeypatch.setattr(shutil, 'which', lambda *_: 'path')
NodeControlUtil.hold_packages(packages)
assert len(generated_commands) == 1
assert generated_commands[0] == "apt-mark hold {}".format(' '.join(packages))
def test_get_latest_pkg_version_invalid_args():
pkg_name = 'any_package'
with pytest.raises(TypeError) as excinfo:
NodeControlUtil.get_latest_pkg_version(
pkg_name,
upstream=DigitDotVersion('1.2.3'),
update_cache=False
)
assert (
"should be instance of {}"
.format(src_version_cls(pkg_name)) in str(excinfo.value)
)
@pytest.mark.parametrize(
'pkg_name,upstream,output,expected',
[
# some top level package
('any_package', None, '', None),
('any_package', None, 'Version: 1.2.3\nVersion: 1.2.4', '1.2.4'),
('any_package', None, 'Version: 1.2.4\nVersion: 1.2.3', '1.2.4'),
# self package (APP_NAME)
(APP_NAME, None, 'Version: 1.2.3\nVersion: 1.2.4', '1.2.4'),
(APP_NAME, None, 'Version: 1.2.4\nVersion: 1.2.3', '1.2.4'),
(APP_NAME, None, 'Version: 1.2.4~dev1\nVersion: 1.2.4~rc1', '1.2.4rc1'),
(APP_NAME, None, 'Version: 1.2.4~rc1\nVersion: 1.2.4~dev1', '1.2.4rc1'),
(APP_NAME, None, 'Version: 1.2.4~dev1\nVersion: 1.2.4', '1.2.4'),
(APP_NAME, None, 'Version: 1.2.4~rc2\nVersion: 1.2.4', '1.2.4'),
(APP_NAME, '1.2.5', 'Version: 1.2.4', None),
(APP_NAME, '1.2.5', 'Version: 1.2.5~rc1', None),
(APP_NAME, '1.2.5', 'Version: 1.2.5~dev1', None),
# invalid versions from output
('any_package', None, 'Version: 1.2.3.4.5', None),
(APP_NAME, None, 'Version: 1.2.3.4.5', None),
# combined cases
('any_package', None, 'Version: 1.2.3\nVersion: 1.2.4\nVersion: 1.2.3.4.5', '1.2.4'),
('any_package', '1.2.5', 'Version: 1.2.3\nVersion: 1.2.4\nVersion: 1.2.3.4.5', None),
(APP_NAME, None, 'Version: 1.2.3\nVersion: 1.2.4\nVersion: 1.2.5~rc1\nVersion: 1.2.5~dev1\nVersion: 1.2.3.4.5', '1.2.5rc1'),
(APP_NAME, '1.2.5', 'Version: 1.2.3\nVersion: 1.2.4\nVersion: 1.2.5~rc1\nVersion: 1.2.5~dev1\nVersion: 1.2.3.4.5', None),
],
ids=lambda s: s.replace('\n', '_').replace(' ', '_')
)
def test_get_latest_pkg_version(
monkeypatch, pkg_name, upstream, output, expected):
def _f(command, *args, **kwargs):
if not output:
raise ShellError(1, command)
else:
return output
if upstream is not None:
upstream = src_version_cls(pkg_name)(upstream)
expected = None if expected is None else src_version_cls(pkg_name)(expected)
monkeypatch.setattr(NodeControlUtil, 'run_shell_script_extended', _f)
res = NodeControlUtil.get_latest_pkg_version(
pkg_name, upstream, update_cache=False)
    # Parenthesise the conditional: without it the assert degenerates to a
    # truthiness check on res.upstream whenever `expected` is not None
    assert expected == (res if expected is None else res.upstream)
def test_get_latest_pkg_version_for_unknown_package():
assert NodeControlUtil.get_latest_pkg_version(
'some-unknown-package-name', update_cache=False) is None
def test_curr_pkg_info_no_data(monkeypatch):
monkeypatch.setattr(NodeControlUtil, 'run_shell_command', lambda *_: '')
assert (None, []) == NodeControlUtil.curr_pkg_info('any_package')
def test_curr_pkg_info(monkeypatch):
output = 'Version: 1.2.3\nDepends: aaa (= 1.2.4), bbb (>= 1.2.5), ccc, aaa'
expected_deps = ['aaa=1.2.4', 'bbb=1.2.5', 'ccc']
monkeypatch.setattr(NodeControlUtil, 'run_shell_command', lambda *_: output)
for pkg_name in [APP_NAME, 'any_package']:
upstream_cls = src_version_cls(pkg_name)
expected_version = DebianVersion(
'1.2.3', upstream_cls=upstream_cls)
pkg_info = NodeControlUtil.curr_pkg_info(pkg_name)
assert expected_version == pkg_info[0]
assert isinstance(expected_version, type(pkg_info[0]))
assert isinstance(expected_version.upstream, type(pkg_info[0].upstream))
assert expected_deps == pkg_info[1]
|
194128
|
from enum import Enum
from typing import Any, List
class Err(Enum):
# temporary errors. Don't blacklist
DOES_NOT_EXTEND = -1
BAD_HEADER_SIGNATURE = -2
MISSING_FROM_STORAGE = -3
INVALID_PROTOCOL_MESSAGE = -4
SELF_CONNECTION = -5
INVALID_HANDSHAKE = -6
INVALID_ACK = -7
INCOMPATIBLE_PROTOCOL_VERSION = -8
DUPLICATE_CONNECTION = -9
BLOCK_NOT_IN_BLOCKCHAIN = -10
NO_PROOF_OF_SPACE_FOUND = -11
PEERS_DONT_HAVE_BLOCK = -12
MAX_INBOUND_CONNECTIONS_REACHED = -13
UNKNOWN = 1
# permanent errors. Block is un-salvageable garbage.
INVALID_BLOCK_SOLUTION = 2
INVALID_COIN_SOLUTION = 3
DUPLICATE_OUTPUT = 4
DOUBLE_SPEND = 5
UNKNOWN_UNSPENT = 6
BAD_AGGREGATE_SIGNATURE = 7
WRONG_PUZZLE_HASH = 8
BAD_FARMER_COIN_AMOUNT = 9
INVALID_CONDITION = 10
ASSERT_MY_COIN_ID_FAILED = 11
ASSERT_ANNOUNCE_CONSUMED_FAILED = 12
ASSERT_HEIGHT_RELATIVE_FAILED = 13
ASSERT_HEIGHT_ABSOLUTE_FAILED = 14
ASSERT_SECONDS_ABSOLUTE_FAILED = 15
COIN_AMOUNT_EXCEEDS_MAXIMUM = 16
SEXP_ERROR = 17
INVALID_FEE_LOW_FEE = 18
MEMPOOL_CONFLICT = 19
MINTING_COIN = 20
EXTENDS_UNKNOWN_BLOCK = 21
COINBASE_NOT_YET_SPENDABLE = 22
BLOCK_COST_EXCEEDS_MAX = 23
BAD_ADDITION_ROOT = 24
BAD_REMOVAL_ROOT = 25
INVALID_POSPACE_HASH = 26
INVALID_COINBASE_SIGNATURE = 27
INVALID_PLOT_SIGNATURE = 28
TIMESTAMP_TOO_FAR_IN_PAST = 29
TIMESTAMP_TOO_FAR_IN_FUTURE = 30
INVALID_TRANSACTIONS_FILTER_HASH = 31
INVALID_POSPACE_CHALLENGE = 32
INVALID_POSPACE = 33
INVALID_HEIGHT = 34
INVALID_COINBASE_AMOUNT = 35
INVALID_MERKLE_ROOT = 36
INVALID_BLOCK_FEE_AMOUNT = 37
INVALID_WEIGHT = 38
INVALID_TOTAL_ITERS = 39
BLOCK_IS_NOT_FINISHED = 40
INVALID_NUM_ITERATIONS = 41
INVALID_POT = 42
INVALID_POT_CHALLENGE = 43
INVALID_TRANSACTIONS_GENERATOR_HASH = 44
INVALID_POOL_TARGET = 45
INVALID_COINBASE_PARENT = 46
INVALID_FEES_COIN_PARENT = 47
RESERVE_FEE_CONDITION_FAILED = 48
NOT_BLOCK_BUT_HAS_DATA = 49
IS_TRANSACTION_BLOCK_BUT_NO_DATA = 50
INVALID_PREV_BLOCK_HASH = 51
INVALID_TRANSACTIONS_INFO_HASH = 52
INVALID_FOLIAGE_BLOCK_HASH = 53
INVALID_REWARD_COINS = 54
INVALID_BLOCK_COST = 55
NO_END_OF_SLOT_INFO = 56
INVALID_PREV_CHALLENGE_SLOT_HASH = 57
INVALID_SUB_EPOCH_SUMMARY_HASH = 58
NO_SUB_EPOCH_SUMMARY_HASH = 59
SHOULD_NOT_MAKE_CHALLENGE_BLOCK = 60
SHOULD_MAKE_CHALLENGE_BLOCK = 61
INVALID_CHALLENGE_CHAIN_DATA = 62
INVALID_CC_EOS_VDF = 65
INVALID_RC_EOS_VDF = 66
INVALID_CHALLENGE_SLOT_HASH_RC = 67
INVALID_PRIOR_POINT_RC = 68
INVALID_DEFICIT = 69
INVALID_SUB_EPOCH_SUMMARY = 70
INVALID_PREV_SUB_EPOCH_SUMMARY_HASH = 71
INVALID_REWARD_CHAIN_HASH = 72
INVALID_SUB_EPOCH_OVERFLOW = 73
INVALID_NEW_DIFFICULTY = 74
INVALID_NEW_SUB_SLOT_ITERS = 75
INVALID_CC_SP_VDF = 76
INVALID_RC_SP_VDF = 77
INVALID_CC_SIGNATURE = 78
INVALID_RC_SIGNATURE = 79
CANNOT_MAKE_CC_BLOCK = 80
INVALID_RC_SP_PREV_IP = 81
INVALID_RC_IP_PREV_IP = 82
INVALID_IS_TRANSACTION_BLOCK = 83
INVALID_URSB_HASH = 84
OLD_POOL_TARGET = 85
INVALID_POOL_SIGNATURE = 86
INVALID_FOLIAGE_BLOCK_PRESENCE = 87
INVALID_CC_IP_VDF = 88
INVALID_RC_IP_VDF = 89
IP_SHOULD_BE_NONE = 90
INVALID_REWARD_BLOCK_HASH = 91
INVALID_MADE_NON_OVERFLOW_INFUSIONS = 92
NO_OVERFLOWS_IN_FIRST_SUB_SLOT_NEW_EPOCH = 93
MEMPOOL_NOT_INITIALIZED = 94
SHOULD_NOT_HAVE_ICC = 95
SHOULD_HAVE_ICC = 96
INVALID_ICC_VDF = 97
INVALID_ICC_HASH_CC = 98
INVALID_ICC_HASH_RC = 99
INVALID_ICC_EOS_VDF = 100
INVALID_SP_INDEX = 101
TOO_MANY_BLOCKS = 102
INVALID_CC_CHALLENGE = 103
INVALID_PREFARM = 104
ASSERT_SECONDS_RELATIVE_FAILED = 105
BAD_COINBASE_SIGNATURE = 106
# removed
# INITIAL_TRANSACTION_FREEZE = 107
NO_TRANSACTIONS_WHILE_SYNCING = 108
ALREADY_INCLUDING_TRANSACTION = 109
INCOMPATIBLE_NETWORK_ID = 110
PRE_SOFT_FORK_MAX_GENERATOR_SIZE = 111 # Size in bytes
INVALID_REQUIRED_ITERS = 112
TOO_MANY_GENERATOR_REFS = 113 # Number of uint32 entries in the List
ASSERT_MY_PARENT_ID_FAILED = 114
ASSERT_MY_PUZZLEHASH_FAILED = 115
ASSERT_MY_AMOUNT_FAILED = 116
GENERATOR_RUNTIME_ERROR = 117
INVALID_COST_RESULT = 118
INVALID_TRANSACTIONS_GENERATOR_REFS_ROOT = 119
FUTURE_GENERATOR_REFS = 120 # All refs must be to blocks in the past
GENERATOR_REF_HAS_NO_GENERATOR = 121
DOUBLE_SPEND_IN_FORK = 122
INVALID_FEE_TOO_CLOSE_TO_ZERO = 123
COIN_AMOUNT_NEGATIVE = 124
class ValidationError(Exception):
def __init__(self, code: Err, error_msg: str = ""):
self.code = code
self.error_msg = error_msg
class ConsensusError(Exception):
def __init__(self, code: Err, errors: List[Any] = []):
super(ConsensusError, self).__init__(f"Error code: {code.name}")
self.errors = errors
class ProtocolError(Exception):
def __init__(self, code: Err, errors: List[Any] = []):
super(ProtocolError, self).__init__(f"Error code: {code.name}")
self.code = code
self.errors = errors
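# --- usage sketch (illustrative only, not part of the original module) ---
# ValidationError pairs an Err member with an optional message; callers
# typically branch on the `code` attribute.
if __name__ == "__main__":
    try:
        raise ValidationError(Err.DOUBLE_SPEND, "coin already spent")
    except ValidationError as e:
        print(e.code.name, e.error_msg)  # -> DOUBLE_SPEND coin already spent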
|
194173
|
import tkinter as tk
import traceback
import threading
import matplotlib
matplotlib.use('Agg')
import matplotlib.figure as figure
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
class Widget(tk.Frame):
def __init__(self, parent):
super().__init__(parent)
self.var = tk.StringVar()
#tk.Entry(self, textvariable=self.var).grid()
self._thing = tk.Frame(self)
def task():
print("Running off thread on", threading.get_ident())
fig = figure.Figure(figsize=(5,5))
FigureCanvas(fig)
fig.add_subplot(1,1,1)
print("All done off thread...")
threading.Thread(target=task).start()
def __del__(self):
print("Being deleted...", self.__repr__(), id(self))
print("Thread is", threading.get_ident())
traceback.print_stack()
root = tk.Tk()
frame = Widget(root)
frame.grid(row=1, column=0)
def click():
global frame
frame.destroy()
frame = Widget(root)
frame.grid(row=1, column=0)
tk.Button(root, text="Click me", command=click).grid(row=0, column=0)
root.mainloop()
|
194189
|
import copy
import logging, os
import gettext as _gettext
from gettext import NullTranslations, GNUTranslations
import warnings
import tg
from tg.util import lazify
from tg._compat import PY3, string_type
log = logging.getLogger(__name__)
class LanguageError(Exception):
"""Exception raised when a problem occurs with changing languages"""
pass
def _parse_locale(identifier, sep='_'):
"""
    Taken from Babel.
Parse a locale identifier into a tuple of the form::
``(language, territory, script, variant)``
    >>> _parse_locale('zh_CN')
('zh', 'CN', None, None)
    >>> _parse_locale('zh_Hans_CN')
('zh', 'CN', 'Hans', None)
The default component separator is "_", but a different separator can be
specified using the `sep` parameter:
:see: `IETF RFC 4646 <http://www.ietf.org/rfc/rfc4646.txt>`_
"""
if '.' in identifier:
# this is probably the charset/encoding, which we don't care about
identifier = identifier.split('.', 1)[0]
if '@' in identifier:
# this is a locale modifier such as @euro, which we don't care about
# either
identifier = identifier.split('@', 1)[0]
parts = identifier.split(sep)
lang = parts.pop(0).lower()
if not lang.isalpha():
raise ValueError('expected only letters, got %r' % lang)
script = territory = variant = None
if parts:
if len(parts[0]) == 4 and parts[0].isalpha():
script = parts.pop(0).title()
if parts:
if len(parts[0]) == 2 and parts[0].isalpha():
territory = parts.pop(0).upper()
elif len(parts[0]) == 3 and parts[0].isdigit():
territory = parts.pop(0)
if parts:
if len(parts[0]) == 4 and parts[0][0].isdigit() or\
len(parts[0]) >= 5 and parts[0][0].isalpha():
variant = parts.pop()
if parts:
raise ValueError('%r is not a valid locale identifier' % identifier)
return lang, territory, script, variant
def gettext_noop(value):
"""Mark a string for translation without translating it. Returns
value.
"""
return value
def ugettext(value):
"""Mark a string for translation. Returns the localized unicode
string of value.
Mark a string to be localized as follows::
_('This should be in lots of languages')
"""
if PY3: #pragma: no cover
return tg.translator.gettext(value)
else: #pragma: no cover
return tg.translator.ugettext(value)
lazy_ugettext = lazify(ugettext)
def ungettext(singular, plural, n):
"""Mark a string for translation. Returns the localized unicode
string of the pluralized value.
This does a plural-forms lookup of a message id. ``singular`` is
used as the message id for purposes of lookup in the catalog, while
``n`` is used to determine which plural form to use. The returned
message is a Unicode string.
Mark a string to be localized as follows::
ungettext('There is %(num)d file here', 'There are %(num)d files here',
n) % {'num': n}
"""
if PY3: #pragma: no cover
return tg.translator.ngettext(singular, plural, n)
else: #pragma: no cover
return tg.translator.ungettext(singular, plural, n)
lazy_ungettext = lazify(ungettext)
class _TGI18NIdentityTranslator(NullTranslations):
"""Translator where each string always translates to itself."""
def add_fallback(self, fallback):
# disable fallbacks, otherwise strings would be chained to fallbacks
# instead of being translated by themselves.
return
_TRANSLATORS_CACHE = {}
def _translator_from_mofiles(domain, mofiles, class_=None, fallback=False):
"""
Adapted from python translation function in gettext module
to work with a provided list of mo files
"""
if class_ is None:
class_ = GNUTranslations
if not mofiles:
if fallback:
return NullTranslations()
raise LanguageError('No translation file found for domain %s' % domain)
result = None
for mofile in mofiles:
if hasattr(mofile, 'gettext'):
# An instance of a translator was provided.
# Use it instead of trying to load from disk.
t = mofile
else:
key = (class_, os.path.abspath(mofile))
t = _TRANSLATORS_CACHE.get(key)
if t is None:
with open(mofile, 'rb') as fp:
# Cache Translator to avoid reading it again
t = _TRANSLATORS_CACHE.setdefault(key, class_(fp))
t = copy.copy(t)
if result is None:
# Copy the translation object to be able to append fallbacks
# without affecting the cached object.
result = t
else:
result.add_fallback(t)
return result
def _get_translator(lang, tgl=None, tg_config=None, **kwargs):
"""Utility method to get a valid translator object from a language name"""
if tg_config:
conf = tg_config
else:
if tgl:
conf = tgl.config
else: # pragma: no cover
#backward compatibility with explicit calls without
#specifying local context or config.
conf = tg.config.current_conf()
if not lang:
return NullTranslations()
try:
localedir = conf['localedir']
except KeyError: # pragma: no cover
localedir = os.path.join(conf['paths']['root'], 'i18n')
app_domain = conf['package'].__name__
    native_lang = conf.get('i18n.native')  # Languages that require no translation
if not isinstance(lang, list):
lang = [lang]
mofiles = []
supported_languages = []
for l in lang:
if native_lang and l in native_lang:
mo = _TGI18NIdentityTranslator()
else:
mo = _gettext.find(app_domain, localedir=localedir, languages=[l], all=False)
if mo is not None:
mofiles.append(mo)
supported_languages.append(l)
try:
translator = _translator_from_mofiles(app_domain, mofiles, **kwargs)
except IOError as ioe:
raise LanguageError('IOError: %s' % ioe)
translator.tg_lang = lang
translator.tg_supported_lang = supported_languages
return translator
def get_lang(all=True):
"""
Return the current i18n languages used
returns ``None`` if no supported language is available (no translations
are in place) or a list of languages.
In case ``all`` parameter is ``False`` only the languages for which
the application is providing a translation are returned. Otherwise
all the languages preferred by the user are returned.
"""
if all is False:
return getattr(tg.translator, 'tg_supported_lang', [])
return getattr(tg.translator, 'tg_lang', [])
def add_fallback(lang, **kwargs):
"""Add a fallback language from which words not matched in other
languages will be translated to.
This fallback will be associated with the currently selected
language -- that is, resetting the language via set_lang() resets
the current fallbacks.
This function can be called multiple times to add multiple
fallbacks.
"""
tgl = tg.request_local.context._current_obj()
return tg.translator.add_fallback(_get_translator(lang, tgl=tgl, **kwargs))
sanitized_language_cache = {}
def sanitize_language_code(lang):
"""Sanitize the language code if the spelling is slightly wrong.
For instance, 'pt-br' and 'pt_br' should be interpreted as 'pt_BR'.
"""
try:
lang = sanitized_language_cache[lang]
    except KeyError:
orig_lang = lang
try:
lang = '_'.join(filter(None, _parse_locale(lang)[:2]))
except ValueError:
if '-' in lang:
try:
lang = '_'.join(filter(None, _parse_locale(lang, sep='-')[:2]))
except ValueError:
pass
sanitized_language_cache[orig_lang] = lang
return lang
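# Illustrative results of the sanitizer above (consistent with its docstring):
#   sanitize_language_code('pt-br')      -> 'pt_BR'
#   sanitize_language_code('pt_br')      -> 'pt_BR'
#   sanitize_language_code('zh_hans_cn') -> 'zh_CN'  (the script component is dropped here)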
def set_request_lang(languages, tgl=None):
"""Set the current request language(s) used for translations
without touching the session language.
languages should be a string or a list of strings.
First lang will be used as main lang, others as fallbacks.
"""
    # Logging to the screen was removed here because printing every
    # problem to the screen causes a serious slowdown.
if not tgl:
tgl = tg.request_local.context._current_obj()
# Should only raise exceptions in case of IO errors,
# so we let them propagate to the developer.
tgl.translator = _get_translator(languages, tgl=tgl, fallback=True)
    # If the application has a set of supported translations,
    # limit the FormEncode translations to those so that we don't
    # get the application in one language and the errors in another.
supported_languages = get_lang(all=False)
if supported_languages:
languages = supported_languages
try:
set_formencode_translation(languages, tgl=tgl)
except LanguageError:
pass
def set_temporary_lang(*args, **kwargs):
warnings.warn("i18n.set_temporary_lang has been deprecated in favor of"
"i18n.set_request_lang and will be removed.", DeprecationWarning)
return set_request_lang(*args, **kwargs)
def set_lang(languages, **kwargs):
"""Set the current language(s) used for translations
in current call and session.
languages should be a string or a list of strings.
First lang will be used as main lang, others as fallbacks.
"""
tgl = tg.request_local.context._current_obj()
set_request_lang(languages, tgl)
if tgl.session:
tgl.session[tgl.config.get('lang_session_key', 'tg_lang')] = languages
tgl.session.save()
FormEncodeMissing = '_MISSING_FORMENCODE'
formencode = None
_localdir = None
def set_formencode_translation(languages, tgl=None):
"""Set request specific translation of FormEncode."""
global formencode, _localdir
if formencode is FormEncodeMissing: # pragma: no cover
return
if formencode is None:
try:
import formencode
_localdir = formencode.api.get_localedir()
except ImportError: # pragma: no cover
formencode = FormEncodeMissing
return
if not tgl: # pragma: no cover
tgl = tg.request_local.context._current_obj()
try:
formencode_translation = _gettext.translation('FormEncode',
languages=languages,
localedir=_localdir)
except IOError as error:
raise LanguageError('IOError: %s' % error)
tgl.translator._formencode_translation = formencode_translation
# Idea stolen from Pylons
def _formencode_gettext(value):
trans = ugettext(value)
# Translation failed, try formencode
if trans == value:
try:
fetrans = tg.translator._formencode_translation
except (AttributeError, TypeError):
# the translator was not set in the TG context
# we are certainly in the test framework
            # let's make sure we return something the caller can work with
fetrans = None
if not fetrans:
fetrans = NullTranslations()
translator_gettext = getattr(fetrans, 'ugettext', fetrans.gettext)
trans = translator_gettext(value)
return trans
__all__ = [
"set_lang", "get_lang", "add_fallback",
"set_request_lang", "set_temporary_lang",
"ugettext", "lazy_ugettext", "ungettext", "lazy_ungettext"
]
|
194234
|
import collections
import sys
import character
iterations = int(sys.argv[1])
for msg, gen in [('With Thieves', character.Character),
('Without Thieves', character.LBBCharacter)]:
dist = collections.Counter(gen(testing=True).character_class['name']
for _ in range(iterations))
print msg
for c, count in dist.most_common():
print c, count * 100 / iterations
print '-'
|
194244
|
import matplotlib.pyplot as plt
import numpy as np
def inverse_normalization(X):
return X * 255.0
def plot_generated_batch(X_full, X_sketch, generator_model, epoch_num, dataset_name, batch_num):
# Generate images
X_gen = generator_model.predict(X_sketch)
X_sketch = inverse_normalization(X_sketch)
X_full = inverse_normalization(X_full)
X_gen = inverse_normalization(X_gen)
# limit to 8 images as output
Xs = X_sketch[:8]
Xg = X_gen[:8]
Xr = X_full[:8]
# put |decoded, generated, original| images next to each other
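    # (Assumption: arrays are channels-first, shape (n, 1, h, w); axis=3 places
    # the three panels side by side along width, the next concatenate stacks the
    # 8 examples along height, and X[0] selects the single channel for imsave.)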
X = np.concatenate((Xs, Xg, Xr), axis=3)
# make one giant block of images
X = np.concatenate(X, axis=1)
# save the giant n x 3 images
plt.imsave('./pix2pix_out/progress_imgs/{}_epoch_{}_batch_{}.png'.format(dataset_name, epoch_num, batch_num), X[0], cmap='Greys_r')
|
194257
|
from fasteve.io.base import Client, ConnectionException, DataLayer
from fasteve.core import config
from fastapi import HTTPException
from fasteve.resource import Resource
from pymongo.collection import Collection
from motor.motor_asyncio import AsyncIOMotorClient
from fasteve.core.utils import log, ObjectID
from typing import List, Tuple
class DataBase:
client: AsyncIOMotorClient = None
db = DataBase()
class MongoClient(Client):
@classmethod
async def get_database(cls) -> AsyncIOMotorClient:
return db.client
@classmethod
def connect(cls) -> None:
try:
client = AsyncIOMotorClient(
str(config.MONGODB_URI),
serverSelectionTimeoutMS=config.CONNECTION_TIMEOUT,
)
except Exception:
raise ConnectionException
db.client = client
@classmethod
def close(cls) -> None:
db.client.close()
class Mongo(DataLayer):
"""MongoDB data access layer for Fasteve."""
def init_app(self) -> None:
self.mongo_prefix = None
async def get_collection(self, resource: Resource) -> Collection:
# maybe it would be better to use inject db with
# Depends(get_database) at the path operation function?
# By better I mean more FastAPI-ish.
# However, then I have to pass the db all the way down to the
# datalayer...
try:
client = await MongoClient.get_database()
except Exception as e:
            raise HTTPException(500, e)
return client[config.MONGODB_DATABASE][resource.name]
async def aggregate(
self,
resource: Resource,
pipline: List[dict] = [],
skip: int = 0,
limit: int = 0,
) -> Tuple[List[dict], int]:
collection = await self.get_collection(resource)
paginated_results: List[dict] = []
paginated_results.append({"$skip": skip})
paginated_results.append({"$limit": limit})
facet_pipelines: dict = {}
facet_pipelines["paginated_results"] = paginated_results
facet_pipelines["total_count"] = list({"$count": "count"})
facet = {"$facet": facet_pipelines}
pipline.append(facet)
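        # With skip=0 and limit=25, the stage appended above would look like
        # (illustrative only):
        #   {"$facet": {"paginated_results": [{"$skip": 0}, {"$limit": 25}],
        #               "total_count": [{"$count": "count"}]}}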
count = 0
try:
async for res in collection.aggregate(pipline):
items = res["paginated_results"]
if res["total_count"]:
                    # guard: total_count is empty when nothing matched (avoids an IndexError)
count = res["total_count"][0]["count"]
except Exception as e:
raise e
return items, count
async def find(
self, resource: Resource, query: dict = {}, skip: int = 0, limit: int = 0
) -> Tuple[List[dict], int]:
"""Retrieves a set of documents matching a given request. Queries can
be expressed in two different formats: the mongo query syntax, and the
python syntax. The first kind of query would look like: ::
?where={"name": "<NAME>"}
while the second would look like: ::
?where=name=="<NAME>"
:param resource: Resource object.
"""
# process_query(q)
collection = await self.get_collection(resource)
items = []
# Perform find and iterate results
# https://motor.readthedocs.io/en/stable/tutorial-asyncio.html#async-for
try:
async for row in collection.find(query, skip=skip, limit=limit):
items.append(row)
except Exception as e:
raise e
count = await collection.count_documents(query)
return items, count
async def find_one(self, resource: Resource, query: dict) -> dict:
""""""
collection = await self.get_collection(resource)
try:
item = await collection.find_one(query)
except Exception as e:
raise e
return item
@log
async def insert(self, resource: Resource, payload: dict) -> dict:
""""""
collection = await self.get_collection(resource)
try:
await collection.insert_one(payload)
except Exception as e:
raise e
return payload
async def insert_many(self, resource: Resource, payload: List[dict]) -> List[dict]:
""""""
collection = await self.get_collection(resource)
try:
await collection.insert_many(payload)
except Exception as e:
raise e
return payload
async def remove(self, resource: Resource) -> None:
"""Removes an entire set of documents from a
database collection.
"""
collection = await self.get_collection(resource)
try:
await collection.delete_many({})
except Exception as e:
raise e
async def remove_item(self, resource: Resource, item_id: ObjectID) -> None:
"""Removes a single document from a database collection."""
collection = await self.get_collection(resource)
try:
await collection.delete_one({"_id": item_id})
except Exception as e:
raise e
async def replace_item(
self, resource: Resource, item_id: ObjectID, payload: dict
) -> None:
"""Replaces single document from a database collection"""
collection = await self.get_collection(resource)
try:
await collection.replace_one({"_id": item_id}, payload)
except Exception as e:
raise e
async def update_item(
self, resource: Resource, item_id: ObjectID, payload: dict
) -> None:
"""Updates single document from a database collection"""
collection = await self.get_collection(resource)
try:
await collection.update_one({"_id": item_id}, {"$set": payload})
except Exception as e:
raise e
|
194262
|
from django import forms
class SantaLogInputForm(forms.Form):
path = forms.CharField(initial="/var/db/santa/santa.log")
def get_filebeat_input(self):
return {"type": "log",
"paths": [self.cleaned_data["path"]]}
inputs = {"santa_log": {"name": "santa.log",
"form_class": SantaLogInputForm}}
|
194299
|
import os
import sys
from scripts.common.util import RunRemoteRepo, import_server_list
def main():
server_list_path = sys.argv[1]
server_list = import_server_list(server_list_path)
with RunRemoteRepo(server_list[0], 'dev') as rrr:
rrr.run("bash ~/PipeSwitch/scripts/figures/figure9/stop_next_resnet152/remote_run_data.sh")
if __name__ == '__main__':
main()
|
194317
|
from rubicon_ml.client import Rubicon as SyncRubicon
from rubicon_ml.client.asynchronous import Config, Project
from rubicon_ml.exceptions import RubiconException
class Rubicon(SyncRubicon):
"""The asynchronous `rubicon` client's entry point.
Creates a `Config` and injects it into the client level
objects at run-time.
Parameters
----------
persistence : str, optional
The persistence type. Can be one of ["filesystem"].
root_dir : str, optional
Absolute or relative filepath. Currently, only s3
paths are supported asynchronously.
auto_git_enabled : bool, optional
True to use the `git` command to automatically log relevant repository
information to projects and experiments logged with this client instance,
False otherwise. Defaults to False.
storage_options : dict, optional
Additional keyword arguments specific to the protocol being chosen. They
are passed directly to the underlying filesystem class.
"""
def __init__(
self, persistence="filesystem", root_dir=None, auto_git_enabled=False, **storage_options
):
self.config = Config(persistence, root_dir, auto_git_enabled, **storage_options)
async def create_project(self, name, description=None, github_url=None, training_metadata=None):
"""Overrides `rubicon.client.Rubicon.create_experiment`
to asynchronously create a project.
Parameters
----------
name : str
The project's name.
description : str, optional
The project's description.
github_url : str, optional
The URL of the GitHub repository associated with this
project. If omitted and automatic `git` logging is
enabled, it will be retrieved via `git remote`.
training_metadata : tuple or list of tuples, optional
Metadata associated with the training dataset(s)
used across each experiment in this project.
Returns
-------
rubicon.client.asynchronous.Project
The created project.
"""
project = self._create_project_domain(name, description, github_url, training_metadata)
await self.repository.create_project(project)
return Project(project, self.config)
async def get_project(self, name):
"""Overrides `rubicon.client.Rubicon.get_project`
to asynchronously get a project.
Parameters
----------
name : str
The name of the project to get.
Returns
-------
rubicon.client.asynchronous.Project
The project with name `name`.
"""
project = await self.repository.get_project(name)
return Project(project, self.config)
async def get_project_as_dask_df(self, name, group_by=None):
"""Overrides `rubicon.client.Rubicon.get_project_as_dask_df`
to asynchronously get a dask dataframe representation of a project.
Parameters
----------
name : str
The name of the project to get.
group_by : str or None, optional
How to group the project's experiments in the returned
DataFrame(s). Valid options include ["commit_hash"].
Returns
-------
dask.DataFrame or list of dask.DataFrame
If `group_by` is `None`, a dask dataframe holding the project's
data. Otherwise a list of dask dataframes holding the project's
data grouped by `group_by`.
"""
project = await self.get_project(name)
return await project.to_dask_df(group_by=group_by)
async def get_or_create_project(self, name, **kwargs):
"""Overrides `rubicon.client.Rubicon.get_or_create_project`
to asynchronously get or create a project.
Parameters
----------
name : str
The project's name.
kwargs : dict
Additional keyword arguments to be passed to
`Rubicon.create_project`.
Returns
-------
rubicon.client.asynchronous.Project
The corresponding project.
"""
try:
project = await self.get_project(name)
except RubiconException:
project = await self.create_project(name, **kwargs)
return project
async def projects(self):
"""Overrides `rubicon.client.Rubicon.projects` to
asynchronously get a list of available projects.
Returns
-------
list of rubicon.client.Project
The list of available projects.
"""
return [Project(project, self.config) for project in await self.repository.get_projects()]
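# Minimal usage sketch (illustrative only; the root_dir below is a made-up path):
#
#   import asyncio
#
#   async def main():
#       rubicon = Rubicon(persistence="filesystem", root_dir="s3://my-bucket/rubicon-root")
#       project = await rubicon.get_or_create_project("example project")
#       print(project.name)
#
#   asyncio.run(main())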
|
194363
|
import twint
c = twint.Config()
c.Search = "twitter" # comment if you want to search a user instead
# c.Username = "dril" # comment out if want to search a user instead
c.Limit = 20 # must be multiple of 20
# c.Min_retweets = 50
c.Links = "exclude"
c.Verified = True
c.Lang = "en"
c.Popular_tweets = True
c.Custom["tweet"] = ["tweet"]
c.Output = "tweets.csv"
c.Store_csv = True
twint.run.Search(c)
|
194388
|
import pytest
import taichi as ti
@ti.test(experimental_real_function=True)
def test_function_without_return():
x = ti.field(ti.i32, shape=())
@ti.func
def foo(val: ti.i32):
x[None] += val
@ti.kernel
def run():
foo(40)
foo(2)
x[None] = 0
run()
assert x[None] == 42
@ti.test(experimental_real_function=True)
def test_function_with_return():
x = ti.field(ti.i32, shape=())
@ti.func
def foo(val: ti.i32) -> ti.i32:
x[None] += val
return val
@ti.kernel
def run():
a = foo(40)
foo(2)
assert a == 40
x[None] = 0
run()
assert x[None] == 42
@ti.test(experimental_real_function=True, exclude=[ti.opengl, ti.cc])
def test_function_with_multiple_last_return():
x = ti.field(ti.i32, shape=())
@ti.func
def foo(val: ti.i32) -> ti.i32:
if x[None]:
x[None] += val * 2
return val * 2
else:
x[None] += val
return val
@ti.kernel
def run():
a = foo(40)
foo(1)
assert a == 40
x[None] = 0
run()
assert x[None] == 42
@ti.test(experimental_real_function=True)
def test_call_expressions():
x = ti.field(ti.i32, shape=())
@ti.func
def foo(val: ti.i32) -> ti.i32:
if x[None] > 10:
x[None] += 1
x[None] += val
return 0
@ti.kernel
def run():
assert foo(15) == 0
assert foo(10) == 0
x[None] = 0
run()
assert x[None] == 26
@ti.test(arch=ti.cpu, experimental_real_function=True)
def test_failing_multiple_return():
x = ti.field(ti.i32, shape=())
@ti.func
def foo(val: ti.i32) -> ti.i32:
if x[None] > 10:
if x[None] > 20:
return 1
x[None] += 1
x[None] += val
return 0
@ti.kernel
def run():
assert foo(15) == 0
assert foo(10) == 0
assert foo(100) == 1
with pytest.raises(AssertionError):
x[None] = 0
run()
assert x[None] == 26
@ti.test(experimental_real_function=True)
def test_python_function():
x = ti.field(ti.i32, shape=())
@ti.func
def inc(val: ti.i32):
x[None] += val
def identity(x):
return x
@ti.data_oriented
class A:
def __init__(self):
self.count = ti.field(ti.i32, shape=())
self.count[None] = 0
@ti.pyfunc
def dec(self, val: ti.i32) -> ti.i32:
self.count[None] += 1
x[None] -= val
return self.count[None]
@ti.kernel
def run(self) -> ti.i32:
a = self.dec(1)
identity(2)
inc(identity(3))
return a
a = A()
x[None] = 0
assert a.run() == 1
assert a.run() == 2
assert x[None] == 4
assert a.dec(4) == 3
assert x[None] == 0
@ti.test(arch=[ti.cpu, ti.cuda], debug=True)
def test_default_templates():
@ti.func
def func1(x: ti.template()):
x = 1
@ti.func
def func2(x: ti.template()):
x += 1
@ti.func
def func3(x):
x = 1
@ti.func
def func4(x):
x += 1
@ti.func
def func1_field(x: ti.template()):
x[None] = 1
@ti.func
def func2_field(x: ti.template()):
x[None] += 1
@ti.func
def func3_field(x):
x[None] = 1
@ti.func
def func4_field(x):
x[None] += 1
v = ti.field(dtype=ti.i32, shape=())
@ti.kernel
def run_func():
a = 0
func1(a)
assert a == 1
b = 0
func2(b)
assert b == 1
c = 0
func3(c)
assert c == 0
d = 0
func4(d)
assert d == 0
v[None] = 0
func1_field(v)
assert v[None] == 1
v[None] = 0
func2_field(v)
assert v[None] == 1
v[None] = 0
func3_field(v)
assert v[None] == 1
v[None] = 0
func4_field(v)
assert v[None] == 1
run_func()
@ti.test(experimental_real_function=True)
def test_experimental_templates():
x = ti.field(ti.i32, shape=())
y = ti.field(ti.i32, shape=())
answer = ti.field(ti.i32, shape=8)
@ti.kernel
def kernel_inc(x: ti.template()):
x[None] += 1
def run_kernel():
x[None] = 10
y[None] = 20
kernel_inc(x)
assert x[None] == 11
assert y[None] == 20
kernel_inc(y)
assert x[None] == 11
assert y[None] == 21
@ti.func
def inc(x: ti.template()):
x[None] += 1
@ti.kernel
def run_func():
x[None] = 10
y[None] = 20
inc(x)
answer[0] = x[None]
answer[1] = y[None]
inc(y)
answer[2] = x[None]
answer[3] = y[None]
def verify():
assert answer[0] == 11
assert answer[1] == 20
assert answer[2] == 11
assert answer[3] == 21
run_kernel()
run_func()
verify()
@ti.test(experimental_real_function=True)
def test_missing_arg_annotation():
with pytest.raises(ti.KernelDefError, match='must be type annotated'):
@ti.func
def add(a, b: ti.i32) -> ti.i32:
return a + b
@ti.test(experimental_real_function=True)
def test_missing_return_annotation():
with pytest.raises(ti.TaichiCompilationError,
match='return value must be annotated'):
@ti.func
def add(a: ti.i32, b: ti.i32):
return a + b
@ti.kernel
def run():
add(30, 2)
run()
|
194423
|
def insertion_sort(arr):
for i in range(1, len(arr)):
key = arr[i]
j = i - 1
        while j >= 0 and arr[j] > key:
            arr[j + 1] = arr[j]
            j = j - 1
        arr[j + 1] = key
return arr
def main():
arr = [6, 5, 8, 9, 3, 1, 4, 7, 2]
sorted_arr = insertion_sort(arr)
for i in sorted_arr:
print(i, end=" ")
if __name__ == "__main__":
main()
|
194459
|
from django import forms
from django.conf import settings
from django.contrib import admin
from django.core.exceptions import ValidationError
from django.db import transaction
from django.utils.translation import gettext_lazy as _
from geotrek.common.mixins import MergeActionMixin
from .models import (
Practice, Difficulty, Level, Dive
)
if 'modeltranslation' in settings.INSTALLED_APPS:
from modeltranslation.admin import TranslationAdmin
else:
TranslationAdmin = admin.ModelAdmin
class PracticeAdmin(MergeActionMixin, TranslationAdmin):
list_display = ('name', 'order', 'pictogram_img')
search_fields = ('name', )
class DifficultyForm(forms.ModelForm):
def clean_id(self):
self.oldid = self.instance.pk if self.instance else None
self.newid = self.cleaned_data.get('id')
        exists = Difficulty.objects.filter(pk=self.newid).exists()
if self.oldid != self.newid and exists:
raise ValidationError(_("Difficulty with id '%s' already exists") % self.newid)
return self.newid
class DifficultyAdmin(MergeActionMixin, TranslationAdmin):
form = DifficultyForm
list_display = ('name', 'id', 'pictogram_img')
search_fields = ('name',)
fields = ('id', 'name', 'pictogram')
merge_field = "name"
def save_model(self, request, obj, form, change):
"""
        Allow the Difficulty id to be changed from the admin form.
        All dives using this difficulty are migrated to the new id.
"""
self.oldid = None
# Nominal case. No migration.
if form.oldid is None or form.oldid == form.newid:
obj.save()
return
with transaction.atomic():
# Migrate Dives
migrated = []
for t in Dive.objects.filter(difficulty=form.oldid):
t.difficulty = None
t.save()
migrated.append(t)
# Apply id change
self.oldid = form.oldid
obj.save() # Will create new row in DB
old = Difficulty.objects.get(id=self.oldid)
old.delete()
# Restore
for t in migrated:
t.difficulty = obj
t.save()
def response_change(self, request, obj):
"""
        If the id was changed, always return to the list view (prevents a 404).
Otherwise, behave as usual.
"""
if self.oldid is not None:
msg = _('Difficulty id {old} was changed to {new} successfully.').format(
old=self.oldid, new=obj.pk)
self.message_user(request, msg)
return self.response_post_save_change(request, obj)
return super().response_change(request, obj)
class LevelForm(forms.ModelForm):
def clean_id(self):
self.oldid = self.instance.pk if self.instance else None
self.newid = self.cleaned_data.get('id')
        exists = Level.objects.filter(pk=self.newid).exists()
if self.oldid != self.newid and exists:
raise ValidationError(_("Level with id '%s' already exists") % self.newid)
return self.newid
class LevelAdmin(MergeActionMixin, TranslationAdmin):
form = LevelForm
list_display = ('name', 'id', 'pictogram_img')
search_fields = ('name',)
fields = ('id', 'name', 'description', 'pictogram')
merge_field = "name"
def save_model(self, request, obj, form, change):
"""
        Allow the Level id to be changed from the admin form.
        All dives using this level are migrated to the new id.
"""
self.oldid = None
# Nominal case. No migration.
if form.oldid is None or form.oldid == form.newid:
obj.save()
return
with transaction.atomic():
# Migrate Dives
migrated = []
for t in Dive.objects.filter(levels__in=[form.oldid]):
t.levels.remove(Level.objects.get(id=form.oldid))
t.save()
migrated.append(t)
# Apply id change
self.oldid = form.oldid
obj.save() # Will create new row in DB
old = Level.objects.get(id=self.oldid)
old.delete()
# Restore
for t in migrated:
t.levels.add(obj)
t.save()
def response_change(self, request, obj):
"""
        If the id was changed, always return to the list view (prevents a 404).
Otherwise, behave as usual.
"""
if self.oldid is not None:
msg = _('Level id {old} was changed to {new} successfully.').format(
old=self.oldid, new=obj.pk)
self.message_user(request, msg)
return self.response_post_save_change(request, obj)
return super().response_change(request, obj)
# Register previously defined modeladmins
admin_to_register = [
(Practice, PracticeAdmin),
(Difficulty, DifficultyAdmin),
(Level, LevelAdmin),
]
for model, model_admin in admin_to_register:
admin.site.register(model, model_admin)
|
194465
|
from netgrasp import netgrasp
from netgrasp.database import database
from netgrasp.utils import pretty
def start(ng):
import os
pid = ng.is_running()
if pid:
ng.debugger.critical("Netgrasp is already running with pid %d.", (pid,))
ng.debugger.warning("Starting netgrasp...")
if os.getuid() != 0:
ng.debugger.critical("netgrasp must be run as root (currently running as %s), exiting", (ng.debugger.whoami()))
netgrasp.netgrasp_instance = ng
# @TODO: use pcap to set and test interface
if not ng.listen["interface"]:
ng.debugger.critical("Required [Listen] 'interface' not defined in configuration file, exiting.")
if not ng.database["filename"]:
ng.debugger.critical("Required [Database] 'filename' not defined in configuration file, exiting.")
# Start netgrasp.
if ng.daemonize:
# Test that we can write to the log.
try:
with open(ng.logging["filename"], "w"):
ng.debugger.info("successfully opened logfile for writing")
except Exception as e:
ng.debugger.dump_exception("start() exception")
ng.debugger.critical("failed to open logfile '%s' for writing: %s", (ng.logging["filename"], e))
import daemonize
# Test that we can write to the pidfile.
try:
with open(ng.logging["pidfile"], "w"):
ng.debugger.info("successfully opened pidfile for writing")
except IOError as e:
ng.debugger.critical("failed to open pidfile '%s' for writing: %s", (ng.logging["pidfile"], e))
ng.debugger.info("daemonizing app=netgrasp, pidfile=%s, user=%s, group=%s, verbose=True", (ng.logging["pidfile"], ng.security["user"], ng.security["group"]))
ng.debugger.warning("daemonizing, output redirected to log file: %s", (ng.logging["filename"],))
try:
ng.debugger.logToFile()
daemon = daemonize.Daemonize(app="netgrasp", pid=ng.logging["pidfile"], privileged_action=netgrasp.get_pcap, user=ng.security["user"], group=ng.security["group"], action=netgrasp.main, keep_fds=[ng.debugger.handler.stream.fileno()], logger=ng.logger, verbose=True)
daemon.start()
except Exception as e:
ng.debugger.critical("Failed to daemonize: %s, exiting", (e,))
else:
netgrasp.main()
def stop(ng, must_be_running=True):
import os
import signal
import errno
pid = ng.is_running()
if not pid:
if must_be_running:
ng.debugger.critical("Netgrasp is not running.")
else:
ng.debugger.info("Netgrasp is not running.")
else:
ng.debugger.warning("Stopping netgrasp...")
try:
os.kill(pid, signal.SIGTERM)
except OSError as e:
if e.errno == errno.EPERM:
ng.debugger.critical("Failed (perhaps try with sudo): %s", (e,))
else:
ng.debugger.critical("Failed: %s", (e,))
def restart(ng):
import time
stop(ng, False)
running = ng.is_running()
loops = 0
while running:
loops += 1
if loops > 15:
ng.debugger.critical("Failed to stop netgrasp.")
time.sleep(0.2)
running = ng.is_running()
start(ng)
def status(ng):
pid = ng.is_running()
if pid:
ng.debugger.warning("Netgrasp is running with pid %d", (pid,))
else:
ng.debugger.warning("Netgrasp is not running.")
def update(ng):
from netgrasp.update import update
netgrasp.netgrasp_instance = ng
try:
ng.db = database.Database()
except Exception as e:
ng.debugger.error("error: %s", (e,))
ng.debugger.critical("Failed to open or create database file %s (as user %s), exiting.", (ng.database["filename"], ng.debugger.whoami()))
ng.db.cursor = ng.db.connection.cursor()
query = database.SelectQueryBuilder("state")
query.db_select("{%BASE}.value")
query.db_where("{%BASE}.key = 'schema_version'")
ng.db.cursor.execute(query.db_query(), query.db_args())
schema_version = ng.db.cursor.fetchone()
if schema_version:
version = schema_version[0]
else:
version = 0
updates = update.needed(version)
if updates:
ng.debugger.warning("schema updates required: %s", (updates,))
else:
ng.debugger.critical("no schema updates are required.")
pid = ng.is_running()
if pid:
ng.debugger.critical("Netgrasp must be stopped before running updates.")
netgrasp.netgrasp_instance = ng
netgrasp.email.email_instance = None
netgrasp.notify.notify_instance = None
update.run_updates(version)
def list(ng):
import datetime
netgrasp.netgrasp_instance = ng
pid = ng.is_running()
if not pid:
ng.debugger.critical("Netgrasp is not running.")
try:
ng.db = database.Database()
except Exception as e:
ng.debugger.error("error: %s", (e,))
ng.debugger.critical("Failed to open or create database file %s (as user %s), exiting.", (ng.database["filename"], ng.debugger.whoami()))
ng.debugger.info("Opened %s as user %s", (ng.database["filename"], ng.debugger.whoami()))
ng.db.cursor = ng.db.connection.cursor()
if ng.args.type == "device":
# List devices.
query = database.SelectQueryBuilder("activity")
query.db_select("{%BASE}.did")
query.db_select("mac.address")
query.db_select("ip.address")
query.db_select("{%BASE}.updated")
if ng.args.all:
description = "All devices"
else:
description = "Active devices"
query.db_where("{%BASE}.active = ?", 1)
query.db_where("{%BASE}.updated IS NOT NULL")
if not ng.args.all or ng.args.all == 1:
query.db_group("{%BASE}.did")
query.db_order("{%BASE}.updated DESC")
rowfmt = "{:>16}{:>34}{:>22}"
header = ["IP", "Name", "Last seen"]
elif ng.args.type == 'event':
# List events.
query = database.SelectQueryBuilder("event")
query.db_select("{%BASE}.did")
query.db_select("mac.address")
query.db_select("ip.address")
query.db_select("{%BASE}.timestamp")
query.db_select("{%BASE}.type")
if ng.args.all:
description = "All alerts"
# @TODO: this is a bogus WHERE, get rid of altogether
query.db_where("{%BASE}.timestamp >= ?", 1)
else:
description = "Recent alerts"
recent = datetime.datetime.now() - datetime.timedelta(seconds=ng.listen["active_timeout"])
query.db_where("{%BASE}.timestamp >= ?", recent)
if not ng.args.all or ng.args.all == 1:
query.db_group("{%BASE}.did")
query.db_group("{%BASE}.type")
query.db_order("{%BASE}.timestamp DESC")
rowfmt = "{:>16}{:>24}{:>21}{:>18}"
header = ["IP", "Name", "Event", "Last seen"]
query.db_leftjoin("device", "{%BASE}.did = device.did")
query.db_leftjoin("ip", "{%BASE}.iid = ip.iid")
query.db_leftjoin("mac", "device.mid = mac.mid")
if ng.args.mac:
query.db_where("mac.address LIKE ?", "%"+ng.args.mac+"%")
if ng.args.ip:
query.db_where("ip.address LIKE ?", "%"+ng.args.ip+"%")
if ng.args.vendor:
query.db_leftjoin("vendor", "device.vid = vendor.vid")
query.db_where("vendor.name LIKE ?", "%"+ng.args.vendor+"%")
if ng.args.hostname or ng.args.custom:
query.db_leftjoin("host", "device.hid = host.hid")
if ng.args.hostname:
query.db_where("host.name LIKE ?", "%"+ng.args.hostname+"%")
else:
query.db_where("host.custom_name LIKE ?", "%"+ng.args.custom+"%")
ng.db.cursor.execute(query.db_query(), query.db_args())
rows = ng.db.cursor.fetchall()
if rows:
print """ %s:""" % description
print rowfmt.format(*header)
for row in rows:
if ng.args.type == 'device':
print rowfmt.format(pretty.truncate_string(row[2], 15), pretty.truncate_string(pretty.name_did(row[0]), 32), pretty.truncate_string(pretty.time_ago(row[3], False), 20))
else:
print rowfmt.format(pretty.truncate_string(row[2], 15), pretty.truncate_string(pretty.name_did(row[0]), 22), pretty.truncate_string(row[4], 19), pretty.truncate_string(pretty.time_ago(row[3], False), 16))
def identify(ng):
from netgrasp.utils import exclusive_lock
netgrasp.netgrasp_instance = ng
pid = ng.is_running()
if not pid:
ng.debugger.critical("Netgrasp is not running.")
try:
ng.db = database.Database()
except Exception as e:
ng.debugger.error("%s", (e,))
ng.debugger.critical("Failed to open or create database file %s (as user %s), exiting.", (ng.database["filename"], ng.debugger.whoami()))
ng.debugger.info("Opened %s as user %s", (ng.database["filename"], ng.debugger.whoami()))
ng.db.cursor = ng.db.connection.cursor()
if not ng.args.set:
description = "Use --set ID 'CUSTOM NAME' to set a custom name on a device"
header = ["ID", "IP", "Name", "Last seen"]
rowfmt = "{:>7}{:>16}{:>34}{:>22}"
query = database.SelectQueryBuilder("host")
query.db_select("{%BASE}.hid")
query.db_leftjoin("ip", "{%BASE}.iid = ip.iid")
query.db_leftjoin("mac", "ip.mid = mac.mid")
query.db_leftjoin("activity", "{%BASE}.iid = activity.iid")
query.db_select("activity.did")
query.db_select("mac.address")
query.db_select("ip.address")
query.db_select("activity.updated")
query.db_group("activity.did")
query.db_order("activity.updated DESC")
if not ng.args.all and not ng.args.custom:
query.db_where("{%BASE}.custom_name IS NULL")
if ng.args.mac:
query.db_where("mac.address LIKE ?", "%"+ng.args.mac+"%")
if ng.args.ip:
query.db_where("ip.address LIKE ?", "%"+ng.args.ip+"%")
if ng.args.vendor:
query.db_leftjoin("vendor", "mac.vid = vendor.vid")
query.db_where("vendor.name LIKE ?", "%"+ng.args.vendor+"%")
if ng.args.hostname:
query.db_where("host.name LIKE ?", "%"+ng.args.hostname+"%")
if ng.args.custom:
query.db_where("host.custom_name LIKE ?", "%"+ng.args.custom+"%")
ng.db.cursor.execute(query.db_query(), query.db_args())
rows = ng.db.cursor.fetchall()
if rows:
print """ %s:""" % description
print rowfmt.format(*header)
for row in rows:
# @TODO handle IP changes
print rowfmt.format(row[0], pretty.truncate_string(row[3], 15), pretty.truncate_string(pretty.name_did(row[1]), 32), pretty.truncate_string(pretty.time_ago(row[4], False), 20))
else:
if ng.args.verbose > 1:
print "id:", ng.args.set[0], "| custom name:", ng.args.set[1]
ng.db.cursor.execute("SELECT vendor.vid FROM vendor LEFT JOIN mac ON vendor.vid = mac.vid LEFT JOIN host ON mac.mid = host.hid WHERE host.hid = ?", (ng.args.set[0],))
with exclusive_lock.ExclusiveFileLock(ng, 5, "failed to set custom name, please try again"):
db_args = [ng.args.set[1]]
db_args.append(ng.args.set[0])
ng.db.cursor.execute("UPDATE host SET custom_name = ? WHERE hid = ?", db_args)
ng.db.connection.commit()
def template(ng):
import pkg_resources
if ng.args.alert or ng.args.type == 'alert':
template_file = "mail_templates/template." + ng.args.alert + ".json"
if ng.args.alert:
if not pkg_resources.resource_exists("netgrasp", template_file):
tmpl = pkg_resources.resource_string("netgrasp", "mail_templates/template.default.json")
else:
tmpl = pkg_resources.resource_string("netgrasp", "mail_templates/template." + ng.args.alert + ".json")
print tmpl
elif ng.args.type == "config":
tmpl = pkg_resources.resource_string("netgrasp", "template.netgrasp.cfg")
print tmpl
|
194469
|
import torch
import torch.nn as nn
import torch.nn.functional as F
# import numpy as np
'''
reference from: https://github.com/auspicious3000/autovc/blob/master/model_vc.py
'''
class LinearNorm(torch.nn.Module):
def __init__(self, in_dim, out_dim, bias=True, w_init_gain='linear'):
super(LinearNorm, self).__init__()
self.linear_layer = torch.nn.Linear(in_dim, out_dim, bias=bias)
torch.nn.init.xavier_uniform_(
self.linear_layer.weight,
gain=torch.nn.init.calculate_gain(w_init_gain))
def forward(self, x):
return self.linear_layer(x)
class ConvNorm(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
padding=None, dilation=1, bias=True, w_init_gain='linear'):
super(ConvNorm, self).__init__()
if padding is None:
assert(kernel_size % 2 == 1)
padding = int(dilation * (kernel_size - 1) / 2)
self.conv = torch.nn.Conv1d(in_channels, out_channels,
kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation,
bias=bias)
torch.nn.init.xavier_uniform_(
self.conv.weight, gain=torch.nn.init.calculate_gain(w_init_gain))
def forward(self, signal):
conv_signal = self.conv(signal)
return conv_signal
class Postnet(nn.Module):
"""Postnet
- Five 1-d convolution with 512 channels and kernel size 5
"""
def __init__(self):
super(Postnet, self).__init__()
self.convolutions = nn.ModuleList()
self.convolutions.append(
nn.Sequential(
ConvNorm(80, 512,
kernel_size=5, stride=1,
padding=2,
dilation=1, w_init_gain='tanh'),
nn.BatchNorm1d(512))
)
for i in range(1, 5 - 1):
self.convolutions.append(
nn.Sequential(
ConvNorm(512,
512,
kernel_size=5, stride=1,
padding=2,
dilation=1, w_init_gain='tanh'),
nn.BatchNorm1d(512))
)
self.convolutions.append(
nn.Sequential(
ConvNorm(512, 80,
kernel_size=5, stride=1,
padding=2,
dilation=1, w_init_gain='linear'),
nn.BatchNorm1d(80))
)
def forward(self, x):
for i in range(len(self.convolutions) - 1):
x = torch.tanh(self.convolutions[i](x))
x = self.convolutions[-1](x)
return x
class Decoder(nn.Module):
"""Decoder module:
"""
def __init__(self, dim_neck=64, dim_lf0=1, dim_emb=256, dim_pre=512):
super(Decoder, self).__init__()
self.lstm1 = nn.LSTM(dim_neck+dim_emb+dim_lf0, dim_pre, 1, batch_first=True)
convolutions = []
for i in range(3):
conv_layer = nn.Sequential(
ConvNorm(dim_pre,
dim_pre,
kernel_size=5, stride=1,
padding=2,
dilation=1, w_init_gain='relu'),
nn.BatchNorm1d(dim_pre))
convolutions.append(conv_layer)
self.convolutions = nn.ModuleList(convolutions)
self.lstm2 = nn.LSTM(dim_pre, 1024, 2, batch_first=True)
self.linear_projection = LinearNorm(1024, 80)
def forward(self, x):
#self.lstm1.flatten_parameters()
x, _ = self.lstm1(x)
x = x.transpose(1, 2)
for conv in self.convolutions:
x = F.relu(conv(x))
x = x.transpose(1, 2)
outputs, _ = self.lstm2(x)
decoder_output = self.linear_projection(outputs)
return decoder_output
class Decoder_ac(nn.Module):
"""Decoder_ac network."""
def __init__(self, dim_neck=64, dim_lf0=1, dim_emb=256, dim_pre=512, use_l1_loss=False):
super(Decoder_ac, self).__init__()
self.use_l1_loss = use_l1_loss
# self.encoder = Encoder(dim_neck, dim_emb, freq)
self.decoder = Decoder(dim_neck, dim_lf0, dim_emb, dim_pre)
self.postnet = Postnet()
def forward(self, z, lf0_embs, spk_embs, mel_target=None):
z = F.interpolate(z.transpose(1, 2), scale_factor=2) # (bs, 140/2, 64) -> (bs, 64, 140/2) -> (bs, 64, 140)
z = z.transpose(1, 2) # (bs, 64, 140) -> (bs, 140, 64)
spk_embs_exp = spk_embs.unsqueeze(1).expand(-1,z.shape[1],-1)
lf0_embs = lf0_embs[:,:z.shape[1],:]
# print(z.shape, lf0_embs.shape)
x = torch.cat([z, lf0_embs, spk_embs_exp], dim=-1)
mel_outputs = self.decoder(x)
mel_outputs_postnet = self.postnet(mel_outputs.transpose(2,1))
mel_outputs_postnet = mel_outputs + mel_outputs_postnet.transpose(2,1)
# print('mel_outputs.shape:', mel_outputs_postnet.shape)
if mel_target is None:
return mel_outputs_postnet
else:
# mel_target = mel_target[:,1:-1,:]
loss = F.mse_loss(mel_outputs, mel_target) + \
F.mse_loss(mel_outputs_postnet, mel_target)
if self.use_l1_loss:
loss = loss + F.l1_loss(mel_outputs, mel_target) + \
F.l1_loss(mel_outputs_postnet, mel_target)
return loss, mel_outputs_postnet
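# Quick shape check (illustrative only; dimensions follow the defaults above):
if __name__ == "__main__":
    bs, half_t = 2, 70
    model = Decoder_ac()
    z = torch.randn(bs, half_t, 64)       # content codes at half the frame rate
    lf0 = torch.randn(bs, half_t * 2, 1)  # frame-level F0 embedding
    spk = torch.randn(bs, 256)            # speaker embedding
    mel = model(z, lf0, spk)
    print(mel.shape)                      # torch.Size([2, 140, 80])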
|
194505
|
import os
import time
import traceback
from .pin import *
import logging
class Thingpin(object):
"""
Monitor GPIO pins and report to AWS IoT.
Each GPIO pin is associated with an AWS IoT Thing. As the GPIO pin
state changes the AWS IoT Thing state is published via MQTT. An example
is a GPIO pin connected to a reed switch to detect whether a door is open
or closed. This would be associated with an AWS IoT Thing named "door".
Once started it does not return.
"""
def __init__(self, notifier, pin_mode=None, things=None, debug=True):
"""
Create and configure a Thingpin.
Args:
notifier (Notifier): Adafruit IO or AWS IoT notifier to publish
messages to
pin_mode (str): GPIO pin mode, 'BOARD' or 'BCM'
            things (dict of str: dict): each key is a thing name
and each value is the config for the Thing. For
example:
{'door1' : {
'pin': 21,
'resistor': 'pull_up',
'iot_states': {
'HIGH': { 'state': 'open' },
'LOW': { 'state': 'closed'}
}
}}
daemon (bool): if True run as a daemon and log to syslog, else
run as a foreground process and log to stdout
debug (bool): if True log debugging info
"""
self.log = logging.getLogger('thingpin')
self.notifier = notifier
self.pin_mode = pin_mode
self.thing_config = things
self.debug = debug
self.pins = {}
self.initialized = False
def initialize(self):
"""Initialize GPIO pins and connect to AWS IoT"""
if not self.initialized:
self.log.info('initializing')
for k in ['pin_mode', 'thing_config', 'debug']:
self.log.info('{} = {}'.format(k, getattr(self, k)))
set_pin_mode(self.pin_mode)
self.notifier.initialize()
# Pins
for name, config in self.thing_config.items():
self.pins[name] = Pin(self.notifier, name, config)
self.initialized = True
self.log.info('initialize complete')
def cleanup(self):
"""Release system resources and reset GPIO pins"""
self.notifier.cleanup()
pin_cleanup()
def run(self):
self.initialize()
self.log.info('run')
for pin in self.pins.values():
pin.run()
while True:
time.sleep(1000)
class Pin(object):
"""Connect a GPIO pin to a notifier, interpreting pin state per config"""
def __init__(self, notifier, name, config):
"""Setup an input pin"""
self.name = name
self.config = config
setup_input_pin(config['pin'], config.get('resistor'))
self.notifier = notifier
self.watcher = Watcher(observer=self,
pin=config['pin'],
sleep=config.get('sleep', .010),
debounce_delay=config.get('debounce_delay'))
def update_pin(self, pin, reading):
self.notifier.notify(self.name, self.get_state(reading))
def get_state(self, reading):
"""Get state to report for GPIO reading"""
if reading == GPIO.HIGH:
return self.config['iot_states']['HIGH']
else:
return self.config['iot_states']['LOW']
def run(self):
self.watcher.start()
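# Illustrative wiring (the notifier object here is hypothetical; on a real
# device it would be an Adafruit IO or AWS IoT notifier instance):
#
#   things = {'door1': {'pin': 21,
#                       'resistor': 'pull_up',
#                       'iot_states': {'HIGH': {'state': 'open'},
#                                      'LOW': {'state': 'closed'}}}}
#   Thingpin(notifier, pin_mode='BCM', things=things).run()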
|
194508
|
import copy
import logging
import math
import pathlib
import types
from unittest import mock
import pytest
from connexion.apis.flask_api import Jsonifier
from connexion.exceptions import InvalidSpecification
from connexion.json_schema import resolve_refs
from connexion.middleware.security import SecurityOperation
from connexion.operations import Swagger2Operation
from connexion.resolver import Resolver
TEST_FOLDER = pathlib.Path(__file__).parent
DEFINITIONS = {'new_stack': {'required': ['image_version', 'keep_stacks', 'new_traffic', 'senza_yaml'],
'type': 'object',
'properties': {'keep_stacks': {'type': 'integer',
'description':
'Number of older stacks to keep'},
'image_version': {'type': 'string',
'description':
'Docker image version to deploy'},
'senza_yaml': {'type': 'string',
'description': 'YAML to provide to senza'},
'new_traffic': {'type': 'integer',
'description':
'Percentage of the traffic'}}},
'composed': {'required': ['test'],
'type': 'object',
'properties': {'test': {'schema': {'$ref': '#/definitions/new_stack'}}}},
'problem': {"not": "defined"}}
PARAMETER_DEFINITIONS = {'myparam': {'in': 'path', 'type': 'integer'}}
OPERATION1 = {'description': 'Adds a new stack to be created by lizzy and returns the '
'information needed to keep track of deployment',
'operationId': 'fakeapi.hello.post_greeting',
'parameters': [{'in': 'body',
'name': 'new_stack',
'required': True,
'schema': {'$ref': '#/definitions/new_stack'}}],
'responses': {201: {'description': 'Stack to be created. The '
'CloudFormation Stack creation can '
"still fail if it's rejected by senza "
'or AWS CF.',
'schema': {'$ref': '#/definitions/new_stack'}},
400: {'description': 'Stack was not created because request '
'was invalid',
'schema': {'$ref': '#/definitions/problem'}},
401: {'description': 'Stack was not created because the '
'access token was not provided or was '
'not valid for this operation',
'schema': {'$ref': '#/definitions/problem'}}},
'security': [{'oauth': ['uid']}],
'summary': 'Create new stack'}
OPERATION2 = {'description': 'Adds a new stack to be created by lizzy and returns the '
'information needed to keep track of deployment',
'operationId': 'fakeapi.hello.post_greeting',
'parameters': [{'in': 'body',
'name': 'new_stack',
'required': True,
'schema': {'$ref': '#/definitions/new_stack'}},
{'in': 'body',
'name': 'new_stack',
'required': True,
'schema': {'$ref': '#/definitions/new_stack'}}],
'responses': {201: {'description': 'Stack to be created. The '
'CloudFormation Stack creation can '
"still fail if it's rejected by senza "
'or AWS CF.',
'schema': {'$ref': '#/definitions/new_stack'}},
400: {'description': 'Stack was not created because request '
'was invalid',
'schema': {'$ref': '#/definitions/problem'}},
401: {'description': 'Stack was not created because the '
'access token was not provided or was '
'not valid for this operation',
'schema': {'$ref': '#/definitions/problem'}}},
'security': [{'oauth': ['uid']}],
'summary': 'Create new stack'}
OPERATION3 = {'operationId': 'fakeapi.hello.post_greeting',
'parameters': [{'$ref': '#/parameters/myparam'}]}
OPERATION4 = {'description': 'Adds a new stack to be created by lizzy and returns the '
'information needed to keep track of deployment',
'operationId': 'fakeapi.hello.post_greeting',
'parameters': [
{
'in': 'body',
'name': 'new_stack',
'required': True,
'schema': {'$ref': '#/definitions/new_stack'}
},
{
'in': 'query',
'name': 'stack_version',
'default': 'one',
'type': 'number'
}
],
'responses': {201: {'description': 'Stack to be created. The '
'CloudFormation Stack creation can '
"still fail if it's rejected by senza "
'or AWS CF.',
'schema': {'$ref': '#/definitions/new_stack'}},
400: {'description': 'Stack was not created because request '
'was invalid',
'schema': {'$ref': '#/definitions/problem'}},
401: {'description': 'Stack was not created because the '
'access token was not provided or was '
'not valid for this operation',
'schema': {'$ref': '#/definitions/problem'}}},
'summary': 'Create new stack'}
OPERATION5 = {
'description': 'Adds a new stack to be created by lizzy and returns the '
'information needed to keep track of deployment',
'operationId': 'fakeapi.hello.post_greeting',
'parameters': [
{
'in': 'body',
'name': 'new_stack',
'required': True,
'type': 'integer',
'default': 'stack'
}
],
'responses': {'201': {'description': 'Stack to be created. The '
'CloudFormation Stack creation can '
"still fail if it's rejected by senza "
'or AWS CF.',
'schema': {'$ref': '#/definitions/new_stack'}},
'400': {'description': 'Stack was not created because request '
'was invalid',
'schema': {'$ref': '#/definitions/problem'}},
'401': {'description': 'Stack was not created because the '
'access token was not provided or was '
'not valid for this operation',
'schema': {'$ref': '#/definitions/problem'}}},
'security': [{'oauth': ['uid']}],
'summary': 'Create new stack'
}
OPERATION6 = {
'operationId': 'fakeapi.hello.schema',
'parameters': [
{
'type': 'object',
'in': 'body',
'name': 'new_stack',
'default': {'keep_stack': 1, 'image_version': 1, 'senza_yaml': 'senza.yaml',
'new_traffic': 100},
'schema': {'$ref': '#/definitions/new_stack'}
}
],
'responses': {},
'security': [{'oauth': ['uid']}],
'summary': 'Create new stack'
}
OPERATION7 = {'description': 'Adds a new stack to be created by lizzy and returns the '
'information needed to keep track of deployment',
'operationId': 'fakeapi.hello.post_greeting',
'parameters': [{'in': 'body',
'name': 'new_stack',
'required': True,
'schema': {'type': 'array', 'items': {'$ref': '#/definitions/new_stack'}}}],
'responses': {'201': {'description': 'Stack to be created. The '
'CloudFormation Stack creation can '
"still fail if it's rejected by senza "
'or AWS CF.',
'schema': {'$ref': '#/definitions/new_stack'}},
'400': {'description': 'Stack was not created because request '
'was invalid',
'schema': {'$ref': '#/definitions/problem'}},
'401': {'description': 'Stack was not created because the '
'access token was not provided or was '
'not valid for this operation',
'schema': {'$ref': '#/definitions/problem'}}},
'security': [{'oauth': ['uid']}],
'summary': 'Create new stack'}
OPERATION8 = {'description': 'Adds a new stack to be created by lizzy and returns the '
'information needed to keep track of deployment',
'operationId': 'fakeapi.hello.post_greeting',
'parameters': [{'in': 'body',
'name': 'test',
'required': True,
'schema': {'$ref': '#/definitions/composed'}}],
'responses': {'201': {'description': 'Stack to be created. The '
'CloudFormation Stack creation can '
"still fail if it's rejected by senza "
'or AWS CF.',
'schema': {'$ref': '#/definitions/new_stack'}},
'400': {'description': 'Stack was not created because request '
'was invalid',
'schema': {'$ref': '#/definitions/problem'}},
'401': {'description': 'Stack was not created because the '
'access token was not provided or was '
'not valid for this operation',
'schema': {'$ref': '#/definitions/problem'}}},
'security': [{'oauth': ['uid']}],
'summary': 'Create new stack'}
OPERATION9 = {'description': 'operation secured with 2 api keys',
'operationId': 'fakeapi.hello.post_greeting',
'responses': {'200': {'description': 'OK'}},
'security': [{'key1': [], 'key2': []}]}
OPERATION10 = {'description': 'operation secured with 2 oauth schemes combined using logical AND',
'operationId': 'fakeapi.hello.post_greeting',
'responses': {'200': {'description': 'OK'}},
'security': [{'oauth_1': ['uid'], 'oauth_2': ['uid']}]}
OPERATION11 = {'description': 'operation secured with an oauth schemes with 2 possible scopes (in OR)',
'operationId': 'fakeapi.hello.post_greeting',
'responses': {'200': {'description': 'OK'}},
'security': [{'oauth': ['myscope']}, {'oauth': ['myscope2']}]}
SECURITY_DEFINITIONS_REMOTE = {'oauth': {'type': 'oauth2',
'flow': 'password',
'x-tokenInfoUrl': 'https://oauth.example/token_info',
'scopes': {'myscope': 'can do stuff'}}}
SECURITY_DEFINITIONS_LOCAL = {'oauth': {'type': 'oauth2',
'flow': 'password',
'x-tokenInfoFunc': 'math.ceil',
'scopes': {'myscope': 'can do stuff',
'myscope2': 'can do other stuff'}}}
SECURITY_DEFINITIONS_BOTH = {'oauth': {'type': 'oauth2',
'flow': 'password',
'x-tokenInfoFunc': 'math.ceil',
'x-tokenInfoUrl': 'https://oauth.example/token_info',
'scopes': {'myscope': 'can do stuff'}}}
SECURITY_DEFINITIONS_WO_INFO = {'oauth': {'type': 'oauth2',
'flow': 'password',
'scopes': {'myscope': 'can do stuff'}}}
SECURITY_DEFINITIONS_2_KEYS = {'key1': {'type': 'apiKey',
'in': 'header',
'name': 'X-Auth-1',
'x-apikeyInfoFunc': 'math.ceil'},
'key2': {'type': 'apiKey',
'in': 'header',
'name': 'X-Auth-2',
'x-apikeyInfoFunc': 'math.ceil'}}
SECURITY_DEFINITIONS_2_OAUTH = {'oauth_1': {'type': 'oauth2',
'flow': 'password',
'x-tokenInfoFunc': 'math.ceil',
'scopes': {'myscope': 'can do stuff'}},
'oauth_2': {'type': 'oauth2',
'flow': 'password',
'x-tokenInfoFunc': 'math.ceil',
'scopes': {'myscope': 'can do stuff'}}}
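# Note: 'math.ceil' is used in the security definitions above only as a conveniently
# importable dotted path for the x-tokenInfoFunc / x-apikeyInfoFunc hooks; the tests below
# check that the resolved callable is passed through to the security handlers, not that it
# behaves like a real token-info function.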
@pytest.fixture
def api(security_handler_factory):
api = mock.MagicMock(jsonifier=Jsonifier)
api.security_handler_factory = security_handler_factory
yield api
def make_operation(op, definitions=True, parameters=True):
""" note the wrapper because definitions namespace and
operation namespace collide
"""
new_op = {"wrapper": copy.deepcopy(op)}
if definitions:
new_op.update({"definitions": DEFINITIONS})
if parameters:
new_op.update({"parameters": PARAMETER_DEFINITIONS})
return resolve_refs(new_op)["wrapper"]
def test_operation(api, security_handler_factory):
op_spec = make_operation(OPERATION1)
operation = Swagger2Operation(api=api,
method='GET',
path='endpoint',
path_parameters=[],
operation=op_spec,
app_produces=['application/json'],
app_consumes=['application/json'],
definitions=DEFINITIONS,
resolver=Resolver())
assert operation.method == 'GET'
assert operation.produces == ['application/json']
assert operation.consumes == ['application/json']
expected_body_schema = op_spec["parameters"][0]["schema"]
expected_body_schema.update({'definitions': DEFINITIONS})
assert operation.body_schema == expected_body_schema
def test_operation_remote_token_info(security_handler_factory):
verify_oauth = mock.MagicMock(return_value='verify_oauth_result')
security_handler_factory.verify_oauth = verify_oauth
security_handler_factory.get_token_info_remote = mock.MagicMock(return_value='get_token_info_remote_result')
SecurityOperation(security_handler_factory=security_handler_factory,
security=[{'oauth': ['uid']}],
security_schemes=SECURITY_DEFINITIONS_REMOTE)
verify_oauth.assert_called_with('get_token_info_remote_result',
security_handler_factory.validate_scope,
['uid'])
security_handler_factory.get_token_info_remote.assert_called_with('https://oauth.example/token_info')
def test_operation_array(api):
op_spec = make_operation(OPERATION7)
operation = Swagger2Operation(api=api,
method='GET',
path='endpoint',
path_parameters=[],
operation=op_spec,
app_produces=['application/json'],
app_consumes=['application/json'],
definitions=DEFINITIONS,
resolver=Resolver())
assert isinstance(operation.function, types.FunctionType)
assert operation.method == 'GET'
assert operation.produces == ['application/json']
assert operation.consumes == ['application/json']
expected_body_schema = {
'type': 'array',
'items': DEFINITIONS["new_stack"],
'definitions': DEFINITIONS
}
assert operation.body_schema == expected_body_schema
def test_operation_composed_definition(api):
op_spec = make_operation(OPERATION8)
operation = Swagger2Operation(api=api,
method='GET',
path='endpoint',
path_parameters=[],
operation=op_spec,
app_produces=['application/json'],
app_consumes=['application/json'],
definitions=DEFINITIONS,
resolver=Resolver())
assert isinstance(operation.function, types.FunctionType)
assert operation.method == 'GET'
assert operation.produces == ['application/json']
assert operation.consumes == ['application/json']
expected_body_schema = op_spec["parameters"][0]["schema"]
expected_body_schema.update({'definitions': DEFINITIONS})
assert operation.body_schema == expected_body_schema
def test_operation_local_security_oauth2(security_handler_factory):
verify_oauth = mock.MagicMock(return_value='verify_oauth_result')
security_handler_factory.verify_oauth = verify_oauth
SecurityOperation(security_handler_factory=security_handler_factory,
security=[{'oauth': ['uid']}],
security_schemes=SECURITY_DEFINITIONS_LOCAL)
verify_oauth.assert_called_with(math.ceil, security_handler_factory.validate_scope, ['uid'])
def test_operation_local_security_duplicate_token_info(security_handler_factory):
verify_oauth = mock.MagicMock(return_value='verify_oauth_result')
security_handler_factory.verify_oauth = verify_oauth
SecurityOperation(security_handler_factory,
security=[{'oauth': ['uid']}],
security_schemes=SECURITY_DEFINITIONS_BOTH)
    verify_oauth.assert_called_with(math.ceil, security_handler_factory.validate_scope, ['uid'])
def test_multi_body(api):
with pytest.raises(InvalidSpecification) as exc_info: # type: py.code.ExceptionInfo
op_spec = make_operation(OPERATION2)
operation = Swagger2Operation(api=api,
method='GET',
path='endpoint',
path_parameters=[],
operation=op_spec,
app_produces=['application/json'],
app_consumes=['application/json'],
definitions=DEFINITIONS,
resolver=Resolver())
operation.body_schema
exception = exc_info.value
assert str(exception) == "GET endpoint There can be one 'body' parameter at most"
assert repr(exception) == """<InvalidSpecification: "GET endpoint There can be one 'body' parameter at most">"""
def test_no_token_info(security_handler_factory):
SecurityOperation(security_handler_factory=security_handler_factory,
security=[{'oauth': ['uid']}],
security_schemes=SECURITY_DEFINITIONS_WO_INFO)
def test_multiple_security_schemes_and(security_handler_factory):
"""Tests an operation with multiple security schemes in AND fashion."""
def return_api_key_name(func, in_, name):
return name
verify_api_key = mock.MagicMock(side_effect=return_api_key_name)
security_handler_factory.verify_api_key = verify_api_key
verify_multiple = mock.MagicMock(return_value='verify_multiple_result')
security_handler_factory.verify_multiple_schemes = verify_multiple
security = [{'key1': [], 'key2': []}]
SecurityOperation(security_handler_factory=security_handler_factory,
security=security,
security_schemes=SECURITY_DEFINITIONS_2_KEYS)
assert verify_api_key.call_count == 2
verify_api_key.assert_any_call(math.ceil, 'header', 'X-Auth-1')
verify_api_key.assert_any_call(math.ceil, 'header', 'X-Auth-2')
# Assert verify_multiple_schemes is called with mapping from scheme name
# to result of security_handler_factory.verify_api_key()
verify_multiple.assert_called_with({'key1': 'X-Auth-1', 'key2': 'X-Auth-2'})
def test_multiple_oauth_in_and(security_handler_factory, caplog):
"""Tests an operation with multiple oauth security schemes in AND fashion.
These should be ignored and raise a warning.
"""
caplog.set_level(logging.WARNING, logger="connexion.operations.secure")
verify_oauth = mock.MagicMock(return_value='verify_oauth_result')
security_handler_factory.verify_oauth = verify_oauth
security = [{'oauth_1': ['uid'], 'oauth_2': ['uid']}]
SecurityOperation(security_handler_factory=security_handler_factory,
security=security,
security_schemes=SECURITY_DEFINITIONS_2_OAUTH)
assert '... multiple OAuth2 security schemes in AND fashion not supported' in caplog.text
def test_parameter_reference(api):
op_spec = make_operation(OPERATION3, definitions=False)
operation = Swagger2Operation(api=api,
method='GET',
path='endpoint',
path_parameters=[],
operation=op_spec,
app_produces=['application/json'],
app_consumes=['application/json'],
definitions={},
resolver=Resolver())
assert operation.parameters == [{'in': 'path', 'type': 'integer'}]
def test_default(api):
op_spec = make_operation(OPERATION4)
op_spec['parameters'][1]['default'] = 1
Swagger2Operation(
api=api, method='GET', path='endpoint', path_parameters=[],
operation=op_spec, app_produces=['application/json'],
app_consumes=['application/json'], definitions=DEFINITIONS,
resolver=Resolver()
)
op_spec = make_operation(OPERATION6, parameters=False)
op_spec['parameters'][0]['default'] = {
'keep_stacks': 1,
'image_version': 'one',
'senza_yaml': 'senza.yaml',
'new_traffic': 100
}
Swagger2Operation(
api=api, method='POST', path='endpoint', path_parameters=[],
operation=op_spec, app_produces=['application/json'],
app_consumes=['application/json'], definitions=DEFINITIONS,
resolver=Resolver()
)
def test_get_path_parameter_types(api):
op_spec = make_operation(OPERATION1, parameters=False)
op_spec['parameters'] = [
{'in': 'path', 'type': 'int', 'name': 'int_path'},
{'in': 'path', 'type': 'string', 'name': 'string_path'},
{'in': 'path', 'type': 'string', 'format': 'path', 'name': 'path_path'}
]
operation = Swagger2Operation(
api=api, method='GET', path='endpoint', path_parameters=[],
operation=op_spec, app_produces=['application/json'],
app_consumes=['application/json'],
definitions=DEFINITIONS, resolver=Resolver()
)
assert {'int_path': 'int', 'string_path': 'string', 'path_path': 'path'} == operation.get_path_parameter_types()
def test_oauth_scopes_in_or(security_handler_factory):
"""Tests whether an OAuth security scheme with 2 different possible scopes is correctly handled."""
verify_oauth = mock.MagicMock(return_value='verify_oauth_result')
security_handler_factory.verify_oauth = verify_oauth
security = [{'oauth': ['myscope']}, {'oauth': ['myscope2']}]
SecurityOperation(security_handler_factory=security_handler_factory,
security=security,
security_schemes=SECURITY_DEFINITIONS_LOCAL)
verify_oauth.assert_has_calls([
mock.call(math.ceil, security_handler_factory.validate_scope, ['myscope']),
mock.call(math.ceil, security_handler_factory.validate_scope, ['myscope2']),
])
|
194539
|
import json, sys, os
sys.path.append("/opt/src")
from connectors.redis.redis_wrapper import RedisWrapper
class BaseRedisApplication:
def __init__(self, name, redis_address, port, redis_queue, logger, request_key_name=None, response_key_name=None, debug=False):
self.m_name = name
self.m_host_address = redis_address
self.m_port = port
self.m_queue_name = redis_queue
self.m_log = logger
self.m_debug = debug
self.m_db = int(os.getenv("ENV_REDIS_DB_ID", 0))
self.m_redis_password = os.getenv("ENV_REDIS_PASSWORD", "") # if set to empty string use password=None
if str(self.m_redis_password) == "":
self.m_redis_password = None
self.m_rw = None
self.m_put_count = 0
self.m_get_count = 0
self.m_failed_get_count = 0
self.m_fetch_timeout = 60
self.m_overflowed = False
self.m_max_count_on_arch = sys.maxsize - 1
self.m_sleep_for_connection_outage = 1
self.m_request_key = request_key_name
self.m_response_key = response_key_name
# end of __init__
def enable_debug(self):
self.m_debug = True
return None
# end of enable_debug
def disable_debug(self):
self.m_debug = False
return None
    # end of disable_debug
def reset_counts(self):
self.m_get_count = 0
self.m_put_count = 0
self.m_failed_get_count = 0
self.m_overflowed = False
return None
# end of reset_counts
def update_failed_get_count(self):
if self.m_failed_get_count == self.m_max_count_on_arch:
self.m_overflowed = True
self.m_failed_get_count = 0
else:
self.m_failed_get_count += 1
return None
# end of update_failed_get_count
def update_get_count(self):
if self.m_get_count == self.m_max_count_on_arch:
self.m_overflowed = True
self.m_get_count = 0
else:
self.m_get_count += 1
return None
# end of update_get_count
def update_put_count(self):
if self.m_put_count == self.m_max_count_on_arch:
self.m_overflowed = True
self.m_put_count = 0
else:
self.m_put_count += 1
return None
# end of update_put_count
def lg(self, msg, level=6):
if self.m_log:
full_msg = self.m_name + ": " + msg
if self.m_debug:
                print(full_msg)
self.m_log.log(full_msg, level)
return None
# end of lg
# Force ALL Derived clients to Disconnect correctly
def disconnect(self):
return None
# end of disconnect
# Force ALL Derived clients to Connect correctly
def connect(self):
return None
# end of connect
def get_message(self):
self.lg("Testing Get Message Timeout(" + str(self.m_fetch_timeout) + ")", 7)
# By default RedisWrapper returns None when the timeout is hit
msg = self.m_rw.get(False, self.m_fetch_timeout)
self.update_get_count()
return msg
# end of get_message
def put_message(self, msg_object):
self.lg("Putting Message(" + str(msg_object.__class__.__name__) + ")", 7)
msg = self.m_rw.put(msg_object)
self.update_put_count()
return None
# end of put_message
# end of BaseRedisApplication
|
194590
|
import torch
import torch.nn as nn
from torchvision import datasets, transforms
IMAGES_PATH = 'image_path'
transform = transforms.Compose([transforms.Resize(256),
transforms.RandomCrop(224),
transforms.ToTensor()])
trainset = datasets.ImageFolder(IMAGES_PATH+str('train/'), transform=transform)
testset = datasets.ImageFolder(IMAGES_PATH+str('test/'), transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)
testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True)
class VGG16(nn.Module):
    def __init__(self, features):
        super().__init__()  # nn.Module must be initialized before submodules are assigned
        self.features = features
self.classifier = nn.Sequential(
nn.Linear(512*7*7, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, 1000)
)
def forward(self, x):
x = self.features(x)
x = torch.flatten(x, 1)
x = self.classifier(x)
return x
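# The list below follows the standard VGG16 ("configuration D") layout: integers are the
# Conv2d output channel counts and 'M' marks a 2x2 max-pooling layer (see make_features).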
arc = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M',
512, 512, 512, 'M', 512, 512, 512, 'M']
def make_features(arc):
layers = []
in_channels = 3
for i in arc:
if i == 'M':
layers += [nn.MaxPool2d(2, 2)]
else:
conv = nn.Conv2d(in_channels, i, 3, padding=1)
layers += [conv, nn.BatchNorm2d(i), nn.ReLU(True)]
in_channels = i
return nn.Sequential(*layers)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = VGG16(make_features(arc))
model.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
for e in range(10):
for images, labels in trainloader:
images, labels = images.to(device), labels.to(device)
outputs = model(images)
loss = criterion(outputs, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
print(f'Epoch: {e+1}, Loss: {loss.item()}')
model.eval()
with torch.no_grad():
correct = 0
total = 0
for images, labels in testloader:
images, labels = images.to(device), labels.to(device)
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print(f'Accuracy: {100 * correct / total:.4f}')
|
194592
|
import unittest
import torch
import torch.nn as nn
from torch.autograd import Variable
from wavenet.layers import *
from test.models import *
import numpy as np
class Test_dilation(unittest.TestCase):
def test_dilate(self):
input = Variable(torch.arange(0, 13).view(1, 1, 13))
dilated, _ = dilate(input, 1)
self.assertEqual(dilated.size(), (1, 1, 13))
self.assertEqual(dilated[0, 0, 4].data[0], 4)
dilated, _ = dilate(input, 2)
self.assertEqual(dilated.size(), (2, 1, 7))
self.assertEqual(dilated[1, 0, 2].data[0], 4)
dilated, _ = dilate(input, 4)
self.assertEqual(dilated.size(), (4, 1, 4))
self.assertEqual(dilated[3, 0, 1].data[0], 4)
dilated, _ = dilate(dilated, 1)
self.assertEqual(dilated.size(), (1, 1, 16))
self.assertEqual(dilated[0, 0, 7].data[0], 4)
def test_dilate_multichannel(self):
input = Variable(torch.arange(0, 36).view(2, 3, 6))
dilated, _ = dilate(input, 1)
self.assertEqual(dilated.size(), (1, 3, 12))
dilated, _ = dilate(input, 2)
self.assertEqual(dilated.size(), (2, 3, 6))
dilated, _ = dilate(input, 4)
self.assertEqual(dilated.size(), (4, 3, 3))
def test_dilate_invalid(self):
input = Variable(torch.arange(0, 36).view(2, 3, 6))
try:
dilate(input, 5)
except AssertionError:
print("raised AssertionError")
class Test_padding(unittest.TestCase):
def test_constantpad1d(self):
# equal padding on all 4 sides
input = torch.rand(3, 2, 5)
padding = 1
m = ConstantPad1d(padding) # m for model
output = m(input).data
self.assertEqual(input[0, 0, 0], output[0, padding, padding])
self.assertTrue(np.all(output[0, :, 0].numpy()==0))
self.assertTrue(np.all(output[0, :, -1].numpy()==0))
self.assertTrue(np.all(output[0, 0, :].numpy()==0))
self.assertTrue(np.all(output[0, -1, :].numpy()==0))
# unequal padding on dimensions, but equal within dimension
input = torch.rand(3, 2, 5)
padding = (1, 2)
m = ConstantPad1d(padding) # m for model
output = m(input).data
self.assertEqual(input[0, 0, 0], output[0, padding[1], padding[0]])
self.assertTrue(np.all(output[0, :, :padding[0]].numpy()==0))
self.assertTrue(np.all(output[0, :, -padding[0]:].numpy()==0))
self.assertTrue(np.all(output[0, :padding[1], :].numpy()==0))
self.assertTrue(np.all(output[0, -padding[1]:, :].numpy()==0))
# padding in one dimension, like we'll use for wavenet
input = torch.rand(3, 2, 5)
padding = (3, 0, 0, 0)
m = ConstantPad1d(padding) # m for model
output = m(input).data
self.assertTrue(np.all(output[:, :, :padding[0]].numpy()==0))
# non-zero padding, possibly useful for masking
input = torch.rand(3, 2, 5)
padding = (3, 0, 0, 0)
pad_val = -100
m = ConstantPad1d(padding, pad_val) # m for model
output = m(input).data
self.assertTrue(np.all(output[:, :, :padding[0]].numpy()==pad_val))
class Test_conv1dext(unittest.TestCase):
def test_ncc(self):
module = Conv1dExt(in_channels=3,
out_channels=5,
kernel_size=4)
rand = Variable(torch.rand(5, 3, 4))
module._parameters['weight'] = module.weight * module.weight + rand * 1
ncc = module.normalized_cross_correlation()
print("ncc:\n{}".format(ncc.data))
class Test_simple_models(unittest.TestCase):
def test_net_forward(self):
model = Net()
print(model)
self.assertEqual(model.conv1.out_channels, model.conv2.out_channels)
self.assertEqual(model.conv1.out_channels, model.conv3.in_channels)
self.assertEqual(model.conv2.out_channels, model.conv3.in_channels)
self.assertEqual(model.conv3.out_channels, model.conv4.in_channels)
# simple forward pass
input = Variable(torch.rand(1, 1, 4) * 2 - 1)
output = model(input)
self.assertEqual(output.size(), (1, 2, 4))
# feature split
model.conv1.split_feature(feature_i=1)
model.conv2.split_feature(feature_i=3)
print(model)
self.assertEqual(model.conv1.out_channels, model.conv2.out_channels)
self.assertEqual(model.conv1.out_channels, model.conv3.in_channels)
self.assertEqual(model.conv2.out_channels, model.conv3.in_channels)
self.assertEqual(model.conv3.out_channels, model.conv4.in_channels)
output2 = model(input)
diff = output - output2
dot = torch.dot(diff.view(-1), diff.view(-1))
# should be close to 0
#self.assertTrue(np.isclose(dot.data[0], 0., atol=1e-2))
print("mse: ", dot.data[0])
class Test_dilated_queue(unittest.TestCase):
def test_enqueue(self):
queue = DilatedQueue(max_length=8, num_channels=3)
e = torch.zeros((3))
for i in range(11):
e = e + 1
queue.enqueue(e)
data = queue.data[0, :].data
#print('data: ', data)
self.assertEqual(data[0], 9)
self.assertEqual(data[2], 11)
self.assertEqual(data[7], 8)
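        # These checks are consistent with DilatedQueue acting as a fixed-size ring buffer:
        # after 11 enqueues into max_length=8, the 9th-11th values overwrite slots 0-2
        # while slot 7 still holds the 8th value.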
def test_dequeue(self):
queue = DilatedQueue(max_length=8, num_channels=1)
e = torch.zeros((1))
for i in range(11):
e = e + 1
queue.enqueue(e)
#print('data: ', queue.data)
for i in range(9):
d = queue.dequeue(num_deq=3, dilation=2)
d = d.data # only using values for tests
#print("dequeue size: {}".format(d.size()))
self.assertEqual(d[0][0], 5)
self.assertEqual(d[0][1], 7)
self.assertEqual(d[0][2], 9)
def test_combined(self):
queue = DilatedQueue(max_length=12, num_channels=1)
e = torch.zeros((1))
for i in range(30):
e = e + 1
queue.enqueue(e)
d = queue.dequeue(num_deq=3, dilation=4)
d = d.data
self.assertEqual(d[0][0], max(i - 7, 0))
'''
class Test_zero_padding(unittest.TestCase):
def test_end_padding(self):
x = torch.ones((3, 4, 5))
p = zero_pad(x, num_pad=5, dimension=0)
assert p.size() == (8, 4, 5)
assert p[-1, 0, 0] == 0
p = zero_pad(x, num_pad=5, dimension=1)
assert p.size() == (3, 9, 5)
assert p[0, -1, 0] == 0
p = zero_pad(x, num_pad=5, dimension=2)
assert p.size() == (3, 4, 10)
assert p[0, 0, -1] == 0
def test_start_padding(self):
x = torch.ones((3, 4, 5))
p = zero_pad(x, num_pad=5, dimension=0, pad_start=True)
assert p.size() == (8, 4, 5)
assert p[0, 0, 0] == 0
p = zero_pad(x, num_pad=5, dimension=1, pad_start=True)
assert p.size() == (3, 9, 5)
assert p[0, 0, 0] == 0
p = zero_pad(x, num_pad=5, dimension=2, pad_start=True)
assert p.size() == (3, 4, 10)
assert p[0, 0, 0] == 0
def test_narrowing(self):
x = torch.ones((2, 3, 4))
x = x.narrow(2, 1, 2)
print(x)
x = x.narrow(0, -1, 3)
print(x)
assert False
class Test_wav_files(unittest.TestCase):
def test_wav_read(self):
data = wavfile.read('trained_generated.wav')[1]
print(data)
# [0.1, -0.53125...
assert False
class Test_padding(unittest.TestCase):
def test_1d(self):
x = Variable(torch.ones((2, 3, 4)), requires_grad=True)
pad = ConstantPad1d(5, dimension=0, pad_start=False)
res = pad(x)
assert res.size() == (5, 3, 4)
assert res[-1, 0, 0] == 0
test = gradcheck(ConstantPad1d, x, eps=1e-6, atol=1e-4)
print('gradcheck', test)
# torch.autograd.backward(res, )
res.backward()
back = pad.backward(res)
assert back.size() == (2, 3, 4)
assert back[-1, 0, 0] == 1
#
# pad = ConstantPad1d(5, dimension=1, pad_start=True)
#
# res = pad(x)
# assert res.size() == (2, 5, 4)
# assert res[0, 4, 0] == 0
#
# back = pad.backward(res)
# assert back.size() == (2, 3, 4)
# assert back[0, 2, 0] == 1
def test_2d(self):
pad = ConstantPad2d((5, 0, 0, 0))
x = Variable(torch.ones((2, 3, 4, 5)))
res = pad.forward(x)
print(res.size())
assert False
'''
def main():
unittest.main()
if __name__ == '__main__':
main()
|
194601
|
from base_learner import BaseLearner
from sklearn.svm import SVC
from sklearn.svm import LinearSVC
from sklearn.metrics import classification_report
from torch.utils.data import DataLoader
from smoke_video_dataset import SmokeVideoFeatureDataset
import joblib
import uuid
from util import *
import numpy as np
import torch
import time
import re
import shutil
# SVM learner using I3D features
class SvmLearner(BaseLearner):
def __init__(self,
C=1, # SVM parameters
mode="rgb", # can be "rgb" or "flow"
p_feat="../data/i3d_features_rgb/", # path to load features
):
super().__init__()
self.C = C
self.mode = mode
self.p_feat = p_feat
def log_parameters(self):
text = ""
text += "C: " + str(self.C) + "\n"
text += "mode: " + str(self.mode) + "\n"
text += "p_feat: " + self.p_feat + "\n"
self.log(text)
def set_dataloader(self, metadata_path, root_dir):
dataloader = {}
for phase in metadata_path:
self.log("Create dataloader for " + phase)
dataset = SmokeVideoFeatureDataset(metadata_path=metadata_path[phase], root_dir=root_dir)
dataloader[phase] = DataLoader(dataset, batch_size=len(dataset), shuffle=False, num_workers=0, pin_memory=False)
return dataloader
def fit(self,
p_model=None, # not used, just for consistency with the i3d model's parameters
model_id_suffix="", # the suffix appended after the model id
p_metadata_train="../data/split/metadata_train_split_0_by_camera.json", # metadata path (train)
p_metadata_validation="../data/split/metadata_validation_split_0_by_camera.json", # metadata path (validation)
p_metadata_test="../data/split/metadata_test_split_0_by_camera.json", # metadata path (test)
save_model_path="../data/saved_svm/[model_id]/model/", # path to save the models ([model_id] will be replaced)
save_log_path="../data/saved_svm/[model_id]/log/train.log", # path to save log files ([model_id] will be replaced)
save_metadata_path="../data/saved_svm/[model_id]/metadata/" # path to save metadata ([model_id] will be replaced)
):
# Set path
model_id = str(uuid.uuid4())[0:7] + "-svm-" + self.mode
model_id += model_id_suffix
save_model_path = save_model_path.replace("[model_id]", model_id)
save_log_path = save_log_path.replace("[model_id]", model_id)
save_metadata_path = save_metadata_path.replace("[model_id]", model_id)
# Copy training, validation, and testing metadata
check_and_create_dir(save_metadata_path)
shutil.copy(p_metadata_train, save_metadata_path + "metadata_train.json")
shutil.copy(p_metadata_validation, save_metadata_path + "metadata_validation.json")
shutil.copy(p_metadata_test, save_metadata_path + "metadata_test.json")
# Set logger
self.create_logger(log_path=save_log_path)
self.log("="*60)
self.log("="*60)
self.log("Use SVM learner with I3D features")
self.log("save_model_path: " + save_model_path)
self.log("save_log_path: " + save_log_path)
self.log("p_metadata_train: " + p_metadata_train)
self.log("p_metadata_validation: " + p_metadata_validation)
self.log("p_metadata_test: " + p_metadata_test)
self.log_parameters()
# Set model
model = SVC(C=self.C, gamma="scale")
#model = LinearSVC(C=self.C, max_iter=10)
# Load datasets
metadata_path = {"train": p_metadata_train, "validation": p_metadata_validation}
dataloader = self.set_dataloader(metadata_path, self.p_feat)
# Train and validate
for phase in ["train", "validation"]:
self.log("phase " + phase)
for d in dataloader[phase]:
file_name = d["file_name"]
feature = d["feature"].numpy()
true_labels = d["label"].numpy()
if phase == "train":
model.fit(feature, true_labels)
pred_labels = model.predict(feature)
# Save precision, recall, and f-score to the log
self.log(classification_report(true_labels, pred_labels))
# Save model
self.save(model, save_model_path + "model.pkl")
self.log("Done training")
def test(self,
            p_model=None # the path to load the previously self-trained model
):
# Check
if p_model is None:
self.log("Need to provide model path")
return
# Set path
        match = re.search(r'\b/[0-9a-fA-F]{7}-svm-(rgb|flow)[^/]*/\b', p_model)
        if match is None:
            self.log("Cannot find a valid model id from the model path.")
            return
        model_id = match.group()[1:-1]
p_root = p_model[:match.start()] + "/" + model_id + "/"
p_metadata_test = p_root + "metadata/metadata_test.json" # metadata path (test)
save_log_path = p_root + "log/test.log" # path to save log files
# Set logger
self.create_logger(log_path=save_log_path)
self.log("="*60)
self.log("="*60)
self.log("Use SVM learner with I3D features")
self.log("Start testing with mode: " + self.mode)
self.log("save_log_path: " + save_log_path)
self.log("p_metadata_test: " + p_metadata_test)
self.log_parameters()
# Set model
model = self.load(p_model)
# Load datasets
metadata_path = {"test": p_metadata_test}
dataloader = self.set_dataloader(metadata_path, self.p_feat)
# Test
for d in dataloader["test"]:
file_name = d["file_name"]
feature = d["feature"].numpy()
true_labels = d["label"].numpy()
pred_labels = model.predict(feature)
# Save precision, recall, and f-score to the log
self.log(classification_report(true_labels, pred_labels))
self.log("Done testing")
def save(self, model, out_path):
if model is not None and out_path is not None:
self.log("Save model to " + out_path)
check_and_create_dir(out_path)
joblib.dump(model, out_path)
def load(self, in_path):
if in_path is not None:
self.log("Load model from " + in_path)
model = joblib.load(in_path)
return model
else:
return None
|
194605
|
class Point(object):
def __init__(self, x, y):
self.x, self.y = x, y
class PointHash(object):
def __init__(self, x, y):
self.x, self.y = x, y
def __hash__(self):
return hash((self.x, self.y))
def __eq__(self, other):
return self.x == other.x and self.y == other.y
if __name__ == "__main__":
print("Test with default hash function")
p1 = Point(1, 1)
p2 = Point(1, 1)
points = set([p1, p2])
print("Contents of set([p1, p2]): ", points)
print("Point(1, 1) in set([p1, p2]) = ", (Point(1, 1) in points))
print("Test with custom hash function")
p1 = PointHash(1, 1)
p2 = PointHash(1, 1)
points = set([p1, p2])
print("Contents of set([p1, p2]): ", points)
print("Point(1, 1) in set([p1, p2]) = ", (PointHash(1, 1) in points))
|
194621
|
import numpy as np
from controller import Controller
from wrappers import visualize
from collections import defaultdict
from math import pi
GRID_SIZE = 0.5
ROTATION_STEPS = 4
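# Cell states used in Navigator.maze: 0 = unexplored, 1 = known free but not yet visited,
# 2 = visited, 3 = occupied/blocked (see _explore below).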
def move_position(position, rotation):
if rotation == 0:
return (position[0] + 1, position[1])
elif rotation == 1:
return (position[0], position[1] + 1)
elif rotation == 2:
return (position[0] - 1, position[1])
elif rotation == 3:
return (position[0], position[1] - 1)
class Navigator:
def __init__(self, controller):
self.controller = controller
def _move(self, position):
return self.controller.move_to((position[0] * GRID_SIZE, position[1] * GRID_SIZE))
def _rotate(self, rotation):
return self.controller.rotate_to(rotation * 2 * pi / 4)
def _can_move(self):
return not self.controller.is_occupied()
def collect(self, observation, position, rotation):
print("Collecting %s-%s" % (position, rotation))
def explore(self):
self.maze = defaultdict(lambda: 0)
self.maze[(0,0)] = 1
position = (0,0)
rotation = 0
self._explore(position, rotation)
def _explore(self, position, rotation):
self.maze[position] = 2
collect_spots = []
for i in range(4):
if self.maze[move_position(position, rotation)] == 0:
canMove = self._can_move()
state = 1 if canMove else 3
self.maze[move_position(position, rotation)] = state
if canMove:
collect_spots.append((move_position(position, rotation), rotation))
for r in range(3):
self.collect(self.controller.observe(), position, rotation + (float(r) / 3))
self._rotate(rotation + (float(r + 1) / 3))
if i != 3:
rotation = (rotation + 1) % 4
else:
self._rotate(rotation)
for i in range(4):
if len(collect_spots) > 0:
pos, rot = collect_spots.pop()
if rot == rotation:
self._move(pos)
self._explore(pos, rot)
self._move(position)
else:
collect_spots.append((pos, rot))
if i != 3:
rotation = (rotation - 1) % 4
self._rotate(rotation)
|
194644
|
import torch, os
import yaml
from IPython import embed
def get_config(args):
configuration = dict(
SEED=1337, # random seed for reproduce results
INPUT_SIZE=[112, 112], # support: [112, 112] and [224, 224]
EMBEDDING_SIZE=512, # feature dimension
)
if args.workers_id == 'cpu' or not torch.cuda.is_available():
configuration['GPU_ID'] = []
print("check", args.workers_id, torch.cuda.is_available())
else:
configuration['GPU_ID'] = [int(i) for i in args.workers_id.split(',')]
if len(configuration['GPU_ID']) == 0:
configuration['DEVICE'] = torch.device('cpu')
configuration['MULTI_GPU'] = False
else:
configuration['DEVICE'] = torch.device('cuda:%d' % configuration['GPU_ID'][0])
if len(configuration['GPU_ID']) == 1:
configuration['MULTI_GPU'] = False
else:
configuration['MULTI_GPU'] = True
configuration['NUM_EPOCH'] = args.epochs
configuration['BATCH_SIZE'] = args.batch_size
if args.data_mode == 'retina':
configuration['DATA_ROOT'] = './Data/ms1m-retinaface-t1/'
else:
raise Exception(args.data_mode)
configuration['EVAL_PATH'] = './eval/'
assert args.net in [ 'VIT','VITs']
configuration['BACKBONE_NAME'] = args.net
assert args.head in ['Softmax', 'ArcFace', 'CosFace', 'SFaceLoss']
configuration['HEAD_NAME'] = args.head
configuration['TARGET'] = [i for i in args.target.split(',')]
if args.resume:
configuration['BACKBONE_RESUME_ROOT'] = args.resume
else:
configuration['BACKBONE_RESUME_ROOT'] = '' # the root to resume training from a saved checkpoint
configuration['WORK_PATH'] = args.outdir # the root to buffer your checkpoints
if not os.path.exists(args.outdir):
os.makedirs(args.outdir)
return configuration
|
194654
|
import numpy as np
from .other import clip_boxes
from .text_proposal_graph_builder import TextProposalGraphBuilder
class TextProposalConnector:
def __init__(self):
self.graph_builder=TextProposalGraphBuilder()
def group_text_proposals(self, text_proposals, scores, im_size):
graph=self.graph_builder.build_graph(text_proposals, scores, im_size)
return graph.sub_graphs_connected()
def fit_y(self, X, Y, x1, x2):
        assert len(X) != 0
# if X only include one point, the function will get line y=Y[0]
if np.sum(X==X[0])==len(X):
return Y[0], Y[0]
p=np.poly1d(np.polyfit(X, Y, 1))
return p(x1), p(x2)
def get_text_lines(self, text_proposals, scores, im_size):
# tp=text proposal
tp_groups=self.group_text_proposals(text_proposals, scores, im_size)
text_lines=np.zeros((len(tp_groups), 5), np.float32)
for index, tp_indices in enumerate(tp_groups):
text_line_boxes=text_proposals[list(tp_indices)]
x0=np.min(text_line_boxes[:, 0])
x1=np.max(text_line_boxes[:, 2])
offset=(text_line_boxes[0, 2]-text_line_boxes[0, 0])*0.5
lt_y, rt_y=self.fit_y(text_line_boxes[:, 0], text_line_boxes[:, 1], x0+offset, x1-offset)
lb_y, rb_y=self.fit_y(text_line_boxes[:, 0], text_line_boxes[:, 3], x0+offset, x1-offset)
# the score of a text line is the average score of the scores
# of all text proposals contained in the text line
score=scores[list(tp_indices)].sum()/float(len(tp_indices))
text_lines[index, 0]=x0
text_lines[index, 1]=min(lt_y, rt_y)
text_lines[index, 2]=x1
text_lines[index, 3]=max(lb_y, rb_y)
text_lines[index, 4]=score
text_lines=clip_boxes(text_lines, im_size)
        text_recs = np.zeros((len(text_lines), 9), np.float32)  # np.float is removed in newer NumPy
index = 0
for line in text_lines:
xmin,ymin,xmax,ymax=line[0],line[1],line[2],line[3]
text_recs[index, 0] = xmin
text_recs[index, 1] = ymin
text_recs[index, 2] = xmax
text_recs[index, 3] = ymin
text_recs[index, 4] = xmin
text_recs[index, 5] = ymax
text_recs[index, 6] = xmax
text_recs[index, 7] = ymax
text_recs[index, 8] = line[4]
index = index + 1
return text_recs
|
194662
|
from typing import List
import numpy as np
import pandas as pd
from category_encoders.backward_difference import BackwardDifferenceEncoder
from category_encoders.cat_boost import CatBoostEncoder
from category_encoders.helmert import HelmertEncoder
from category_encoders.james_stein import JamesSteinEncoder
from category_encoders.leave_one_out import LeaveOneOutEncoder
from category_encoders.m_estimate import MEstimateEncoder
from category_encoders.one_hot import OneHotEncoder
from category_encoders.ordinal import OrdinalEncoder
from category_encoders.sum_coding import SumEncoder
from category_encoders.target_encoder import TargetEncoder
from category_encoders.woe import WOEEncoder
from sklearn.model_selection import RepeatedStratifiedKFold
def get_single_encoder(encoder_name: str, cat_cols: list):
"""
Get encoder by its name
:param encoder_name: Name of desired encoder
:param cat_cols: Cat columns for encoding
:return: Categorical encoder
"""
if encoder_name == "FrequencyEncoder":
encoder = FrequencyEncoder(cols=cat_cols)
if encoder_name == "WOEEncoder":
encoder = WOEEncoder(cols=cat_cols)
if encoder_name == "TargetEncoder":
encoder = TargetEncoder(cols=cat_cols)
if encoder_name == "SumEncoder":
encoder = SumEncoder(cols=cat_cols)
if encoder_name == "MEstimateEncoder":
encoder = MEstimateEncoder(cols=cat_cols)
if encoder_name == "LeaveOneOutEncoder":
encoder = LeaveOneOutEncoder(cols=cat_cols)
if encoder_name == "HelmertEncoder":
encoder = HelmertEncoder(cols=cat_cols)
if encoder_name == "BackwardDifferenceEncoder":
encoder = BackwardDifferenceEncoder(cols=cat_cols)
if encoder_name == "JamesSteinEncoder":
encoder = JamesSteinEncoder(cols=cat_cols)
if encoder_name == "OrdinalEncoder":
encoder = OrdinalEncoder(cols=cat_cols)
if encoder_name == "CatBoostEncoder":
encoder = CatBoostEncoder(cols=cat_cols)
if encoder_name == "MEstimateEncoder":
encoder = MEstimateEncoder(cols=cat_cols)
if encoder_name == "OneHotEncoder":
encoder = OneHotEncoder(cols=cat_cols)
if encoder is None:
raise NotImplementedError("To be implemented")
return encoder
class DoubleValidationEncoderNumerical:
"""
    Encoder wrapper that fits each target-based encoder inside repeated stratified K-fold
    validation, so every row is encoded out-of-fold by encoders that never saw its own target
"""
def __init__(self, cols, encoders_names_tuple=()):
"""
:param cols: Categorical columns
:param encoders_names_tuple: Tuple of str with encoders
"""
self.cols, self.num_cols = cols, None
self.encoders_names_tuple = encoders_names_tuple
self.n_folds, self.n_repeats = 5, 3
self.model_validation = RepeatedStratifiedKFold(
n_splits=self.n_folds, n_repeats=self.n_repeats, random_state=0
)
self.encoders_dict = {}
self.storage = None
def fit_transform(self, X: pd.DataFrame, y: np.array) -> pd.DataFrame:
self.num_cols = [col for col in X.columns if col not in self.cols]
self.storage = []
for encoder_name in self.encoders_names_tuple:
for n_fold, (train_idx, val_idx) in enumerate(
self.model_validation.split(X, y)
):
encoder = get_single_encoder(encoder_name, self.cols)
X_train, X_val = (
X.loc[train_idx].reset_index(drop=True),
X.loc[val_idx].reset_index(drop=True),
)
y_train, y_val = y[train_idx], y[val_idx]
_ = encoder.fit_transform(X_train, y_train)
# transform validation part and get all necessary cols
val_t = encoder.transform(X_val)
val_t = val_t[
[col for col in val_t.columns if col not in self.num_cols]
].values
if encoder_name not in self.encoders_dict.keys():
cols_representation = np.zeros((X.shape[0], val_t.shape[1]))
self.encoders_dict[encoder_name] = [encoder]
else:
self.encoders_dict[encoder_name].append(encoder)
cols_representation[val_idx, :] += val_t / self.n_repeats
cols_representation = pd.DataFrame(cols_representation)
cols_representation.columns = [
f"encoded_{encoder_name}_{i}"
for i in range(cols_representation.shape[1])
]
self.storage.append(cols_representation)
for df in self.storage:
X = pd.concat([X, df], axis=1)
X.drop(self.cols, axis=1, inplace=True)
return X
def transform(self, X: pd.DataFrame) -> pd.DataFrame:
self.storage = []
for encoder_name in self.encoders_names_tuple:
cols_representation = None
for encoder in self.encoders_dict[encoder_name]:
test_tr = encoder.transform(X)
test_tr = test_tr[
[col for col in test_tr.columns if col not in self.num_cols]
].values
if cols_representation is None:
cols_representation = np.zeros(test_tr.shape)
cols_representation = (
cols_representation + test_tr / self.n_folds / self.n_repeats
)
cols_representation = pd.DataFrame(cols_representation)
cols_representation.columns = [
f"encoded_{encoder_name}_{i}"
for i in range(cols_representation.shape[1])
]
self.storage.append(cols_representation)
for df in self.storage:
X = pd.concat([X, df], axis=1)
X.drop(self.cols, axis=1, inplace=True)
return X
class MultipleEncoder:
"""
Multiple encoder for categorical columns
"""
def __init__(self, cols: List[str], encoders_names_tuple=()):
"""
:param cols: List of categorical columns
:param encoders_names_tuple: Tuple of categorical encoders names. Possible values in tuple are:
"FrequencyEncoder", "WOEEncoder", "TargetEncoder", "SumEncoder", "MEstimateEncoder", "LeaveOneOutEncoder",
"HelmertEncoder", "BackwardDifferenceEncoder", "JamesSteinEncoder", "OrdinalEncoder""CatBoostEncoder"
"""
self.cols = cols
self.num_cols = None
self.encoders_names_tuple = encoders_names_tuple
self.encoders_dict = {}
# list for storing results of transformation from each encoder
self.storage = None
def fit_transform(self, X: pd.DataFrame, y: np.array) -> pd.DataFrame:
self.num_cols = [col for col in X.columns if col not in self.cols]
self.storage = []
for encoder_name in self.encoders_names_tuple:
encoder = get_single_encoder(encoder_name=encoder_name, cat_cols=self.cols)
cols_representation = encoder.fit_transform(X, y)
self.encoders_dict[encoder_name] = encoder
cols_representation = cols_representation[
[col for col in cols_representation.columns if col not in self.num_cols]
].values
cols_representation = pd.DataFrame(cols_representation)
cols_representation.columns = [
f"encoded_{encoder_name}_{i}"
for i in range(cols_representation.shape[1])
]
self.storage.append(cols_representation)
# concat cat cols representations with initial dataframe
for df in self.storage:
X = pd.concat([X, df], axis=1)
# remove all columns as far as we have their representations
X.drop(self.cols, axis=1, inplace=True)
return X
def transform(self, X) -> pd.DataFrame:
self.storage = []
for encoder_name in self.encoders_names_tuple:
# get representation of cat columns and form a pd.DataFrame for it
cols_representation = self.encoders_dict[encoder_name].transform(X)
cols_representation = cols_representation[
[col for col in cols_representation.columns if col not in self.num_cols]
].values
cols_representation = pd.DataFrame(cols_representation)
cols_representation.columns = [
f"encoded_{encoder_name}_{i}"
for i in range(cols_representation.shape[1])
]
self.storage.append(cols_representation)
# concat cat cols representations with initial dataframe
for df in self.storage:
X = pd.concat([X, df], axis=1)
# remove all columns as far as we have their representations
X.drop(self.cols, axis=1, inplace=True)
return X
class FrequencyEncoder:
def __init__(self, cols):
self.cols = cols
self.counts_dict = None
def fit(self, X: pd.DataFrame, y=None) -> pd.DataFrame:
counts_dict = {}
for col in self.cols:
values, counts = np.unique(X[col], return_counts=True)
counts_dict[col] = dict(zip(values, counts))
self.counts_dict = counts_dict
def transform(self, X: pd.DataFrame) -> pd.DataFrame:
counts_dict_test = {}
res = []
for col in self.cols:
values, counts = np.unique(X[col], return_counts=True)
counts_dict_test[col] = dict(zip(values, counts))
# if value is in "train" keys - replace "test" counts with "train" counts
for k in [
key
for key in counts_dict_test[col].keys()
if key in self.counts_dict[col].keys()
]:
counts_dict_test[col][k] = self.counts_dict[col][k]
res.append(X[col].map(counts_dict_test[col]).values.reshape(-1, 1))
res = np.hstack(res)
X[self.cols] = res
return X
def fit_transform(self, X: pd.DataFrame, y=None) -> pd.DataFrame:
self.fit(X, y)
X = self.transform(X)
return X
if __name__ == "__main__":
df = pd.DataFrame({})
df["cat_col"] = [1, 2, 3, 1, 2, 3, 1, 1, 1]
df["target"] = [0, 1, 0, 1, 0, 1, 0, 1, 0]
#
temp = df.copy()
enc = CatBoostEncoder(cols=["cat_col"])
print(enc.fit_transform(temp, temp["target"]))
#
temp = df.copy()
enc = MultipleEncoder(cols=["cat_col"], encoders_names_tuple=("CatBoostEncoder",))
print(enc.fit_transform(temp, temp["target"]))
#
temp = df.copy()
enc = DoubleValidationEncoderNumerical(
cols=["cat_col"], encoders_names_tuple=("CatBoostEncoder",)
)
print(enc.fit_transform(temp, temp["target"]))
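    #
    # Illustrative check of FrequencyEncoder on the same toy dataframe (added example):
    # each category is replaced by its count in the fitted data, so here 1 -> 5 and both
    # 2 and 3 -> 2.
    temp = df.copy()
    enc = FrequencyEncoder(cols=["cat_col"])
    print(enc.fit_transform(temp, temp["target"]))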
|
194754
|
import numpy as np
import sys,os
import cv2
caffe_root = '/home/yaochuanqi/work/tmp/ssd/'
sys.path.insert(0, caffe_root + 'python')
import caffe
net_file= 'ssdlite/coco/deploy.prototxt'
caffe_model='ssdlite/deploy.caffemodel'
test_dir = "images"
caffe.set_mode_cpu()
net = caffe.Net(net_file,caffe_model,caffe.TEST)
COCO_CLASSES = ("background" , "person" , "bicycle" , "car" , "motorcycle" ,
"airplane" , "bus" , "train" , "truck" , "boat" , "traffic light",
"fire hydrant", "N/A" , "stop sign", "parking meter", "bench" ,
"bird" , "cat" , "dog" , "horse" , "sheep" , "cow" , "elephant" ,
"bear" , "zebra" , "giraffe" , "N/A" , "backpack" , "umbrella" ,
"N/A" , "N/A" , "handbag" , "tie" , "suitcase" , "frisbee" , "skis" ,
"snowboard" , "sports ball", "kite" , "baseball bat", "baseball glove",
"skateboard" , "surfboard" , "tennis racket", "bottle" , "N/A" ,
"wine glass", "cup" , "fork" , "knife" , "spoon" , "bowl" , "banana" ,
"apple" , "sandwich" , "orange" , "broccoli" , "carrot" , "hot dog",
"pizza" , "donut" , "cake" , "chair" , "couch" , "potted plant",
"bed" , "N/A" , "dining table", "N/A" , "N/A" , "toilet" , "N/A" ,
"tv" , "laptop" , "mouse" , "remote" , "keyboard" , "cell phone",
"microwave" , "oven" , "toaster" , "sink" , "refrigerator" , "N/A" ,
"book" , "clock" , "vase" , "scissors" , "teddy bear", "hair drier",
"toothbrush" )
def preprocess(src):
img = cv2.resize(src, (300,300))
img = img - 127.5
img = img / 127.5
return img
def postprocess(img, out):
h = img.shape[0]
w = img.shape[1]
box = out['detection_out'][0,0,:,3:7] * np.array([w, h, w, h])
cls = out['detection_out'][0,0,:,1]
conf = out['detection_out'][0,0,:,2]
return (box.astype(np.int32), conf, cls)
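# For reference: preprocess() maps pixel values to roughly [-1, 1] via (x - 127.5) / 127.5,
# and postprocess() rescales the normalized [xmin, ymin, xmax, ymax] columns of
# detection_out back to pixel coordinates of the original image.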
def detect(imgfile):
origimg = cv2.imread(imgfile)
img = preprocess(origimg)
img = img.astype(np.float32)
img = img.transpose((2, 0, 1))
net.blobs['data'].data[...] = img
out = net.forward()
box, conf, cls = postprocess(origimg, out)
for i in range(len(box)):
p1 = (box[i][0], box[i][1])
p2 = (box[i][2], box[i][3])
cv2.rectangle(origimg, p1, p2, (0,255,0))
p3 = (max(p1[0], 15), max(p1[1], 15))
title = "%s:%.2f" % (COCO_CLASSES[int(cls[i])], conf[i])
cv2.putText(origimg, title, p3, cv2.FONT_ITALIC, 0.6, (0, 255, 0), 1)
cv2.imshow("SSD", origimg)
k = cv2.waitKey(0) & 0xff
#Exit if ESC pressed
if k == 27 : return False
return True
for f in os.listdir(test_dir):
if detect(test_dir + "/" + f) == False:
break
|
194778
|
from poop.hfdp.command.simpleremote.light import Light
class LightOffCommand:
def __init__(self, light: Light) -> None:
self.__light = light
def execute(self) -> None:
self.__light.off()
|
194810
|
from unittest import mock
from kinto.core import authentication, utils
from kinto.core.testing import DummyRequest, unittest
from .support import BaseWebTest
class AuthenticationPoliciesTest(BaseWebTest, unittest.TestCase):
def test_basic_auth_is_accepted_by_default(self):
self.app.get(self.plural_url, headers=self.headers, status=200)
# Check that the capability is exposed on the homepage.
resp = self.app.get("/")
assert "basicauth" in resp.json["capabilities"]
def test_basic_auth_is_accepted_if_enabled_in_settings(self):
app = self.make_app({"multiauth.policies": "basicauth"})
app.get(self.plural_url, headers=self.headers, status=200)
# Check that the capability is exposed on the homepage.
resp = app.get("/")
assert "basicauth" in resp.json["capabilities"]
def test_basic_auth_is_declined_if_disabled_in_settings(self):
app = self.make_app(
{
"multiauth.policies": "dummy",
"multiauth.policy.dummy.use": (
"pyramid.authentication." "RepozeWho1AuthenticationPolicy"
),
}
)
app.get(self.plural_url, headers=self.headers, status=401)
# Check that the capability is exposed on the homepage.
resp = app.get("/")
assert "basicauth" not in resp.json["capabilities"]
@mock.patch("kinto.core.authentication.BasicAuthAuthenticationPolicy")
def test_policy_name_is_used(self, basicAuth):
basicAuth.return_value.authenticated_userid.return_value = "user"
basicAuth.return_value.name = "foobar"
app = self.make_app(
{
"multiauth.policies": "dummy",
"multiauth.policy.dummy.use": (
"kinto.core.authentication." "BasicAuthAuthenticationPolicy"
),
}
)
# Check that the policy uses its name rather than the settings prefix
resp = app.get("/")
assert resp.json["user"]["id"].startswith("foobar:")
def test_views_are_forbidden_if_unknown_auth_method(self):
app = self.make_app({"multiauth.policies": "basicauth"})
self.headers["Authorization"] = "Carrier"
app.get(self.plural_url, headers=self.headers, status=401)
self.headers["Authorization"] = "Carrier pigeon"
app.get(self.plural_url, headers=self.headers, status=401)
def test_principals_are_fetched_from_permission_backend(self):
patch = mock.patch(("tests.core.support." "AllowAuthorizationPolicy.permits"))
self.addCleanup(patch.stop)
mocked = patch.start()
self.permission.add_user_principal(self.principal, "group:admin")
self.app.get(self.plural_url, headers=self.headers)
_, principals, _ = mocked.call_args[0]
self.assertIn("group:admin", principals)
def test_user_principals_are_cached_per_user(self):
patch = mock.patch.object(
self.permission, "get_user_principals", wraps=self.permission.get_user_principals
)
self.addCleanup(patch.stop)
mocked = patch.start()
batch = {
"defaults": {"headers": self.headers, "path": "/mushrooms"},
"requests": [
{},
{},
{},
{"headers": {"Authorization": "Basic Ym9iOg=="}},
{"headers": {"Authorization": "Basic bWF0Og=="}},
],
}
self.app.post_json("/batch", batch)
self.assertEqual(mocked.call_count, 3)
class BasicAuthenticationPolicyTest(unittest.TestCase):
def setUp(self):
self.policy = authentication.BasicAuthAuthenticationPolicy()
self.request = DummyRequest()
self.request.headers["Authorization"] = "Basic bWF0Og=="
@mock.patch("kinto.core.utils.hmac_digest")
def test_userid_is_hashed(self, mocked):
mocked.return_value = "yeah"
user_id = self.policy.unauthenticated_userid(self.request)
self.assertIn("yeah", user_id)
def test_userid_is_built_using_password(self):
auth_password = utils.encode64("user:secret1", encoding="ascii")
self.request.headers["Authorization"] = "Basic {}".format(auth_password)
user_id1 = self.policy.unauthenticated_userid(self.request)
auth_password = utils.encode64("user:secret2", encoding="ascii")
self.request.headers["Authorization"] = "Basic {}".format(auth_password)
user_id2 = self.policy.unauthenticated_userid(self.request)
self.assertNotEqual(user_id1, user_id2)
def test_views_are_forbidden_if_basic_is_wrong(self):
self.request.headers["Authorization"] = "Basic abc"
user_id = self.policy.unauthenticated_userid(self.request)
self.assertIsNone(user_id)
def test_returns_none_if_username_is_empty(self):
        auth_password = utils.encode64(":secret", encoding="ascii")  # empty username; the password value is arbitrary here
self.request.headers["Authorization"] = "Basic {}".format(auth_password)
user_id = self.policy.unauthenticated_userid(self.request)
self.assertIsNone(user_id)
def test_providing_empty_password_is_supported(self):
auth_password = utils.encode64("secret:", encoding="ascii")
self.request.headers["Authorization"] = "Basic {}".format(auth_password)
user_id = self.policy.unauthenticated_userid(self.request)
self.assertIsNotNone(user_id)
|
194817
|
from vnpy.api.oes.vnoes import OesApi_GetErrorMsg, OesApi_GetLastError
def error_to_str(code: int):
try:
# return error_codes[code]
return OesApi_GetErrorMsg(code)
except KeyError:
return "Unknown error code!"
def get_last_error():
code = OesApi_GetLastError()
return OesApi_GetErrorMsg(code)
error_codes = {
1001: "报文格式错误",
1002: "当前主机不是主节点",
1003: "主存库操作失败",
1004: "因状态等基础数据不匹配,无法更新数据",
1005: "协议版本不兼容",
1006: "数据不存在",
1007: "未到达服务开放时间",
1008: "非法的定位游标",
1009: "非法的客户端登陆用户名称",
1010: "非法的证券代码",
1011: "非法的客户代码",
1012: "非法的客户端类型",
1013: "客户端已被禁用",
1014: "客户端密码不正确",
1015: "客户端重复登录",
1016: "客户端连接数量过多",
1017: "客户端未经授权操作他人账户",
1018: "数据超出修改范围",
1019: "非法的应用系统名称",
1020: "请求条件有冲突",
1021: "非法的客户端IP/MAC地址格式",
1022: "尚不支持此业务",
1023: "非法的客户端环境号",
1024: "交易所拒绝",
1025: "主柜拒绝",
1026: "流量超出限制范围",
1027: "禁止使用API登录",
1028: "非法的私募基金产品代码",
1029: "密码未改变",
1030: "非法的来源分类",
1031: "非法的加密类型",
1032: "非法的客户端设备序列号",
1033: "无可用节点",
1101: "登录柜台失败",
1102: "上报至柜台失败",
1103: "从柜台获取状态失败",
1201: "非法的证券账户代码",
1202: "非法的资金账户代码",
1203: "非法的出入金方向",
1204: "非法的市场代码",
1205: "非法的证券类别",
1206: "非法的买卖类型",
1207: "非法的币种",
1208: "非法的委托类型",
1209: "无效的账户状态",
1210: "未找到委托信息",
1211: "未找到持仓信息",
1212: "未找到出入金流水",
1213: "流水号重复",
1214: "当前时段不能报价",
1215: "没有操作权限",
1216: "可用/可取资金余额不足",
1217: "可用持仓不足",
1218: "委托数量不在合法区间内",
1219: "非数量单位的整数倍",
1220: "非法的PBU代码",
1221: "价格不在合法区间内",
1222: "非价格单位的整数倍",
1223: "无涨停价市价委托失败",
1224: "当前时段不支持市价委托",
1225: "无效的订单状态",
1226: "撤单信息与原始委托不符",
1227: "重复撤单",
1228: "未通过限仓检查",
1229: "未通过限购检查",
1230: "超过了ETF最大现金替代比例",
1231: "非行权日",
1232: "产品(证券)停牌",
1233: "合约限制开仓",
1234: "当日累计申购或赎回数量超过限额",
1235: "当日累计净申购或净赎回数量超过限额",
1236: "找不到前收盘价",
1237: "超过报撤比限制",
1238: "委托请求过于频繁",
1239: "非法的出入金转账金额",
1240: "重复的认购委托",
1241: "认购委托份数超过认购额度",
1242: "出入金笔数超过限制",
1243: "禁止同时做多笔出入金",
1244: "非法的新股配号、中签记录类型",
1245: "限制股东账户进行买交易",
1246: "限制股东账户进行卖交易",
1247: "限制股东账户进行逆回购交易",
1248: "限制股东账户进行新股认购交易",
1249: "股东账户没有市价委托交易的权限",
1250: "股东账户没有交易创业板证券的权限",
1251: "股东账户没有交易分级基金的权限",
1252: "股东账户没有债券合格投资者的权限",
1253: "客户风险评级低于交易证券需求的风险等级",
1254: "股东账户没有交易风险警示证券的权限",
1255: "股东账户没有交易退市整理证券的权限",
1256: "股东账户没有交易单市场ETF的权限",
1257: "股东账户没有交易跨市场ETF的权限",
1258: "股东账户没有交易货币基金ETF的权限",
1259: "股东账户没有交易跨境ETF的权限",
1260: "仅允许合格投资者投资该证券",
1261: "仅允许合格机构投资者投资该证券",
1262: "出入金执行异常,待人工干预",
1263: "交易日不在证券的发行期内",
1264: "ETF产品禁止申购",
1265: "ETF产品禁止赎回",
1266: "限制股东账户进行撤指定",
1267: "限制股东账户进行转托管",
1268: "机构客户/主柜业务不支持银行转帐",
1269: "不能买入被禁止开仓的证券",
1270: "不能买入黑名单中的证券",
1271: "股东账户没有交易存托凭证的权限",
1272: "股东账户没有交易创新企业股票的权限",
1273: "非法的出入金转账类型",
}
|
194860
|
import subprocess, re, os, sys
import gzip, shutil
from smsgateway.config import *
def _logging_namer(name):
return name + ".gz"
def _logging_rotater(source, dest):
with open(source, "rb") as sf:
# data = sf.read()
# compressed = zlib.compress(data, 9)
with gzip.open(dest, 'wb') as df:
shutil.copyfileobj(sf, df)
os.remove(source)
def setup_logging(service_name):
import logging
from logging import StreamHandler
from logging.handlers import RotatingFileHandler
filelog_formatter = logging.Formatter(f'%(asctime)s {service_name} %(levelname)s %(funcName)s(%(lineno)d) %(message)s')
syslog_formatter = logging.Formatter('%(message)s')
logFile = os.path.join(LOG_DIR, f"{service_name}.log")
file_handler = RotatingFileHandler(logFile, mode='a', maxBytes=20*1024*1024,
backupCount=5, encoding=None, delay=0)
file_handler.rotator = _logging_rotater
file_handler.namer = _logging_namer
file_handler.setFormatter(filelog_formatter)
file_handler.setLevel(logging.DEBUG)
std_handler = StreamHandler(sys.stdout)
std_handler.setFormatter(syslog_formatter)
std_handler.setLevel(LOG_STD_LEVEL)
app_log = logging.getLogger('root')
app_log.setLevel(logging.DEBUG)
# app_log.addHandler(file_handler)
app_log.addHandler(std_handler)
return app_log
def run_cmd(args, name=None, maxlines=7, timeout=300):
success = True
try:
res = subprocess.check_output(args, stderr=subprocess.STDOUT, timeout=timeout).decode('UTF-8').strip()
except subprocess.CalledProcessError as e:
success = False
if name:
res = "%s:\nError: %s\n%s" % (name, e, e.output.decode('UTF-8').strip())
else:
res = "Error %s:\n%s" % (e, e.output.decode('UTF-8').strip())
except subprocess.TimeoutExpired as e:
success = False
if name:
res = "%s:\nError: Timeout when calling Process:\n%s\n%s" % (name, e, e.output.decode('UTF-8').strip())
else:
res = "Error %s:Timeout when calling Process:\n%s" % (e, e.output.decode('UTF-8').strip())
res = '\n'.join([x for x in res.split('\n') if x][:maxlines])
return (success, res)
def _repl(regex, sub, text):
return re.sub(regex, sub, text, flags=re.UNICODE)
def format_sms(type, text, headers):
msg = f"{type}\n"
msg += '\n'.join([f"{k[0].upper() + k[1:]}: {v}" for k,v in headers.items() if len(k) > 1])
if text:
msg += f"\n\n{text}"
return msg
try:
import emoji
use_emoji = True
except ModuleNotFoundError:
use_emoji = False
def replaceEmoticons(text):
text = _repl(r'\U0001F60A', ':-)', text)
text = _repl(r'\U0001F914', ':-?', text)
# text = _repl(r'\U0001F602', ':-D', text)
text = _repl(r'\U0001F604', ':-D', text)
if use_emoji:
return emoji.demojize(text)
else:
return text
def sizeof_fmt(num, suffix='B'):
for unit in ['','Ki','Mi','Gi','Ti','Pi','Ei','Zi']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix)
|
194907
|
f = open("io/data/file1")
print(f.readline())
print(f.readline(3))
print(f.readline(4))
print(f.readline(5))
print(f.readline())
# readline() on writable file
f = open("io/data/file1", "ab")
try:
f.readline()
except OSError:
print("OSError")
f.close()
|
194926
|
import FWCore.ParameterSet.Config as cms
muonCSCStubPSet = cms.PSet(
#csc CLCT, central BX 7
cscCLCT = cms.PSet(
verbose = cms.int32(0),
inputTag = cms.InputTag("simCscTriggerPrimitiveDigis"),
minBX = cms.int32(6),
maxBX = cms.int32(8),
minNHitsChamber = cms.int32(4),
),
#csc ALCT, central BX 3 in CMSSW
cscALCT = cms.PSet(
verbose = cms.int32(0),
inputTag = cms.InputTag("simCscTriggerPrimitiveDigis"),
minBX = cms.int32(2),
maxBX = cms.int32(4),
minNHitsChamber = cms.int32(4),
),
#csc LCT, central BX 8
cscLCT = cms.PSet(
verbose = cms.int32(0),
inputTag = cms.InputTag("simCscTriggerPrimitiveDigis"),
minBX = cms.int32(7),
maxBX = cms.int32(9),
minNHitsChamber = cms.int32(4),
addGhosts = cms.bool(False)
),
#csc MPC-sorted LCT, central BX 8
cscMPLCT = cms.PSet(
verbose = cms.int32(0),
inputTag = cms.InputTag("simCscTriggerPrimitiveDigis","MPCSORTED"),
minBX = cms.int32(7),
maxBX = cms.int32(9),
minNHitsChamber = cms.int32(4),
),
)
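# --- Hedged usage sketch (not part of the original config) ---
# A PSet like this is normally cloned into an analyzer configuration; the
# analyzer type name below is hypothetical, only cms.EDAnalyzer and clone()
# are standard CMSSW constructs.
# exampleAnalyzer = cms.EDAnalyzer("HypotheticalCSCStubAnalyzer",
#     cscStubs = muonCSCStubPSet.clone()
# )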
|
194961
|
import torch
from torchdiffeq import odeint_adjoint
from deprecated.anode.adjoint import odesolver_adjoint
from .dynamics import InversedDynamics, LossDynamics, DensityDynamics
class Flow(torch.nn.Module):
def forward(self, x, inverse=False):
pass
class StackedFlow(torch.nn.Module):
def __init__(self, flows):
super().__init__()
self._flows = flows
def forward(self, x, inverse=False):
n_batch = x.shape[0]
flows = self._flows
if inverse:
flows = reversed(flows)
logp = torch.zeros(n_batch, 1)
for flow in flows:
x, dlogp = flow(x, inverse=inverse)
logp = logp + dlogp
return x, logp
class ContinuousNormalizingFlow(torch.nn.Module):
def __init__(
self,
dynamics,
integrator="dopri5",
atol=1e-10,
rtol=1e-5,
n_time_steps=2,
**kwargs
):
super().__init__()
self._dynamics = DensityDynamics(dynamics)
self._inverse_dynamics = DensityDynamics(InversedDynamics(dynamics))
self._integrator_method = integrator
self._integrator_atol = atol
self._integrator_rtol = rtol
self._n_time_steps = n_time_steps
self._kwargs = kwargs
def forward(self, x, t=1.0, inverse=False, keep_all=False, checkpoint=False, **kwargs):
n_batch = x.shape[0]
logp_init = torch.zeros(n_batch, 1).to(x)
state = torch.cat([x, logp_init], dim=-1).contiguous()
if inverse:
dynamics = self._inverse_dynamics
dynamics._t_max = t
else:
dynamics = self._dynamics
ts = torch.linspace(0.0, t, self._n_time_steps).to(x)
kwargs = {**self._kwargs, **kwargs}
if not checkpoint:
state = odeint_adjoint(
dynamics,
state,
t=ts,
method=self._integrator_method,
rtol=self._integrator_rtol,
atol=self._integrator_atol,
options=kwargs
)
else:
state = odesolver_adjoint(dynamics, state, options=kwargs)
if len(state.shape) < 3 or not keep_all:
if len(state.shape) > 2:
state = state[-1]
x = state[:, :-1]
logp = state[:, -1:]
else:
x = [s[:, :-1] for s in state]
logp = [s[:, -1:] for s in state]
return x, logp
def set_trace(self, trace):
pass
class ContinuousNormalizingFlowOld(torch.nn.Module):
def __init__(
self,
dynamics,
integrator="dopri5",
atol=1e-10,
rtol=1e-5,
n_time_steps=2,
):
super().__init__()
self._dynamics = dynamics
self._inverse_dynamics = InversedDynamics(dynamics)
self._integrator_method = integrator
self._integrator_atol = atol
self._integrator_rtol = rtol
self._n_time_steps = n_time_steps
def forward(self, x, t=1.0, inverse=False, keep_all=False):
n_batch = x.shape[0]
logp_init = torch.zeros(n_batch, 1).to(x)
state = torch.cat([x, logp_init], dim=-1).contiguous()
if inverse:
dynamics = self._inverse_dynamics
dynamics._t_max = t
else:
dynamics = self._dynamics
ts = torch.linspace(0.0, t, self._n_time_steps).to(x)
state = odeint_adjoint(
dynamics,
state,
t=ts,
method=self._integrator_method,
rtol=self._integrator_rtol,
atol=self._integrator_atol,
)
if not keep_all:
state = state[-1]
x = state[:, :-1]
logp = state[:, -1:]
else:
x = [s[:, :-1] for s in state]
logp = [s[:, -1:] for s in state]
return x, logp
class ContinuousNormalizingFlowMultiTemperature(torch.nn.Module):
def __init__(
self,
dynamics,
energy,
integrator="dopri5",
atol=1e-10,
rtol=1e-5,
n_time_steps=2,
):
super().__init__()
self._dynamics = LossDynamics(dynamics, energy)
self._energy = energy
self._inverse_dynamics = InversedDynamics(dynamics)
self._integrator_method = integrator
self._integrator_atol = atol
self._integrator_rtol = rtol
self._n_time_steps = n_time_steps
def forward(self, x, t=1.0, inverse=False):
n_batch = x.shape[0]
logp_init = torch.zeros(n_batch, 1).to(x)
loss_init = torch.zeros(n_batch, 1).to(x)
state = torch.cat([x, logp_init, loss_init], dim=-1).contiguous()
if inverse:
dynamics = self._inverse_dynamics
dynamics._t_max = t
else:
dynamics = self._dynamics
ts = torch.linspace(0.0, t, self._n_time_steps).to(x)
state = odeint_adjoint(
dynamics,
state,
t=ts,
method=self._integrator_method,
rtol=self._integrator_rtol,
atol=self._integrator_atol,
)[-1]
x = state[:, :-2]
logp = state[:, -2]
loss = state[:, -1]
return x, logp, loss
class DiffEqFlow(Flow):
def __init__(
self,
dynamics,
integrator="dopri5",
atol=1e-10,
rtol=1e-5,
n_time_steps=2,
t_max = 1.,
use_checkpoints=False,
**kwargs
):
super().__init__()
self._dynamics = DensityDynamics(dynamics)
self._inverse_dynamics = DensityDynamics(InversedDynamics(dynamics, t_max))
self._integrator_method = integrator
self._integrator_atol = atol
self._integrator_rtol = rtol
self._n_time_steps = n_time_steps
self._t_max = t_max
self._use_checkpoints = use_checkpoints
self._kwargs = kwargs
def _forward(self, *xs, **kwargs):
return self._run_ode(*xs, dynamics=self._dynamics, **kwargs)
def _inverse(self, *xs, **kwargs):
return self._run_ode(*xs, dynamics=self._inverse_dynamics, **kwargs)
def _run_ode(self, *xs, dynamics, **kwargs):
# TODO: kwargs should be parsed to avoid conflicts!
assert(all(x.shape[0] == xs[0].shape[0] for x in xs[1:]))
n_batch = xs[0].shape[0]
logp_init = torch.zeros(n_batch, 1).to(xs[0])
state = [*xs, logp_init]
ts = torch.linspace(0.0, self._t_max, self._n_time_steps).to(xs[0])
kwargs = {**self._kwargs, **kwargs}
# TODO: remove this and give every dynamics its kwargs
if "brute_force" in kwargs:
print("yes")
dynamics.before_ode(**kwargs)
else:
dynamics.before_ode()
if not self._use_checkpoints:
from torchdiffeq import odeint_adjoint
*ys, dlogp = odeint_adjoint(
dynamics,
state,
t=ts,
method=self._integrator_method,
rtol=self._integrator_rtol,
atol=self._integrator_atol,
options=kwargs
)
else:
from deprecated.anode.adjoint import odesolver_adjoint
state = odesolver_adjoint(dynamics, state, options=kwargs)
ys = [y[-1] for y in ys]
dlogp = dlogp[-1]
return (*ys, dlogp)
|
194983
|
BLOCKCHAIN = {
'class': 'thenewboston_node.business_logic.blockchain.file_blockchain.FileBlockchain',
'kwargs': {},
}
BLOCKCHAIN_URL_PATH_PREFIX = '/blockchain/'
|
194997
|
import unittest
import numpy as np
import numpy.testing as npt
import wisdem.drivetrainse.layout as lay
import wisdem.drivetrainse.drive_structure as ds
from wisdem.commonse import gravity
npts = 12
class TestDirectStructure(unittest.TestCase):
def setUp(self):
self.inputs = {}
self.outputs = {}
self.discrete_inputs = {}
self.discrete_outputs = {}
self.opt = {}
self.discrete_inputs["upwind"] = True
self.inputs["L_12"] = 2.0
self.inputs["L_h1"] = 1.0
self.inputs["L_generator"] = 3.25
# self.inputs['L_2n'] = 1.5
# self.inputs['L_grs'] = 1.1
# self.inputs['L_gsn'] = 1.1
self.inputs["L_hss"] = 0.75
self.inputs["L_gearbox"] = 1.2
self.inputs["overhang"] = 6.25
self.inputs["drive_height"] = 4.875
self.inputs["tilt"] = 4.0
self.inputs["access_diameter"] = 0.9
myones = np.ones(5)
self.inputs["lss_diameter"] = 3.3 * myones
self.inputs["lss_wall_thickness"] = 0.45 * myones
self.inputs["hss_diameter"] = 1.6 * np.ones(3)
self.inputs["hss_wall_thickness"] = 0.25 * np.ones(3)
self.inputs["nose_diameter"] = 2.2 * myones
self.inputs["nose_wall_thickness"] = 0.1 * myones
self.inputs["bedplate_wall_thickness"] = 0.06 * np.ones(npts)
self.inputs["bedplate_flange_width"] = 1.5
self.inputs["bedplate_flange_thickness"] = 0.05
# self.inputs['bedplate_web_height'] = 1.0
self.inputs["bedplate_web_thickness"] = 0.05
self.inputs["D_top"] = 6.5
self.inputs["hub_diameter"] = 4.0
self.inputs["other_mass"] = 200e3
self.inputs["mb1_mass"] = 10e3
self.inputs["mb1_I"] = 10e3 * 0.5 * 2 ** 2 * np.ones(3)
self.inputs["mb2_mass"] = 10e3
self.inputs["mb2_I"] = 10e3 * 0.5 * 1.5 ** 2 * np.ones(3)
self.inputs["mb1_max_defl_ang"] = 0.008
self.inputs["mb2_max_defl_ang"] = 0.008
self.inputs["m_stator"] = 100e3
self.inputs["cm_stator"] = -0.3
self.inputs["I_stator"] = np.array([1e6, 5e5, 5e5, 0.0, 0.0, 0.0])
self.inputs["generator_rotor_mass"] = 100e3
self.inputs["cm_rotor"] = -0.3
self.inputs["generator_rotor_I"] = np.array([1e6, 5e5, 5e5, 0.0, 0.0, 0.0])
self.inputs["generator_stator_mass"] = 100e3
self.inputs["cm_rotor"] = -0.3
self.inputs["generator_stator_I"] = np.array([1e6, 5e5, 5e5, 0.0, 0.0, 0.0])
self.inputs["generator_mass"] = 200e3
self.inputs["generator_I"] = np.array([2e6, 1e6, 1e6, 0.0, 0.0, 0.0])
self.inputs["gearbox_mass"] = 100e3
self.inputs["gearbox_I"] = np.array([1e6, 5e5, 5e5])
self.inputs["brake_mass"] = 10e3
self.inputs["brake_I"] = np.array([1e4, 5e3, 5e3])
self.inputs["carrier_mass"] = 10e3
self.inputs["carrier_I"] = np.array([1e4, 5e3, 5e3])
self.inputs["gear_ratio"] = 1.0
self.inputs["F_mb1"] = np.array([2409.750e3, -1716.429e3, 74.3529e3]).reshape((3, 1))
self.inputs["F_mb2"] = np.array([2409.750e3, -1716.429e3, 74.3529e3]).reshape((3, 1))
self.inputs["M_mb1"] = np.array([-1.83291e7, 6171.7324e3, 5785.82946e3]).reshape((3, 1))
self.inputs["M_mb2"] = np.array([-1.83291e7, 6171.7324e3, 5785.82946e3]).reshape((3, 1))
self.inputs["hub_system_mass"] = 100e3
self.inputs["hub_system_cm"] = 2.0
self.inputs["hub_system_I"] = np.array([2409.750e3, -1716.429e3, 74.3529e3, 0.0, 0.0, 0.0])
self.inputs["F_hub"] = np.array([2409.750e3, 0.0, 74.3529e2]).reshape((3, 1))
self.inputs["M_hub"] = np.array([-1.83291e4, 6171.7324e2, 5785.82946e2]).reshape((3, 1))
self.inputs["lss_E"] = self.inputs["hss_E"] = self.inputs["bedplate_E"] = 210e9
self.inputs["lss_G"] = self.inputs["hss_G"] = self.inputs["bedplate_G"] = 80.8e9
self.inputs["lss_rho"] = self.inputs["hss_rho"] = self.inputs["bedplate_rho"] = 7850.0
self.inputs["lss_Xy"] = self.inputs["hss_Xy"] = self.inputs["bedplate_Xy"] = 250e6
self.opt["gamma_f"] = 1.35
self.opt["gamma_m"] = 1.3
self.opt["gamma_n"] = 1.0
def compute_layout(self, direct=True):
myobj = lay.DirectLayout() if direct else lay.GearedLayout()
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
for k in self.outputs.keys():
self.inputs[k] = self.outputs[k]
def testBaseF_BaseM(self):
self.inputs["tilt"] = 0.0
self.inputs["F_mb1"] = np.zeros(3).reshape((3, 1))
self.inputs["F_mb2"] = np.zeros(3).reshape((3, 1))
self.inputs["M_mb1"] = np.zeros(3).reshape((3, 1))
self.inputs["M_mb2"] = np.zeros(3).reshape((3, 1))
self.compute_layout()
myobj = ds.Nose_Stator_Bedplate_Frame(modeling_options=self.opt, n_dlcs=1)
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0)
npt.assert_almost_equal(self.outputs["base_M"][0], 0.0)
npt.assert_almost_equal(self.outputs["base_M"][-1], 0.0)
F0 = self.outputs["base_F"]
M0 = self.outputs["base_M"]
self.inputs["other_mass"] += 500e3
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0)
npt.assert_almost_equal(self.outputs["base_F"][2], F0[2] - 500e3 * gravity)
npt.assert_almost_equal(self.outputs["base_M"][0], 0.0)
npt.assert_almost_equal(self.outputs["base_M"][1], M0[1])
npt.assert_almost_equal(self.outputs["base_M"][2], 0.0)
self.inputs["M_mb1"] = 10e3 * np.arange(1, 4).reshape((3, 1))
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0)
npt.assert_almost_equal(self.outputs["base_F"][2], F0[2] - 500e3 * gravity)
npt.assert_almost_equal(self.outputs["base_M"], M0 + self.inputs["M_mb1"], decimal=0)
self.inputs["M_mb2"] = 20e3 * np.arange(1, 4).reshape((3, 1))
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0)
npt.assert_almost_equal(self.outputs["base_F"][2], F0[2] - 500e3 * gravity)
npt.assert_almost_equal(self.outputs["base_M"], M0 + self.inputs["M_mb1"] + self.inputs["M_mb2"], decimal=-1)
self.inputs["F_mb1"] = np.array([30e2, 40e2, 50e2]).reshape((3, 1))
self.inputs["F_mb2"] = np.array([30e2, 40e2, 50e2]).reshape((3, 1))
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2], 2 * self.inputs["F_mb2"][:2])
npt.assert_almost_equal(self.outputs["base_F"][2], F0[2] - 500e3 * gravity + 2 * 50e2)
def testBaseF_BaseM_withTilt(self):
self.inputs["tilt"] = 5.0
self.inputs["F_mb1"] = np.zeros(3).reshape((3, 1))
self.inputs["F_mb2"] = np.zeros(3).reshape((3, 1))
self.inputs["M_mb1"] = np.zeros(3).reshape((3, 1))
self.inputs["M_mb2"] = np.zeros(3).reshape((3, 1))
self.compute_layout()
myobj = ds.Nose_Stator_Bedplate_Frame(modeling_options=self.opt, n_dlcs=1)
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["base_M"][0], 0.0)
npt.assert_almost_equal(self.outputs["base_M"][-1], 0.0)
F0 = self.outputs["base_F"]
M0 = self.outputs["base_M"]
self.inputs["other_mass"] += 500e3
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["base_F"][2], F0[2] - 500e3 * gravity)
npt.assert_almost_equal(self.outputs["base_M"][0], 0.0)
npt.assert_almost_equal(self.outputs["base_M"][1], M0[1])
npt.assert_almost_equal(self.outputs["base_M"][2], 0.0)
self.inputs["M_mb1"] = 10e3 * np.arange(1, 4).reshape((3, 1))
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["base_F"][2], F0[2] - 500e3 * gravity)
npt.assert_almost_equal(self.outputs["base_M"][1], M0[1] + self.inputs["M_mb1"][1], decimal=0)
self.inputs["M_mb2"] = 20e3 * np.arange(1, 4).reshape((3, 1))
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["base_F"][2], F0[2] - 500e3 * gravity)
npt.assert_almost_equal(
self.outputs["base_M"][1], M0[1] + self.inputs["M_mb1"][1] + self.inputs["M_mb2"][1], decimal=-1
)
self.inputs["F_mb1"] = np.array([30e2, 40e2, 50e2]).reshape((3, 1))
self.inputs["F_mb2"] = np.array([30e2, 40e2, 50e2]).reshape((3, 1))
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][1], 2 * self.inputs["F_mb2"][1])
def testBaseF_BaseM_Downwind(self):
self.inputs["tilt"] = 0.0
self.discrete_inputs["upwind"] = False
self.inputs["F_mb1"] = np.zeros(3).reshape((3, 1))
self.inputs["F_mb2"] = np.zeros(3).reshape((3, 1))
self.inputs["M_mb1"] = np.zeros(3).reshape((3, 1))
self.inputs["M_mb2"] = np.zeros(3).reshape((3, 1))
self.compute_layout()
myobj = ds.Nose_Stator_Bedplate_Frame(modeling_options=self.opt, n_dlcs=1)
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0)
npt.assert_almost_equal(self.outputs["base_M"][0], 0.0)
npt.assert_almost_equal(self.outputs["base_M"][-1], 0.0)
F0 = self.outputs["base_F"]
M0 = self.outputs["base_M"]
self.inputs["other_mass"] += 500e3
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0)
npt.assert_almost_equal(self.outputs["base_F"][2], F0[2] - 500e3 * gravity)
npt.assert_almost_equal(self.outputs["base_M"][0], 0.0)
npt.assert_almost_equal(self.outputs["base_M"][1], M0[1])
npt.assert_almost_equal(self.outputs["base_M"][2], 0.0)
self.inputs["M_mb1"] = 10e3 * np.arange(1, 4).reshape((3, 1))
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0)
npt.assert_almost_equal(self.outputs["base_F"][2], F0[2] - 500e3 * gravity)
npt.assert_almost_equal(self.outputs["base_M"], M0 + self.inputs["M_mb1"], decimal=0)
self.inputs["M_mb2"] = 20e3 * np.arange(1, 4).reshape((3, 1))
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0)
npt.assert_almost_equal(self.outputs["base_F"][2], F0[2] - 500e3 * gravity)
npt.assert_almost_equal(self.outputs["base_M"], M0 + self.inputs["M_mb1"] + self.inputs["M_mb2"], decimal=-1)
self.inputs["F_mb1"] = np.array([30e2, 40e2, 50e2]).reshape((3, 1))
self.inputs["F_mb2"] = np.array([30e2, 40e2, 50e2]).reshape((3, 1))
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2], 2 * self.inputs["F_mb2"][:2])
npt.assert_almost_equal(self.outputs["base_F"][2], F0[2] - 500e3 * gravity + 2 * 50e2)
def testBaseF_BaseM_withTilt_Downwind(self):
self.inputs["tilt"] = 5.0
self.discrete_inputs["upwind"] = False
self.inputs["F_mb1"] = np.zeros(3).reshape((3, 1))
self.inputs["F_mb2"] = np.zeros(3).reshape((3, 1))
self.inputs["M_mb1"] = np.zeros(3).reshape((3, 1))
self.inputs["M_mb2"] = np.zeros(3).reshape((3, 1))
self.compute_layout()
myobj = ds.Nose_Stator_Bedplate_Frame(modeling_options=self.opt, n_dlcs=1)
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["base_M"][0], 0.0)
npt.assert_almost_equal(self.outputs["base_M"][-1], 0.0)
F0 = self.outputs["base_F"]
M0 = self.outputs["base_M"]
self.inputs["other_mass"] += 500e3
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["base_F"][2], F0[2] - 500e3 * gravity)
npt.assert_almost_equal(self.outputs["base_M"][0], 0.0)
npt.assert_almost_equal(self.outputs["base_M"][1], M0[1])
npt.assert_almost_equal(self.outputs["base_M"][2], 0.0)
self.inputs["M_mb1"] = 10e3 * np.arange(1, 4).reshape((3, 1))
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["base_F"][2], F0[2] - 500e3 * gravity)
npt.assert_almost_equal(self.outputs["base_M"][1], M0[1] + self.inputs["M_mb1"][1], decimal=0)
self.inputs["M_mb2"] = 20e3 * np.arange(1, 4).reshape((3, 1))
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["base_F"][2], F0[2] - 500e3 * gravity)
npt.assert_almost_equal(
self.outputs["base_M"][1], M0[1] + self.inputs["M_mb1"][1] + self.inputs["M_mb2"][1], decimal=-1
)
self.inputs["F_mb1"] = np.array([30e2, 40e2, 50e2]).reshape((3, 1))
self.inputs["F_mb2"] = np.array([30e2, 40e2, 50e2]).reshape((3, 1))
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][1], 2 * self.inputs["F_mb2"][1])
def testBaseF_BaseM_Geared(self):
self.inputs["tilt"] = 0.0
self.inputs["F_mb1"] = np.zeros(3).reshape((3, 1))
self.inputs["F_mb2"] = np.zeros(3).reshape((3, 1))
self.inputs["F_torq"] = np.zeros(3).reshape((3, 1))
self.inputs["F_generator"] = np.zeros(3).reshape((3, 1))
self.inputs["M_mb1"] = np.zeros(3).reshape((3, 1))
self.inputs["M_mb2"] = np.zeros(3).reshape((3, 1))
self.inputs["M_torq"] = np.zeros(3).reshape((3, 1))
self.inputs["M_generator"] = np.zeros(3).reshape((3, 1))
self.compute_layout(False)
myobj = ds.Bedplate_IBeam_Frame(modeling_options=self.opt, n_dlcs=1)
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2, 0], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["base_M"][[0, 2], 0], 0.0, decimal=2)
F0 = self.outputs["base_F"][:, 0]
M0 = self.outputs["base_M"][:, 0]
self.inputs["other_mass"] += 500e3
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2, 0], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["base_F"][2, 0], F0[2] - 500e3 * gravity)
npt.assert_almost_equal(self.outputs["base_M"][[0, 2], 0], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["base_M"][1], M0[1])
self.inputs["M_mb1"] = 10e3 * np.arange(1, 4).reshape((3, 1))
self.inputs["M_mb2"] = 20e3 * np.arange(1, 4).reshape((3, 1))
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2, 0], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["base_F"][2, 0], F0[2] - 500e3 * gravity, decimal=0)
# npt.assert_almost_equal(self.outputs['base_M'], M0+self.inputs['M_mb1']+self.inputs['M_mb2'], decimal=-1)
self.inputs["F_mb1"] = self.inputs["F_mb2"] = self.inputs["F_generator"] = self.inputs["F_torq"] = np.array(
[30e2, 40e2, 50e2]
).reshape((3, 1))
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2, 0], 4 * self.inputs["F_mb1"][:2, 0], decimal=1)
npt.assert_almost_equal(self.outputs["base_F"][2, 0], F0[2] - 500e3 * gravity + 4 * 50e2, decimal=0)
def testBaseF_BaseM_withTilt_Geared(self):
self.inputs["tilt"] = 5.0
self.inputs["F_mb1"] = np.zeros(3).reshape((3, 1))
self.inputs["F_mb2"] = np.zeros(3).reshape((3, 1))
self.inputs["F_torq"] = np.zeros(3).reshape((3, 1))
self.inputs["F_generator"] = np.zeros(3).reshape((3, 1))
self.inputs["M_mb1"] = np.zeros(3).reshape((3, 1))
self.inputs["M_mb2"] = np.zeros(3).reshape((3, 1))
self.inputs["M_torq"] = np.zeros(3).reshape((3, 1))
self.inputs["M_generator"] = np.zeros(3).reshape((3, 1))
self.compute_layout(False)
myobj = ds.Bedplate_IBeam_Frame(modeling_options=self.opt, n_dlcs=1)
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2, 0], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["base_M"][[0, 2], 0], 0.0, decimal=2)
F0 = self.outputs["base_F"][:, 0]
M0 = self.outputs["base_M"][:, 0]
self.inputs["other_mass"] += 500e3
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2, 0], 0.0, decimal=1)
npt.assert_almost_equal(self.outputs["base_F"][2, 0], F0[2] - 500e3 * gravity)
npt.assert_almost_equal(self.outputs["base_M"][[0, 2], 0], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["base_M"][1], M0[1])
self.inputs["M_mb1"] = 10e3 * np.arange(1, 4).reshape((3, 1))
self.inputs["M_mb2"] = 20e3 * np.arange(1, 4).reshape((3, 1))
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2, 0], 0.0, decimal=1)
npt.assert_almost_equal(self.outputs["base_F"][2, 0], F0[2] - 500e3 * gravity, decimal=0)
# npt.assert_almost_equal(self.outputs['base_M'], M0+self.inputs['M_mb1']+self.inputs['M_mb2'], decimal=-1)
self.inputs["F_mb1"] = self.inputs["F_mb2"] = self.inputs["F_generator"] = self.inputs["F_torq"] = np.array(
[30e2, 40e2, 50e2]
).reshape((3, 1))
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][1, 0], 4 * self.inputs["F_mb1"][1, 0], decimal=1)
def testRunRotatingDirect_noTilt(self):
self.inputs["tilt"] = 0.0
self.inputs["F_hub"] = np.zeros(3).reshape((3, 1))
self.inputs["M_hub"] = np.zeros(3).reshape((3, 1))
self.compute_layout()
myobj = ds.Hub_Rotor_LSS_Frame(n_dlcs=1, modeling_options=self.opt, direct_drive=True)
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
F0 = self.outputs["F_mb1"].flatten()
M0 = self.outputs["M_mb2"].flatten()
self.assertGreater(0.0, F0[-1])
# self.assertGreater(0.0, M0[1])
npt.assert_almost_equal(self.outputs["F_mb1"][:2], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["F_mb2"], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["F_torq"], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["M_mb1"], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["M_mb2"][[0, 2]], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["M_torq"], 0.0, decimal=2)
self.assertAlmostEqual(
self.outputs["lss_spring_constant"], 80.8e9 * np.pi * (3.3 ** 4 - 2.4 ** 4) / 32 / self.inputs["L_lss"], 4
)
g = np.array([30e2, 40e2, 50e2])
self.inputs["F_hub"] = g.reshape((3, 1))
self.inputs["M_hub"] = 2 * g.reshape((3, 1))
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["F_mb1"].flatten(), g + F0, decimal=2)
npt.assert_almost_equal(self.outputs["F_mb2"], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["F_torq"], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["M_mb1"], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["M_mb2"].flatten()[0], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["M_mb2"].flatten()[1], g[-1] * 1 + 2 * g[1] + M0[1], decimal=1) # *1=*L_h1
npt.assert_almost_equal(self.outputs["M_mb2"].flatten()[2], -g[1] * 1 + 2 * g[2], decimal=1) # *1=*L_h1
npt.assert_almost_equal(self.outputs["M_torq"].flatten(), np.r_[2 * g[0], 0.0, 0.0], decimal=2)
def testRunRotatingDirect_withTilt(self):
self.inputs["tilt"] = 5.0
self.inputs["F_hub"] = np.zeros(3).reshape((3, 1))
self.inputs["M_hub"] = np.zeros(3).reshape((3, 1))
self.compute_layout()
myobj = ds.Hub_Rotor_LSS_Frame(n_dlcs=1, modeling_options=self.opt, direct_drive=True)
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
F0 = self.outputs["F_mb1"].flatten()
M0 = self.outputs["M_mb2"].flatten()
self.assertGreater(0.0, F0[0])
self.assertGreater(0.0, F0[-1])
# self.assertGreater(0.0, M0[1])
npt.assert_almost_equal(self.outputs["F_mb1"][1], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["F_mb2"], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["F_torq"], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["M_mb1"], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["M_mb2"][[0, 2]], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["M_torq"], 0.0, decimal=2)
self.assertAlmostEqual(
self.outputs["lss_spring_constant"], 80.8e9 * np.pi * (3.3 ** 4 - 2.4 ** 4) / 32 / self.inputs["L_lss"], 4
)
g = np.array([30e2, 40e2, 50e2])
self.inputs["F_hub"] = g.reshape((3, 1))
self.inputs["M_hub"] = 2 * g.reshape((3, 1))
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["F_mb1"].flatten(), g + F0, decimal=2)
npt.assert_almost_equal(self.outputs["F_mb2"], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["F_torq"], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["M_mb1"], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["M_mb2"].flatten()[0], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["M_mb2"].flatten()[1], g[-1] * 1 + 2 * g[1] + M0[1], decimal=1) # *1=*L_h1
npt.assert_almost_equal(self.outputs["M_mb2"].flatten()[2], -g[1] * 1 + 2 * g[2], decimal=1) # *1=*L_h1
npt.assert_almost_equal(self.outputs["M_torq"].flatten(), np.r_[2 * g[0], 0.0, 0.0], decimal=2)
def testRunRotatingGeared_noTilt(self):
self.inputs["tilt"] = 0.0
self.inputs["gear_ratio"] = 50.0
self.inputs["F_hub"] = np.zeros(3).reshape((3, 1))
self.inputs["M_hub"] = np.zeros(3).reshape((3, 1))
self.compute_layout(False)
myobj = ds.Hub_Rotor_LSS_Frame(n_dlcs=1, modeling_options=self.opt, direct_drive=False)
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
F0 = self.outputs["F_mb1"].flatten()
M0 = self.outputs["M_mb2"].flatten()
self.assertGreater(0.0, F0[-1])
# self.assertGreater(0.0, M0[1])
npt.assert_almost_equal(self.outputs["F_mb1"][:2], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["F_mb2"], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["F_torq"], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["M_mb1"], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["M_mb2"][[0, 2]], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["M_torq"], 0.0, decimal=2)
self.assertAlmostEqual(
self.outputs["lss_spring_constant"], 80.8e9 * np.pi * (3.3 ** 4 - 2.4 ** 4) / 32 / self.inputs["L_lss"], 4
)
g = np.array([30e2, 40e2, 50e2])
self.inputs["F_hub"] = g.reshape((3, 1))
self.inputs["M_hub"] = 2 * g.reshape((3, 1))
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["F_mb1"].flatten(), g + F0, decimal=2)
npt.assert_almost_equal(self.outputs["F_mb2"], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["F_torq"], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["M_mb1"], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["M_mb2"].flatten()[0], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["M_mb2"].flatten()[1], g[-1] * 1 + 2 * g[1] + M0[1], decimal=2) # *1=*L_h1
npt.assert_almost_equal(self.outputs["M_mb2"].flatten()[2], -g[1] * 1 + 2 * g[2], decimal=2) # *1=*L_h1
npt.assert_almost_equal(self.outputs["M_torq"].flatten(), np.r_[2 * g[0], 0.0, 0.0], decimal=2)
def testRunRotatingGeared_withTilt(self):
self.inputs["tilt"] = 5.0
self.inputs["gear_ratio"] = 50.0
self.inputs["F_hub"] = np.zeros(3).reshape((3, 1))
self.inputs["M_hub"] = np.zeros(3).reshape((3, 1))
self.compute_layout(False)
myobj = ds.Hub_Rotor_LSS_Frame(n_dlcs=1, modeling_options=self.opt, direct_drive=False)
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
F0 = self.outputs["F_mb1"].flatten()
M0 = self.outputs["M_mb2"].flatten()
self.assertGreater(0.0, F0[0])
self.assertGreater(0.0, F0[-1])
# self.assertGreater(0.0, M0[1])
npt.assert_almost_equal(self.outputs["F_mb1"][1], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["F_mb2"], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["F_torq"], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["M_mb1"], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["M_mb2"][[0, 2]], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["M_torq"], 0.0, decimal=2)
self.assertAlmostEqual(
self.outputs["lss_spring_constant"], 80.8e9 * np.pi * (3.3 ** 4 - 2.4 ** 4) / 32 / self.inputs["L_lss"], 4
)
g = np.array([30e2, 40e2, 50e2])
self.inputs["F_hub"] = g.reshape((3, 1))
self.inputs["M_hub"] = 2 * g.reshape((3, 1))
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["F_mb1"].flatten(), g + F0, decimal=2)
npt.assert_almost_equal(self.outputs["F_mb2"], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["F_torq"], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["M_mb1"], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["M_mb2"].flatten()[0], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["M_mb2"].flatten()[1], g[-1] * 1 + 2 * g[1] + M0[1], decimal=2) # *1=*L_h1
npt.assert_almost_equal(self.outputs["M_mb2"].flatten()[2], -g[1] * 1 + 2 * g[2], decimal=2) # *1=*L_h1
npt.assert_almost_equal(self.outputs["M_torq"].flatten(), np.r_[2 * g[0], 0.0, 0.0], decimal=2)
def testHSS_noTilt(self):
self.inputs["tilt"] = 0.0
self.inputs["gear_ratio"] = 50.0
self.inputs["F_hub"] = np.zeros(3).reshape((3, 1))
self.inputs["M_hub"] = np.zeros(3).reshape((3, 1))
self.compute_layout(False)
myobj = ds.HSS_Frame(modeling_options=self.opt, n_dlcs=1)
myobj.compute(self.inputs, self.outputs)
F0 = self.outputs["F_generator"].flatten()
M0 = self.outputs["M_generator"].flatten()
self.assertGreater(0.0, F0[-1])
self.assertGreater(0.0, M0[1])
npt.assert_almost_equal(self.outputs["F_generator"].flatten()[:2], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["M_generator"].flatten()[[0, 2]], 0.0, decimal=2)
g = np.array([30e2, 40e2, 50e2])
self.inputs["F_hub"] = g.reshape((3, 1))
self.inputs["M_hub"] = 2 * g.reshape((3, 1))
self.compute_layout(False)
myobj = ds.HSS_Frame(modeling_options=self.opt, n_dlcs=1)
myobj.compute(self.inputs, self.outputs)
npt.assert_almost_equal(self.outputs["F_generator"].flatten(), F0, decimal=2)
npt.assert_almost_equal(self.outputs["M_generator"].flatten(), np.r_[2 * g[0] / 50.0, M0[1], 0.0], decimal=2)
def testHSS_withTilt(self):
self.inputs["tilt"] = 5.0
self.inputs["gear_ratio"] = 50.0
self.inputs["F_hub"] = np.zeros(3).reshape((3, 1))
self.inputs["M_hub"] = np.zeros(3).reshape((3, 1))
self.compute_layout(False)
myobj = ds.HSS_Frame(modeling_options=self.opt, n_dlcs=1)
myobj.compute(self.inputs, self.outputs)
F0 = self.outputs["F_generator"].flatten()
M0 = self.outputs["M_generator"].flatten()
self.assertGreater(0.0, F0[0])
self.assertGreater(0.0, F0[-1])
self.assertGreater(0.0, M0[1])
npt.assert_almost_equal(self.outputs["F_generator"].flatten()[1], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["M_generator"].flatten()[[0, 2]], 0.0, decimal=2)
g = np.array([30e2, 40e2, 50e2])
self.inputs["F_hub"] = g.reshape((3, 1))
self.inputs["M_hub"] = 2 * g.reshape((3, 1))
self.compute_layout(False)
myobj = ds.HSS_Frame(modeling_options=self.opt, n_dlcs=1)
myobj.compute(self.inputs, self.outputs)
npt.assert_almost_equal(self.outputs["F_generator"].flatten(), F0, decimal=2)
npt.assert_almost_equal(self.outputs["M_generator"].flatten(), np.r_[2 * g[0] / 50.0, M0[1], 0.0], decimal=2)
def testShaftTheoryLSS(self):
# https://www.engineersedge.com/calculators/torsional-stress-calculator.htm
self.inputs["tilt"] = 0.0
self.inputs["F_hub"] = np.zeros(3).reshape((3, 1))
self.inputs["M_hub"] = np.array([1e5, 0.0, 0.0]).reshape((3, 1))
self.inputs["brake_mass"] = 0.0
self.inputs["brake_I"] = np.zeros(3)
self.inputs["generator_rotor_mass"] = 0.0
self.inputs["cm_rotor"] = 0.0
self.inputs["generator_rotor_I"] = np.zeros(6)
self.inputs["hub_system_mass"] = 0.0
self.inputs["hub_system_cm"] = 0.0
self.inputs["hub_system_I"] = np.zeros(6)
myones = np.ones(5)
self.inputs["lss_diameter"] = 5 * myones
self.inputs["lss_wall_thickness"] = 0.5 * myones
self.inputs["G"] = 100e9
self.inputs["lss_rho"] = 1e-6
self.compute_layout()
myobj = ds.Hub_Rotor_LSS_Frame(n_dlcs=1, modeling_options=self.opt, direct_drive=True)
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
J = 0.5 * np.pi * (2.5 ** 4 - 2 ** 4)
sigma = 1e5 / J * 2.5
npt.assert_almost_equal(self.outputs["lss_axial_stress"], 0.0, decimal=4)
npt.assert_almost_equal(self.outputs["lss_shear_stress"].flatten(), np.r_[np.zeros(3), sigma], decimal=4)
def testShaftTheoryHSS(self):
# https://www.engineersedge.com/calculators/torsional-stress-calculator.htm
self.inputs["tilt"] = 0.0
self.inputs["gear_ratio"] = 50.0
self.inputs["s_hss"] = np.array([0.0, 0.5, 1.0])
self.inputs["M_hub"] = np.array([1e5, 0.0, 0.0]).reshape((3, 1))
self.inputs["s_generator"] = 0.0
self.inputs["generator_mass"] = 0.0
self.inputs["generator_I"] = np.zeros(3)
self.inputs["brake_mass"] = 0.0
self.inputs["brake_I"] = np.zeros(3)
self.inputs["hub_system_mass"] = 0.0
self.inputs["hub_system_cm"] = 0.0
self.inputs["hub_system_I"] = np.zeros(6)
myones = np.ones(3)
self.inputs["hss_diameter"] = 5 * myones
self.inputs["hss_wall_thickness"] = 0.5 * myones
self.inputs["G"] = 100e9
self.inputs["hss_rho"] = 1e-6
self.compute_layout()
myobj = ds.HSS_Frame(modeling_options=self.opt, n_dlcs=1)
myobj.compute(self.inputs, self.outputs)
J = 0.5 * np.pi * (2.5 ** 4 - 2 ** 4)
sigma = 1e5 / 50.0 / J * 2.5
npt.assert_almost_equal(self.outputs["hss_axial_stress"], 0.0, decimal=4)
npt.assert_almost_equal(self.outputs["hss_bending_stress"], 0.0, decimal=4)
npt.assert_almost_equal(self.outputs["hss_shear_stress"].flatten(), sigma * np.ones(2), decimal=4)
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestDirectStructure))
return suite
if __name__ == "__main__":
result = unittest.TextTestRunner().run(suite())
if result.wasSuccessful():
exit(0)
else:
exit(1)
|
195037
|
from invoke import task
from os.path import basename
from faasmcli.util.endpoints import get_upload_host_port
from faasmcli.util.upload_util import curl_file
@task(default=True)
def upload(ctx, in_path, shared_path):
"""
Upload a shared file to Faasm
"""
host, port = get_upload_host_port()
url = "http://{}:{}/file/".format(host, port)
local_filename = basename(in_path)
print("Uploading {} to {}".format(local_filename, shared_path))
curl_file(
url,
in_path,
headers={
"FilePath": shared_path,
},
)
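# --- Hedged usage note (not part of the original task) ---
# Typical invocation from the faasm CLI, assuming this invoke task is exposed
# under the name "upload" (paths are illustrative only):
#   inv upload /tmp/data.csv shared/data.csv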
|
195057
|
from base_model.ResNet import resnet101 as Resnet
from base_model.Classifier import Classifier_Module as Classifier
import torch.nn.functional as F
import torch
import torch.nn as nn
__ALL__ = ['Generator']
class Generator(nn.Module):
def __init__(self):
super().__init__()
self.base_model = Resnet()
self.aspp = Classifier([6,12,18,24],[6,12,18,24],1)
# Default initialization method
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, 0, 0.01)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1.0)
nn.init.zeros_(m.bias)
def load_weight(self, pth_path):
cpu_device = torch.device('cpu')
model_dict = self.base_model.state_dict()
old_dict = torch.load(open(pth_path, 'rb'), map_location=cpu_device)
old_dict = {k: v for k, v in old_dict.items() if (k in model_dict)}
model_dict.update(old_dict)
self.base_model.load_state_dict(model_dict)
del old_dict
del model_dict
print(f'load {pth_path} success!')
return True
def forward(self, x):
base_model_out = self.base_model(x)
out = self.aspp(base_model_out)
return F.interpolate(out, size=x.shape[-2:], mode='bilinear',align_corners=True)
if __name__ == '__main__':
net = Generator()
x = torch.rand((1,3,321,321))
out = net(x)
print(out.shape)
|
195066
|
from django.shortcuts import get_object_or_404
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from micro_admin.models import User
from django.contrib.auth.mixins import LoginRequiredMixin
class UserPermissionRequiredMixin(LoginRequiredMixin):
def dispatch(self, request, *args, **kwargs):
user = get_object_or_404(User, id=kwargs.get('user_id'))
if not (
request.user.is_admin or request.user == user or
(
request.user.has_perm("branch_manager") and
request.user.branch == user.branch
)
):
return HttpResponseRedirect(reverse('micro_admin:userslist'))
return super(UserPermissionRequiredMixin, self).dispatch(
request, *args, **kwargs)
class BranchAccessRequiredMixin(object):
def dispatch(self, request, *args, **kwargs):
if not hasattr(self, 'object'):
self.object = self.get_object()
# Checking the permissions
if not(
request.user.is_admin or
request.user.branch == self.object.branch
):
# TODO: Add "PermissionDenied" message
return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
return super(BranchAccessRequiredMixin, self).dispatch(
request, *args, **kwargs)
class BranchManagerRequiredMixin(object):
def dispatch(self, request, *args, **kwargs):
if not hasattr(self, 'object'):
self.object = self.get_object()
# Checking the permissions
if not(
request.user.is_admin or
(
request.user.has_perm("branch_manager") and
request.user.branch == self.object.branch
)
):
# TODO: Add "PermissionDenied" message
return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
return super(BranchManagerRequiredMixin, self).dispatch(
request, *args, **kwargs)
class ContentManagerRequiredMixin(object):
def dispatch(self, request, *args, **kwargs):
if not hasattr(self, 'object'):
self.object = self.get_object()
# Checking the permissions
if not(
self.request.user.is_admin or
self.request.user.has_perm('content_manager')
):
# TODO: Add "PermissionDenied" message
return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
return super(ContentManagerRequiredMixin, self).dispatch(
request, *args, **kwargs)
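# --- Hedged usage sketch (not part of the original module) ---
# A hypothetical detail view protected by one of the mixins above; the Branch
# model and template name are illustrative and not confirmed by this module.
# from django.views.generic import DetailView
# from micro_admin.models import Branch
#
# class BranchDetailView(BranchManagerRequiredMixin, DetailView):
#     model = Branch
#     template_name = "branch_detail.html"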
|
195067
|
import numpy as np
import tensorflow as tf
interpreter = tf.lite.Interpreter(model_path="hair_segmentation_512x512_float32.tflite")
# interpreter = tf.lite.Interpreter(model_path="hair_segmentation_512x512_weight_quant.tflite")
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
print('input:', input_details)
print('')
print('output:', output_details)
input_shape = input_details[0]['shape']
input_data = np.array(np.random.random_sample(input_shape), dtype=np.float32)
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
output_data = interpreter.get_tensor(output_details[0]['index'])
print('output_data.shape:', output_data.shape)
import cv2
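# --- Hedged usage sketch (not part of the original script) ---
# The cv2 import above is unused in the snippet; a common follow-up is to turn
# the model output into a viewable mask. The (1, H, W, C) layout and the use of
# the first channel are assumptions about this particular model.
mask = output_data[0]                          # drop the batch dimension
mask = (mask[..., 0] * 255).astype(np.uint8)   # scale first channel to 0-255
cv2.imwrite("hair_mask_preview.png", mask)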
|
195073
|
import asyncio
import aiorpcx
# Handlers are declared as normal python functions. aiorpcx automatically checks RPC
# arguments, including named arguments, and returns errors as appropriate
async def handle_echo(message):
return message
async def handle_sum(*values):
return sum(values, 0)
handlers = {
'echo': handle_echo,
'sum': handle_sum,
}
class ServerSession(aiorpcx.RPCSession):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
print(f'connection from {self.remote_address()}')
async def connection_lost(self):
await super().connection_lost()
print(f'{self.remote_address()} disconnected')
async def handle_request(self, request):
handler = handlers.get(request.method)
coro = aiorpcx.handler_invocation(handler, request)()
return await coro
loop = asyncio.get_event_loop()
loop.run_until_complete(aiorpcx.serve_rs(ServerSession, 'localhost', 8888))
loop.run_forever()
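# --- Hedged client sketch (not part of the original example) ---
# How a client might call the handlers above, assuming aiorpcx.connect_rs and
# RPCSession.send_request behave as in the aiorpcX examples; verify against the
# installed aiorpcx version before relying on this. Kept commented because the
# server loop above blocks forever.
# async def demo_client():
#     async with aiorpcx.connect_rs('localhost', 8888) as session:
#         print(await session.send_request('echo', ['Howdy']))
#         print(await session.send_request('sum', [1, 2, 3]))
# asyncio.get_event_loop().run_until_complete(demo_client())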
|
195120
|
from datetime import datetime
filenames = ['aes128cbc', 'aes128gcm', 'aes256cbc', 'aes256gcm']
for filename in filenames:
with open(filename, 'r') as f:
lines = f.readlines()
first_split = lines[0].strip('\n').split()
last_split = lines[-1].strip('\n').split()
prev_datetime = datetime.strptime(first_split[1][:-1], '%H:%M:%S')
s_time = prev_datetime
end_time = datetime.strptime(last_split[1][:-1], '%H:%M:%S')
total_datetime = int((end_time - s_time).total_seconds())
threshold = 0.6
secs = [0]
found = False
for l in lines:
if found:
splitted = l.strip('\n').split()
confidence = float(splitted[-1])
if confidence > threshold:
cur_time = splitted[1][:-1]
cur_datetime = datetime.strptime(cur_time, '%H:%M:%S')
t_diff = int((cur_datetime - prev_datetime).total_seconds())
secs.append(t_diff)
prev_datetime = cur_datetime
if ':267' in l:
found = not found
print "('" + filename + "',", str(secs) + '),', ' #', total_datetime
|
195151
|
import sys
import unittest
from ctypes import *
class MemFunctionsTest(unittest.TestCase):
## def test_overflow(self):
## # string_at and wstring_at must use the Python calling
## # convention (which acquires the GIL and checks the Python
## # error flag). Provoke an error and catch it; see also issue
## # #3554: <http://bugs.python.org/issue3554>
## self.assertRaises((OverflowError, MemoryError, SystemError),
## lambda: wstring_at(u"foo", sys.maxint - 1))
## self.assertRaises((OverflowError, MemoryError, SystemError),
## lambda: string_at("foo", sys.maxint - 1))
def test_memmove(self):
# large buffers apparently increase the chance that the memory
# is allocated in high address space.
a = create_string_buffer(1000000)
p = "Hello, World"
result = memmove(a, p, len(p))
self.failUnlessEqual(a.value, "Hello, World")
self.failUnlessEqual(string_at(result), "Hello, World")
self.failUnlessEqual(string_at(result, 5), "Hello")
self.failUnlessEqual(string_at(result, 16), "Hello, World\0\0\0\0")
self.failUnlessEqual(string_at(result, 0), "")
def test_memset(self):
a = create_string_buffer(1000000)
result = memset(a, ord('x'), 16)
self.failUnlessEqual(a.value, "xxxxxxxxxxxxxxxx")
self.failUnlessEqual(string_at(result), "xxxxxxxxxxxxxxxx")
self.failUnlessEqual(string_at(a), "xxxxxxxxxxxxxxxx")
self.failUnlessEqual(string_at(a, 20), "xxxxxxxxxxxxxxxx\0\0\0\0")
def test_cast(self):
a = (c_ubyte * 32)(*map(ord, "abcdef"))
self.failUnlessEqual(cast(a, c_char_p).value, "abcdef")
self.failUnlessEqual(cast(a, POINTER(c_byte))[:7],
[97, 98, 99, 100, 101, 102, 0])
self.failUnlessEqual(cast(a, POINTER(c_byte))[:7:],
[97, 98, 99, 100, 101, 102, 0])
self.failUnlessEqual(cast(a, POINTER(c_byte))[6:-1:-1],
[0, 102, 101, 100, 99, 98, 97])
self.failUnlessEqual(cast(a, POINTER(c_byte))[:7:2],
[97, 99, 101, 0])
self.failUnlessEqual(cast(a, POINTER(c_byte))[:7:7],
[97])
def test_string_at(self):
s = string_at("foo bar")
# XXX The following may be wrong, depending on how Python
# manages string instances
self.failUnlessEqual(2, sys.getrefcount(s))
self.failUnless(s, "foo bar")
self.failUnlessEqual(string_at("foo bar", 8), "foo bar\0")
self.failUnlessEqual(string_at("foo bar", 3), "foo")
try:
create_unicode_buffer
except NameError:
pass
else:
def test_wstring_at(self):
p = create_unicode_buffer("Hello, World")
a = create_unicode_buffer(1000000)
result = memmove(a, p, len(p) * sizeof(c_wchar))
self.failUnlessEqual(a.value, "Hello, World")
self.failUnlessEqual(wstring_at(a), "Hello, World")
self.failUnlessEqual(wstring_at(a, 5), "Hello")
self.failUnlessEqual(wstring_at(a, 16), "Hello, World\0\0\0\0")
self.failUnlessEqual(wstring_at(a, 0), "")
if __name__ == "__main__":
unittest.main()
|
195175
|
from typing import Any, Callable, List, Type, cast, Optional, Union
from . import CRUDGenerator, NOT_FOUND
from ._types import DEPENDENCIES, PAGINATION, PYDANTIC_SCHEMA as SCHEMA
CALLABLE = Callable[..., SCHEMA]
CALLABLE_LIST = Callable[..., List[SCHEMA]]
class MemoryCRUDRouter(CRUDGenerator[SCHEMA]):
def __init__(
self,
schema: Type[SCHEMA],
create_schema: Optional[Type[SCHEMA]] = None,
update_schema: Optional[Type[SCHEMA]] = None,
prefix: Optional[str] = None,
tags: Optional[List[str]] = None,
paginate: Optional[int] = None,
get_all_route: Union[bool, DEPENDENCIES] = True,
get_one_route: Union[bool, DEPENDENCIES] = True,
create_route: Union[bool, DEPENDENCIES] = True,
update_route: Union[bool, DEPENDENCIES] = True,
delete_one_route: Union[bool, DEPENDENCIES] = True,
delete_all_route: Union[bool, DEPENDENCIES] = True,
**kwargs: Any
) -> None:
super().__init__(
schema=schema,
create_schema=create_schema,
update_schema=update_schema,
prefix=prefix,
tags=tags,
paginate=paginate,
get_all_route=get_all_route,
get_one_route=get_one_route,
create_route=create_route,
update_route=update_route,
delete_one_route=delete_one_route,
delete_all_route=delete_all_route,
**kwargs
)
self.models: List[SCHEMA] = []
self._id = 1
def _get_all(self, *args: Any, **kwargs: Any) -> CALLABLE_LIST:
def route(pagination: PAGINATION = self.pagination) -> List[SCHEMA]:
skip, limit = pagination.get("skip"), pagination.get("limit")
skip = cast(int, skip)
return (
self.models[skip:]
if limit is None
else self.models[skip : skip + limit]
)
return route
def _get_one(self, *args: Any, **kwargs: Any) -> CALLABLE:
def route(item_id: int) -> SCHEMA:
for model in self.models:
if model.id == item_id: # type: ignore
return model
raise NOT_FOUND
return route
def _create(self, *args: Any, **kwargs: Any) -> CALLABLE:
def route(model: self.create_schema) -> SCHEMA: # type: ignore
model_dict = model.dict()
model_dict["id"] = self._get_next_id()
ready_model = self.schema(**model_dict)
self.models.append(ready_model)
return ready_model
return route
def _update(self, *args: Any, **kwargs: Any) -> CALLABLE:
def route(item_id: int, model: self.update_schema) -> SCHEMA: # type: ignore
for ind, model_ in enumerate(self.models):
if model_.id == item_id: # type: ignore
self.models[ind] = self.schema(
**model.dict(), id=model_.id # type: ignore
)
return self.models[ind]
raise NOT_FOUND
return route
def _delete_all(self, *args: Any, **kwargs: Any) -> CALLABLE_LIST:
def route() -> List[SCHEMA]:
self.models = []
return self.models
return route
def _delete_one(self, *args: Any, **kwargs: Any) -> CALLABLE:
def route(item_id: int) -> SCHEMA:
for ind, model in enumerate(self.models):
if model.id == item_id: # type: ignore
del self.models[ind]
return model
raise NOT_FOUND
return route
def _get_next_id(self) -> int:
id_ = self._id
self._id += 1
return id_
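# --- Hedged usage sketch (not part of the original module) ---
# Typical fastapi-crudrouter wiring with a hypothetical Potato schema; the id
# field matters because the in-memory routes above match records on model.id.
if __name__ == "__main__":  # illustrative only
    from fastapi import FastAPI
    from pydantic import BaseModel

    class Potato(BaseModel):
        id: int
        color: str

    app = FastAPI()
    app.include_router(MemoryCRUDRouter(schema=Potato))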
|
195201
|
import GRT
import sys
import argparse
def main():
'''GRT KMeansQuantizer Example
This examples demonstrates how to use the KMeansQuantizer module.
The KMeansQuantizer module quantizes the N-dimensional input vector to a 1-dimensional discrete
value. This value will be between [0 K-1], where K is the number of clusters used to create the
quantization model. Before you use the KMeansQuantizer, you need to train a quantization model.
To do this, you select the number of clusters you want your quantizer to have and then give it
any training data in the following formats:
- ClassificationData
- TimeSeriesClassificationData
- TimeSeriesClassificationDataStream
- UnlabelledClassificationData
- MatrixDouble
The example loads a basic dataset and uses this to train a quantization model. After the model
is trained, the data is then run through the quantizer and the quantized values are printed for
demonstration.'''
# Parse the data filename from the argument list
parser = argparse.ArgumentParser(description='Process some data.')
parser.add_argument('filename', help='A data file')
args = parser.parse_args()
# Load a basic dataset from a file
data = GRT.ClassificationData()
if not data.load(args.filename):
print("ERROR: Failed to load training data!\n")
sys.exit(1)
# Create a new KMeansQuantizer instance
quantizer = GRT.KMeansQuantizer ( 5 )
# Train the quantization model
if not quantizer.train( data ) :
print("ERROR: Failed to train quantizer!\n")
sys.exit(1)
# Save the model and settings to a file
if not quantizer.save("KMeansQuantizerSettings.grt") :
print("ERROR: Failed to save settings to file!\n")
sys.exit(1)
# Load the model and settings from a file
if not quantizer.load("KMeansQuantizerSettings.grt") :
print("ERROR: Failed to load settings from file!\n")
sys.exit(1)
# Run the data through the quantizer and print out the quantized value
for i in range(data.getNumSamples()):
sample = data.get(i).getSample()
# Quantize the i'th sample
quantizer.quantize( sample )
# Print out the sample values and the quantized value
print("Index: %d Sample: %s QuantizedValue: %d" % (i, str(sample), quantizer.getQuantizedValue()))
if __name__ == '__main__':
main()
sys.exit(0)
|
195230
|
import os
import re
import attr
import tempfile
from avalon import aftereffects
import pyblish.api
from openpype.settings import get_project_settings
from openpype.lib import abstract_collect_render
from openpype.lib.abstract_collect_render import RenderInstance
@attr.s
class AERenderInstance(RenderInstance):
# extend generic, composition name is needed
comp_name = attr.ib(default=None)
comp_id = attr.ib(default=None)
fps = attr.ib(default=None)
projectEntity = attr.ib(default=None)
stagingDir = attr.ib(default=None)
class CollectAERender(abstract_collect_render.AbstractCollectRender):
order = pyblish.api.CollectorOrder + 0.498
label = "Collect After Effects Render Layers"
hosts = ["aftereffects"]
# internal
family_remapping = {
"render": ("render.farm", "farm"), # (family, label)
"renderLocal": ("render", "local")
}
padding_width = 6
rendered_extension = 'png'
stub = aftereffects.stub()
def get_instances(self, context):
instances = []
current_file = context.data["currentFile"]
version = context.data["version"]
asset_entity = context.data["assetEntity"]
project_entity = context.data["projectEntity"]
compositions = self.stub.get_items(True)
compositions_by_id = {item.id: item for item in compositions}
for inst in self.stub.get_metadata():
schema = inst.get('schema')
# loaded asset container skip it
if schema and 'container' in schema:
continue
if not inst["members"]:
raise ValueError("Couldn't find id, unable to publish. " +
"Please recreate instance.")
item_id = inst["members"][0]
work_area_info = self.stub.get_work_area(int(item_id))
if not work_area_info:
self.log.warning("Orphaned instance, deleting metadata")
self.stub.remove_instance(int(item_id))
continue
frameStart = work_area_info.workAreaStart
frameEnd = round(work_area_info.workAreaStart +
float(work_area_info.workAreaDuration) *
float(work_area_info.frameRate)) - 1
fps = work_area_info.frameRate
# TODO add resolution when supported by extension
if inst["family"] in self.family_remapping.keys() \
and inst["active"]:
remapped_family = self.family_remapping[inst["family"]]
instance = AERenderInstance(
family=remapped_family[0],
families=[remapped_family[0]],
version=version,
time="",
source=current_file,
label="{} - {}".format(inst["subset"], remapped_family[1]),
subset=inst["subset"],
asset=context.data["assetEntity"]["name"],
attachTo=False,
setMembers='',
publish=True,
renderer='aerender',
name=inst["subset"],
resolutionWidth=asset_entity["data"].get(
"resolutionWidth",
project_entity["data"]["resolutionWidth"]),
resolutionHeight=asset_entity["data"].get(
"resolutionHeight",
project_entity["data"]["resolutionHeight"]),
pixelAspect=1,
tileRendering=False,
tilesX=0,
tilesY=0,
frameStart=frameStart,
frameEnd=frameEnd,
frameStep=1,
toBeRenderedOn='deadline',
fps=fps
)
comp = compositions_by_id.get(int(item_id))
if not comp:
raise ValueError("There is no composition for item {}".
format(item_id))
instance.comp_name = comp.name
instance.comp_id = item_id
instance._anatomy = context.data["anatomy"]
instance.anatomyData = context.data["anatomyData"]
instance.outputDir = self._get_output_dir(instance)
settings = get_project_settings(os.getenv("AVALON_PROJECT"))
reviewable_subset_filter = \
(settings["deadline"]
["publish"]
["ProcessSubmittedJobOnFarm"]
["aov_filter"])
if inst["family"] == "renderLocal":
# for local renders
instance.anatomyData["version"] = instance.version
instance.anatomyData["subset"] = instance.subset
instance.stagingDir = tempfile.mkdtemp()
instance.projectEntity = project_entity
if self.hosts[0] in reviewable_subset_filter.keys():
for aov_pattern in \
reviewable_subset_filter[self.hosts[0]]:
if re.match(aov_pattern, instance.subset):
instance.families.append("review")
instance.review = True
break
self.log.info("New instance:: {}".format(instance))
instances.append(instance)
return instances
def get_expected_files(self, render_instance):
"""
Returns list of rendered files that should be created by
Deadline. These are not published directly, they are source
for later 'submit_publish_job'.
Args:
render_instance (RenderInstance): to pull anatomy and parts used
in url
Returns:
(list) of absolute URLs to rendered files
"""
start = render_instance.frameStart
end = render_instance.frameEnd
# pull file name from Render Queue Output module
render_q = self.stub.get_render_info()
if not render_q:
raise ValueError("No file extension set in Render Queue")
_, ext = os.path.splitext(os.path.basename(render_q.file_name))
base_dir = self._get_output_dir(render_instance)
expected_files = []
if "#" not in render_q.file_name: # single frame (mov)W
path = os.path.join(base_dir, "{}_{}_{}.{}".format(
render_instance.asset,
render_instance.subset,
"v{:03d}".format(render_instance.version),
ext.replace('.', '')
))
expected_files.append(path)
else:
for frame in range(start, end + 1):
path = os.path.join(base_dir, "{}_{}_{}.{}.{}".format(
render_instance.asset,
render_instance.subset,
"v{:03d}".format(render_instance.version),
str(frame).zfill(self.padding_width),
ext.replace('.', '')
))
expected_files.append(path)
return expected_files
def _get_output_dir(self, render_instance):
"""
Returns dir path of rendered files, used in submit_publish_job
for metadata.json location.
Should be in separate folder inside of work area.
Args:
render_instance (RenderInstance):
Returns:
(str): absolute path to rendered files
"""
# render to folder of workfile
base_dir = os.path.dirname(render_instance.source)
file_name, _ = os.path.splitext(
os.path.basename(render_instance.source))
base_dir = os.path.join(base_dir, 'renders', 'aftereffects', file_name)
# for submit_publish_job
return base_dir
|
195238
|
from .glob import global_add_pool, global_mean_pool, global_max_pool
from .glob import GlobalPooling
from .sort import global_sort_pool
from .attention import GlobalAttention
from .gmt import GraphMultisetTransformer
__all__ = [
'global_add_pool',
'global_mean_pool',
'global_max_pool',
'GlobalPooling',
'global_sort_pool',
'GlobalAttention',
'GraphMultisetTransformer',
]
classes = __all__
from torch_geometric.deprecation import deprecated # noqa
from torch_geometric.nn.aggr import Set2Set # noqa
Set2Set = deprecated(
details="use 'nn.aggr.Set2Set' instead",
func_name='nn.glob.Set2Set',
)(Set2Set)
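# --- Minimal usage sketch (illustrative, not part of the package) ---
# Global pooling collapses node features into one vector per graph; the `batch`
# vector assigns each node to a graph index. Shapes below are example assumptions.
if __name__ == "__main__":
    import torch
    x = torch.randn(5, 16)                 # 5 nodes, 16 features each
    batch = torch.tensor([0, 0, 0, 1, 1])  # nodes 0-2 -> graph 0, nodes 3-4 -> graph 1
    pooled = global_mean_pool(x, batch)    # -> shape [2, 16], one row per graph
    print(pooled.shape)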
|
195251
|
import sacrebleu
from .base import EvaluationMetricBase
class BLEUTranslation(EvaluationMetricBase):
def __init__(self):
super(BLEUTranslation, self).__init__()
def calculate_scores(self, ground_truth, predict):
"""
The standard BLEU calculation function for translation. It will compute the BLEU \
scores using sacrebleu tools.
Parameters
----------
ground_truth: list[string]
The ground truth (correct) target values. It is a list of strings.
predict: list[string]
The predicted target values. It is a list of strings.
Returns
-------
score: float
The final bleu score
"""
bleu = sacrebleu.corpus_bleu(predict, [ground_truth], lowercase=True)
return bleu.score
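# --- Minimal usage sketch (illustrative; the sentences below are made up) ---
if __name__ == "__main__":
    metric = BLEUTranslation()
    references = ["the cat sat on the mat", "there is a book on the table"]
    hypotheses = ["the cat sat on the mat", "a book is on the table"]
    # corpus-level BLEU, lower-cased as in calculate_scores above
    print(metric.calculate_scores(ground_truth=references, predict=hypotheses))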
|
195266
|
from aws_cdk.aws_ec2 import SubnetType
from aws_cdk import (
aws_ec2 as ec2,
aws_autoscaling as autoscaling,
aws_elasticloadbalancingv2 as elbv2,
core
)
class ASGStack(core.Stack):
def __init__(self, scope: core.Construct, id: str, props, **kwargs) -> None:
super().__init__(scope, id, **kwargs)
userdata_file = open("./userdata.sh", "rb").read()
# Creates a userdata object for Linux hosts
userdata = ec2.UserData.for_linux()
# Adds one or more commands to the userdata object.
userdata.add_commands(str(userdata_file, 'utf-8'))
asg = autoscaling.AutoScalingGroup(
self,
"app-asg",
vpc=props['vpc'],
instance_type=ec2.InstanceType.of(
ec2.InstanceClass.MEMORY5, ec2.InstanceSize.XLARGE
),
machine_image=ec2.AmazonLinuxImage(
generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2
),
key_name="evan",
vpc_subnets=ec2.SubnetSelection(subnet_type=SubnetType.PRIVATE),
user_data=userdata,
)
# Creates a security group for our application
sg_nextcloud = ec2.SecurityGroup(
self,
id="sg_nextcloud",
vpc=props['vpc'],
security_group_name="sg_nextcloud"
)
# Allows only the IP "192.168.127.12"
# to access this security group for SSH
sg_nextcloud.add_ingress_rule(
peer=ec2.Peer.ipv4("192.168.127.12/32"),
connection=ec2.Port.tcp(22)
)
# Creates a security group for the application load balancer
sg_alb = ec2.SecurityGroup(
self,
id="sg_alb",
vpc=props['vpc'],
security_group_name="sg_alb"
)
# Allows connections from security group "sg_alb"
# inside the "sg_nextcloud" security group to access port 8080
# where our app listens
sg_nextcloud.connections.allow_from(
sg_alb, ec2.Port.tcp(8080), "Ingress")
# Adds the security group 'sg_nextcloud' to the autoscaling group
asg.add_security_group(sg_nextcloud)
# Creates an application load balancer
lb = elbv2.ApplicationLoadBalancer(
self,
"ALB",
vpc=props['vpc'],
security_group=sg_alb,
internet_facing=True)
listener = lb.add_listener("Listener", port=80)
# Adds the autoscaling group's (asg) instance to be registered
# as targets on port 8080
listener.add_targets("Target", port=8080, targets=[asg])
# This creates a "0.0.0.0/0" rule to allow every one to access the
# application
listener.connections.allow_default_port_from_any_ipv4(
"Open to the world"
)
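# --- Minimal usage sketch (illustrative): the stack expects `props` to carry an
# existing VPC under the key 'vpc'. App/stack names and the VPC settings below
# are assumptions for the example; synthesis also needs ./userdata.sh to exist.
if __name__ == "__main__":
    app = core.App()
    network_stack = core.Stack(app, "network-stack")
    vpc = ec2.Vpc(network_stack, "vpc", max_azs=2)
    ASGStack(app, "asg-stack", props={"vpc": vpc})
    app.synth()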
|
195269
|
import bpy, blf, bgl, os, gpu
from gpu_extras.batch import batch_for_shader
class ViewportDraw:
def __init__(self, context, text):
bakefile = "TLM_Overlay.png"
scriptDir = os.path.dirname(os.path.realpath(__file__))
bakefile_path = os.path.abspath(os.path.join(scriptDir, '..', '..', 'assets/' + bakefile))
image_name = "TLM_Overlay.png"
bpy.ops.image.open(filepath=bakefile_path)
print("Self path: " + bakefile_path)
image = bpy.data.images[image_name]
x = 15
y = 15
w = 400
h = 200
self.shader = gpu.shader.from_builtin('2D_IMAGE')
self.batch = batch_for_shader(
self.shader, 'TRI_FAN',
{
"pos": ((x, y), (x+w, y), (x+w, y+h), (x, y+h)),
"texCoord": ((0, 0), (1, 0), (1, 1), (0, 1)),
},
)
if image.gl_load():
raise Exception()
self.text = text
self.image = image
#self.handle = bpy.types.SpaceView3D.draw_handler_add(self.draw_text_callback, (context,), 'WINDOW', 'POST_PIXEL')
self.handle2 = bpy.types.SpaceView3D.draw_handler_add(self.draw_image_callback, (context,), 'WINDOW', 'POST_PIXEL')
def draw_text_callback(self, context):
font_id = 0
blf.position(font_id, 15, 15, 0)
blf.size(font_id, 20, 72)
blf.draw(font_id, "%s" % (self.text))
def draw_image_callback(self, context):
if self.image:
bgl.glEnable(bgl.GL_BLEND)
bgl.glActiveTexture(bgl.GL_TEXTURE0)
bgl.glBindTexture(bgl.GL_TEXTURE_2D, self.image.bindcode)
self.shader.bind()
self.shader.uniform_int("image", 0)
self.batch.draw(self.shader)
bgl.glDisable(bgl.GL_BLEND)
def update_text(self, text):
self.text = text
def remove_handle(self):
#bpy.types.SpaceView3D.draw_handler_remove(self.handle, 'WINDOW')
bpy.types.SpaceView3D.draw_handler_remove(self.handle2, 'WINDOW')
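# --- Minimal usage sketch (illustrative; must run inside Blender, where
# `bpy.context` is available). The overlay stays registered in the 3D viewport
# until remove_handle() is called.
if __name__ == "__main__":
    overlay = ViewportDraw(bpy.context, "Baking lightmaps...")
    overlay.update_text("Denoising...")
    overlay.remove_handle()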
|
195283
|
from conans import ConanFile, tools
from conans.errors import ConanInvalidConfiguration
import os
class MathterConan(ConanFile):
name = "mathter"
license = "MIT"
homepage = "https://github.com/petiaccja/Mathter"
url = "https://github.com/conan-io/conan-center-index/"
description = "Powerful 3D math and small-matrix linear algebra library for games and science."
topics = ("game-dev", "linear-algebra", "vector-math", "matrix-library")
no_copy_source = True
settings = "compiler"
@property
def _source_subfolder(self):
return "source_subfolder"
@property
def _compilers_minimum_version(self):
return {
"apple-clang": 10,
"clang": 6,
"gcc": 7,
"Visual Studio": 16,
}
def configure(self):
if self.settings.compiler.cppstd:
tools.check_min_cppstd(self, "17")
minimum_version = self._compilers_minimum_version.get(str(self.settings.compiler), False)
if minimum_version:
if tools.Version(self.settings.compiler.version) < minimum_version:
raise ConanInvalidConfiguration("mathter requires C++17, which your compiler does not support.")
else:
self.output.warn("mathter requires C++17. Your compiler is unknown. Assuming it supports C++17.")
def source(self):
tools.get(**self.conan_data["sources"][self.version])
os.rename("Mathter-" + self.version, self._source_subfolder)
def package(self):
self.copy("*.hpp", dst=os.path.join("include", "Mathter"), src=os.path.join(self._source_subfolder, "Mathter"))
self.copy("*.natvis", dst=os.path.join("include", "Mathter"), src=os.path.join(self._source_subfolder, "Mathter"))
self.copy("LICENCE", dst="licenses", src=self._source_subfolder)
def package_id(self):
self.info.header_only()
|
195293
|
import zmq
import json
import time
import sys
def get_nodes(model):
node_set = set()
node_list = []
for layer in model['layers']:
if layer['bottom_nodes']:
for node in layer['bottom_nodes']:
if node not in node_set:
node_set.add(node)
node_list.append(node)
if layer['top_nodes']:
for node in layer['top_nodes']:
if node not in node_set:
node_set.add(node)
node_list.append(node)
print(node_list)
return node_list
context = zmq.Context()
# Socket to talk to server
print("Connecting to hello world server...")
socket = context.socket(zmq.REQ)
socket.connect("tcp://127.0.0.1:5000")
node_list = get_nodes(json.loads(open(sys.argv[1]).read()))
data_file = open("sample.data","w")
# Do 1000 rounds of requests, waiting each time for a response
for request in range(1000):
print("Sending request %s ..." % request)
for node in node_list:
socket.send_string('{"msg_type":2, "node_name": "%s", "static_node": ["diff"], "static_value": ["mean"]}' % node)
# Get the reply.
message = socket.recv().decode()
print(message, file=data_file)
message = json.loads(message)
print("Received reply %s [ %s ]" % (request, message))
time.sleep(1)
data_file.close()
|
195295
|
from django.apps import AppConfig
class CustomThemeDemoAppConfig(AppConfig):
name = 'django_cradmin.demo.custom_theme_demo'
verbose_name = "Django CRadmin custom theme demo"
def ready(self):
from django_cradmin.apps.cradmin_kss_styleguide import styleguide_registry
styleguide = styleguide_registry.CradminStyleGuide(
unique_id='django_cradmin_theme_example',
label='Django CRadmin example theme',
appname='custom_theme_demo',
sourcefolder='styles/cradmin_theme_example',
sourcefile='styleguide.scss',
)
styleguide_registry.Registry.get_instance().add(styleguide)
|
195296
|
from footmark.market.productobject import TaggedPRODUCTObject
class Product(TaggedPRODUCTObject):
def __init__(self, connection=None):
super(Product, self).__init__(connection)
def __repr__(self):
return 'Product:%s' % self.id
def __getattr__(self, name):
if name == 'price':
return self.suggested_price
def __setattr__(self, name, value):
super(TaggedPRODUCTObject, self).__setattr__(name, value)
def get(self):
return self.connection.describe_product(code=self.code)
def read(self):
product = {}
for name, value in list(self.__dict__.items()):
if name in ["connection", "region_id", "region", "request_id", "description", "product_extras","shop_info", "pic_url"]:
continue
if name == 'product_skus':
for m in value['product_sku'][0]['modules']['module']:
if m['code'] == 'img_id':
res = m['properties']['property'][0]['property_values']['property_value']
product['image_ids'] = res
continue
product[name] = value
return product
|
195307
|
from kobin import Kobin, request, Response, TemplateResponse, load_config_from_pyfile
config = load_config_from_pyfile('config.py')
app = Kobin(config=config)
@app.route('/')
def index():
return TemplateResponse(
'hello_jinja2.html', name='Kobin', headers={'foo': 'bar'}
)
@app.route('/user/{name}')
def hello(name: str):
body = """
<p>Hello {}</p>
<p>Request Path: {}</p>
""".format(name, request.path)
return Response(body)
|
195355
|
from django.contrib.auth import get_user_model
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers, validators
from user_management.utils.validators import validate_password_strength
User = get_user_model()
class UniqueEmailValidator(validators.UniqueValidator):
def filter_queryset(self, value, queryset):
"""Check lower-cased email is unique."""
return super(UniqueEmailValidator, self).filter_queryset(
value.lower(),
queryset,
)
unique_email_validator = UniqueEmailValidator(
queryset=User.objects.all(),
message=_('That email address has already been registered.'),
)
class ValidateEmailMixin(object):
def validate_email(self, value):
return value.lower()
class EmailSerializerBase(serializers.Serializer):
"""Serializer defining a read-only `email` field."""
email = serializers.EmailField(max_length=511, label=_('Email address'))
class Meta:
fields = ('email',)
class RegistrationSerializer(ValidateEmailMixin, serializers.ModelSerializer):
email = serializers.EmailField(
label=_('Email address'),
validators=[unique_email_validator],
)
password = serializers.CharField(
write_only=True,
min_length=8,
label=_('Password'),
validators=[validate_password_strength],
)
password2 = serializers.CharField(
write_only=True,
min_length=8,
label=_('Repeat password'),
)
class Meta:
fields = ('name', 'email', 'password', 'password2')
model = User
def validate(self, attrs):
password2 = attrs.pop('password2')
if password2 != attrs.get('password'):
msg = _('Your passwords do not match.')
raise serializers.ValidationError({'password2': msg})
return attrs
def create(self, validated_data):
password = validated_data.pop('password')
user = self.Meta.model.objects.create(**validated_data)
user.set_password(password)
user.save()
return user
class PasswordChangeSerializer(serializers.ModelSerializer):
old_password = serializers.CharField(
write_only=True,
label=_('Old password'),
)
new_password = serializers.CharField(
write_only=True,
min_length=8,
label=_('New password'),
validators=[validate_password_strength],
)
new_password2 = serializers.CharField(
write_only=True,
min_length=8,
label=_('Repeat new password'),
)
class Meta:
model = User
fields = ('old_password', 'new_password', 'new_password2')
def update(self, instance, validated_data):
"""Check the old password is valid and set the new password."""
if not instance.check_password(validated_data['old_password']):
msg = _('Invalid password.')
raise serializers.ValidationError({'old_password': msg})
instance.set_password(validated_data['new_password'])
instance.save()
return instance
def validate(self, attrs):
if attrs.get('new_password') != attrs['new_password2']:
msg = _('Your new passwords do not match.')
raise serializers.ValidationError({'new_password2': msg})
if attrs.get('old_password') == attrs.get('new_password'):
msg = _('Your new password must not be the same as your old password.')
raise serializers.ValidationError({'new_password': msg})
return attrs
class PasswordResetSerializer(serializers.ModelSerializer):
new_password = serializers.CharField(
write_only=True,
min_length=8,
label=_('New password'),
validators=[validate_password_strength],
)
new_password2 = serializers.CharField(
write_only=True,
min_length=8,
label=_('Repeat new password'),
)
class Meta:
model = User
fields = ('new_password', 'new_password2')
def update(self, instance, validated_data):
"""Set the new password for the user."""
instance.set_password(validated_data['new_password'])
instance.save()
return instance
def validate(self, attrs):
if attrs.get('new_password') != attrs['new_password2']:
msg = _('Your new passwords do not match.')
raise serializers.ValidationError({'new_password2': msg})
return attrs
class PasswordResetEmailSerializer(EmailSerializerBase):
"""Serializer defining an `email` field to reset password."""
class ProfileSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('name', 'email', 'date_joined')
read_only_fields = ('email', 'date_joined')
class ResendConfirmationEmailSerializer(EmailSerializerBase):
"""Serializer defining an `email` field to resend a confirmation email."""
def validate_email(self, email):
"""
Validate that the email exists and still requires verification.
`validate_email` will set a `user` attribute on the instance allowing
the view to send an email confirmation.
"""
try:
self.user = User.objects.get_by_natural_key(email)
except User.DoesNotExist:
msg = _('A user with this email address does not exist.')
raise serializers.ValidationError(msg)
if self.user.email_verified:
msg = _('User email address is already verified.')
raise serializers.ValidationError(msg)
return email
class UserSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = User
fields = ('url', 'name', 'email', 'date_joined')
read_only_fields = ('email', 'date_joined')
extra_kwargs = {
'url': {
'lookup_field': 'pk',
'view_name': 'user_management_api_users:user_detail',
}
}
class UserSerializerCreate(ValidateEmailMixin, UserSerializer):
email = serializers.EmailField(
label=_('Email address'),
validators=[unique_email_validator],
)
class Meta(UserSerializer.Meta):
read_only_fields = ('date_joined',)
|
195425
|
import pandas as pd
import numpy as np
import gc
import os
# read data
col_dict = {'mjd': np.float64, 'flux': np.float32, 'flux_err': np.float32, 'object_id': np.int32, 'passband': np.int8,
'detected': np.int8}
train_meta = pd.read_csv(os.path.join('data', 'training_set_metadata.csv'))
train = pd.read_csv(os.path.join('data', 'training_set.csv'), dtype=col_dict)
def calc_aggs(all_data, exact):
# Normalise the flux, following the Bayesian approach here:
# https://www.statlect.com/fundamentals-of-statistics/normal-distribution-Bayesian-estimation
# Similar idea (but not the same) as the normalisation done in the Starter Kit
# https://www.kaggle.com/michaelapers/the-plasticc-astronomy-starter-kit?scriptVersionId=6040398
prior_mean = all_data.groupby(['object_id', 'passband'])['flux'].transform('mean')
prior_std = all_data.groupby(['object_id', 'passband'])['flux'].transform('std')
prior_std.loc[prior_std.isnull()] = all_data.loc[prior_std.isnull(), 'flux_err']
obs_std = all_data['flux_err'] # since the above kernel tells us that the flux error is the 68% confidence interval
all_data['bayes_flux'] = (all_data['flux'] / obs_std**2 + prior_mean / prior_std**2) \
/ (1 / obs_std**2 + 1 / prior_std**2)
all_data.loc[all_data['bayes_flux'].notnull(), 'flux'] \
= all_data.loc[all_data['bayes_flux'].notnull(), 'bayes_flux']
# Estimate the flux at source, using the fact that light is proportional
# to inverse square of distance from source.
# This is hinted at here: https://www.kaggle.com/c/PLAsTiCC-2018/discussion/70725#417195
# Note: relies on the module-level `all_meta` dataframe defined further below.
redshift = all_meta.set_index('object_id')[['hostgal_specz', 'hostgal_photoz']]
if exact:
redshift['redshift'] = redshift['hostgal_specz']
redshift.loc[redshift['redshift'].isnull(), 'redshift'] \
= redshift.loc[redshift['redshift'].isnull(), 'hostgal_photoz']
else:
redshift['redshift'] = redshift['hostgal_photoz']
all_data = pd.merge(all_data, redshift, 'left', 'object_id')
nonzero_redshift = all_data['redshift'] > 0
all_data.loc[nonzero_redshift, 'flux'] = all_data.loc[nonzero_redshift, 'flux'] \
* all_data.loc[nonzero_redshift, 'redshift']**2
# aggregate features
band_aggs = all_data.groupby(['object_id', 'passband'])['flux'].agg(['mean', 'std', 'max', 'min']).unstack(-1)
band_aggs.columns = [x + '_' + str(y) for x in band_aggs.columns.levels[0]
for y in band_aggs.columns.levels[1]]
all_data.sort_values(['object_id', 'passband', 'flux'], inplace=True)
# this way of calculating quantiles is faster than using the pandas quantile builtin on the groupby object
all_data['group_count'] = all_data.groupby(['object_id', 'passband']).cumcount()
all_data['group_size'] = all_data.groupby(['object_id', 'passband'])['flux'].transform('size')
q_list = [0.25, 0.75]
for q in q_list:
all_data['q_' + str(q)] = all_data.loc[
(all_data['group_size'] * q).astype(int) == all_data['group_count'], 'flux']
quantiles = all_data.groupby(['object_id', 'passband'])[['q_' + str(q) for q in q_list]].max().unstack(-1)
quantiles.columns = [str(x) + '_' + str(y) + '_quantile' for x in quantiles.columns.levels[0]
for y in quantiles.columns.levels[1]]
# max detected flux
max_detected = all_data.loc[all_data['detected'] == 1].groupby('object_id')['flux'].max().to_frame('max_detected')
def most_extreme(df_in, k, positive=True, suffix='', include_max=True, include_dur=True, include_interval=False):
# find the "most extreme" time for each object, and for each band, retrieve the k data points on either side
# k points before
df = df_in.copy()
df['object_passband_mean'] = df.groupby(['object_id', 'passband'])['flux'].transform('median')
if positive:
df['dist_from_mean'] = (df['flux'] - df['object_passband_mean'])
else:
df['dist_from_mean'] = -(df['flux'] - df['object_passband_mean'])
max_time = df.loc[df['detected'] == 1].groupby('object_id')['dist_from_mean'].idxmax().to_frame(
'max_ind')
max_time['mjd_max' + suffix] = df.loc[max_time['max_ind'].values, 'mjd'].values
df = pd.merge(df, max_time[['mjd_max' + suffix]], 'left', left_on=['object_id'], right_index=True)
df['time_after_mjd_max'] = df['mjd'] - df['mjd_max' + suffix]
df['time_before_mjd_max'] = -df['time_after_mjd_max']
# first k after event
df.sort_values(['object_id', 'passband', 'time_after_mjd_max'], inplace=True)
df['row_num_after'] = df.loc[df['time_after_mjd_max'] >= 0].groupby(
['object_id', 'passband']).cumcount()
first_k_after = df.loc[(df['row_num_after'] < k) & (df['time_after_mjd_max'] <= 50),
['object_id', 'passband', 'flux', 'row_num_after']]
first_k_after.set_index(['object_id', 'passband', 'row_num_after'], inplace=True)
first_k_after = first_k_after.unstack(level=-1).unstack(level=-1)
first_k_after.columns = [str(x) + '_' + str(y) + '_after' for x in first_k_after.columns.levels[1]
for y in first_k_after.columns.levels[2]]
extreme_data = first_k_after
time_bands = [[-50, -20], [-20, -10], [-10, 0], [0, 10], [10, 20], [20, 50], [50, 100], [100, 200], [200, 500]]
if include_interval:
interval_arr = []
for start, end in time_bands:
band_data = df.loc[(start <= df['time_after_mjd_max']) & (df['time_after_mjd_max'] <= end)]
interval_agg = band_data.groupby(['object_id', 'passband'])['flux'].mean().unstack(-1)
interval_agg.columns = ['{}_start_{}_end_{}'.format(c, start, end) for c in interval_agg.columns]
interval_arr.append(interval_agg)
interval_data = pd.concat(interval_arr, axis=1)
extreme_data = pd.concat([extreme_data, interval_data], axis=1)
if include_dur:
# detection duration in each passband after event
duration_after = df.loc[(df['time_after_mjd_max'] >= 0) & (df['detected'] == 0)] \
.groupby(['object_id', 'passband'])['time_after_mjd_max'].first().unstack(-1)
duration_after.columns = ['dur_after_' + str(c) for c in range(6)]
extreme_data = pd.concat([extreme_data, duration_after], axis=1)
# last k before event
df.sort_values(['object_id', 'passband', 'time_before_mjd_max'], inplace=True)
df['row_num_before'] = df.loc[df['time_before_mjd_max'] >= 0].groupby(
['object_id', 'passband']).cumcount()
first_k_before = df.loc[(df['row_num_before'] < k) & (df['time_after_mjd_max'] <= 50),
['object_id', 'passband', 'flux', 'row_num_before']]
first_k_before.set_index(['object_id', 'passband', 'row_num_before'], inplace=True)
first_k_before = first_k_before.unstack(level=-1).unstack(level=-1)
first_k_before.columns = [str(x) + '_' + str(y) + '_before' for x in first_k_before.columns.levels[1]
for y in first_k_before.columns.levels[2]]
extreme_data = pd.concat([extreme_data, first_k_before], axis=1)
if include_dur:
# detection duration in each passband before event
duration_before = df.loc[(df['time_before_mjd_max'] >= 0) & (df['detected'] == 0)] \
.groupby(['object_id', 'passband'])['time_before_mjd_max'].first().unstack(-1)
duration_before.columns = ['dur_before_' + str(c) for c in range(6)]
extreme_data = pd.concat([extreme_data, duration_before], axis=1)
if include_max:
# passband with maximum detected flux for each object
max_pb = df.loc[max_time['max_ind'].values].groupby('object_id')['passband'].max().to_frame(
'max_passband')
# time of max in each passband, relative to extreme max
band_max_ind = df.groupby(['object_id', 'passband'])['flux'].idxmax()
band_mjd_max = df.loc[band_max_ind.values].groupby(['object_id', 'passband'])['mjd'].max().unstack(-1)
cols = ['max_time_' + str(i) for i in range(6)]
band_mjd_max.columns = cols
band_mjd_max = pd.merge(band_mjd_max, max_time, 'left', 'object_id')
for c in cols:
band_mjd_max[c] -= band_mjd_max['mjd_max' + suffix]
band_mjd_max.drop(['mjd_max' + suffix, 'max_ind'], axis=1, inplace=True)
extreme_data = pd.concat([extreme_data, max_pb, band_mjd_max], axis=1)
extreme_data.columns = [c + suffix for c in extreme_data.columns]
return extreme_data
extreme_max = most_extreme(all_data, 1, positive=True, suffix='', include_max=True, include_dur=True,
include_interval=True)
extreme_min = most_extreme(all_data, 1, positive=False, suffix='_min', include_max=False, include_dur=True)
# add the feature mentioned here, attempts to identify periodicity:
# https://www.kaggle.com/c/PLAsTiCC-2018/discussion/69696#410538
time_between_detections = all_data.loc[all_data['detected'] == 1].groupby('object_id')['mjd'].agg(['max', 'min'])
time_between_detections['det_period'] = time_between_detections['max'] - time_between_detections['min']
# same feature but grouped by passband
time_between_detections_pb \
= all_data.loc[all_data['detected'] == 1].groupby(['object_id', 'passband'])['mjd'].agg(['max', 'min'])
time_between_detections_pb['det_period'] = time_between_detections_pb['max'] - time_between_detections_pb['min']
time_between_detections_pb = time_between_detections_pb['det_period'].unstack(-1)
time_between_detections_pb.columns = ['det_period_pb_' + str(i) for i in range(6)]
# similar feature based on high values
all_data['threshold'] = all_data.groupby(['object_id'])['flux'].transform('max') * 0.75
all_data['high'] = ((all_data['flux'] >= all_data['threshold']) & (all_data['detected'] == 1)).astype(int)
time_between_highs = all_data.loc[all_data['high'] == 1].groupby('object_id')['mjd'].agg(['max', 'min'])
time_between_highs['det_period_high'] = time_between_highs['max'] - time_between_highs['min']
# aggregate values of the features during the detection period
all_data = pd.merge(all_data, time_between_detections, 'left', 'object_id')
det_data = all_data.loc[(all_data['mjd'] >= all_data['min']) & (all_data['mjd'] <= all_data['max'])]
det_aggs = det_data.groupby(['object_id', 'passband'])['flux'].agg(['min', 'max', 'std', 'median'])
det_aggs['prop_detected'] = det_data.groupby(['object_id', 'passband'])['detected'].mean()
det_aggs = det_aggs.unstack(-1)
det_aggs.columns = [x + '_' + str(y) + '_det_period' for x in det_aggs.columns.levels[0]
for y in det_aggs.columns.levels[1]]
# time distribution of detections in each band
detection_time_dist \
= all_data.loc[all_data['detected'] == 1].groupby(['object_id', 'passband'])['mjd'].std().unstack(-1)
detection_time_dist.columns = ['time_dist_' + str(i) for i in range(6)]
detection_time_dist_all \
= all_data.loc[all_data['detected'] == 1].groupby(['object_id'])['mjd'].std().to_frame('time_dist')
# scale data and recalculate band aggs
all_data['abs_flux'] = all_data['flux'].abs()
all_data['flux'] = (all_data['flux']) / all_data.groupby('object_id')['abs_flux'].transform('max')
band_aggs_s = all_data.groupby(['object_id', 'passband'])['flux'].agg(['mean', 'std', 'max', 'min']).unstack(-1)
band_aggs_s.columns = [x + '_' + str(y) + '_scaled' for x in band_aggs_s.columns.levels[0]
for y in band_aggs_s.columns.levels[1]]
all_data.sort_values(['object_id', 'passband', 'flux'], inplace=True)
for q in q_list:
all_data['q_' + str(q)] = all_data.loc[
(all_data['group_size'] * q).astype(int) == all_data['group_count'], 'flux']
quantiles_s = all_data.groupby(['object_id', 'passband'])[['q_' + str(q) for q in q_list]].max().unstack(-1)
quantiles_s.columns = [str(x) + '_' + str(y) + '_quantile_s' for x in quantiles_s.columns.levels[0]
for y in quantiles_s.columns.levels[1]]
extreme_max_s = most_extreme(all_data, 1, positive=True, suffix='_s', include_max=False, include_dur=False,
include_interval=True)
extreme_min_s = most_extreme(all_data, 1, positive=False, suffix='_min_s', include_max=False, include_dur=False)
new_data = pd.concat([band_aggs, quantiles, band_aggs_s, max_detected, time_between_detections[['det_period']],
time_between_detections_pb, extreme_max, extreme_min, extreme_max_s, extreme_min_s,
time_between_highs[['det_period_high']], quantiles_s, detection_time_dist,
detection_time_dist_all, det_aggs], axis=1)
return new_data
# get the metadata
test_meta = pd.read_csv(os.path.join('data', 'test_set_metadata.csv'))
all_meta = pd.concat([train_meta, test_meta], axis=0, ignore_index=True, sort=True).reset_index()
all_meta.drop('index', axis=1, inplace=True)
n_chunks = 100
# calculate features
new_data_exact = calc_aggs(train.copy(), True)
new_data_approx = calc_aggs(train.copy(), False)
train_meta_exact = pd.merge(train_meta, new_data_exact, 'left', left_on='object_id', right_index=True)
train_meta_approx = pd.merge(train_meta, new_data_approx, 'left', left_on='object_id', right_index=True)
# process training set (not actually used, just to get right shape of dataframe)
new_data_arr = []
new_data_arr.append(calc_aggs(train.copy(), True))
# process test set
for i in range(n_chunks):
df = pd.read_hdf(os.path.join('data', 'split_{}'.format(n_chunks), 'chunk_{}.hdf5'.format(i)), key='file0')
df.drop('index', axis=1, inplace=True)
print('Read chunk {}'.format(i))
new_data_arr.append(calc_aggs(df.copy(), True))
print('Calculated features for chunk {}'.format(i))
del df
gc.collect()
new_data = pd.concat(new_data_arr, axis=0, sort=True)
# merge
all_meta = pd.merge(all_meta, new_data, 'left', left_on='object_id', right_index=True)
# write output
dir_name = 'features'
if not os.path.exists(os.path.join('data', dir_name)):
os.mkdir(os.path.join('data', dir_name))
all_meta.to_hdf(os.path.join('data', dir_name, 'all_data.hdf5'), key='file0')
train_meta_exact.to_hdf(os.path.join('data', dir_name, 'train_meta_exact.hdf5'), key='file0')
train_meta_approx.to_hdf(os.path.join('data', dir_name, 'train_meta_approx.hdf5'), key='file0')
|
195434
|
import torch
import numpy as np
import math
from scipy.stats import norm
import matplotlib.pyplot as plt
def plot_gaussian_mixture_1d(var, weights, mu=None):
"""
Visualize 1D Gaussian mixture
"""
if mu is None:
mu = np.zeros_like(var)
x = np.linspace(start = -10, stop = 10, num = 2000)
y_cum = np.zeros_like(x)
for ii in range(var.shape[0]):
y = norm(mu[ii].item(), np.sqrt(var[ii].item())).pdf(x)  # use the component mean (defaults to 0)
y_cum = y * weights[ii].item() + y_cum
plt.plot(x, y_cum)
def standardize(data_train, *args):
"""
Standardize a dataset to have zero mean and unit standard deviation.
:param data_train: 2-D Numpy array. Training data.
:param data_test: 2-D Numpy array. Test data.
:return: (train_set, test_set, mean, std), The standardized dataset and
their mean and standard deviation before processing.
"""
std = np.std(data_train, 0, keepdims=True)
std[std == 0] = 1
mean = np.mean(data_train, 0, keepdims=True)
data_train_standardized = (data_train - mean) / std
output = [data_train_standardized]
for d in args:
dd = (d - mean) / std
output.append(dd)
output.append(mean)
output.append(std)
return output
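# Example (illustrative): standardize a test split with the training statistics.
#   x_train_s, x_test_s, mean, std = standardize(x_train, x_test)
# x_train_s then has ~zero mean and ~unit std per column, while x_test_s reuses
# the training mean/std so the two splits stay directly comparable.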
def GP_noise(y1, K11, K12, K22, epsilon_noise, device):
"""
Calculate the posterior mean and covariance matrix for y2 based on the noisy observations y1 and the given kernel matrix
"""
# Kernel of the noisy observations
K11 = K11 + epsilon_noise * torch.eye(K11.shape[0]).to(device)
solved, _ = torch.solve(K12, K11)
# Compute posterior mean
mu_2 = torch.matmul(solved.T, y1)
var_2 = K22 - torch.matmul(solved.T, K12)
return mu_2, var_2 # mean, covariance
def cal_marg_likelihood_single(K, f, epsilon, device):
N = f.shape[0]
L = torch.cholesky(K+epsilon*torch.eye(N).to(device))
singular_values = L.diagonal(offset=0)
logdet = torch.sum(torch.log(singular_values)*2)
data_fit = -(f.transpose(-1,-2)).matmul(torch.inverse(K+epsilon*torch.eye(N).to(device))).matmul(f).squeeze(-1)
AvgMLL = (0.5*data_fit - 0.5*logdet)/N - 0.5*math.log(2*math.pi)
return AvgMLL
def cal_marg_likelihood_single_L(f, L):
N = f.shape[0]
singular_values = L.diagonal(offset=0)
logdet = torch.sum(torch.log(singular_values)*2)
L_inv = torch.inverse(L)
f_bar = L_inv.matmul(f)
data_fit = -(f_bar.transpose(-1,-2).matmul(f_bar)).squeeze(-1)
AvgMLL = (0.5*data_fit - 0.5*logdet)/N - 0.5*math.log(2*math.pi)
return AvgMLL
def cal_marg_likelihood(K, f, epsilon, kernel_mask, diagonal_mask, N, device):
# K: B X N X N
# f: B X N X 1 (filled with zeros)
diag_size = f.shape[1]
K = (K + epsilon*torch.eye(diag_size).to(device).unsqueeze(0))*kernel_mask # fill the rest with zeros
K = K+torch.eye(diag_size).to(device).unsqueeze(0)*(1-kernel_mask) # add ones to the diagonal
L = torch.cholesky(K)
singular_values = L.diagonal(offset=0, dim1=1, dim2=2)
logdet = torch.sum(torch.log(singular_values)*2*(1-diagonal_mask),1)
data_fit = -(f.transpose(-1,-2)).matmul(torch.inverse(K)).matmul(f).squeeze(1).squeeze(1)
AvgMLL = (0.5*data_fit - 0.5*logdet)/N - 0.5*math.log(2*math.pi)
return AvgMLL
def cal_kern_per(X1,X2,period,lengthscale):
#lengthscale: (B or None) X D
#period:(B or None) X D
#X1: (B or None) X N1 X D, X2: (B or None) X N2 X D
period = period.unsqueeze(-2) # (B or None) X 1 X D
X1 = X1.div(period).unsqueeze(-2) #shape --> (B or None) X N X 1 X D
X2 = X2.div(period).unsqueeze(-3) #shape --> (B or None) x 1 x N x D
X_diff = torch.abs(X1 - X2) #shape --> B x N x N x D
lengthscale = (lengthscale**2).unsqueeze(-2).unsqueeze(-2) # B X 1 X 1 X D
K = (-2*(torch.sin(math.pi*X_diff)**2)/lengthscale).exp_() # B X N X N X D
K = torch.prod(K,-1) # B X N X N
return K
def cal_kern_rbf(X1,X2,lengthscale):
#lengthscale: B or None X D
#X1: B or None X N1 X D, X2: B or None X N2 X D
lengthscale = lengthscale.unsqueeze(-2)#B X 1 X D
X1 = X1.div(lengthscale)
X2 = X2.div(lengthscale)
X1_norm = torch.sum(X1 ** 2, dim = -1).unsqueeze(-1)#B X N1 X 1
X2_norm = torch.sum(X2 ** 2, dim = -1).unsqueeze(-2)#B X 1 X N2
Distance_squared = (X1_norm + X2_norm - 2 * torch.matmul(X1, X2.transpose(-1,-2))).clamp_min_(0)
K = torch.exp(-Distance_squared) #shape: B X N1 X N2
return K
def cal_kern_matern(X1,X2,lengthscale,nu=0.5):
#lengthscale: B X D
#X1: B X N1 X D, X2: B X N2 X D
lengthscale = lengthscale.unsqueeze(-2)#B X 1 X D
X1 = X1.div(lengthscale)
X2 = X2.div(lengthscale)
X1_norm = torch.sum(X1 ** 2, dim = -1).unsqueeze(-1)#B X N1 X 1
X2_norm = torch.sum(X2 ** 2, dim = -1).unsqueeze(-2)#B X 1 X N2
Distance_squared = (X1_norm + X2_norm - 2 * torch.matmul(X1, X2.transpose(-1,-2))).clamp_min_(1e-30)
Distance = torch.sqrt(Distance_squared)
exp_component = torch.exp(-math.sqrt(nu * 2) * Distance)
if nu == 0.5:
constant_component = 1
elif nu == 1.5:
constant_component = (math.sqrt(3) * Distance).add(1)
elif nu == 2.5:
constant_component = (math.sqrt(5) * Distance).add(1).add(5.0 / 3.0 * (Distance) ** 2)
else:
raise ValueError("nu must be one of 0.5, 1.5 or 2.5")
K = torch.mul(constant_component,exp_component) #shape: B X N1 X N2
return K
def cal_kern_spec_mix_sep(X1, X2, mu, var, weights):
#X1: shape B X N1 X D, X2: B X N2 X D
#mu: B X M X (D or 1)
#var: B X M X (D or 1)
#weights: B X M X (D or 1)
X1 = X1.unsqueeze(-2) #shape --> (B or None) X N X 1 X D
X2 = X2.unsqueeze(-3) #shape --> B x 1 x N x D
X_diff = (X1 - X2).unsqueeze(-4) #shape --> B x 1 x N x N x D
X_diff_squared = X_diff**2
var = var.unsqueeze(-2).unsqueeze(-2) # shape --> B x M x 1 x 1 x (D or 1)
mu = mu.unsqueeze(-2).unsqueeze(-2) # shape --> B x M x 1 x 1 x (D or 1)
kern_all = (weights.unsqueeze(-2).unsqueeze(-2))*torch.exp(-2*(math.pi**2)*X_diff_squared*var)*torch.cos(2*math.pi*X_diff*mu) # shape --> B x M x N x N x D
kern_all = torch.sum(kern_all,-4) #sum up the average of the mixture of kernels, shape --> B x N x N x D
kern = torch.prod(kern_all,-1) #shape --> B x N x N
return kern
def cal_kern_spec_mix_nomu_sep(X1, X2, var, weights):
X1 = X1.unsqueeze(-2) #shape --> (B or None) X N X 1 X D
X2 = X2.unsqueeze(-3) #shape --> B x 1 x N x D
X_diff = (X1 - X2).unsqueeze(-4) #shape --> B x 1 x N x N x D
X_diff_squared = X_diff**2
var = var.unsqueeze(-2).unsqueeze(-2) # shape --> B x M x 1 x 1 x (D or 1)
kern_all = (weights.unsqueeze(-2).unsqueeze(-2))*torch.exp(-2*(math.pi**2)*X_diff_squared*var) # shape --> B x M x N x N x D
kern_all = torch.sum(kern_all,-4) #sum up the average of the mixture of kernels, shape --> B x N x N x D
kern = torch.prod(kern_all,-1) #shape --> B x N x N
return kern
def cal_kern_spec_mix(X1, X2, mu, var, weights):
#X1: shape B X N1 X D, X2: B X N2 X D
#mu: B X M X (D or 1)
#var: B X M X (D or 1)
#weights: B X M
X1 = X1.unsqueeze(-2) #shape --> B X N1 X 1 X D
X2 = X2.unsqueeze(-3) #shape --> B x 1 x N2 x D
X_diff = (X1 - X2).unsqueeze(-4) #shape --> B x 1 x N1 x N2 x D
X_diff_squared = X_diff**2
var = var.unsqueeze(-2).unsqueeze(-2) # shape --> B x M x 1 x 1 x (D or 1)
mu = mu.unsqueeze(-2).unsqueeze(-2) # shape --> B x M x 1 x 1 x (D or 1)
log_exp_component = -2*(math.pi**2)*X_diff_squared*var
exp_component = torch.exp(torch.sum(log_exp_component,-1)) #shape --> B x M x N1 x N2
cos_component = torch.prod(torch.cos(2*math.pi*X_diff*mu),-1)# product of all D dimensions
weights = weights.unsqueeze(-1).unsqueeze(-1) # shape --> B x M x 1 x 1
kern_all = weights*exp_component*cos_component # shape --> B x M x N1 x N2
kern = torch.sum(kern_all,-3) #sum up the average of the mixture of kernels
return kern
def cal_kern_spec_mix_nomu(X1, X2, var, weights):
#X1: shape B X N1 X D, X2: B X N2 X D
#var: B X M X (D or 1)
#weights: B X M
X1 = X1.unsqueeze(-2) #shape --> B X N1 X 1 X D
X2 = X2.unsqueeze(-3) #shape --> B x 1 x N2 x D
X_diff = (X1 - X2).unsqueeze(-4) #shape --> B x 1 x N1 x N2 x D
X_diff_squared = X_diff**2
var = var.unsqueeze(-2).unsqueeze(-2) # shape --> B x M x 1 x 1 x (D or 1)
log_exp_component = -2*(math.pi**2)*X_diff_squared*var
exp_component = torch.exp(torch.sum(log_exp_component,-1)) #shape --> B x M x N1 x N2
weights = weights.unsqueeze(-1).unsqueeze(-1) # shape --> B x M x 1 x 1
kern_all = weights*exp_component # shape --> B x M x N1 x N2
kern = torch.sum(kern_all,-3) #sum up the average of the mixture of kernels
return kern
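# --- Minimal usage sketch (illustrative): build RBF kernel blocks and query the
# GP posterior with GP_noise. The shapes, lengthscale and noise level below are
# assumptions; GP_noise relies on torch.solve, so an older PyTorch is assumed.
if __name__ == "__main__":
    device = torch.device("cpu")
    X1 = torch.linspace(-3, 3, 20).reshape(1, 20, 1)   # B=1, N1=20, D=1 training inputs
    y1 = torch.sin(X1.squeeze(0))                      # N1 x 1 noise-free observations
    X2 = torch.linspace(-3, 3, 50).reshape(1, 50, 1)   # test inputs
    lengthscale = torch.ones(1, 1)                     # B x D
    K11 = cal_kern_rbf(X1, X1, lengthscale).squeeze(0)
    K12 = cal_kern_rbf(X1, X2, lengthscale).squeeze(0)
    K22 = cal_kern_rbf(X2, X2, lengthscale).squeeze(0)
    mu_2, var_2 = GP_noise(y1, K11, K12, K22, 1e-2, device)
    print(mu_2.shape, var_2.shape)                     # torch.Size([50, 1]) torch.Size([50, 50])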
|