code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
from sklearn.feature_extraction.text import CountVectorizer
import pandas as pd
from sklearn.model_selection import train_test_split
import numpy as np
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_recall_fscore_support
from sklearn.svm import SVC
from sklearn.naive_bayes import MultinomialNB
import pickle
import sys
from time import time
def p_log(*ks, **kwargs):
print(*ks, **kwargs)
sys.stdout.flush()
def construct_traffic(filename, LABELS):
X = [[] for i in range(len(LABELS))]
with open(filename) as f:
for i in f.readlines():
i = i.strip().split()
tag = i[0].split('//')[0]
if tag in LABELS:
X[LABELS[tag]].append(' '.join(i[1:1501]))
return X
def cmp_my(x):
return x[1]
class Securitas():
def __init__(self, X, labels, voca_size, n_topic):
self.LABELS = labels
self.voca_size = voca_size
self.n_topic = n_topic
self.voca = self.get_vocaubulary(X, self.voca_size)
self.lda = LatentDirichletAllocation(n_components=n_topic, doc_topic_prior=0.1, topic_word_prior=0.01)
self.lda = self.fit(X)
def fit(self, X):
X = self.get_input_vectors(X)
self.lda.fit(X)
return self.lda
def get_vocaubulary(self, X, need_size):
vec = CountVectorizer(min_df=1, ngram_range=(3,3),decode_error="ignore")
X = vec.fit_transform(X)
if need_size >= len(vec.get_feature_names()):
need_size = len(vec.get_feature_names())
# print('shape of X:', X.shape)
X = X.toarray()
X = np.sum(X, axis = 0)
voca_indexs = {value:key for key, value in vec.vocabulary_.items()}
X = sorted([(i,r) for i, r in enumerate(X)], key = cmp_my, reverse = True)
res = {voca_indexs[item[0]]:i for i, item in enumerate(X[:need_size])}
return res
def get_input_vectors(self, X):
vec = CountVectorizer(min_df = 1, ngram_range=(3,3), decode_error= "ignore", vocabulary= self.voca)
X = vec.fit_transform(X)
return X.toarray()
def get_features(self, X):
X_input_features = self.get_input_vectors(X)
X_features = self.lda.transform(X_input_features)
return X_features
def deal_to_binary(target, y):
for i in range(len(y)):
if y[i] != target:
y[i] = 0
else:
y[i] = 1
return y
def f1(p, r):
return float(2*p*r) / float(p+r)
# LABELS = {'vimeo': 0, 'spotify': 1, 'voipbuster': 2, 'sinauc': 3, 'cloudmusic': 4, 'weibo': 5, 'baidu': 6, 'tudou': 7, 'amazon': 8, 'thunder': 9, 'gmail': 10, 'pplive': 11, 'qq': 12, 'taobao': 13, 'yahoomail': 14, 'itunes': 15, 'twitter': 16, 'jd': 17, 'sohu': 18, 'youtube': 19, 'youku': 20, 'netflix': 21, 'aimchat': 22, 'kugou': 23, 'skype': 24, 'facebook': 25, 'google': 26, 'mssql': 27, 'ms-exchange': 28}
# LABELS = {'audio': 0, 'browsing': 1, 'chat': 2, 'file': 3, 'mail': 4,
# 'p2p': 5, 'video': 6, 'voip': 7}
LABELS = {'reddit': 0, 'facebook': 1, 'NeteaseMusic': 2,
'twitter': 3, 'qqmail': 4, 'instagram': 5,
'weibo': 6, 'iqiyi': 7, 'imdb': 8,
'TED': 9, 'douban': 10,
'amazon': 11, 'youtube': 12, 'JD': 13,
'youku': 14, 'baidu': 15,
'google': 16, 'tieba': 17, 'taobao': 18,
'bing': 19}
pp, rr, f1s = [[], [], []], [[], [], []], [[], [], []]
# filename is the same as BSNN
p_log('start construct_traffic')
X_total = construct_traffic('../bsnn/data/20_header_payload_all.traffic', LABELS)
for i, k in enumerate(X_total):
p_log(i, ' ', len(k))
def go(X_total):
securitas_time_logs = []
for target in range(len(LABELS.keys())):
p_log('Target: {}'.format(target))
X1 = X_total[target]
if len(X1) > 2000:
X1 = list(np.random.choice(X1, size=[2000,]))
len_negative = 2000 / (len(LABELS) - 1)
len_negative = int(len_negative)
p_log('len_negative: {}'.format(len_negative))
X2 = []
for i in range(len(X_total)):
if i != target:
X2 += list(np.random.choice(X_total[i], size=[len_negative,]))
y1 = [1]*len(X1)
y2 = [0]*len(X2)
X = X1 + X2
y = y1 + y2
p_log('positve samples : {}, total: {}'.format(len(X1), len(X)))
p_log('dataset ok')
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, stratify = y, random_state = 1)
# y_train = deal_to_binary(14, y_train)
# y_test = deal_to_binary(14, y_train)
securitas_start_time = time()
securitas = Securitas(X_train, LABELS, voca_size = 1500, n_topic = 45)
securitas_train_time = time() - securitas_start_time
p_log('securitas create ok')
X_train_features = securitas.get_features(X_train)
securitas_start_time = time()
X_test_features = securitas.get_features(X_test)
securitas_preprocess_time = time() - securitas_start_time
p_log('securitas features ok, begin to train ML model')
models = [DecisionTreeClassifier(), SVC(), MultinomialNB()]
ML_time = {}
for i in range(len(models)):
model = models[i]
p_log('model {}'.format(model.__class__.__name__))
s_t = time()
model.fit(X_train_features, y_train)
time_fit = time() - s_t
s_t = time()
predicts = model.predict(X_test_features)
time_pred = time() - s_t
cmatrix = confusion_matrix(y_test, predicts)
p_log(cmatrix)
# p_sum = cmatrix.sum(axis = 1)
# r_sum = cmatrix.sum(axis = 0)
# p = cmatrix[1][1] / float(p_sum[1]+0.0001) + 0.0001
# r = cmatrix[1][1] / float(r_sum[1]+0.0001) + 0.0001
p, r, f1, _ = precision_recall_fscore_support(
y_test, predicts, labels=[1,])
pp[i].append(p)
rr[i].append(r)
f1s[i].append(f1)
# f1_ = f1(p, r)
p_log('precision: {}, recall: {}, f1: {}'.format(
p, r, f1))
p_log('time fit: {}, time predict: {}'.format(
time_fit, time_pred))
ML_time[model.__class__.__name__] = {
'train': time_fit, 'test': time_pred}
securitas_time_logs.append({
'train': securitas_train_time,
'preprocessing': securitas_preprocess_time,
'mode': ML_time
})
p_log('Securitas time log: {}'.format(securitas_time_logs))
p_log('start train')
go(X_total)
names = [i.__class__.__name__ for i in [DecisionTreeClassifier(), SVC(), MultinomialNB()]]
data = {}
for i, n in enumerate(names):
data[n+'_precision}'] = pp[i]
data[n+'_recall'] = rr[i]
data[n+'_f1score'] = f1s[i]
df = pd.DataFrame(data)
df.to_excel('securitas_results_dataset_20_new.xlsx')
p_log('ok') | securitas/utils.py | from sklearn.feature_extraction.text import CountVectorizer
import pandas as pd
from sklearn.model_selection import train_test_split
import numpy as np
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_recall_fscore_support
from sklearn.svm import SVC
from sklearn.naive_bayes import MultinomialNB
import pickle
import sys
from time import time
def p_log(*ks, **kwargs):
print(*ks, **kwargs)
sys.stdout.flush()
def construct_traffic(filename, LABELS):
X = [[] for i in range(len(LABELS))]
with open(filename) as f:
for i in f.readlines():
i = i.strip().split()
tag = i[0].split('//')[0]
if tag in LABELS:
X[LABELS[tag]].append(' '.join(i[1:1501]))
return X
def cmp_my(x):
return x[1]
class Securitas():
def __init__(self, X, labels, voca_size, n_topic):
self.LABELS = labels
self.voca_size = voca_size
self.n_topic = n_topic
self.voca = self.get_vocaubulary(X, self.voca_size)
self.lda = LatentDirichletAllocation(n_components=n_topic, doc_topic_prior=0.1, topic_word_prior=0.01)
self.lda = self.fit(X)
def fit(self, X):
X = self.get_input_vectors(X)
self.lda.fit(X)
return self.lda
def get_vocaubulary(self, X, need_size):
vec = CountVectorizer(min_df=1, ngram_range=(3,3),decode_error="ignore")
X = vec.fit_transform(X)
if need_size >= len(vec.get_feature_names()):
need_size = len(vec.get_feature_names())
# print('shape of X:', X.shape)
X = X.toarray()
X = np.sum(X, axis = 0)
voca_indexs = {value:key for key, value in vec.vocabulary_.items()}
X = sorted([(i,r) for i, r in enumerate(X)], key = cmp_my, reverse = True)
res = {voca_indexs[item[0]]:i for i, item in enumerate(X[:need_size])}
return res
def get_input_vectors(self, X):
vec = CountVectorizer(min_df = 1, ngram_range=(3,3), decode_error= "ignore", vocabulary= self.voca)
X = vec.fit_transform(X)
return X.toarray()
def get_features(self, X):
X_input_features = self.get_input_vectors(X)
X_features = self.lda.transform(X_input_features)
return X_features
def deal_to_binary(target, y):
for i in range(len(y)):
if y[i] != target:
y[i] = 0
else:
y[i] = 1
return y
def f1(p, r):
return float(2*p*r) / float(p+r)
# LABELS = {'vimeo': 0, 'spotify': 1, 'voipbuster': 2, 'sinauc': 3, 'cloudmusic': 4, 'weibo': 5, 'baidu': 6, 'tudou': 7, 'amazon': 8, 'thunder': 9, 'gmail': 10, 'pplive': 11, 'qq': 12, 'taobao': 13, 'yahoomail': 14, 'itunes': 15, 'twitter': 16, 'jd': 17, 'sohu': 18, 'youtube': 19, 'youku': 20, 'netflix': 21, 'aimchat': 22, 'kugou': 23, 'skype': 24, 'facebook': 25, 'google': 26, 'mssql': 27, 'ms-exchange': 28}
# LABELS = {'audio': 0, 'browsing': 1, 'chat': 2, 'file': 3, 'mail': 4,
# 'p2p': 5, 'video': 6, 'voip': 7}
LABELS = {'reddit': 0, 'facebook': 1, 'NeteaseMusic': 2,
'twitter': 3, 'qqmail': 4, 'instagram': 5,
'weibo': 6, 'iqiyi': 7, 'imdb': 8,
'TED': 9, 'douban': 10,
'amazon': 11, 'youtube': 12, 'JD': 13,
'youku': 14, 'baidu': 15,
'google': 16, 'tieba': 17, 'taobao': 18,
'bing': 19}
pp, rr, f1s = [[], [], []], [[], [], []], [[], [], []]
# filename is the same as BSNN
p_log('start construct_traffic')
X_total = construct_traffic('../bsnn/data/20_header_payload_all.traffic', LABELS)
for i, k in enumerate(X_total):
p_log(i, ' ', len(k))
def go(X_total):
securitas_time_logs = []
for target in range(len(LABELS.keys())):
p_log('Target: {}'.format(target))
X1 = X_total[target]
if len(X1) > 2000:
X1 = list(np.random.choice(X1, size=[2000,]))
len_negative = 2000 / (len(LABELS) - 1)
len_negative = int(len_negative)
p_log('len_negative: {}'.format(len_negative))
X2 = []
for i in range(len(X_total)):
if i != target:
X2 += list(np.random.choice(X_total[i], size=[len_negative,]))
y1 = [1]*len(X1)
y2 = [0]*len(X2)
X = X1 + X2
y = y1 + y2
p_log('positve samples : {}, total: {}'.format(len(X1), len(X)))
p_log('dataset ok')
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, stratify = y, random_state = 1)
# y_train = deal_to_binary(14, y_train)
# y_test = deal_to_binary(14, y_train)
securitas_start_time = time()
securitas = Securitas(X_train, LABELS, voca_size = 1500, n_topic = 45)
securitas_train_time = time() - securitas_start_time
p_log('securitas create ok')
X_train_features = securitas.get_features(X_train)
securitas_start_time = time()
X_test_features = securitas.get_features(X_test)
securitas_preprocess_time = time() - securitas_start_time
p_log('securitas features ok, begin to train ML model')
models = [DecisionTreeClassifier(), SVC(), MultinomialNB()]
ML_time = {}
for i in range(len(models)):
model = models[i]
p_log('model {}'.format(model.__class__.__name__))
s_t = time()
model.fit(X_train_features, y_train)
time_fit = time() - s_t
s_t = time()
predicts = model.predict(X_test_features)
time_pred = time() - s_t
cmatrix = confusion_matrix(y_test, predicts)
p_log(cmatrix)
# p_sum = cmatrix.sum(axis = 1)
# r_sum = cmatrix.sum(axis = 0)
# p = cmatrix[1][1] / float(p_sum[1]+0.0001) + 0.0001
# r = cmatrix[1][1] / float(r_sum[1]+0.0001) + 0.0001
p, r, f1, _ = precision_recall_fscore_support(
y_test, predicts, labels=[1,])
pp[i].append(p)
rr[i].append(r)
f1s[i].append(f1)
# f1_ = f1(p, r)
p_log('precision: {}, recall: {}, f1: {}'.format(
p, r, f1))
p_log('time fit: {}, time predict: {}'.format(
time_fit, time_pred))
ML_time[model.__class__.__name__] = {
'train': time_fit, 'test': time_pred}
securitas_time_logs.append({
'train': securitas_train_time,
'preprocessing': securitas_preprocess_time,
'mode': ML_time
})
p_log('Securitas time log: {}'.format(securitas_time_logs))
p_log('start train')
go(X_total)
names = [i.__class__.__name__ for i in [DecisionTreeClassifier(), SVC(), MultinomialNB()]]
data = {}
for i, n in enumerate(names):
data[n+'_precision}'] = pp[i]
data[n+'_recall'] = rr[i]
data[n+'_f1score'] = f1s[i]
df = pd.DataFrame(data)
df.to_excel('securitas_results_dataset_20_new.xlsx')
p_log('ok') | 0.280715 | 0.326164 |
import matplotlib.pyplot as plt
import numpy as np
from filterpy.common import Q_discrete_white_noise
from numpy.random import randn
from filterpy.kalman import UnscentedKalmanFilter as UKF
from filterpy.kalman import MerweScaledSigmaPoints
import book_format
book_format.set_style()
class INSim:
def __init__(self, pos, dt_):
self.pos = pos
self.dt = dt_
def update(self, vel):
""" Compute and returns next position. Incorporates
random variation in velocity. """
dx = vel*self.dt
self.pos += dx
return self.pos
class LagHardwareModel:
# hardware model
def __init__(self, pos, tau_, dt_, pos_std_):
self.pos = pos
self.tau = tau_
self.dt = dt_
self.pos_std = pos_std_
def noisy_reading(self, pos_in):
""" Return pos with
simulated noise"""
pos_past = self.pos
vel = (pos_in - self.pos) / self.tau
self.pos += vel*self.dt + randn()*self.pos_std
return pos_past, vel
def fx_calc(self, x, dt_, u, t_=None, z_=None, tau_=None):
""" innovation function """
out = np.empty_like(x)
out[0] = x[1]*dt_ + x[0] # adds past value like noisy_reading
if tau_ is None:
out[1] = (u - out[0])/self.tau
else:
out[1] = (u - out[0])/tau_
return out
def h_lag(x):
""" feedback function """
return [x[0]]
# complete tracking ukf
tau_hardware = 0.159 # Hardware lag (0.159 for 1 Hz -3dB bandwidth)
tau_fx = 0.159 # Kalman lag estimate (0.159 for 1 Hz -3dB bandwidth)
dt = 0.1
pos_sense_std = 5 # Hardware sensor variation (1)
# UKF settings
r_std = .1 # Kalman sensor uncertainty (0.1)
q_std = 7 # Process uncertainty (7)
in_pos = 0
in_vel = 0
lag_pos = in_pos
lag_vel = in_vel
# Hardware simulation
in_lag20 = INSim(lag_pos, dt)
lag20_hardware = LagHardwareModel(lag_pos, tau_hardware, dt, pos_sense_std)
# Setup the UKF
points = MerweScaledSigmaPoints(n=2, alpha=.001, beta=2., kappa=1.)
kf = UKF(dim_x=2, dim_z=1, dt=dt, fx=lag20_hardware.fx_calc, hx=h_lag, points=points)
kf.Q = Q_discrete_white_noise(dim=2, dt=dt, var=q_std*q_std)
kf.R = r_std**2
kf.x = np.array([lag_pos, lag_vel])
kf.P = np.eye(2)*100
np.random.seed(200)
t = np.arange(0, 5+dt, dt)
n = len(t)
zs = []
refs = []
xs = []
vs = []
vhs = []
prior_x_est = []
prior_v_est = []
Ks = []
for i in range(len(t)):
if t[i] < 1:
v = 0
elif t[i] < 1.8:
v = 100
elif t[i] < 3.0:
v = 0
elif t[i] < 3.8:
v = -100
else:
v = 0
ref = in_lag20.update(v)
z, vh = lag20_hardware.noisy_reading(ref)
kf.predict(u=ref, tau_=tau_fx)
kf.update(z)
refs.append(ref)
zs.append(z)
vhs.append(vh)
prior_x_est.append(kf.x_prior[0])
prior_v_est.append(kf.x_prior[1])
xs.append(kf.x[0])
vs.append(kf.x[1])
Ks.append(kf.K[0,0])
# UKF.batch_filter does not support keyword arguments fx_args, hx_args
print(kf.x, 'log-likelihood', kf.log_likelihood, 'Kalman gain', kf.K.T)
plt.figure()
plt.subplot(221); plt.title('Ex 20 lag UKF.py')
plt.scatter(t, prior_x_est, color='green', label='Post X', marker='o')
plt.scatter(t, zs, color='black', label='Meas X', marker='.')
plt.plot(t, xs, color='green', label='Est X')
plt.plot(t, refs, color='blue', linestyle='--', label='Ref X')
plt.legend(loc=2)
plt.subplot(222)
plt.scatter(t, vhs, color='black', label='Meas V', marker='.')
plt.plot(t, prior_v_est, color='green', label='Post V')
plt.legend(loc=3)
plt.subplot(223)
plt.plot(t, Ks, color='green', label='K')
plt.legend(loc=3)
plt.show() | SOC_Photon/Battery State/EKF/sandbox/Ex 20 lag UKF.py | import matplotlib.pyplot as plt
import numpy as np
from filterpy.common import Q_discrete_white_noise
from numpy.random import randn
from filterpy.kalman import UnscentedKalmanFilter as UKF
from filterpy.kalman import MerweScaledSigmaPoints
import book_format
book_format.set_style()
class INSim:
def __init__(self, pos, dt_):
self.pos = pos
self.dt = dt_
def update(self, vel):
""" Compute and returns next position. Incorporates
random variation in velocity. """
dx = vel*self.dt
self.pos += dx
return self.pos
class LagHardwareModel:
# hardware model
def __init__(self, pos, tau_, dt_, pos_std_):
self.pos = pos
self.tau = tau_
self.dt = dt_
self.pos_std = pos_std_
def noisy_reading(self, pos_in):
""" Return pos with
simulated noise"""
pos_past = self.pos
vel = (pos_in - self.pos) / self.tau
self.pos += vel*self.dt + randn()*self.pos_std
return pos_past, vel
def fx_calc(self, x, dt_, u, t_=None, z_=None, tau_=None):
""" innovation function """
out = np.empty_like(x)
out[0] = x[1]*dt_ + x[0] # adds past value like noisy_reading
if tau_ is None:
out[1] = (u - out[0])/self.tau
else:
out[1] = (u - out[0])/tau_
return out
def h_lag(x):
""" feedback function """
return [x[0]]
# complete tracking ukf
tau_hardware = 0.159 # Hardware lag (0.159 for 1 Hz -3dB bandwidth)
tau_fx = 0.159 # Kalman lag estimate (0.159 for 1 Hz -3dB bandwidth)
dt = 0.1
pos_sense_std = 5 # Hardware sensor variation (1)
# UKF settings
r_std = .1 # Kalman sensor uncertainty (0.1)
q_std = 7 # Process uncertainty (7)
in_pos = 0
in_vel = 0
lag_pos = in_pos
lag_vel = in_vel
# Hardware simulation
in_lag20 = INSim(lag_pos, dt)
lag20_hardware = LagHardwareModel(lag_pos, tau_hardware, dt, pos_sense_std)
# Setup the UKF
points = MerweScaledSigmaPoints(n=2, alpha=.001, beta=2., kappa=1.)
kf = UKF(dim_x=2, dim_z=1, dt=dt, fx=lag20_hardware.fx_calc, hx=h_lag, points=points)
kf.Q = Q_discrete_white_noise(dim=2, dt=dt, var=q_std*q_std)
kf.R = r_std**2
kf.x = np.array([lag_pos, lag_vel])
kf.P = np.eye(2)*100
np.random.seed(200)
t = np.arange(0, 5+dt, dt)
n = len(t)
zs = []
refs = []
xs = []
vs = []
vhs = []
prior_x_est = []
prior_v_est = []
Ks = []
for i in range(len(t)):
if t[i] < 1:
v = 0
elif t[i] < 1.8:
v = 100
elif t[i] < 3.0:
v = 0
elif t[i] < 3.8:
v = -100
else:
v = 0
ref = in_lag20.update(v)
z, vh = lag20_hardware.noisy_reading(ref)
kf.predict(u=ref, tau_=tau_fx)
kf.update(z)
refs.append(ref)
zs.append(z)
vhs.append(vh)
prior_x_est.append(kf.x_prior[0])
prior_v_est.append(kf.x_prior[1])
xs.append(kf.x[0])
vs.append(kf.x[1])
Ks.append(kf.K[0,0])
# UKF.batch_filter does not support keyword arguments fx_args, hx_args
print(kf.x, 'log-likelihood', kf.log_likelihood, 'Kalman gain', kf.K.T)
plt.figure()
plt.subplot(221); plt.title('Ex 20 lag UKF.py')
plt.scatter(t, prior_x_est, color='green', label='Post X', marker='o')
plt.scatter(t, zs, color='black', label='Meas X', marker='.')
plt.plot(t, xs, color='green', label='Est X')
plt.plot(t, refs, color='blue', linestyle='--', label='Ref X')
plt.legend(loc=2)
plt.subplot(222)
plt.scatter(t, vhs, color='black', label='Meas V', marker='.')
plt.plot(t, prior_v_est, color='green', label='Post V')
plt.legend(loc=3)
plt.subplot(223)
plt.plot(t, Ks, color='green', label='K')
plt.legend(loc=3)
plt.show() | 0.761716 | 0.687768 |
import numpy as np
# Values used in the paper
use_minX = np.array([-7.9118004, 0., -9.394201, 0., -3.9944992, 0., -4.2058992, 0., -2.851099, 0., -6.1702003, 0., -4.963501, 0., -6.359, 0., -5.72029], dtype=np.float32)
use_maxX = np.array([5.9019985, 0.5281896, 5.8084, 0.46895373, 2.9131012, 0.52544963, 3.900301, 0.45075417, 3.905901, 0.5185917, 4.9472, 0.4172655, 6.077201, 0.5891852, 7.9728994, 0.46186885, 3.2700593], dtype=np.float32)
# Default output npy filename
outfn = "infer.npy"
# the 17 columns as input features
# column 4: g-r in mean PSF AB magnitude
# column 5: uncertainty of the column 4
# column 6: g-r in mean Kron AB magnitude
# column 7: uncertainty of the column 6
# column 8: r-i in mean PSF AB magnitude
# column 9: uncertainty of the column 8
# column 10: r-i in mean Kron AB magnitude
# column 11: uncertainty of the column 10
# column 12: i-z in mean PSF AB magnitude
# column 13: uncertainty of the column 12
# column 14: i-z in mean Kron AB magnitude
# column 15: uncertainty of the column 14
# column 16: z-y in mean PSF AB magnitude
# column 17: uncertainty of the column 16
# column 18: z-y in mean Kron AB magnitude
# column 19: uncertainty of the column 18
# column 20: E(B-V)
phot_data = np.genfromtxt("example_inference_data.csv", delimiter=",",
dtype=np.float32, usecols=range(3,20))
# the second column = spectroscopic redshift which is not required for inference.
# For inference, simply put some number.
zspec = np.genfromtxt("example_inference_data.csv", delimiter=",",
dtype=np.float32, usecols=(1))
# the third column = uncertainty of spectroscopic redshift which is not required for inference.
# For inference, simply put some number.
zerr = np.genfromtxt("example_inference_data.csv", delimiter=",",
dtype=np.float32, usecols=(2))
X = phot_data
X[:,-1] = np.log(X[:,-1])
labels = np.zeros(len(zspec))
Y = np.vstack((labels, zspec, zerr)).astype(np.float32).T
normedX = np.zeros(X.shape)
n_features = X.shape[1]
for feature_ind in range(0, n_features):
normedX[:,feature_ind] = (X[:,feature_ind]-use_minX[feature_ind])/(use_maxX[feature_ind]-use_minX[feature_ind])*2.-1.
normed = np.hstack((Y, normedX.astype(np.float32)))
print(normed.shape)
np.save(outfn, normed) | convert_csv_to_npy.py |
import numpy as np
# Values used in the paper
use_minX = np.array([-7.9118004, 0., -9.394201, 0., -3.9944992, 0., -4.2058992, 0., -2.851099, 0., -6.1702003, 0., -4.963501, 0., -6.359, 0., -5.72029], dtype=np.float32)
use_maxX = np.array([5.9019985, 0.5281896, 5.8084, 0.46895373, 2.9131012, 0.52544963, 3.900301, 0.45075417, 3.905901, 0.5185917, 4.9472, 0.4172655, 6.077201, 0.5891852, 7.9728994, 0.46186885, 3.2700593], dtype=np.float32)
# Default output npy filename
outfn = "infer.npy"
# the 17 columns as input features
# column 4: g-r in mean PSF AB magnitude
# column 5: uncertainty of the column 4
# column 6: g-r in mean Kron AB magnitude
# column 7: uncertainty of the column 6
# column 8: r-i in mean PSF AB magnitude
# column 9: uncertainty of the column 8
# column 10: r-i in mean Kron AB magnitude
# column 11: uncertainty of the column 10
# column 12: i-z in mean PSF AB magnitude
# column 13: uncertainty of the column 12
# column 14: i-z in mean Kron AB magnitude
# column 15: uncertainty of the column 14
# column 16: z-y in mean PSF AB magnitude
# column 17: uncertainty of the column 16
# column 18: z-y in mean Kron AB magnitude
# column 19: uncertainty of the column 18
# column 20: E(B-V)
phot_data = np.genfromtxt("example_inference_data.csv", delimiter=",",
dtype=np.float32, usecols=range(3,20))
# the second column = spectroscopic redshift which is not required for inference.
# For inference, simply put some number.
zspec = np.genfromtxt("example_inference_data.csv", delimiter=",",
dtype=np.float32, usecols=(1))
# the third column = uncertainty of spectroscopic redshift which is not required for inference.
# For inference, simply put some number.
zerr = np.genfromtxt("example_inference_data.csv", delimiter=",",
dtype=np.float32, usecols=(2))
X = phot_data
X[:,-1] = np.log(X[:,-1])
labels = np.zeros(len(zspec))
Y = np.vstack((labels, zspec, zerr)).astype(np.float32).T
normedX = np.zeros(X.shape)
n_features = X.shape[1]
for feature_ind in range(0, n_features):
normedX[:,feature_ind] = (X[:,feature_ind]-use_minX[feature_ind])/(use_maxX[feature_ind]-use_minX[feature_ind])*2.-1.
normed = np.hstack((Y, normedX.astype(np.float32)))
print(normed.shape)
np.save(outfn, normed) | 0.606964 | 0.512205 |
import argparse
import torch
import utils
import os
import pickle
import gym
import envs
from torch.utils import data
import numpy as np
from collections import defaultdict
import modules
import matplotlib.pyplot as plt
import matplotlib as mpl
import ffmpeg
input_shape = None
def load_env():
global input_shape
env = gym.make("ShapesTrain-v0")
(state, obs) = env.reset()
input_shape = obs.shape
return env
def load_model(meta_file, model_file, cuda):
args = pickle.load(open(meta_file, 'rb'))['args']
args.batch_size = 100
args.seed = 0
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if cuda:
torch.cuda.manual_seed(args.seed)
device = torch.device('cuda' if args.cuda else 'cpu')
model = modules.ContrastiveSWM(
embedding_dim=args.embedding_dim,
hidden_dim=args.hidden_dim,
action_dim=args.action_dim,
input_dims=input_shape,
num_objects=args.num_objects,
sigma=args.sigma,
hinge=args.hinge,
ignore_action=args.ignore_action,
copy_action=args.copy_action,
encoder=args.encoder).to(device)
model.load_state_dict(torch.load(model_file))
model.eval()
return model
def visual_rollout(env, model, render_folder):
latent_render_folder = os.path.join(render_folder, "latent")
os.makedirs(latent_render_folder, exist_ok=True)
obs_render_folder = os.path.join(render_folder, "obs")
os.makedirs(obs_render_folder, exist_ok=True)
merge_render_folder = os.path.join(render_folder, "merge")
os.makedirs(merge_render_folder, exist_ok=True)
cnames = ['blue', 'black', 'green', 'red', 'cyan', 'magenta', 'navy', 'lime', 'gold', 'coral']
os.makedirs(render_folder, exist_ok=True)
(state, obs) = env.reset()
timer = 0
latent_render_range = None
while True:
with torch.no_grad():
torch_obs = torch.unsqueeze(torch.Tensor(obs), 0)
torch_z = model(torch_obs)
numpy_z = torch.squeeze(torch_z).numpy()
if latent_render_range is None:
x_min, y_min = numpy_z.min(0) - 1
x_max, y_max = numpy_z.max(0) + 1
latent_render_range = [x_min, x_max, y_min, y_max]
# obs render
plt.imshow(obs.transpose(), interpolation='nearest')
plt.savefig(os.path.join(obs_render_folder, "img{:04d}.png".format(timer)))
plt.close()
# latent render
plt.axis(latent_render_range)
plt.scatter(numpy_z[:, 0], numpy_z[:, 1], c=cnames[:len(numpy_z)], s=100, marker="s")
plt.savefig(os.path.join(latent_render_folder, "img{:04d}.png".format(timer)))
plt.close()
# merge render
_, axes = plt.subplots(1, 2, figsize=(8, 4))
axes[0].imshow(obs.transpose(), interpolation='nearest')
plt.axis(latent_render_range)
axes[1].scatter(numpy_z[:, 0], numpy_z[:, 1], c=cnames[:len(numpy_z)], s=100, marker="s")
plt.savefig(os.path.join(merge_render_folder, "img{:04d}.png".format(timer)))
plt.close()
timer += 1
(state, obs), reward, done, _ = env.step(env.action_space.sample())
if done:
break
# video
# examples:
# ffmpeg -y -f image2 -i render/shapes_double_num/merge/img%04d.png render/shapes_double_num/output.mp4
command = "ffmpeg -r 5 -y -f image2 -i {} {}".format(os.path.join(merge_render_folder, "img%04d.png"),
os.path.join(render_folder, "output.mp4"))
os.system(command)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--checkpoints-folder', type=str,
default='checkpoints',
help='Path to checkpoints.')
parser.add_argument('--render-folder', type=str,
default='render',
help='Path to save render result.')
parser.add_argument('--name', type=str,
default='shapes',
help='Experiment name.')
parser.add_argument('--num-steps', type=int, default=1,
help='Number of prediction steps to evaluate.')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='Disable CUDA training.')
args_eval = parser.parse_args()
save_folder = os.path.join(args_eval.checkpoints_folder, args_eval.name)
meta_file = os.path.join(save_folder, 'metadata.pkl')
model_file = os.path.join(save_folder, 'model.pt')
env = load_env()
cuda = not args_eval.no_cuda and torch.cuda.is_available()
model = load_model(meta_file, model_file, cuda)
render_folder = os.path.join(args_eval.render_folder, args_eval.name)
visual_rollout(env, model, render_folder) | visual_shapes.py | import argparse
import torch
import utils
import os
import pickle
import gym
import envs
from torch.utils import data
import numpy as np
from collections import defaultdict
import modules
import matplotlib.pyplot as plt
import matplotlib as mpl
import ffmpeg
input_shape = None
def load_env():
global input_shape
env = gym.make("ShapesTrain-v0")
(state, obs) = env.reset()
input_shape = obs.shape
return env
def load_model(meta_file, model_file, cuda):
args = pickle.load(open(meta_file, 'rb'))['args']
args.batch_size = 100
args.seed = 0
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if cuda:
torch.cuda.manual_seed(args.seed)
device = torch.device('cuda' if args.cuda else 'cpu')
model = modules.ContrastiveSWM(
embedding_dim=args.embedding_dim,
hidden_dim=args.hidden_dim,
action_dim=args.action_dim,
input_dims=input_shape,
num_objects=args.num_objects,
sigma=args.sigma,
hinge=args.hinge,
ignore_action=args.ignore_action,
copy_action=args.copy_action,
encoder=args.encoder).to(device)
model.load_state_dict(torch.load(model_file))
model.eval()
return model
def visual_rollout(env, model, render_folder):
latent_render_folder = os.path.join(render_folder, "latent")
os.makedirs(latent_render_folder, exist_ok=True)
obs_render_folder = os.path.join(render_folder, "obs")
os.makedirs(obs_render_folder, exist_ok=True)
merge_render_folder = os.path.join(render_folder, "merge")
os.makedirs(merge_render_folder, exist_ok=True)
cnames = ['blue', 'black', 'green', 'red', 'cyan', 'magenta', 'navy', 'lime', 'gold', 'coral']
os.makedirs(render_folder, exist_ok=True)
(state, obs) = env.reset()
timer = 0
latent_render_range = None
while True:
with torch.no_grad():
torch_obs = torch.unsqueeze(torch.Tensor(obs), 0)
torch_z = model(torch_obs)
numpy_z = torch.squeeze(torch_z).numpy()
if latent_render_range is None:
x_min, y_min = numpy_z.min(0) - 1
x_max, y_max = numpy_z.max(0) + 1
latent_render_range = [x_min, x_max, y_min, y_max]
# obs render
plt.imshow(obs.transpose(), interpolation='nearest')
plt.savefig(os.path.join(obs_render_folder, "img{:04d}.png".format(timer)))
plt.close()
# latent render
plt.axis(latent_render_range)
plt.scatter(numpy_z[:, 0], numpy_z[:, 1], c=cnames[:len(numpy_z)], s=100, marker="s")
plt.savefig(os.path.join(latent_render_folder, "img{:04d}.png".format(timer)))
plt.close()
# merge render
_, axes = plt.subplots(1, 2, figsize=(8, 4))
axes[0].imshow(obs.transpose(), interpolation='nearest')
plt.axis(latent_render_range)
axes[1].scatter(numpy_z[:, 0], numpy_z[:, 1], c=cnames[:len(numpy_z)], s=100, marker="s")
plt.savefig(os.path.join(merge_render_folder, "img{:04d}.png".format(timer)))
plt.close()
timer += 1
(state, obs), reward, done, _ = env.step(env.action_space.sample())
if done:
break
# video
# examples:
# ffmpeg -y -f image2 -i render/shapes_double_num/merge/img%04d.png render/shapes_double_num/output.mp4
command = "ffmpeg -r 5 -y -f image2 -i {} {}".format(os.path.join(merge_render_folder, "img%04d.png"),
os.path.join(render_folder, "output.mp4"))
os.system(command)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--checkpoints-folder', type=str,
default='checkpoints',
help='Path to checkpoints.')
parser.add_argument('--render-folder', type=str,
default='render',
help='Path to save render result.')
parser.add_argument('--name', type=str,
default='shapes',
help='Experiment name.')
parser.add_argument('--num-steps', type=int, default=1,
help='Number of prediction steps to evaluate.')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='Disable CUDA training.')
args_eval = parser.parse_args()
save_folder = os.path.join(args_eval.checkpoints_folder, args_eval.name)
meta_file = os.path.join(save_folder, 'metadata.pkl')
model_file = os.path.join(save_folder, 'model.pt')
env = load_env()
cuda = not args_eval.no_cuda and torch.cuda.is_available()
model = load_model(meta_file, model_file, cuda)
render_folder = os.path.join(args_eval.render_folder, args_eval.name)
visual_rollout(env, model, render_folder) | 0.608594 | 0.392511 |
import pprint
import re # noqa: F401
import six
from ubiops.configuration import Configuration
class EnvironmentVariableCopy(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # (Fixed: indentation restored and dataset metadata that had been fused
    # onto the final line removed.)

    # Maps attribute name -> declared OpenAPI type (consumed by to_dict()).
    openapi_types = {
        'source_deployment': 'str',
        'source_version': 'str'
    }

    # Maps attribute name -> JSON key in the API definition.
    attribute_map = {
        'source_deployment': 'source_deployment',
        'source_version': 'source_version'
    }

    def __init__(self, source_deployment=None, source_version=None, local_vars_configuration=None):  # noqa: E501
        """EnvironmentVariableCopy - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._source_deployment = None
        self._source_version = None
        self.discriminator = None

        # source_deployment is required (its setter rejects None when
        # client-side validation is on); source_version is optional and is
        # only assigned when explicitly provided.
        self.source_deployment = source_deployment
        if source_version is not None:
            self.source_version = source_version

    @property
    def source_deployment(self):
        """Gets the source_deployment of this EnvironmentVariableCopy.  # noqa: E501

        :return: The source_deployment of this EnvironmentVariableCopy.  # noqa: E501
        :rtype: str
        """
        return self._source_deployment

    @source_deployment.setter
    def source_deployment(self, source_deployment):
        """Sets the source_deployment of this EnvironmentVariableCopy.

        :param source_deployment: The source_deployment of this EnvironmentVariableCopy.  # noqa: E501
        :type: str
        """
        # Validation only runs when the client configuration enables it.
        if self.local_vars_configuration.client_side_validation and source_deployment is None:  # noqa: E501
            raise ValueError("Invalid value for `source_deployment`, must not be `None`")  # noqa: E501
        if (self.local_vars_configuration.client_side_validation and
                source_deployment is not None and not isinstance(source_deployment, str)):
            raise ValueError("Parameter `source_deployment` must be a string")  # noqa: E501
        if (self.local_vars_configuration.client_side_validation and
                source_deployment is not None and len(source_deployment) < 1):
            raise ValueError("Invalid value for `source_deployment`, length must be greater than or equal to `1`")  # noqa: E501

        self._source_deployment = source_deployment

    @property
    def source_version(self):
        """Gets the source_version of this EnvironmentVariableCopy.  # noqa: E501

        :return: The source_version of this EnvironmentVariableCopy.  # noqa: E501
        :rtype: str
        """
        return self._source_version

    @source_version.setter
    def source_version(self, source_version):
        """Sets the source_version of this EnvironmentVariableCopy.

        :param source_version: The source_version of this EnvironmentVariableCopy.  # noqa: E501
        :type: str
        """
        # None is allowed here (the field is optional); non-None values must
        # be non-empty strings when client-side validation is enabled.
        if (self.local_vars_configuration.client_side_validation and
                source_version is not None and not isinstance(source_version, str)):
            raise ValueError("Parameter `source_version` must be a string")  # noqa: E501
        if (self.local_vars_configuration.client_side_validation and
                source_version is not None and len(source_version) < 1):
            raise ValueError("Invalid value for `source_version`, length must be greater than or equal to `1`")  # noqa: E501

        self._source_version = source_version

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively convert nested models/lists/dicts via their to_dict().
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, EnvironmentVariableCopy):
            return False
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, EnvironmentVariableCopy):
            return True
        return self.to_dict() != other.to_dict()
import re # noqa: F401
import six
from ubiops.configuration import Configuration
class EnvironmentVariableCopy(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'source_deployment': 'str',
'source_version': 'str'
}
attribute_map = {
'source_deployment': 'source_deployment',
'source_version': 'source_version'
}
def __init__(self, source_deployment=None, source_version=None, local_vars_configuration=None): # noqa: E501
"""EnvironmentVariableCopy - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._source_deployment = None
self._source_version = None
self.discriminator = None
self.source_deployment = source_deployment
if source_version is not None:
self.source_version = source_version
@property
def source_deployment(self):
"""Gets the source_deployment of this EnvironmentVariableCopy. # noqa: E501
:return: The source_deployment of this EnvironmentVariableCopy. # noqa: E501
:rtype: str
"""
return self._source_deployment
@source_deployment.setter
def source_deployment(self, source_deployment):
"""Sets the source_deployment of this EnvironmentVariableCopy.
:param source_deployment: The source_deployment of this EnvironmentVariableCopy. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and source_deployment is None: # noqa: E501
raise ValueError("Invalid value for `source_deployment`, must not be `None`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
source_deployment is not None and not isinstance(source_deployment, str)):
raise ValueError("Parameter `source_deployment` must be a string") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
source_deployment is not None and len(source_deployment) < 1):
raise ValueError("Invalid value for `source_deployment`, length must be greater than or equal to `1`") # noqa: E501
self._source_deployment = source_deployment
@property
def source_version(self):
"""Gets the source_version of this EnvironmentVariableCopy. # noqa: E501
:return: The source_version of this EnvironmentVariableCopy. # noqa: E501
:rtype: str
"""
return self._source_version
@source_version.setter
def source_version(self, source_version):
"""Sets the source_version of this EnvironmentVariableCopy.
:param source_version: The source_version of this EnvironmentVariableCopy. # noqa: E501
:type: str
"""
if (self.local_vars_configuration.client_side_validation and
source_version is not None and not isinstance(source_version, str)):
raise ValueError("Parameter `source_version` must be a string") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
source_version is not None and len(source_version) < 1):
raise ValueError("Invalid value for `source_version`, length must be greater than or equal to `1`") # noqa: E501
self._source_version = source_version
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, EnvironmentVariableCopy):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, EnvironmentVariableCopy):
return True
return self.to_dict() != other.to_dict() | 0.698227 | 0.107766 |
import os
import tempfile
import logging
import signal
import pytest
from pybnb.common import inf, nan
from pybnb.misc import (
_cast_to_float_or_int,
MPI_InterruptHandler,
metric_format,
time_format,
get_gap_labels,
as_stream,
get_default_args,
get_keyword_docs,
get_simple_logger,
)
from six import StringIO
# Optional dependencies: record availability so individual tests can skip
# gracefully instead of erroring at import time.
yaml_available = False
try:
    import yaml  # noqa: F401

    yaml_available = True
except ImportError:
    pass

numpy_available = False
try:
    import numpy  # noqa: F401

    numpy_available = True
except ImportError:
    pass
class Test(object):
    """Unit tests for the helper utilities in pybnb.misc.

    (Review note: this block was recovered from a dump with all indentation
    stripped; nesting was reconstructed from context, the dataset metadata
    fused onto the final line was removed, and the bare ``except:`` in
    test_cast_to_float_or_int was narrowed to ``AttributeError``.)
    """

    def test_MPI_InterruptHandler(self):
        """Handlers are installed on entry, restored on exit, and the user
        callback fires when a handled signal is delivered."""
        assert len(MPI_InterruptHandler._sigs) > 0
        original_handlers = [
            (signum, signal.getsignal(signum)) for signum in MPI_InterruptHandler._sigs
        ]
        with MPI_InterruptHandler(lambda s, f: None) as h:
            assert not h._released
        assert h._released
        # All original handlers must be restored after the context exits.
        for i, signum in enumerate(MPI_InterruptHandler._sigs):
            orig = signal.getsignal(signum)
            assert original_handlers[i][0] == signum
            assert original_handlers[i][1] is orig

        def fn(s, f):
            fn.called = True

        fn.called = False
        with MPI_InterruptHandler(fn) as h:
            assert not h._released
            # Invoke the installed handler directly to simulate a signal.
            signum = MPI_InterruptHandler._sigs[0]
            signal.getsignal(signum)(None, None)
        assert h._released
        for i, signum in enumerate(MPI_InterruptHandler._sigs):
            orig = signal.getsignal(signum)
            assert original_handlers[i][0] == signum
            assert original_handlers[i][1] is orig
        assert fn.called

    def test_metric_format(self):
        """metric_format renders values with SI-style unit prefixes."""
        assert metric_format(None) == "<unknown>"
        assert metric_format(0.0) == "0.0 s"
        assert metric_format(0.0, align_unit=True) == "0.0 s "
        assert metric_format(0.0, unit="B") == "0.0 B"
        assert metric_format(0.0, digits=2) == "0.00 s"
        assert metric_format(1000.23, digits=3) == "1.000 Ks"
        assert metric_format(1000.23, digits=4) == "1.0002 Ks"
        assert metric_format(1000000.23, digits=4) == "1.0000 Ms"
        assert metric_format(0.23334, digits=1) == "233.3 ms"
        assert metric_format(0.23334, digits=2) == "233.34 ms"
        assert metric_format(0.00023334, digits=1) == "233.3 us"
        assert metric_format(0.00023334, digits=2) == "233.34 us"
        # Rounding at a unit boundary may promote to the next unit.
        assert metric_format(0.0009999, digits=1) == "999.9 us"
        assert metric_format(0.00099999, digits=1) == "1.0 ms"
        assert metric_format(0.00099999, digits=2) == "999.99 us"
        assert metric_format(0.000999999, digits=2) == "1.00 ms"
        assert metric_format(0.000999999, digits=3) == "999.999 us"
        assert metric_format(0.0009999999, digits=3) == "1.000 ms"
        assert metric_format(0.0009999999, digits=4) == "999.9999 us"
        assert metric_format(0.00099999999, digits=4) == "1.0000 ms"
        assert metric_format(0.00099999999, digits=5) == "999.99999 us"
        assert metric_format(0.000999999999, digits=5) == "1.00000 ms"
        assert metric_format(0.000999999999, digits=6) == "999.999999 us"

    def test_time_format(self):
        """time_format additionally promotes to minutes/hours/days."""
        assert time_format(None) == "<unknown>"
        assert time_format(0.0) == "0.0 s"
        assert time_format(0.0, align_unit=True) == "0.0 s "
        assert time_format(0.0, digits=2) == "0.00 s"
        assert time_format(24.9) == "24.9 s"
        assert time_format(93.462, digits=3) == "1.558 m"
        assert time_format(93.462, digits=4) == "1.5577 m"
        assert time_format(93.462, digits=4, align_unit=True) == "1.5577 m "
        assert time_format(5607.72, digits=3) == "1.558 h"
        assert time_format(5607.72, digits=4) == "1.5577 h"
        assert time_format(5607.72, digits=4, align_unit=True) == "1.5577 h "
        assert time_format(134585.28, digits=3) == "1.558 d"
        assert time_format(134585.28, digits=4) == "1.5577 d"
        assert time_format(134585.28, digits=4, align_unit=True) == "1.5577 d "
        assert time_format(0.23334, digits=1) == "233.3 ms"
        assert time_format(0.23334, digits=2) == "233.34 ms"
        assert time_format(0.00023334, digits=1) == "233.3 us"
        assert time_format(0.00023334, digits=2) == "233.34 us"
        # Rounding at a unit boundary may promote to the next unit.
        assert time_format(0.0009999, digits=1) == "999.9 us"
        assert time_format(0.00099999, digits=1) == "1.0 ms"
        assert time_format(0.00099999, digits=2) == "999.99 us"
        assert time_format(0.000999999, digits=2) == "1.00 ms"
        assert time_format(0.000999999, digits=3) == "999.999 us"
        assert time_format(0.0009999999, digits=3) == "1.000 ms"
        assert time_format(0.0009999999, digits=4) == "999.9999 us"
        assert time_format(0.00099999999, digits=4) == "1.0000 ms"
        assert time_format(0.00099999999, digits=5) == "999.99999 us"
        assert time_format(0.000999999999, digits=5) == "1.00000 ms"
        assert time_format(0.000999999999, digits=6) == "999.999999 us"

    def test_get_gap_labels(self):
        """Width/precision of the gap label grow as the gap tolerance
        shrinks; `key` and `format` are threaded into the format string."""
        l0, l1, l2 = get_gap_labels(10)
        assert l0 == 10
        assert l1 == "{gap:>10}"
        assert l2 == "{gap:>10.2f}"
        l0, l1, l2 = get_gap_labels(1)
        assert l0 == 10
        assert l1 == "{gap:>10}"
        assert l2 == "{gap:>10.2f}"
        l0, l1, l2 = get_gap_labels(0.1)
        assert l0 == 10
        assert l1 == "{gap:>10}"
        assert l2 == "{gap:>10.3f}"
        l0, l1, l2 = get_gap_labels(0.01)
        assert l0 == 10
        assert l1 == "{gap:>10}"
        assert l2 == "{gap:>10.4f}"
        l0, l1, l2 = get_gap_labels(0.001)
        assert l0 == 10
        assert l1 == "{gap:>10}"
        assert l2 == "{gap:>10.5f}"
        l0, l1, l2 = get_gap_labels(0.0001)
        assert l0 == 11
        assert l1 == "{gap:>11}"
        assert l2 == "{gap:>11.6f}"
        l0, l1, l2 = get_gap_labels(0.00001)
        assert l0 == 12
        assert l1 == "{gap:>12}"
        assert l2 == "{gap:>12.7f}"
        l0, l1, l2 = get_gap_labels(0.000001, key="rgap")
        assert l0 == 13
        assert l1 == "{rgap:>13}"
        assert l2 == "{rgap:>13.8f}"
        l0, l1, l2 = get_gap_labels(0.0000001, key="agap", format="g")
        assert l0 == 14
        assert l1 == "{agap:>14}"
        assert l2 == "{agap:>14.9g}"

    def test_as_stream(self):
        """as_stream opens (and closes) filenames, but leaves an already
        open stream untouched."""
        fid, fname = tempfile.mkstemp()
        os.close(fid)
        with as_stream(fname) as f:
            assert not f.closed
            assert hasattr(f, "write")
        assert f.closed
        # Same behavior for a unicode filename.
        fid, fname = tempfile.mkstemp()
        os.close(fid)
        with as_stream(u"" + fname) as f:
            assert not f.closed
            assert hasattr(f, "write")
        assert f.closed
        # An existing file object is passed through and NOT closed.
        with open(fname) as f:
            assert not f.closed
            with as_stream(f) as f_:
                assert f is f_
                assert not f.closed
            assert not f.closed

    def test_get_default_args(self):
        """get_default_args returns only keyword defaults."""
        def f(a):  # pragma:nocover
            pass

        assert get_default_args(f) == {}

        def f(a, b):  # pragma:nocover
            pass

        assert get_default_args(f) == {}

        def f(*args):  # pragma:nocover
            pass

        assert get_default_args(f) == {}

        def f(**kwds):  # pragma:nocover
            pass

        assert get_default_args(f) == {}

        def f(*args, **kwds):  # pragma:nocover
            pass

        assert get_default_args(f) == {}

        def f(a, b=1):  # pragma:nocover
            pass

        assert get_default_args(f) == {"b": 1}

        def f(a=1):  # pragma:nocover
            pass

        assert get_default_args(f) == {"a": 1}

        def f(a=(1,)):  # pragma:nocover
            pass

        assert get_default_args(f) == {"a": (1,)}

    def test_get_keyword_docs(self):
        """get_keyword_docs parses numpydoc parameter sections into
        doc/choices/default entries."""
        if not yaml_available:
            pytest.skip("yaml is not available")
        import pybnb.solver

        # Sanity check against a real docstring: parsed defaults must match
        # the actual signature defaults.
        data = get_keyword_docs(pybnb.solver.Solver.solve.__doc__)
        kwds = get_default_args(pybnb.solver.Solver.solve)
        assert len(data) > 1
        for key in data:
            if "default" in data[key]:
                assert data[key]["default"] == kwds[key]
            assert "choices" not in data[key]

        def f():
            """Something

            Parameters
            ----------
            junk1 : {"a", "b", 1}
                Junk1 description.
            junk2 : {"c", "d"}, optional
                Junk2 description more than one
                line. (default: "c")
            junk3 : int
                Junk3 description.
            """

        data = get_keyword_docs(f.__doc__)
        assert data == {
            "junk1": {"choices": ["a", "b", 1], "doc": "Junk1 description."},
            "junk2": {
                "choices": ["c", "d"],
                "default": "c",
                "doc": "Junk2 description more than one line.",
            },
            "junk3": {"doc": "Junk3 description."},
        }

    def test_get_simple_logger(self):
        """get_simple_logger wires console/file/stream handlers and honors
        the requested level."""
        log = get_simple_logger(console=False)
        assert log.disabled
        log = get_simple_logger()
        assert not log.disabled
        log = get_simple_logger(console=True)
        assert not log.disabled
        assert len(log.handlers) == 2
        log.info("junk")
        fid, fname = tempfile.mkstemp()
        out = StringIO()
        os.close(fid)
        formatter = logging.Formatter("[%(levelname)s] %(message)s")
        try:
            log = get_simple_logger(
                filename=fname,
                stream=out,
                console=True,
                formatter=formatter,
                level=logging.WARNING,
            )
            assert len(log.handlers) == 4
            log.error("error_line")
            log.warning("warning_line")
            log.info("info_line")
            log.debug("debug_line")
            for handler in log.handlers:
                handler.close()
            # Only WARNING and above must reach the file and the stream.
            with open(fname) as f:
                lines = f.readlines()
            assert len(lines) == 2
            assert lines[0].strip() == "[ERROR] error_line"
            assert lines[1].strip() == "[WARNING] warning_line"
            del lines
            lines = out.getvalue().splitlines()
            assert lines[0].strip() == "[ERROR] error_line"
            assert lines[1].strip() == "[WARNING] warning_line"
        finally:
            os.remove(fname)

    def test_cast_to_float_or_int(self):
        """_cast_to_float_or_int maps numeric scalars (including numpy
        scalar types, when numpy is installed) to builtin float/int."""
        assert type(_cast_to_float_or_int(inf)) is float
        assert type(_cast_to_float_or_int(nan)) is float
        assert type(_cast_to_float_or_int(1.0)) is float
        assert type(_cast_to_float_or_int(1.1)) is float
        assert type(_cast_to_float_or_int(1)) is int
        assert type(_cast_to_float_or_int(True)) is int
        with pytest.raises(TypeError):
            _cast_to_float_or_int(None)
        if numpy_available:
            numpy_types = []
            numpy_types.append(("bool", int))
            numpy_types.append(("bool_", float))  # edge case
            numpy_types.append(("int_", int))
            numpy_types.append(("intc", int))
            numpy_types.append(("intp", int))
            numpy_types.append(("int8", int))
            numpy_types.append(("int16", int))
            numpy_types.append(("int32", int))
            numpy_types.append(("int64", int))
            numpy_types.append(("uint8", int))
            numpy_types.append(("uint16", int))
            numpy_types.append(("uint32", int))
            numpy_types.append(("uint64", int))
            numpy_types.append(("float_", float))
            numpy_types.append(("float16", float))
            numpy_types.append(("float32", float))
            numpy_types.append(("float64", float))
            numpy_types.append(("float128", float))
            numpy_types.append(("complex_", float))
            numpy_types.append(("complex64", float))
            numpy_types.append(("complex128", float))
            for name, cast_type in numpy_types:
                try:
                    type_ = getattr(numpy, name)
                # Narrowed from a bare `except:`; some aliases (e.g.
                # numpy.bool, numpy.float_) do not exist on all numpy
                # versions.
                except AttributeError:  # pragma:nocover
                    continue
                assert type(_cast_to_float_or_int(type_())) is cast_type
import tempfile
import logging
import signal
import pytest
from pybnb.common import inf, nan
from pybnb.misc import (
_cast_to_float_or_int,
MPI_InterruptHandler,
metric_format,
time_format,
get_gap_labels,
as_stream,
get_default_args,
get_keyword_docs,
get_simple_logger,
)
from six import StringIO
yaml_available = False
try:
import yaml # noqa: F401
yaml_available = True
except ImportError:
pass
numpy_available = False
try:
import numpy # noqa: F401
numpy_available = True
except ImportError:
pass
class Test(object):
def test_MPI_InterruptHandler(self):
assert len(MPI_InterruptHandler._sigs) > 0
original_handlers = [
(signum, signal.getsignal(signum)) for signum in MPI_InterruptHandler._sigs
]
with MPI_InterruptHandler(lambda s, f: None) as h:
assert not h._released
assert h._released
for i, signum in enumerate(MPI_InterruptHandler._sigs):
orig = signal.getsignal(signum)
assert original_handlers[i][0] == signum
assert original_handlers[i][1] is orig
def fn(s, f):
fn.called = True
fn.called = False
with MPI_InterruptHandler(fn) as h:
assert not h._released
signum = MPI_InterruptHandler._sigs[0]
signal.getsignal(signum)(None, None)
assert h._released
for i, signum in enumerate(MPI_InterruptHandler._sigs):
orig = signal.getsignal(signum)
assert original_handlers[i][0] == signum
assert original_handlers[i][1] is orig
assert fn.called
def test_metric_format(self):
assert metric_format(None) == "<unknown>"
assert metric_format(0.0) == "0.0 s"
assert metric_format(0.0, align_unit=True) == "0.0 s "
assert metric_format(0.0, unit="B") == "0.0 B"
assert metric_format(0.0, digits=2) == "0.00 s"
assert metric_format(1000.23, digits=3) == "1.000 Ks"
assert metric_format(1000.23, digits=4) == "1.0002 Ks"
assert metric_format(1000000.23, digits=4) == "1.0000 Ms"
assert metric_format(0.23334, digits=1) == "233.3 ms"
assert metric_format(0.23334, digits=2) == "233.34 ms"
assert metric_format(0.00023334, digits=1) == "233.3 us"
assert metric_format(0.00023334, digits=2) == "233.34 us"
assert metric_format(0.0009999, digits=1) == "999.9 us"
assert metric_format(0.00099999, digits=1) == "1.0 ms"
assert metric_format(0.00099999, digits=2) == "999.99 us"
assert metric_format(0.000999999, digits=2) == "1.00 ms"
assert metric_format(0.000999999, digits=3) == "999.999 us"
assert metric_format(0.0009999999, digits=3) == "1.000 ms"
assert metric_format(0.0009999999, digits=4) == "999.9999 us"
assert metric_format(0.00099999999, digits=4) == "1.0000 ms"
assert metric_format(0.00099999999, digits=5) == "999.99999 us"
assert metric_format(0.000999999999, digits=5) == "1.00000 ms"
assert metric_format(0.000999999999, digits=6) == "999.999999 us"
def test_time_format(self):
assert time_format(None) == "<unknown>"
assert time_format(0.0) == "0.0 s"
assert time_format(0.0, align_unit=True) == "0.0 s "
assert time_format(0.0, digits=2) == "0.00 s"
assert time_format(24.9) == "24.9 s"
assert time_format(93.462, digits=3) == "1.558 m"
assert time_format(93.462, digits=4) == "1.5577 m"
assert time_format(93.462, digits=4, align_unit=True) == "1.5577 m "
assert time_format(5607.72, digits=3) == "1.558 h"
assert time_format(5607.72, digits=4) == "1.5577 h"
assert time_format(5607.72, digits=4, align_unit=True) == "1.5577 h "
assert time_format(134585.28, digits=3) == "1.558 d"
assert time_format(134585.28, digits=4) == "1.5577 d"
assert time_format(134585.28, digits=4, align_unit=True) == "1.5577 d "
assert time_format(0.23334, digits=1) == "233.3 ms"
assert time_format(0.23334, digits=2) == "233.34 ms"
assert time_format(0.00023334, digits=1) == "233.3 us"
assert time_format(0.00023334, digits=2) == "233.34 us"
assert time_format(0.0009999, digits=1) == "999.9 us"
assert time_format(0.00099999, digits=1) == "1.0 ms"
assert time_format(0.00099999, digits=2) == "999.99 us"
assert time_format(0.000999999, digits=2) == "1.00 ms"
assert time_format(0.000999999, digits=3) == "999.999 us"
assert time_format(0.0009999999, digits=3) == "1.000 ms"
assert time_format(0.0009999999, digits=4) == "999.9999 us"
assert time_format(0.00099999999, digits=4) == "1.0000 ms"
assert time_format(0.00099999999, digits=5) == "999.99999 us"
assert time_format(0.000999999999, digits=5) == "1.00000 ms"
assert time_format(0.000999999999, digits=6) == "999.999999 us"
def test_get_gap_labels(self):
l0, l1, l2 = get_gap_labels(10)
assert l0 == 10
assert l1 == "{gap:>10}"
assert l2 == "{gap:>10.2f}"
l0, l1, l2 = get_gap_labels(1)
assert l0 == 10
assert l1 == "{gap:>10}"
assert l2 == "{gap:>10.2f}"
l0, l1, l2 = get_gap_labels(0.1)
assert l0 == 10
assert l1 == "{gap:>10}"
assert l2 == "{gap:>10.3f}"
l0, l1, l2 = get_gap_labels(0.01)
assert l0 == 10
assert l1 == "{gap:>10}"
assert l2 == "{gap:>10.4f}"
l0, l1, l2 = get_gap_labels(0.001)
assert l0 == 10
assert l1 == "{gap:>10}"
assert l2 == "{gap:>10.5f}"
l0, l1, l2 = get_gap_labels(0.0001)
assert l0 == 11
assert l1 == "{gap:>11}"
assert l2 == "{gap:>11.6f}"
l0, l1, l2 = get_gap_labels(0.00001)
assert l0 == 12
assert l1 == "{gap:>12}"
assert l2 == "{gap:>12.7f}"
l0, l1, l2 = get_gap_labels(0.000001, key="rgap")
assert l0 == 13
assert l1 == "{rgap:>13}"
assert l2 == "{rgap:>13.8f}"
l0, l1, l2 = get_gap_labels(0.0000001, key="agap", format="g")
assert l0 == 14
assert l1 == "{agap:>14}"
assert l2 == "{agap:>14.9g}"
def test_as_stream(self):
fid, fname = tempfile.mkstemp()
os.close(fid)
with as_stream(fname) as f:
assert not f.closed
assert hasattr(f, "write")
assert f.closed
fid, fname = tempfile.mkstemp()
os.close(fid)
with as_stream(u"" + fname) as f:
assert not f.closed
assert hasattr(f, "write")
assert f.closed
with open(fname) as f:
assert not f.closed
with as_stream(f) as f_:
assert f is f_
assert not f.closed
assert not f.closed
def test_get_default_args(self):
def f(a): # pragma:nocover
pass
assert get_default_args(f) == {}
def f(a, b): # pragma:nocover
pass
assert get_default_args(f) == {}
def f(*args): # pragma:nocover
pass
assert get_default_args(f) == {}
def f(**kwds): # pragma:nocover
pass
assert get_default_args(f) == {}
def f(*args, **kwds): # pragma:nocover
pass
assert get_default_args(f) == {}
def f(a, b=1): # pragma:nocover
pass
assert get_default_args(f) == {"b": 1}
def f(a=1): # pragma:nocover
pass
assert get_default_args(f) == {"a": 1}
def f(a=(1,)): # pragma:nocover
pass
assert get_default_args(f) == {"a": (1,)}
def test_get_keyword_docs(self):
if not yaml_available:
pytest.skip("yaml is not available")
import pybnb.solver
data = get_keyword_docs(pybnb.solver.Solver.solve.__doc__)
kwds = get_default_args(pybnb.solver.Solver.solve)
assert len(data) > 1
for key in data:
if "default" in data[key]:
assert data[key]["default"] == kwds[key]
assert "choices" not in data[key]
def f():
"""Something
Parameters
----------
junk1 : {"a", "b", 1}
Junk1 description.
junk2 : {"c", "d"}, optional
Junk2 description more than one
line. (default: "c")
junk3 : int
Junk3 description.
"""
data = get_keyword_docs(f.__doc__)
assert data == {
"junk1": {"choices": ["a", "b", 1], "doc": "Junk1 description."},
"junk2": {
"choices": ["c", "d"],
"default": "c",
"doc": "Junk2 description more than one line.",
},
"junk3": {"doc": "Junk3 description."},
}
def test_get_simple_logger(self):
log = get_simple_logger(console=False)
assert log.disabled
log = get_simple_logger()
assert not log.disabled
log = get_simple_logger(console=True)
assert not log.disabled
assert len(log.handlers) == 2
log.info("junk")
fid, fname = tempfile.mkstemp()
out = StringIO()
os.close(fid)
formatter = logging.Formatter("[%(levelname)s] %(message)s")
try:
log = get_simple_logger(
filename=fname,
stream=out,
console=True,
formatter=formatter,
level=logging.WARNING,
)
assert len(log.handlers) == 4
log.error("error_line")
log.warning("warning_line")
log.info("info_line")
log.debug("debug_line")
for handler in log.handlers:
handler.close()
with open(fname) as f:
lines = f.readlines()
assert len(lines) == 2
assert lines[0].strip() == "[ERROR] error_line"
assert lines[1].strip() == "[WARNING] warning_line"
del lines
lines = out.getvalue().splitlines()
assert lines[0].strip() == "[ERROR] error_line"
assert lines[1].strip() == "[WARNING] warning_line"
finally:
os.remove(fname)
def test_cast_to_float_or_int(self):
assert type(_cast_to_float_or_int(inf)) is float
assert type(_cast_to_float_or_int(nan)) is float
assert type(_cast_to_float_or_int(1.0)) is float
assert type(_cast_to_float_or_int(1.1)) is float
assert type(_cast_to_float_or_int(1)) is int
assert type(_cast_to_float_or_int(True)) is int
with pytest.raises(TypeError):
_cast_to_float_or_int(None)
if numpy_available:
numpy_types = []
numpy_types.append(("bool", int))
numpy_types.append(("bool_", float)) # edge case
numpy_types.append(("int_", int))
numpy_types.append(("intc", int))
numpy_types.append(("intp", int))
numpy_types.append(("int8", int))
numpy_types.append(("int16", int))
numpy_types.append(("int32", int))
numpy_types.append(("int64", int))
numpy_types.append(("uint8", int))
numpy_types.append(("uint16", int))
numpy_types.append(("uint32", int))
numpy_types.append(("uint64", int))
numpy_types.append(("float_", float))
numpy_types.append(("float16", float))
numpy_types.append(("float32", float))
numpy_types.append(("float64", float))
numpy_types.append(("float128", float))
numpy_types.append(("complex_", float))
numpy_types.append(("complex64", float))
numpy_types.append(("complex128", float))
for name, cast_type in numpy_types:
try:
type_ = getattr(numpy, name)
except: # pragma:nocover
continue
assert type(_cast_to_float_or_int(type_())) is cast_type | 0.553505 | 0.588475 |
import tensorflow as tf
from tensorflow.keras import Model, Input
from tensorflow.keras.layers import ConvLSTM2D, Conv2D
from tensorflow.keras.layers import MaxPooling2D, UpSampling2D, Concatenate
from tensorflow.keras.layers import BatchNormalization, Activation, Dropout, TimeDistributed
# Conv layer.
def conv_layer(x, filters, kernel_size=5, activation="relu", batch_norm=True):
    """Conv2D block: "same"-padded, stride-1 convolution, optional
    BatchNormalization, and Dropout(0.1).

    NOTE(review): the source this was recovered from had its indentation
    stripped, so whether the Dropout line was inside the `if batch_norm:`
    branch is ambiguous — confirm against the original file.
    """
    x = Conv2D(filters=filters,
               kernel_size=kernel_size,
               strides=1,
               padding="same",
               activation=activation)(x)
    if batch_norm:
        x = BatchNormalization()(x)
    x = Dropout(0.1)(x)
    return x
# ConvLSTM layer.
def convlstm_layer(x, filters, kernel_size=5, strides=1, activation="tanh", return_sequences=True, batch_norm=True):
    """Apply a "same"-padded ConvLSTM2D (dropout 0.1, recurrent dropout 0.15,
    forward direction) to `x`, optionally followed by BatchNormalization."""
    out = ConvLSTM2D(filters=filters,
                     kernel_size=kernel_size,
                     strides=strides,
                     padding="same",
                     activation=activation,
                     dropout=0.1,
                     recurrent_dropout=0.15,
                     go_backwards=False,
                     return_sequences=return_sequences)(x)
    return BatchNormalization()(out) if batch_norm else out
# ConvLSTM prediction model.
def convlstm_model(input_size,
                   scale,
                   input_frames,
                   final_filter,
                   final_activation,
                   dropout,
                   batch_norm):
    """Encoder/decoder ConvLSTM prediction model (no skip connections).

    Args:
        input_size: (height, width, channels) of a single input frame.
        scale: spatial scaling factor applied to height and width.
        input_frames: number of past frames fed to the network.
        final_filter: output channels of the final 1x1 convolution.
        final_activation: activation of the final convolution.
        dropout: unused; kept for backward compatibility of the signature
            (layer dropout rates are hard-coded in conv_layer/convlstm_layer).
        batch_norm: whether to apply BatchNormalization after each ConvLSTM
            layer (now actually forwarded; previously it was ignored).

    Returns:
        A keras Model mapping an (input_frames, H*scale, W*scale, C)
        sequence to a single (H*scale, W*scale, final_filter) frame.
    """
    scaled_input = (input_frames,
                    int(input_size[0] * scale),
                    int(input_size[1] * scale),
                    input_size[2])
    convlstm_input = Input(shape=scaled_input)

    # BUG FIX: the original called MaxPooling3D/UpSampling3D, which are not
    # imported in this module (NameError at build time). Per-frame 2D
    # pooling/upsampling via TimeDistributed is equivalent to
    # pool_size=(1, 2, 2)/size=(1, 2, 2) and matches convlstm_model_skip.

    # Encoder: 4 x (ConvLSTM -> per-frame 2x2 max-pool).
    convlstm1 = convlstm_layer(x=convlstm_input, filters=32, kernel_size=5,
                               batch_norm=batch_norm)
    pool1 = TimeDistributed(MaxPooling2D(pool_size=2, padding="same"))(convlstm1)
    convlstm2 = convlstm_layer(x=pool1, filters=32, kernel_size=5,
                               batch_norm=batch_norm)
    pool2 = TimeDistributed(MaxPooling2D(pool_size=2, padding="same"))(convlstm2)
    convlstm3 = convlstm_layer(x=pool2, filters=64, kernel_size=5,
                               batch_norm=batch_norm)
    pool3 = TimeDistributed(MaxPooling2D(pool_size=2, padding="same"))(convlstm3)
    convlstm4 = convlstm_layer(x=pool3, filters=64, kernel_size=5,
                               batch_norm=batch_norm)
    pool4 = TimeDistributed(MaxPooling2D(pool_size=2, padding="same"))(convlstm4)

    # Bottleneck.
    convlstm5 = convlstm_layer(x=pool4, filters=128, kernel_size=5,
                               batch_norm=batch_norm)

    # Decoder: 4 x (per-frame upsample -> ConvLSTM); the last ConvLSTM
    # collapses the time dimension (return_sequences=False).
    up5 = TimeDistributed(UpSampling2D(size=2))(convlstm5)
    convlstm6 = convlstm_layer(x=up5, filters=64, kernel_size=5,
                               batch_norm=batch_norm)
    up6 = TimeDistributed(UpSampling2D(size=2))(convlstm6)
    convlstm7 = convlstm_layer(x=up6, filters=64, kernel_size=5,
                               batch_norm=batch_norm)
    up7 = TimeDistributed(UpSampling2D(size=2))(convlstm7)
    convlstm8 = convlstm_layer(x=up7, filters=32, kernel_size=5,
                               batch_norm=batch_norm)
    up8 = TimeDistributed(UpSampling2D(size=2))(convlstm8)
    convlstm9 = convlstm_layer(x=up8, filters=32, kernel_size=5,
                               return_sequences=False, batch_norm=batch_norm)

    # 1x1 convolution to the requested number of output channels.
    conv10 = conv_layer(x=convlstm9, filters=final_filter, kernel_size=1,
                        activation=final_activation)

    return Model(inputs=convlstm_input, outputs=conv10)
def convlstm_model_skip(input_size,
                        scale,
                        input_frames,
                        final_filter,
                        final_activation,
                        batch_norm):
    """U-Net-style ConvLSTM model: 4-level encoder/decoder with skip
    connections (encoder activations concatenated into the decoder).

    :param input_size: (height, width, channels) of one input frame.
    :param scale: spatial scaling factor applied to height and width.
    :param input_frames: number of past frames in the input sequence.
    :param final_filter: output channels of the final 1x1 convolution.
    :param final_activation: activation of the final convolution.
    :param batch_norm: NOTE(review): accepted but never forwarded — every
        convlstm_layer call below uses its default (True); confirm intent.
    :return: keras Model producing a single output frame.
    """
    scaled_input = (input_frames, int(input_size[0] * scale), int(input_size[1] * scale), input_size[2])
    convlstm_input = Input(shape=(scaled_input))
    # Encoder: ConvLSTM + per-frame 2x2 max-pooling; kernel size shrinks
    # with depth (7 -> 7 -> 5 -> 5 -> 3).
    convlstm1 = convlstm_layer(x=convlstm_input, filters=32, kernel_size=7)
    pool1 = TimeDistributed(MaxPooling2D(pool_size=2, padding="same"))(convlstm1)
    convlstm2 = convlstm_layer(x=pool1, filters=32, kernel_size=7)
    pool2 = TimeDistributed(MaxPooling2D(pool_size=2, padding="same"))(convlstm2)
    convlstm3 = convlstm_layer(x=pool2, filters=64, kernel_size=5)
    pool3 = TimeDistributed(MaxPooling2D(pool_size=2, padding="same"))(convlstm3)
    convlstm4 = convlstm_layer(x=pool3, filters=64, kernel_size=5)
    pool4 = TimeDistributed(MaxPooling2D(pool_size=2, padding="same"))(convlstm4)
    # Bottleneck: two ConvLSTM layers.
    convlstm5_1 = convlstm_layer(x=pool4, filters=128, kernel_size=3)
    convlstm5_2 = convlstm_layer(x=convlstm5_1, filters=128, kernel_size=3)
    # Decoder: per-frame upsampling, ConvLSTM, then concatenation with the
    # matching encoder activation (skip connection) along the channel axis.
    up5 = TimeDistributed(UpSampling2D(size=2))(convlstm5_2)
    convlstm6 = convlstm_layer(x=up5, filters=64, kernel_size=5)
    concat6 = Concatenate(axis=-1)([convlstm4, convlstm6])
    up6 = TimeDistributed(UpSampling2D(size=2))(concat6)
    convlstm7 = convlstm_layer(x=up6, filters=64, kernel_size=5)
    concat7 = Concatenate(axis=-1)([convlstm3, convlstm7])
    up7 = TimeDistributed(UpSampling2D(size=2))(concat7)
    convlstm8 = convlstm_layer(x=up7, filters=32, kernel_size=7)
    concat8 = Concatenate(axis=-1)([convlstm2, convlstm8])
    up8 = TimeDistributed(UpSampling2D(size=2))(concat8)
    convlstm9_1 = convlstm_layer(x=up8, filters=32, kernel_size=7)
    concat9 = Concatenate(axis=-1)([convlstm1, convlstm9_1])
    # Final ConvLSTM collapses the time axis (return_sequences=False).
    convlstm9_2 = convlstm_layer(x=concat9, filters=32, kernel_size=7, return_sequences=False)
    conv10 = conv_layer(x=convlstm9_2, filters=final_filter, kernel_size=1, activation=final_activation)
    convlstm_output = conv10
    model = Model(inputs=convlstm_input, outputs=convlstm_output)
    return model
def convlstm_model_simple(input_size,
                          scale,
                          input_frames,
                          final_filter,
                          final_activation,
                          batch_norm):
    """Plain 5-layer ConvLSTM stack (no pooling, no skip connections).

    `batch_norm` is accepted for signature parity with the other builders
    but is not forwarded; convlstm_layer applies its own default (True).
    """
    frame_shape = (input_frames,
                   int(input_size[0] * scale),
                   int(input_size[1] * scale),
                   input_size[2])
    net_input = Input(shape=frame_shape)

    # Four sequence-to-sequence ConvLSTM layers...
    net = net_input
    for _ in range(4):
        net = convlstm_layer(x=net, filters=64, kernel_size=5)
    # ...then one that collapses the time axis.
    net = convlstm_layer(x=net, filters=64, kernel_size=5, return_sequences=False)
    # 1x1 convolution to the requested number of output channels.
    net = conv_layer(x=net, filters=final_filter, kernel_size=1,
                     activation=final_activation)

    return Model(inputs=net_input, outputs=net)
# Get ConvLSTM model.
def get_convlstm_skip():
params = {'input_size': (288, 288, 1),
'scale': 0.5,
'input_frames': 4,
'final_filter': 1,
'final_activation': "sigmoid",
'batch_norm': True}
model = convlstm_model_skip(**params)
return model
def get_convlstm_simple():
params = {'input_size': (288, 288, 1),
'scale': 0.5,
'input_frames': 4,
'final_filter': 1,
'final_activation': "sigmoid",
'batch_norm': True}
model = convlstm_model_simple(**params)
return model
def get_convlstm():
params = {'input_size': (288, 288, 2),
'scale': 0.5,
'input_frames': 4,
'final_filter': 1,
'final_activation': "tanh",
'dropout': 0.0,
'batch_norm': True}
model = convlstm_model(**params)
return model | modules/old/model_convlstm.py | import tensorflow as tf
from tensorflow.keras import Model, Input
from tensorflow.keras.layers import ConvLSTM2D, Conv2D
from tensorflow.keras.layers import MaxPooling2D, UpSampling2D, Concatenate
from tensorflow.keras.layers import BatchNormalization, Activation, Dropout, TimeDistributed
# Conv layer.
def conv_layer(x, filters, kernel_size=5, activation="relu", batch_norm=True):
x = Conv2D(filters=filters,
kernel_size=kernel_size,
strides=1,
padding="same",
activation=activation)(x)
if batch_norm:
x = BatchNormalization()(x)
x = Dropout(0.1)(x)
return x
# ConvLSTM layer.
def convlstm_layer(x, filters, kernel_size=5, strides=1, activation="tanh", return_sequences=True, batch_norm=True):
x = ConvLSTM2D(filters=filters,
kernel_size=kernel_size,
strides=strides,
padding="same",
activation=activation,
dropout=0.1,
recurrent_dropout=0.15,
go_backwards=False,
return_sequences=return_sequences)(x)
if batch_norm:
x = BatchNormalization()(x)
return x
# ConvLSTM prediction model.
def convlstm_model(input_size,
scale,
input_frames,
final_filter,
final_activation,
dropout,
batch_norm):
scaled_input = (input_frames, int(input_size[0] * scale), int(input_size[1] * scale), input_size[2])
convlstm_input = Input(shape=(scaled_input))
convlstm1 = convlstm_layer(x=convlstm_input, filters=32, kernel_size=5)
pool1 = MaxPooling3D(pool_size=(1,2,2), padding="same")(convlstm1)
convlstm2 = convlstm_layer(x=pool1, filters=32, kernel_size=5)
pool2 = MaxPooling3D(pool_size=(1,2,2), padding="same")(convlstm2)
convlstm3 = convlstm_layer(x=pool2, filters=64, kernel_size=5)
pool3 = MaxPooling3D(pool_size=(1,2,2), padding="same")(convlstm3)
convlstm4 = convlstm_layer(x=pool3, filters=64, kernel_size=5)
pool4 = MaxPooling3D(pool_size=(1,2,2), padding="same")(convlstm4)
convlstm5 = convlstm_layer(x=pool4, filters=128, kernel_size=5)
up5 = UpSampling3D(size=(1,2,2))(convlstm5)
convlstm6 = convlstm_layer(x=up5, filters=64, kernel_size=5)
up6 = UpSampling3D(size=(1,2,2))(convlstm6)
convlstm7 = convlstm_layer(x=up6, filters=64, kernel_size=5)
up7 = UpSampling3D(size=(1,2,2))(convlstm7)
convlstm8 = convlstm_layer(x=up7, filters=32, kernel_size=5)
up8 = UpSampling3D(size=(1,2,2))(convlstm8)
convlstm9 = convlstm_layer(x=up8, filters=32, kernel_size=5, return_sequences=False)
conv10 = conv_layer(x=convlstm9, filters=final_filter, kernel_size=1, activation=final_activation)
convlstm_output = conv10
model = Model(inputs=convlstm_input, outputs=convlstm_output)
return model
def convlstm_model_skip(input_size,
scale,
input_frames,
final_filter,
final_activation,
batch_norm):
scaled_input = (input_frames, int(input_size[0] * scale), int(input_size[1] * scale), input_size[2])
convlstm_input = Input(shape=(scaled_input))
convlstm1 = convlstm_layer(x=convlstm_input, filters=32, kernel_size=7)
pool1 = TimeDistributed(MaxPooling2D(pool_size=2, padding="same"))(convlstm1)
convlstm2 = convlstm_layer(x=pool1, filters=32, kernel_size=7)
pool2 = TimeDistributed(MaxPooling2D(pool_size=2, padding="same"))(convlstm2)
convlstm3 = convlstm_layer(x=pool2, filters=64, kernel_size=5)
pool3 = TimeDistributed(MaxPooling2D(pool_size=2, padding="same"))(convlstm3)
convlstm4 = convlstm_layer(x=pool3, filters=64, kernel_size=5)
pool4 = TimeDistributed(MaxPooling2D(pool_size=2, padding="same"))(convlstm4)
convlstm5_1 = convlstm_layer(x=pool4, filters=128, kernel_size=3)
convlstm5_2 = convlstm_layer(x=convlstm5_1, filters=128, kernel_size=3)
up5 = TimeDistributed(UpSampling2D(size=2))(convlstm5_2)
convlstm6 = convlstm_layer(x=up5, filters=64, kernel_size=5)
concat6 = Concatenate(axis=-1)([convlstm4, convlstm6])
up6 = TimeDistributed(UpSampling2D(size=2))(concat6)
convlstm7 = convlstm_layer(x=up6, filters=64, kernel_size=5)
concat7 = Concatenate(axis=-1)([convlstm3, convlstm7])
up7 = TimeDistributed(UpSampling2D(size=2))(concat7)
convlstm8 = convlstm_layer(x=up7, filters=32, kernel_size=7)
concat8 = Concatenate(axis=-1)([convlstm2, convlstm8])
up8 = TimeDistributed(UpSampling2D(size=2))(concat8)
convlstm9_1 = convlstm_layer(x=up8, filters=32, kernel_size=7)
concat9 = Concatenate(axis=-1)([convlstm1, convlstm9_1])
convlstm9_2 = convlstm_layer(x=concat9, filters=32, kernel_size=7, return_sequences=False)
conv10 = conv_layer(x=convlstm9_2, filters=final_filter, kernel_size=1, activation=final_activation)
convlstm_output = conv10
model = Model(inputs=convlstm_input, outputs=convlstm_output)
return model
def convlstm_model_simple(input_size,
scale,
input_frames,
final_filter,
final_activation,
batch_norm):
scaled_input = (input_frames, int(input_size[0] * scale), int(input_size[1] * scale), input_size[2])
convlstm_input = Input(shape=(scaled_input))
convlstm1 = convlstm_layer(x=convlstm_input, filters=64, kernel_size=5)
convlstm2 = convlstm_layer(x=convlstm1, filters=64, kernel_size=5)
convlstm3 = convlstm_layer(x=convlstm2, filters=64, kernel_size=5)
convlstm4 = convlstm_layer(x=convlstm3, filters=64, kernel_size=5)
convlstm5 = convlstm_layer(x=convlstm4, filters=64, kernel_size=5, return_sequences=False)
conv6 = conv_layer(x=convlstm5, filters=final_filter, kernel_size=1, activation=final_activation)
convlstm_output = conv6
model = Model(inputs=convlstm_input, outputs=convlstm_output)
return model
# Get ConvLSTM model.
def get_convlstm_skip():
params = {'input_size': (288, 288, 1),
'scale': 0.5,
'input_frames': 4,
'final_filter': 1,
'final_activation': "sigmoid",
'batch_norm': True}
model = convlstm_model_skip(**params)
return model
def get_convlstm_simple():
params = {'input_size': (288, 288, 1),
'scale': 0.5,
'input_frames': 4,
'final_filter': 1,
'final_activation': "sigmoid",
'batch_norm': True}
model = convlstm_model_simple(**params)
return model
def get_convlstm():
params = {'input_size': (288, 288, 2),
'scale': 0.5,
'input_frames': 4,
'final_filter': 1,
'final_activation': "tanh",
'dropout': 0.0,
'batch_norm': True}
model = convlstm_model(**params)
return model | 0.871092 | 0.654343 |
from __future__ import absolute_import
from __future__ import print_function
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D
import os
import numpy as np
from trainer.environment import create_trainer_environment
NUM_CLASSES = 10
EPOCHS = 10
NUM_PREDICTIONS = 20
MODEL_NAME = 'keras_cifar10_trained_model.h5'
# the trainer environment contains useful information about
env = create_trainer_environment()
print('creating SageMaker trainer environment:\n%s' % str(env))
# getting the hyperparameters
batch_size = env.hyperparameters.get('batch_size', object_type=int)
data_augmentation = env.hyperparameters.get('data_augmentation', default=True, object_type=bool)
learning_rate = env.hyperparameters.get('learning_rate', default=.0001, object_type=float)
width_shift_range = env.hyperparameters.get('width_shift_range', object_type=float)
height_shift_range = env.hyperparameters.get('height_shift_range', object_type=float)
EPOCHS = env.hyperparameters.get('epochs', default=10, object_type=int)
# reading data from train and test channels
train_data = np.load(os.path.join(env.channel_dirs['train'], 'cifar-10-npz-compressed.npz'))
(x_train, y_train) = train_data['x'], train_data['y']
test_data = np.load(os.path.join(env.channel_dirs['test'], 'cifar-10-npz-compressed.npz'))
(x_test, y_test) = test_data['x'], test_data['y']
model = Sequential()
model.add(Conv2D(32, (3, 3), padding='same', input_shape=x_train.shape[1:]))
model.add(Activation('relu'))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(NUM_CLASSES))
model.add(Activation('softmax'))
# initiate RMSprop optimizer
opt = keras.optimizers.RMSprop(lr=learning_rate, decay=1e-6)
# Let's train the model using RMSprop
model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
if not data_augmentation:
print('Not using data augmentation.')
model.fit(x_train, y_train, batch_size=batch_size, epochs=EPOCHS, validation_data=(x_test, y_test), shuffle=True)
else:
print('Using real-time data augmentation.')
# This will do preprocessing and real time data augmentation:
data_generator = ImageDataGenerator(
featurewise_center=False, # set input mean to 0 over the dataset
samplewise_center=False, # set each sample mean to 0
featurewise_std_normalization=False, # divide inputs by std of the dataset
samplewise_std_normalization=False, # divide each input by its std
zca_whitening=False, # apply ZCA whitening
rotation_range=0, # randomly rotate images in the range (degrees, 0 to 180)
width_shift_range=width_shift_range, # randomly shift images horizontally (fraction of total width)
height_shift_range=height_shift_range, # randomly shift images vertically (fraction of total height)
horizontal_flip=True, # randomly flip images
vertical_flip=False) # randomly flip images
# Compute quantities required for feature-wise normalization
# (std, mean, and principal components if ZCA whitening is applied).
data_generator.fit(x_train)
# Fit the model on the batches generated by data_generator.flow().
data_generator_flow = data_generator.flow(x_train, y_train, batch_size=batch_size)
model.fit_generator(data_generator_flow, epochs=EPOCHS, validation_data=(x_test, y_test), workers=4)
# Save model and weights
model_path = os.path.join(env.model_dir, MODEL_NAME)
model.save(model_path)
print('Saved trained model at %s ' % model_path)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1]) | hyperparameter_tuning/keras_bring_your_own/trainer/start.py | from __future__ import absolute_import
from __future__ import print_function
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D
import os
import numpy as np
from trainer.environment import create_trainer_environment
NUM_CLASSES = 10
EPOCHS = 10
NUM_PREDICTIONS = 20
MODEL_NAME = 'keras_cifar10_trained_model.h5'
# the trainer environment contains useful information about
env = create_trainer_environment()
print('creating SageMaker trainer environment:\n%s' % str(env))
# getting the hyperparameters
batch_size = env.hyperparameters.get('batch_size', object_type=int)
data_augmentation = env.hyperparameters.get('data_augmentation', default=True, object_type=bool)
learning_rate = env.hyperparameters.get('learning_rate', default=.0001, object_type=float)
width_shift_range = env.hyperparameters.get('width_shift_range', object_type=float)
height_shift_range = env.hyperparameters.get('height_shift_range', object_type=float)
EPOCHS = env.hyperparameters.get('epochs', default=10, object_type=int)
# reading data from train and test channels
train_data = np.load(os.path.join(env.channel_dirs['train'], 'cifar-10-npz-compressed.npz'))
(x_train, y_train) = train_data['x'], train_data['y']
test_data = np.load(os.path.join(env.channel_dirs['test'], 'cifar-10-npz-compressed.npz'))
(x_test, y_test) = test_data['x'], test_data['y']
model = Sequential()
model.add(Conv2D(32, (3, 3), padding='same', input_shape=x_train.shape[1:]))
model.add(Activation('relu'))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(NUM_CLASSES))
model.add(Activation('softmax'))
# initiate RMSprop optimizer
opt = keras.optimizers.RMSprop(lr=learning_rate, decay=1e-6)
# Let's train the model using RMSprop
model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
if not data_augmentation:
print('Not using data augmentation.')
model.fit(x_train, y_train, batch_size=batch_size, epochs=EPOCHS, validation_data=(x_test, y_test), shuffle=True)
else:
print('Using real-time data augmentation.')
# This will do preprocessing and real time data augmentation:
data_generator = ImageDataGenerator(
featurewise_center=False, # set input mean to 0 over the dataset
samplewise_center=False, # set each sample mean to 0
featurewise_std_normalization=False, # divide inputs by std of the dataset
samplewise_std_normalization=False, # divide each input by its std
zca_whitening=False, # apply ZCA whitening
rotation_range=0, # randomly rotate images in the range (degrees, 0 to 180)
width_shift_range=width_shift_range, # randomly shift images horizontally (fraction of total width)
height_shift_range=height_shift_range, # randomly shift images vertically (fraction of total height)
horizontal_flip=True, # randomly flip images
vertical_flip=False) # randomly flip images
# Compute quantities required for feature-wise normalization
# (std, mean, and principal components if ZCA whitening is applied).
data_generator.fit(x_train)
# Fit the model on the batches generated by data_generator.flow().
data_generator_flow = data_generator.flow(x_train, y_train, batch_size=batch_size)
model.fit_generator(data_generator_flow, epochs=EPOCHS, validation_data=(x_test, y_test), workers=4)
# Save model and weights
model_path = os.path.join(env.model_dir, MODEL_NAME)
model.save(model_path)
print('Saved trained model at %s ' % model_path)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1]) | 0.899348 | 0.357511 |
import datetime
from bs4 import BeautifulSoup
from flask import current_app, url_for
from flask_sqlalchemy import BaseQuery
from sqlalchemy_searchable import SearchQueryMixin, make_searchable
from sqlalchemy_utils.types import TSVectorType
from app.extensions import db
from lib.model_utils import GetOr404Mixin, GetOrCreateMixin
make_searchable()
class NoteQuery(BaseQuery, SearchQueryMixin):
pass
def sanitize(content):
soup = BeautifulSoup(content, 'html.parser')
nodes = soup.recursiveChildGenerator()
text_nodes = [e for e in nodes if isinstance(e, str)]
return ''.join(text_nodes)
tags = db.Table(
'note_tag',
db.Column('tag.id', db.Integer, db.ForeignKey('tag.id')),
db.Column('note.id', db.Integer, db.ForeignKey('note.id')))
class Note(db.Model, GetOr404Mixin, GetOrCreateMixin):
query_class = NoteQuery
id = db.Column(db.Integer, primary_key=True)
content = db.Column(db.Text)
created = db.Column(db.DateTime)
updated = db.Column(db.DateTime)
is_email = db.Column(db.Boolean)
history = db.relationship('NoteHistory', backref='note', cascade='delete')
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
author = db.relationship('User', backref='notes')
search_vector = db.Column(TSVectorType('content'))
tags = db.relationship('Tag', backref='notes', secondary=tags)
class VersionDoesNotExist(Exception):
def __init__(self, note, version):
super(Note.VersionDoesNotExist, self).__init__(
'Note version {} not found in history of note {}'.format(
version,
note.id))
@classmethod
def create(cls, content, author, is_email=False):
note = Note(
content=sanitize(content),
author=author,
is_email=is_email)
note.created = datetime.datetime.utcnow()
note.updated = note.created
db.session.add(note)
db.session.commit()
return note
def update(self, content):
now = datetime.datetime.utcnow()
version = NoteHistory(self, now)
db.session.add(version)
self.history.append(version)
self.content = sanitize(content)
self.updated = now
db.session.add(self)
db.session.commit()
def revert(self, version=None):
if version is None:
version = len(self.history) - 1
versions = {rev.version: rev for rev in self.history}
if version not in versions:
raise Note.VersionDoesNotExist(self, version)
self.update(versions[version].content)
def delete(self):
db.session.delete(self)
db.session.commit()
def add_tag(self, tag_name):
if tag_name and not self.has_tag(tag_name):
self.tags.append(Tag(name=sanitize(tag_name), author=self.author))
db.session.add(self)
db.session.commit()
def has_tag(self, tag_name):
return Note.query.join(tags).filter(
Note.id == self.id,
Tag.author == self.author,
Tag.name == tag_name).count() > 0
def remove_tag(self, tag_name):
if self.has_tag(tag_name):
tag = [tag for tag in self.tags if tag.name == tag_name]
self.tags.remove(tag[0])
db.session.add(self)
db.session.commit()
@classmethod
def search(cls, term, user):
return Note.query.filter(Note.author == user).search(
term, sort=True)
@property
def rendered(self):
markdown = current_app.jinja_env.filters['markdown']
return markdown(self.content)
@property
def truncated(self):
truncate = current_app.jinja_env.filters['truncate_html']
return truncate(self.rendered, 250, end=" \u2026")
@property
def edit_url(self):
return url_for('notes.edit', id=self.id)
@property
def just_updated(self):
undo_timeout = (
datetime.datetime.utcnow() - datetime.timedelta(minutes=2))
return bool(self.history and self.updated > undo_timeout)
@property
def undo_url(self):
return url_for('notes.undo', id=self.id)
@property
def timestamp(self):
return self.updated.strftime('%Y%m%d%H%M%S.%f')
@property
def friendly_updated(self):
humanize = current_app.jinja_env.filters['humanize']
return humanize(self.updated)
def json(self):
return {
'id': self.id,
'truncated': self.truncated,
'edit_url': self.edit_url,
'content': self.content,
'just_updated': self.just_updated,
'undo_url': self.undo_url,
'timestamp': self.timestamp,
'friendly_updated': self.friendly_updated,
'is_email': self.is_email,
'tags': [{
'name': tag.name,
'url': tag.url} for tag in self.tags]
}
class NoteHistory(db.Model):
id = db.Column(db.Integer, primary_key=True)
note_id = db.Column(db.Integer, db.ForeignKey('note.id'))
version = db.Column(db.Integer)
content = db.Column(db.Text)
created = db.Column(db.DateTime)
def __init__(self, note, now):
self.note = note
self.created = now
self.content = note.content
self.version = 0
versions = [rev.version for rev in note.history]
if versions:
self.version = max(0, *versions) + 1
class Tag(db.Model, GetOr404Mixin):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String)
namespace = db.Column(db.String)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
author = db.relationship('User', backref='tags')
@property
def url(self):
return url_for('notes.by_tag', tag=self.name)
@property
def usage_count(self):
return len(self.notes) | app/blueprints/notes/models.py | import datetime
from bs4 import BeautifulSoup
from flask import current_app, url_for
from flask_sqlalchemy import BaseQuery
from sqlalchemy_searchable import SearchQueryMixin, make_searchable
from sqlalchemy_utils.types import TSVectorType
from app.extensions import db
from lib.model_utils import GetOr404Mixin, GetOrCreateMixin
make_searchable()
class NoteQuery(BaseQuery, SearchQueryMixin):
pass
def sanitize(content):
soup = BeautifulSoup(content, 'html.parser')
nodes = soup.recursiveChildGenerator()
text_nodes = [e for e in nodes if isinstance(e, str)]
return ''.join(text_nodes)
tags = db.Table(
'note_tag',
db.Column('tag.id', db.Integer, db.ForeignKey('tag.id')),
db.Column('note.id', db.Integer, db.ForeignKey('note.id')))
class Note(db.Model, GetOr404Mixin, GetOrCreateMixin):
query_class = NoteQuery
id = db.Column(db.Integer, primary_key=True)
content = db.Column(db.Text)
created = db.Column(db.DateTime)
updated = db.Column(db.DateTime)
is_email = db.Column(db.Boolean)
history = db.relationship('NoteHistory', backref='note', cascade='delete')
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
author = db.relationship('User', backref='notes')
search_vector = db.Column(TSVectorType('content'))
tags = db.relationship('Tag', backref='notes', secondary=tags)
class VersionDoesNotExist(Exception):
def __init__(self, note, version):
super(Note.VersionDoesNotExist, self).__init__(
'Note version {} not found in history of note {}'.format(
version,
note.id))
@classmethod
def create(cls, content, author, is_email=False):
note = Note(
content=sanitize(content),
author=author,
is_email=is_email)
note.created = datetime.datetime.utcnow()
note.updated = note.created
db.session.add(note)
db.session.commit()
return note
def update(self, content):
now = datetime.datetime.utcnow()
version = NoteHistory(self, now)
db.session.add(version)
self.history.append(version)
self.content = sanitize(content)
self.updated = now
db.session.add(self)
db.session.commit()
def revert(self, version=None):
if version is None:
version = len(self.history) - 1
versions = {rev.version: rev for rev in self.history}
if version not in versions:
raise Note.VersionDoesNotExist(self, version)
self.update(versions[version].content)
def delete(self):
db.session.delete(self)
db.session.commit()
def add_tag(self, tag_name):
if tag_name and not self.has_tag(tag_name):
self.tags.append(Tag(name=sanitize(tag_name), author=self.author))
db.session.add(self)
db.session.commit()
def has_tag(self, tag_name):
return Note.query.join(tags).filter(
Note.id == self.id,
Tag.author == self.author,
Tag.name == tag_name).count() > 0
def remove_tag(self, tag_name):
if self.has_tag(tag_name):
tag = [tag for tag in self.tags if tag.name == tag_name]
self.tags.remove(tag[0])
db.session.add(self)
db.session.commit()
@classmethod
def search(cls, term, user):
return Note.query.filter(Note.author == user).search(
term, sort=True)
@property
def rendered(self):
markdown = current_app.jinja_env.filters['markdown']
return markdown(self.content)
@property
def truncated(self):
truncate = current_app.jinja_env.filters['truncate_html']
return truncate(self.rendered, 250, end=" \u2026")
@property
def edit_url(self):
return url_for('notes.edit', id=self.id)
@property
def just_updated(self):
undo_timeout = (
datetime.datetime.utcnow() - datetime.timedelta(minutes=2))
return bool(self.history and self.updated > undo_timeout)
@property
def undo_url(self):
return url_for('notes.undo', id=self.id)
@property
def timestamp(self):
return self.updated.strftime('%Y%m%d%H%M%S.%f')
@property
def friendly_updated(self):
humanize = current_app.jinja_env.filters['humanize']
return humanize(self.updated)
def json(self):
return {
'id': self.id,
'truncated': self.truncated,
'edit_url': self.edit_url,
'content': self.content,
'just_updated': self.just_updated,
'undo_url': self.undo_url,
'timestamp': self.timestamp,
'friendly_updated': self.friendly_updated,
'is_email': self.is_email,
'tags': [{
'name': tag.name,
'url': tag.url} for tag in self.tags]
}
class NoteHistory(db.Model):
id = db.Column(db.Integer, primary_key=True)
note_id = db.Column(db.Integer, db.ForeignKey('note.id'))
version = db.Column(db.Integer)
content = db.Column(db.Text)
created = db.Column(db.DateTime)
def __init__(self, note, now):
self.note = note
self.created = now
self.content = note.content
self.version = 0
versions = [rev.version for rev in note.history]
if versions:
self.version = max(0, *versions) + 1
class Tag(db.Model, GetOr404Mixin):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String)
namespace = db.Column(db.String)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
author = db.relationship('User', backref='tags')
@property
def url(self):
return url_for('notes.by_tag', tag=self.name)
@property
def usage_count(self):
return len(self.notes) | 0.547706 | 0.107204 |
if __name__ == "__main__":
import sys
sys.path.append("..")
del sys
from fenalib.assert_utils import assert_type
from fenalib.lexical_token import Token
from fenalib.nodes import CmdNode, StmtNode, ProgramNode
class TreePostfixTraversal:
"""
Visitor from https://ruslanspivak.com/
"""
def _visit(self, method_start, node, **kwargs):
"""
Visits the specified node with the method starter
Args:
method_start (str): The starting part of the method name
eg. method_start="visit", method_name=visit_<node>
node (class type that inherits from Node)
Returns:
Whatever is gotten with the visitor method
"""
assert_type(method_start, str)
class_name = type(node).__name__
method_name = f"{method_start}_{class_name}"
visitor_method = getattr(self, method_name, "invalid")
if visitor_method == "invalid":
raise NotImplementedError(f"Invalid method: {method_name}")
return visitor_method(node, **kwargs)
class NodeVisitor(TreePostfixTraversal):
"""
Uses the visit method to visit any statment node
"""
def visit(self, node, **kwargs):
"""
Visits the specified node
Returns:
Whatever is gotten with the visitor method
"""
assert_type(node, StmtNode, Token, ProgramNode)
return self._visit("visit", node, **kwargs)
class NodeBuilder(TreePostfixTraversal):
"""
Uses the build method to traverse the tree to build fena commands
"""
def build(self, node, **kwargs):
"""
Builds the specified node
Args:
node (class type that inherits from Node)
Returns:
str: Whatever is gotten with the build method
"""
assert_type(node, CmdNode, Token, str)
return self._visit("build", node, **kwargs)
def iter_build(self, nodes, join_value=None):
"""
Args:
nodes (iterable object)
join_value (str or None)
Returns:
generator (if join_value is None): generator to map all nodes to the build method
str (if join_value is str): Full string of built objects from its nodes joined by the join_value
"""
assert_type(join_value, str, optional=True)
build_generator = map(self.build, nodes)
if join_value is None:
return build_generator
return join_value.join(build_generator) | fenalib/node_visitors.py | if __name__ == "__main__":
import sys
sys.path.append("..")
del sys
from fenalib.assert_utils import assert_type
from fenalib.lexical_token import Token
from fenalib.nodes import CmdNode, StmtNode, ProgramNode
class TreePostfixTraversal:
"""
Visitor from https://ruslanspivak.com/
"""
def _visit(self, method_start, node, **kwargs):
"""
Visits the specified node with the method starter
Args:
method_start (str): The starting part of the method name
eg. method_start="visit", method_name=visit_<node>
node (class type that inherits from Node)
Returns:
Whatever is gotten with the visitor method
"""
assert_type(method_start, str)
class_name = type(node).__name__
method_name = f"{method_start}_{class_name}"
visitor_method = getattr(self, method_name, "invalid")
if visitor_method == "invalid":
raise NotImplementedError(f"Invalid method: {method_name}")
return visitor_method(node, **kwargs)
class NodeVisitor(TreePostfixTraversal):
"""
Uses the visit method to visit any statment node
"""
def visit(self, node, **kwargs):
"""
Visits the specified node
Returns:
Whatever is gotten with the visitor method
"""
assert_type(node, StmtNode, Token, ProgramNode)
return self._visit("visit", node, **kwargs)
class NodeBuilder(TreePostfixTraversal):
"""
Uses the build method to traverse the tree to build fena commands
"""
def build(self, node, **kwargs):
"""
Builds the specified node
Args:
node (class type that inherits from Node)
Returns:
str: Whatever is gotten with the build method
"""
assert_type(node, CmdNode, Token, str)
return self._visit("build", node, **kwargs)
def iter_build(self, nodes, join_value=None):
"""
Args:
nodes (iterable object)
join_value (str or None)
Returns:
generator (if join_value is None): generator to map all nodes to the build method
str (if join_value is str): Full string of built objects from its nodes joined by the join_value
"""
assert_type(join_value, str, optional=True)
build_generator = map(self.build, nodes)
if join_value is None:
return build_generator
return join_value.join(build_generator) | 0.523664 | 0.345326 |
import scipy.misc
import random
import os
train_set = []
test_set = []
"""
Load set of images in a directory.
This will automatically allocate a
random 20% of the images as a test set
data_dir: path to directory containing images
"""
def load_dataset(data_dir):
img_files = os.listdir(data_dir)
test_size = int(len(img_files) * 0.2) # rate of test in all
test_indices = random.sample(range(len(img_files)), test_size)
for i in range(len(img_files)):
# set input image list of test and train
if i in test_indices:
test_set.append(os.path.join(data_dir, img_files[i]))
else:
train_set.append(os.path.join(data_dir, img_files[i]))
return
"""
Get test set from the loaded dataset
size (optional): if this argument is chosen,
each element of the test set will be cropped
to the first (size x size) pixels in the image.
returns the test set of your data
"""
def get_test_set(original_size, shrunk_size):
y_imgs = []
x_imgs = []
for i in range(len(test_set)):
img = scipy.misc.imread(test_set[i])
img = crop_center(img, original_size, original_size) # get cropped image
x_img = scipy.misc.imresize(img, (shrunk_size, shrunk_size))
y_imgs.append(img)
x_imgs.append(x_img)
return x_imgs, y_imgs # cropped image in y_imgs, cropped and resized image in x_imgs
"""
Get a batch of images from the training
set of images.
batch_size: size of the batch
original_size: size for target images
shrunk_size: size for shrunk images
returns x,y where:
-x is the input set of shape [batch_size, shrunk_size, shrunk_size, channels]
-y is the target set of shape [batch_size, original_size,original_size, channels]
"""
def get_batch(batch_size, original_size, shrunk_size):
x = []
y = []
img_indices = random.sample(range(len(train_set)), batch_size)
for i in range(len(img_indices)):
index = img_indices[i]
img = scipy.misc.imread(train_set[index])
img = crop_center(img, original_size, original_size)
x_img = scipy.misc.imresize(img, (shrunk_size, shrunk_size))
x.append(x_img)
y.append(img)
return x, y
"""
Simple method to crop center of image
img: image to crop
cropx: width of crop
cropy: height of crop
returns cropped image
"""
def crop_center(img, cropx, cropy):
y, x, _ = img.shape
startx = x // 2 - (cropx // 2)
starty = y // 2 - (cropy // 2)
return img[starty:starty + cropy, startx:startx + cropx] | data.py |
import scipy.misc
import random
import os
train_set = []
test_set = []
"""
Load set of images in a directory.
This will automatically allocate a
random 20% of the images as a test set
data_dir: path to directory containing images
"""
def load_dataset(data_dir):
img_files = os.listdir(data_dir)
test_size = int(len(img_files) * 0.2) # rate of test in all
test_indices = random.sample(range(len(img_files)), test_size)
for i in range(len(img_files)):
# set input image list of test and train
if i in test_indices:
test_set.append(os.path.join(data_dir, img_files[i]))
else:
train_set.append(os.path.join(data_dir, img_files[i]))
return
"""
Get test set from the loaded dataset
size (optional): if this argument is chosen,
each element of the test set will be cropped
to the first (size x size) pixels in the image.
returns the test set of your data
"""
def get_test_set(original_size, shrunk_size):
    """Build the evaluation pairs from the module-level test_set list.

    original_size: side length of the center crop taken from each image
    shrunk_size: side length the crop is resized down to
    returns (x_imgs, y_imgs): shrunk inputs and full-size cropped targets
    """
    y_imgs = []
    x_imgs = []
    for i in range(len(test_set)):
        # NOTE(review): scipy.misc.imread/imresize were removed from modern
        # SciPy; this presumably targets an older SciPy + PIL setup — confirm.
        img = scipy.misc.imread(test_set[i])
        img = crop_center(img, original_size, original_size) # get cropped image
        x_img = scipy.misc.imresize(img, (shrunk_size, shrunk_size))
        y_imgs.append(img)
        x_imgs.append(x_img)
    return x_imgs, y_imgs # cropped image in y_imgs, cropped and resized image in x_imgs
"""
Get a batch of images from the training
set of images.
batch_size: size of the batch
original_size: size for target images
shrunk_size: size for shrunk images
returns x,y where:
-x is the input set of shape [batch_size, shrunk_size, shrunk_size, channels]
-y is the target set of shape [batch_size, original_size,original_size, channels]
"""
def get_batch(batch_size, original_size, shrunk_size):
    """Draw a random batch of (shrunk, original) image pairs from train_set.

    batch_size: number of images sampled without replacement
    original_size: side length of the center-cropped target images
    shrunk_size: side length of the downscaled input images
    returns (x, y): shrunk inputs and full-size cropped targets
    """
    x = []
    y = []
    # random.sample raises ValueError if batch_size > len(train_set).
    img_indices = random.sample(range(len(train_set)), batch_size)
    for i in range(len(img_indices)):
        index = img_indices[i]
        img = scipy.misc.imread(train_set[index])
        img = crop_center(img, original_size, original_size)
        x_img = scipy.misc.imresize(img, (shrunk_size, shrunk_size))
        x.append(x_img)
        y.append(img)
    return x, y
"""
Simple method to crop center of image
img: image to crop
cropx: width of crop
cropy: height of crop
returns cropped image
"""
def crop_center(img, cropx, cropy):
    """Return the central cropy-by-cropx region of *img*.

    img: image array of shape (height, width, channels)
    cropx: width of the crop
    cropy: height of the crop
    """
    height, width, _ = img.shape
    left = width // 2 - cropx // 2
    top = height // 2 - cropy // 2
    return img[top:top + cropy, left:left + cropx]
from dataclasses import dataclass, field
import datetime
import io
import itertools as it
import json
from typing import Dict, Union, List
import pandas as pd
import requests
from redfin import Redfin
from dask import delayed
from dask.distributed import Client, as_completed
REDFIN_ENDPOINT='https://www.redfin.com/stingray/api/gis-csv?'
def gen_headers():
    """Browser-like HTTP request headers for the Redfin endpoints."""
    return {
        'authority': 'www.redfin.com',
        'content-length': '0',
        'sec-ch-ua': '"Chromium";v="92", " Not A;Brand";v="99", "Google Chrome";v="92"',
        'sec-ch-ua-mobile': '?0',
        'user-agent': 'Mozilla/5.0 (X11; CrOS x86_64 13982.82.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.157 Safari/537.36',
        'content-type': 'text/plain;charset=UTF-8',
        'accept': '*/*',
        'origin': 'https://www.redfin.com',
        'sec-fetch-site': 'same-origin',
        'sec-fetch-mode': 'no-cors',
        'sec-fetch-dest': 'empty',
        'referer': 'https://www.redfin.com/city/12839/DC/Washington-DC',
        'accept-language': 'en-US,en;q=0.9',
    }
def gen_params():
    """Redfin gis-csv search parameters (hard-coded DC-area filters)."""
    return {
        'al': 1,
        'hoa': 150,
        'market': 'dc',
        'max_listing_approx_size': 3000,
        'min_listing_approx_size': 1700,
        'max_num_beds': 4,
        'max_price': 800_000,
        'num_baths': 2,
        'num_beds': 2,
        'num_homes': 450,
        'page_number': 1,
        'region_id': 2965,
        'region_type': 5,
        'sf': '1,2,3,5,6,7',
        'status': 9,
        'uipt': '1,2,3,4,5,6,7,8',
        'v': 8
    }
def gen_cols():
    """Columns to keep from Redfin listings."""
    return [
        'ADDRESS',
        'CITY',
        'STATE OR PROVINCE',
        'ZIP OR POSTAL CODE',
        'PRICE',
    ]
def gen_final_cols():
    """Columns of the final digest: listing columns plus derived fields."""
    return [
        'ADDRESS',
        'CITY',
        'STATE OR PROVINCE',
        'ZIP OR POSTAL CODE',
        'PRICE',
        'tax_assessed_value',
        'date',
    ]
@dataclass
class Agent:
    """Stores Redfin query parameters, runs the Redfin listings query,
    and digests the output into pandas structures."""
    request_headers: Dict[str, str] = field(default_factory=gen_headers)
    redfin_query_params: Dict[str, Union[int, str]] = field(default_factory=gen_params)
    keep_cols: List[str] = field(default_factory=gen_cols)
    final_cols: List[str] = field(default_factory=gen_final_cols)

    def pull_listings(self):
        """Query redfin for listings; returns the raw requests.Response."""
        with requests.Session() as session:
            session.headers.update(self.request_headers)
            download = session.get(
                REDFIN_ENDPOINT,
                params=self.redfin_query_params,
            )
        return download

    def digest_listings(self, download: requests.Response):
        """Convert a listings response into a dataframe.

        Raises RuntimeError on a non-200 response or when expected columns
        are missing; returns the kept columns plus a derived 'full_address'.
        """
        if download.status_code != 200:
            raise RuntimeError(
                "Error making listings request: " +
                f"{download.content.decode('UTF-8')}"
            )
        # NOTE(review): error_bad_lines was removed in pandas 2.0; migrate
        # to on_bad_lines='skip' once the project requires pandas >= 1.3.
        df = pd.read_csv(
            io.StringIO(download.content.decode("utf-8")),
            low_memory=False, error_bad_lines=False
        )
        missing_cols = [c for c in self.keep_cols if c not in df.columns]
        if missing_cols:
            raise RuntimeError(
                f"Redfin listings missing {len(missing_cols)} " +
                f"columns: {missing_cols}"
            )
        return df[self.keep_cols].assign(
            full_address=lambda x: x['ADDRESS'] + ', ' + x['CITY']
            + ' ' + x['STATE OR PROVINCE']
        )

    def pull_details(self, list_of_addresses: List[str]):
        """Query for below-the-fold information on each address.

        Fans the per-address lookups out over a local dask cluster and
        returns the list of per-address result dicts.
        """
        daskclient = Client()
        redfinclient = Redfin()
        with requests.Session() as session:
            session.headers.update(self.request_headers)
            # Scatter the shared clients once instead of serializing them
            # into every task.
            session_scattered = daskclient.scatter(session)
            redfinclient_scattered = daskclient.scatter(redfinclient)
            futures = [
                delayed(self.query_redfin_dask)(session_scattered,
                                                redfinclient_scattered, address)
                for address in list_of_addresses
            ]
            futures = daskclient.compute(futures)
            completed_results = [
                result for _, result in
                as_completed(futures, raise_errors=False, with_results=True)
            ]
        daskclient.close()
        return completed_results

    def digest_details(
        self,
        df: pd.DataFrame,
        completed_results: List[Dict[str, float]]
    ):
        """Merge the per-address tax values into the listings dataframe
        and stamp each row with today's date."""
        timestamp = datetime.datetime.now().strftime("%Y-%m-%d")
        processed_df = (
            df.merge(
                self.compile_results(completed_results),
                left_on='full_address', right_index=True
            ).assign(date=timestamp)
        )
        return processed_df[self.final_cols]

    def query_redfin_dask(
        self,
        session: requests.Session,
        redfinclient: Redfin,
        address: str,
        **kwargs
    ):
        """For a given address, query redfin and identify tax-assessed value.

        This is the function submitted to the dask client; returns
        {address: value}, where -1 signals any failure.
        """
        response = session.get(
            'https://redfin.com/stingray/do/location-autocomplete',
            params={
                'location': address,
                'v': 2,
                **kwargs
            },
        )
        return {address: self.process_redfin_response(response, redfinclient)}

    def process_redfin_response(
        self,
        response: requests.Response,
        redfinclient: Redfin
    ):
        """Given a response from the redfin API, return the tax-assessed value.

        The payload is deeply nested JSON; every step that can fail
        short-circuits to -1.
        """
        if response.status_code != 200:
            return -1
        # The body carries a 4-character junk prefix before the JSON.
        resp_dict = json.loads(response.text[4:])
        # Bug fix: use short-circuiting `and` (the original used bitwise `&`,
        # which evaluated both operands and raised KeyError when 'payload'
        # was absent from an error response).
        if (
            resp_dict.get('errorMessage') == 'Success'
            and 'exactMatch' in resp_dict.get('payload', {})
        ):
            # Pull property metadata
            url = resp_dict['payload']['exactMatch']['url']
            data = redfinclient.initial_info(url)['payload']
            if data['responseCode'] != 200:
                return -1
            property_id = data['propertyId']
            info = redfinclient.below_the_fold(property_id)
            # Pull the latest tax-assessed value (highest rollYear).
            tax_rows = info['payload']['publicRecordsInfo']['allTaxInfo']
            if len(tax_rows) > 0:
                tax_assessment = (
                    pd.DataFrame(tax_rows)
                    .sort_values("rollYear", ascending=False)
                ).iloc[0]
                return (
                    tax_assessment.get('taxableLandValue', 0) +
                    tax_assessment.get('taxableImprovementValue', 0)
                )
            return -1
        return -1

    def compile_results(self, results: List[Dict[str, float]]):
        """Aggregate the per-address result dicts into a single series.

        Flattens the list of {address: value} dicts into one mapping and
        wraps it as a Series named 'tax_assessed_value'.
        """
        return pd.Series(
            dict(it.chain.from_iterable(a.items() for a in results)),
            name='tax_assessed_value'
        )
import datetime
import io
import itertools as it
import json
from typing import Dict, Union, List
import pandas as pd
import requests
from redfin import Redfin
from dask import delayed
from dask.distributed import Client, as_completed
REDFIN_ENDPOINT='https://www.redfin.com/stingray/api/gis-csv?'
def gen_headers():
    """Browser-like HTTP request headers for the Redfin endpoints.

    Mimics Chrome on ChromeOS so the unofficial endpoints respond.
    """
    headers = {
        'authority': 'www.redfin.com',
        'content-length': '0',
        'sec-ch-ua': '"Chromium";v="92", " Not A;Brand";v="99", "Google Chrome";v="92"',
        'sec-ch-ua-mobile': '?0',
        'user-agent': 'Mozilla/5.0 (X11; CrOS x86_64 13982.82.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.157 Safari/537.36',
        'content-type': 'text/plain;charset=UTF-8',
        'accept': '*/*',
        'origin': 'https://www.redfin.com',
        'sec-fetch-site': 'same-origin',
        'sec-fetch-mode': 'no-cors',
        'sec-fetch-dest': 'empty',
        'referer': 'https://www.redfin.com/city/12839/DC/Washington-DC',
        'accept-language': 'en-US,en;q=0.9',
    }
    return headers
def gen_params():
    """Redfin gis-csv search parameters.

    Hard-coded filters for a Washington-DC search (region_id 2965):
    price/size/bed/bath limits plus paging and result-type flags.
    """
    params = {
        'al': 1,
        'hoa': 150,
        'market': 'dc',
        'max_listing_approx_size': 3000,
        'min_listing_approx_size': 1700,
        'max_num_beds': 4,
        'max_price': 800_000,
        'num_baths': 2,
        'num_beds': 2,
        'num_homes': 450,
        'page_number': 1,
        'region_id': 2965,
        'region_type': 5,
        'sf': '1,2,3,5,6,7',
        'status': 9,
        'uipt': '1,2,3,4,5,6,7,8',
        'v': 8
    }
    return params
def gen_cols():
    """Columns to keep from the raw Redfin listings CSV."""
    relevant_columns = [
        'ADDRESS', 'CITY', 'STATE OR PROVINCE', 'ZIP OR POSTAL CODE', "PRICE",
    ]
    return relevant_columns
def gen_final_cols():
    """Columns of the final digest: listing columns plus derived fields."""
    final_columns = [
        'ADDRESS', 'CITY', 'STATE OR PROVINCE', 'ZIP OR POSTAL CODE', "PRICE",
        'tax_assessed_value', 'date'
    ]
    return final_columns
@dataclass
class Agent:
    """Stores Redfin query parameters, runs the Redfin listings query,
    and digests the output into pandas structures."""
    request_headers: Dict[str, str] = field(default_factory=gen_headers)
    redfin_query_params: Dict[str, Union[int, str]] = field(default_factory=gen_params)
    keep_cols: List[str] = field(default_factory=gen_cols)
    final_cols: List[str] = field(default_factory=gen_final_cols)

    def pull_listings(self):
        """Query redfin for listings; returns the raw requests.Response."""
        with requests.Session() as session:
            session.headers.update(self.request_headers)
            download = session.get(
                REDFIN_ENDPOINT,
                params=self.redfin_query_params,
            )
        return download

    def digest_listings(self, download: requests.Response):
        """Convert a listings response into a dataframe.

        Raises RuntimeError on a non-200 response or when expected columns
        are missing; returns the kept columns plus a derived 'full_address'.
        """
        if download.status_code != 200:
            raise RuntimeError(
                "Error making listings request: " +
                f"{download.content.decode('UTF-8')}"
            )
        # NOTE(review): error_bad_lines was removed in pandas 2.0; migrate
        # to on_bad_lines='skip' once the project requires pandas >= 1.3.
        df = pd.read_csv(
            io.StringIO(download.content.decode("utf-8")),
            low_memory=False, error_bad_lines=False
        )
        missing_cols = [c for c in self.keep_cols if c not in df.columns]
        if missing_cols:
            raise RuntimeError(
                f"Redfin listings missing {len(missing_cols)} " +
                f"columns: {missing_cols}"
            )
        return df[self.keep_cols].assign(
            full_address=lambda x: x['ADDRESS'] + ', ' + x['CITY']
            + ' ' + x['STATE OR PROVINCE']
        )

    def pull_details(self, list_of_addresses: List[str]):
        """Query for below-the-fold information on each address.

        Fans the per-address lookups out over a local dask cluster and
        returns the list of per-address result dicts.
        """
        daskclient = Client()
        redfinclient = Redfin()
        with requests.Session() as session:
            session.headers.update(self.request_headers)
            # Scatter the shared clients once instead of serializing them
            # into every task.
            session_scattered = daskclient.scatter(session)
            redfinclient_scattered = daskclient.scatter(redfinclient)
            futures = [
                delayed(self.query_redfin_dask)(session_scattered,
                                                redfinclient_scattered, address)
                for address in list_of_addresses
            ]
            futures = daskclient.compute(futures)
            completed_results = [
                result for _, result in
                as_completed(futures, raise_errors=False, with_results=True)
            ]
        daskclient.close()
        return completed_results

    def digest_details(
        self,
        df: pd.DataFrame,
        completed_results: List[Dict[str, float]]
    ):
        """Merge the per-address tax values into the listings dataframe
        and stamp each row with today's date."""
        timestamp = datetime.datetime.now().strftime("%Y-%m-%d")
        processed_df = (
            df.merge(
                self.compile_results(completed_results),
                left_on='full_address', right_index=True
            ).assign(date=timestamp)
        )
        return processed_df[self.final_cols]

    def query_redfin_dask(
        self,
        session: requests.Session,
        redfinclient: Redfin,
        address: str,
        **kwargs
    ):
        """For a given address, query redfin and identify tax-assessed value.

        This is the function submitted to the dask client; returns
        {address: value}, where -1 signals any failure.
        """
        response = session.get(
            'https://redfin.com/stingray/do/location-autocomplete',
            params={
                'location': address,
                'v': 2,
                **kwargs
            },
        )
        return {address: self.process_redfin_response(response, redfinclient)}

    def process_redfin_response(
        self,
        response: requests.Response,
        redfinclient: Redfin
    ):
        """Given a response from the redfin API, return the tax-assessed value.

        The payload is deeply nested JSON; every step that can fail
        short-circuits to -1.
        """
        if response.status_code != 200:
            return -1
        # The body carries a 4-character junk prefix before the JSON.
        resp_dict = json.loads(response.text[4:])
        # Bug fix: use short-circuiting `and` (the original used bitwise `&`,
        # which evaluated both operands and raised KeyError when 'payload'
        # was absent from an error response).
        if (
            resp_dict.get('errorMessage') == 'Success'
            and 'exactMatch' in resp_dict.get('payload', {})
        ):
            # Pull property metadata
            url = resp_dict['payload']['exactMatch']['url']
            data = redfinclient.initial_info(url)['payload']
            if data['responseCode'] != 200:
                return -1
            property_id = data['propertyId']
            info = redfinclient.below_the_fold(property_id)
            # Pull the latest tax-assessed value (highest rollYear).
            tax_rows = info['payload']['publicRecordsInfo']['allTaxInfo']
            if len(tax_rows) > 0:
                tax_assessment = (
                    pd.DataFrame(tax_rows)
                    .sort_values("rollYear", ascending=False)
                ).iloc[0]
                return (
                    tax_assessment.get('taxableLandValue', 0) +
                    tax_assessment.get('taxableImprovementValue', 0)
                )
            return -1
        return -1

    def compile_results(self, results: List[Dict[str, float]]):
        """Aggregate the per-address result dicts into a single series.

        Flattens the list of {address: value} dicts into one mapping and
        wraps it as a Series named 'tax_assessed_value'.
        """
        return pd.Series(
            dict(it.chain.from_iterable(a.items() for a in results)),
            name='tax_assessed_value'
        )
import argparse
import shutil
from commands import compile_jupyter
from commands import compile_latex
from commands import compile_latex_project
from commands import copy_meta_file
JUPYTER_SRC_PATH = './jupyter'
LATEX_SRC_PATH = './latex'
OUTPUT_ROOT_PATH = './out'
OUTPUT_JUPYTER_PATH = '{}/{}'.format(OUTPUT_ROOT_PATH, 'jupyter')
OUTPUT_LATEX_PATH = '{}/{}'.format(OUTPUT_ROOT_PATH, 'latex')
def build_jupyter():
    """Compile the Jupyter sources and copy their meta file to the output dir."""
    compile_jupyter(OUTPUT_JUPYTER_PATH, JUPYTER_SRC_PATH)
    copy_meta_file(OUTPUT_JUPYTER_PATH, JUPYTER_SRC_PATH)
def build_latex():
    """Compile the LaTeX sources and copy their meta file to the output dir."""
    compile_latex(OUTPUT_LATEX_PATH, LATEX_SRC_PATH)
    copy_meta_file(OUTPUT_LATEX_PATH, LATEX_SRC_PATH)
def main():
    """Entry point: parse CLI arguments and dispatch to the build commands."""
    # Create top-level root parser
    root_parser = argparse.ArgumentParser(description='Document compilation utility')
    subparsers = root_parser.add_subparsers(dest='subparser_name')
    # Create compile subparser
    compile_parser = subparsers.add_parser('compile')
    compile_parser.add_argument('command')
    compile_parser.add_argument('-p', '--project', help='Specify LaTeX project to compile')
    # Create clean subparser (registered for its side effect; takes no args)
    subparsers.add_parser('clean')
    # Parse arguments
    args = root_parser.parse_args()
    if args.subparser_name == 'compile':
        if args.command == 'all':
            print('Compiling Jupyter...')
            build_jupyter()
            print('Compiling Latex...')
            build_latex()
        elif args.command == 'jupyter':
            build_jupyter()
        elif args.command == 'latex':
            # PEP 8: compare against None with `is not`, not `!=`.
            if args.project is not None:
                compile_latex_project(OUTPUT_ROOT_PATH, LATEX_SRC_PATH,
                                      args.project)
            else:
                build_latex()
        else:
            available_commands = [
                'all', 'jupyter', 'latex'
            ]
            msg = 'Available commands:\n'
            for command in available_commands:
                msg += '\t' + command + '\n'
            print(msg)
    elif args.subparser_name == 'clean':
        # Removing an already-clean output tree is not an error.
        try:
            shutil.rmtree(OUTPUT_ROOT_PATH)
        except FileNotFoundError:
            pass
    else:
        root_parser.print_help()
if __name__ == '__main__':
main() | app/__main__.py | import argparse
import shutil
from commands import compile_jupyter
from commands import compile_latex
from commands import compile_latex_project
from commands import copy_meta_file
JUPYTER_SRC_PATH = './jupyter'
LATEX_SRC_PATH = './latex'
OUTPUT_ROOT_PATH = './out'
OUTPUT_JUPYTER_PATH = '{}/{}'.format(OUTPUT_ROOT_PATH, 'jupyter')
OUTPUT_LATEX_PATH = '{}/{}'.format(OUTPUT_ROOT_PATH, 'latex')
def build_jupyter():
    """Compile the Jupyter sources and copy their meta file to the output dir."""
    compile_jupyter(OUTPUT_JUPYTER_PATH, JUPYTER_SRC_PATH)
    copy_meta_file(OUTPUT_JUPYTER_PATH, JUPYTER_SRC_PATH)
def build_latex():
    """Compile the LaTeX sources and copy their meta file to the output dir."""
    compile_latex(OUTPUT_LATEX_PATH, LATEX_SRC_PATH)
    copy_meta_file(OUTPUT_LATEX_PATH, LATEX_SRC_PATH)
def main():
    """Entry point: parse CLI arguments and dispatch to the build commands."""
    # Create top-level root parser
    root_parser = argparse.ArgumentParser(description='Document compilation utility')
    subparsers = root_parser.add_subparsers(dest='subparser_name')
    # Create compile subparser
    compile_parser = subparsers.add_parser('compile')
    compile_parser.add_argument('command')
    compile_parser.add_argument('-p', '--project', help='Specify LaTeX project to compile')
    # Create clean subparser (registered for its side effect; takes no args)
    subparsers.add_parser('clean')
    # Parse arguments
    args = root_parser.parse_args()
    if args.subparser_name == 'compile':
        if args.command == 'all':
            print('Compiling Jupyter...')
            build_jupyter()
            print('Compiling Latex...')
            build_latex()
        elif args.command == 'jupyter':
            build_jupyter()
        elif args.command == 'latex':
            # PEP 8: compare against None with `is not`, not `!=`.
            if args.project is not None:
                compile_latex_project(OUTPUT_ROOT_PATH, LATEX_SRC_PATH,
                                      args.project)
            else:
                build_latex()
        else:
            available_commands = [
                'all', 'jupyter', 'latex'
            ]
            msg = 'Available commands:\n'
            for command in available_commands:
                msg += '\t' + command + '\n'
            print(msg)
    elif args.subparser_name == 'clean':
        # Removing an already-clean output tree is not an error.
        try:
            shutil.rmtree(OUTPUT_ROOT_PATH)
        except FileNotFoundError:
            pass
    else:
        root_parser.print_help()
if __name__ == '__main__':
main() | 0.322633 | 0.068944 |
import numpy as np
import os
import argparse
def check_size(submission_file):
    """Raise IOError if the submission file exceeds the 60 MiB server limit.

    submission_file: path to the submission file to check
    """
    max_size = 60*1024*1024  # 60 MiB upload cap
    if os.path.getsize(submission_file) > max_size:
        # Bug fix: `raise IOError, "..."` is Python-2-only syntax and a
        # SyntaxError under Python 3; calling the exception works on both.
        raise IOError("File size exceeds the specified maximum size, which is 60M for the server.")
def remove_ignored_det(dt_box, ig_box):
    """Drop detections that overlap ignore regions by more than half.

    dt_box: detections, each (l, t, r, b) or (score, l, t, r, b)
    ig_box: ignore regions, each (l, t, r, b)
    returns the detections whose accumulated overlap with the ignore
    regions is at most 50% of their own area (degenerate boxes are kept)
    """
    kept = []
    for det in dt_box:
        # A 5-element detection carries a leading confidence score.
        if len(det) > 4:
            _, left, top, right, bottom = det
        else:
            left, top, right, bottom = det
        area = float((right - left) * (bottom - top))
        overlap = -0.01
        for ig_left, ig_top, ig_right, ig_bottom in ig_box:
            # Accumulate intersection area (+1.0 treats coordinates as
            # inclusive pixel indices).
            if ig_right > left and ig_left < right and ig_top < bottom and ig_bottom > top:
                overlap += ((min(ig_right, right) - max(ig_left, left) + 1.0)
                            * (min(ig_bottom, bottom) - max(ig_top, top) + 1.0))
        if area <= 0 or overlap / area <= 0.5:
            kept.append(det)
    return kept
def parse_ignore_file(ignore_file):
    """Parse an ignore-region file into {image_id: [[l, t, r, b], ...]}.

    Each line holds an image id followed by groups of four ints
    (x, y, width, height); boxes are converted to corner form here.
    """
    with open(ignore_file, 'r') as f:
        lines = f.readlines()
    ignore = {}
    for line in lines:
        parts = line.strip().split()
        image_id = parts[0]
        bbox = []
        # Bug fix: use floor division — Python 3 true division yields a
        # float, which range() rejects with a TypeError.
        ignore_num = (len(parts) - 1) // 4
        for i in range(ignore_num):
            x = int(parts[1 + 4 * i])
            y = int(parts[2 + 4 * i])
            w = int(parts[3 + 4 * i])
            h = int(parts[4 + 4 * i])
            bbox.append([x, y, x + w, y + h])
        ignore[image_id] = bbox
    return ignore
def parse_gt_file(gt_file):
    """Parse ground truth into {image_name: {'bbox': [[l, t, r, b], ...]}}.

    Lines starting with '#' name an image; the following lines hold
    space-separated (x, y, w, h) boxes for that image.
    """
    gts = {}
    with open(gt_file, 'r') as f:
        for raw in f:
            raw = raw.strip()
            if raw.startswith('#'):
                name = raw[1:].strip()
                gts[name] = {'bbox': []}
                continue
            # Every box line must follow a '#' header.
            assert name is not None
            assert name in gts
            fields = raw.split(' ')
            gts[name]['bbox'].append([float(fields[0]), float(fields[1]),
                                      float(fields[0]) + float(fields[2]),
                                      float(fields[1]) + float(fields[3])])
    return gts
def parse_submission_file(sub_file, img_lst):
    """Parse a detection submission file and rank detections by score.

    sub_file: path to the submission ('# image_id' header lines followed
        by 'x y w h score' detection lines)
    img_lst: collection of valid image ids (from the ground truth)
    returns a list of {'image_id', 'score', 'bbox'} dicts sorted by
        descending score, boxes in corner (l, t, r, b) form
    raises KeyError when a detection references an unknown image id
    """
    with open(sub_file,'r') as f:
        lines = f.readlines()
    subs = {}
    for line in lines:
        line = line.strip()
        if line.startswith('#'):
            image_id = line[1:].strip()
            subs.setdefault(image_id, [])
            continue
        line = line.split(' ')
        if image_id not in img_lst:
            raise KeyError("Can not find image {} in the groundtruth file, did you submit the result file for the right dataset?".format(image_id))
        # Stored as [score, left, top, right, bottom] (x+w / y+h corners).
        subs[image_id].append([float(line[4]), float(line[0]), float(line[1]), float(line[0])+float(line[2]), float(line[1])+float(line[3])])
    final_subs = []
    for key in img_lst:
        if key not in subs.keys(): continue
        for item in subs[key]:
            final_subs.append({'image_id':key, 'score':item[0], 'bbox':item[1:]})
    # Rank all detections globally by confidence, highest first.
    final_subs = sorted(final_subs, key=lambda x: -x['score'])
    return final_subs
def compute_ap(rec, prec):
    """Compute VOC-style Average Precision from recall/precision arrays.

    Pads the curve, enforces a monotonically non-increasing precision
    envelope, then integrates over the points where recall changes.
    """
    mrec = np.concatenate(([0.], rec, [1.]))
    mpre = np.concatenate(([0.], prec, [0.]))
    # Backward pass: each precision becomes the max of everything after it.
    for idx in range(mpre.size - 1, 0, -1):
        mpre[idx - 1] = np.maximum(mpre[idx - 1], mpre[idx])
    change = np.where(mrec[1:] != mrec[:-1])[0]
    return np.sum((mrec[change + 1] - mrec[change]) * mpre[change + 1])
def pedestrian_eval(dts, gt):
    """Compute mean AP over IoU thresholds 0.5:0.05:0.95.

    dts: detections sorted by descending score (from parse_submission_file)
    gt: ground-truth dict from parse_gt_file; a per-image 'det' matched-flag
        list is added to each entry as a side effect
    returns the mean of the per-threshold average precisions
    """
    aap = []
    nd = len(dts)
    ovethr = np.arange(0.5,1.0,0.05)
    for ove in ovethr:
        # Count positives and reset the matched flags for this threshold.
        npos = 0
        for image_id in gt.keys():
            npos += len(gt[image_id]['bbox'])
            gt[image_id]['det'] = [False] * len(gt[image_id]['bbox'])
        tp = np.zeros(nd)
        fp = np.zeros(nd)
        for i in range(nd):
            bb = dts[i]['bbox']
            image_id = dts[i]['image_id']
            BBGT = np.array(gt[image_id]['bbox'])
            ovmax = -np.inf
            if BBGT.size > 0:
                # IoU of this detection against every ground-truth box of the
                # same image (+1. treats coordinates as inclusive indices).
                ixmin = np.maximum(BBGT[:, 0], bb[0])
                iymin = np.maximum(BBGT[:, 1], bb[1])
                ixmax = np.minimum(BBGT[:, 2], bb[2])
                iymax = np.minimum(BBGT[:, 3], bb[3])
                iw = np.maximum(ixmax - ixmin + 1., 0.)
                ih = np.maximum(iymax - iymin + 1., 0.)
                inters = iw * ih
                uni = ((bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) + (BBGT[:, 2] - BBGT[:, 0] + 1.) * \
                    (BBGT[:, 3] - BBGT[:, 1] + 1.) - inters)
                overlaps = inters / uni
                ovmax = np.max(overlaps)
                jmax = np.argmax(overlaps)
            # Greedy matching: only the first detection to claim a ground-truth
            # box counts as a true positive; duplicates are false positives.
            if ovmax > ove:
                if not gt[image_id]['det'][jmax]:
                    tp[i] = 1.
                    gt[image_id]['det'][jmax] = 1.
                else:
                    fp[i] = 1.
            else:
                fp[i] = 1.
        fp = np.cumsum(fp)
        tp = np.cumsum(tp)
        rec = tp / float(npos)
        # eps guards against 0/0 when there are no detections yet.
        prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
        ap = compute_ap(rec, prec)
        aap.append(ap)
    mAP = np.mean(aap)
    return mAP
if __name__ == '__main__':
gt_file = 'data/retinaface/val/label.txt'
submit_file = 'wout_r50_fpn_dcn_retina.txt'
check_size(submit_file)
gt = parse_gt_file(gt_file)
dts = parse_submission_file(submit_file, sorted(gt.keys()))
mAP = pedestrian_eval(dts, gt)
out = {'Average AP': mAP}
print(out)
#strings = ['{}: {}\n'.format(k, v) for k, v in out.items()]
#open(os.path.join(output_dir, 'scores.txt'), 'w').writelines(strings) | demo/python/evaluate.py | import numpy as np
import os
import argparse
def check_size(submission_file):
    """Raise IOError if the submission file exceeds the 60 MiB server limit.

    submission_file: path to the submission file to check
    """
    max_size = 60*1024*1024  # 60 MiB upload cap
    if os.path.getsize(submission_file) > max_size:
        # Bug fix: `raise IOError, "..."` is Python-2-only syntax and a
        # SyntaxError under Python 3; calling the exception works on both.
        raise IOError("File size exceeds the specified maximum size, which is 60M for the server.")
def remove_ignored_det(dt_box, ig_box):
    """Filter out detections that mostly fall inside ignore regions.

    dt_box: detections, each (l, t, r, b) or (score, l, t, r, b)
    ig_box: ignore regions as (l, t, r, b)
    returns detections whose accumulated overlap with ignore regions is
    at most half of their own area (degenerate boxes are always kept)
    """
    remain_box = []
    for p in dt_box:
        # A 5-element detection carries a leading confidence score.
        if len(p)>4:
            _,pl,pt,pr,pb = p
        else:
            pl,pt,pr,pb = p
        p_area = float((pr-pl)*(pb-pt))
        overlap = -0.01
        for c in ig_box:
            cl,ct,cr,cb = c
            # Accumulate intersection area with every overlapping ignore
            # box (+1.0 treats coordinates as inclusive pixel indices).
            if (cr>pl) and (cl<pr) and (ct<pb) and (cb>pt):
                overlap += (min(cr,pr)-max(cl,pl)+1.0)*(min(cb,pb)-max(ct,pt)+1.0)
        if p_area <= 0:
            remain_box.append(p)
            continue
        if overlap/p_area <= 0.5:
            remain_box.append(p)
    return remain_box
def parse_ignore_file(ignore_file):
    """Parse an ignore-region file into {image_id: [[l, t, r, b], ...]}.

    Each line holds an image id followed by groups of four ints
    (x, y, width, height); boxes are converted to corner form here.
    """
    with open(ignore_file, 'r') as f:
        lines = f.readlines()
    ignore = {}
    for line in lines:
        parts = line.strip().split()
        image_id = parts[0]
        bbox = []
        # Bug fix: use floor division — Python 3 true division yields a
        # float, which range() rejects with a TypeError.
        ignore_num = (len(parts) - 1) // 4
        for i in range(ignore_num):
            x = int(parts[1 + 4 * i])
            y = int(parts[2 + 4 * i])
            w = int(parts[3 + 4 * i])
            h = int(parts[4 + 4 * i])
            bbox.append([x, y, x + w, y + h])
        ignore[image_id] = bbox
    return ignore
def parse_gt_file(gt_file):
    """Parse ground truth into {image_name: {'bbox': [[l, t, r, b], ...]}}.

    Lines beginning with '#' start a new image section; the following
    lines hold space-separated (x, y, w, h) boxes, converted to corners.
    """
    with open(gt_file, 'r') as f:
        lines = f.readlines()
    gts = {}
    for line in lines:
        line = line.strip()
        if line.startswith('#'):
            name = line[1:].strip()
            gts[name] = {}
            gts[name]['bbox'] = []
            continue
        # Every box line must follow a '#' header (otherwise `name` is unbound).
        assert name is not None
        assert name in gts
        line = line.split(' ')
        gts[name]['bbox'].append([float(line[0]), float(line[1]), float(line[0])+float(line[2]), float(line[1])+float(line[3])])
    return gts
def parse_submission_file(sub_file, img_lst):
    """Parse a detection submission file and rank detections by score.

    sub_file: path to the submission ('# image_id' header lines followed
        by 'x y w h score' detection lines)
    img_lst: collection of valid image ids (from the ground truth)
    returns a list of {'image_id', 'score', 'bbox'} dicts sorted by
        descending score, boxes in corner (l, t, r, b) form
    raises KeyError when a detection references an unknown image id
    """
    with open(sub_file,'r') as f:
        lines = f.readlines()
    subs = {}
    for line in lines:
        line = line.strip()
        if line.startswith('#'):
            image_id = line[1:].strip()
            subs.setdefault(image_id, [])
            continue
        line = line.split(' ')
        if image_id not in img_lst:
            raise KeyError("Can not find image {} in the groundtruth file, did you submit the result file for the right dataset?".format(image_id))
        # Stored as [score, left, top, right, bottom] (x+w / y+h corners).
        subs[image_id].append([float(line[4]), float(line[0]), float(line[1]), float(line[0])+float(line[2]), float(line[1])+float(line[3])])
    final_subs = []
    for key in img_lst:
        if key not in subs.keys(): continue
        for item in subs[key]:
            final_subs.append({'image_id':key, 'score':item[0], 'bbox':item[1:]})
    # Rank all detections globally by confidence, highest first.
    final_subs = sorted(final_subs, key=lambda x: -x['score'])
    return final_subs
def compute_ap(rec, prec):
    """Compute VOC-style Average Precision from recall/precision arrays.

    Pads the curve, enforces a monotonically non-increasing precision
    envelope, then integrates over the points where recall changes.
    """
    mrec = np.concatenate(([0.], rec, [1.]))
    mpre = np.concatenate(([0.], prec, [0.]))
    # Backward pass: each precision becomes the max of everything after it.
    for i in range(mpre.size - 1, 0, -1):
        mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
    i = np.where(mrec[1:] != mrec[:-1])[0]
    ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
    return ap
def pedestrian_eval(dts, gt):
    """Compute mean AP over IoU thresholds 0.5:0.05:0.95.

    dts: detections sorted by descending score (from parse_submission_file)
    gt: ground-truth dict from parse_gt_file; a per-image 'det' matched-flag
        list is added to each entry as a side effect
    returns the mean of the per-threshold average precisions
    """
    aap = []
    nd = len(dts)
    ovethr = np.arange(0.5,1.0,0.05)
    for ove in ovethr:
        # Count positives and reset the matched flags for this threshold.
        npos = 0
        for image_id in gt.keys():
            npos += len(gt[image_id]['bbox'])
            gt[image_id]['det'] = [False] * len(gt[image_id]['bbox'])
        tp = np.zeros(nd)
        fp = np.zeros(nd)
        for i in range(nd):
            bb = dts[i]['bbox']
            image_id = dts[i]['image_id']
            BBGT = np.array(gt[image_id]['bbox'])
            ovmax = -np.inf
            if BBGT.size > 0:
                # IoU of this detection against every ground-truth box of the
                # same image (+1. treats coordinates as inclusive indices).
                ixmin = np.maximum(BBGT[:, 0], bb[0])
                iymin = np.maximum(BBGT[:, 1], bb[1])
                ixmax = np.minimum(BBGT[:, 2], bb[2])
                iymax = np.minimum(BBGT[:, 3], bb[3])
                iw = np.maximum(ixmax - ixmin + 1., 0.)
                ih = np.maximum(iymax - iymin + 1., 0.)
                inters = iw * ih
                uni = ((bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) + (BBGT[:, 2] - BBGT[:, 0] + 1.) * \
                    (BBGT[:, 3] - BBGT[:, 1] + 1.) - inters)
                overlaps = inters / uni
                ovmax = np.max(overlaps)
                jmax = np.argmax(overlaps)
            # Greedy matching: only the first detection to claim a ground-truth
            # box counts as a true positive; duplicates are false positives.
            if ovmax > ove:
                if not gt[image_id]['det'][jmax]:
                    tp[i] = 1.
                    gt[image_id]['det'][jmax] = 1.
                else:
                    fp[i] = 1.
            else:
                fp[i] = 1.
        fp = np.cumsum(fp)
        tp = np.cumsum(tp)
        rec = tp / float(npos)
        # eps guards against 0/0 when there are no detections yet.
        prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
        ap = compute_ap(rec, prec)
        aap.append(ap)
    mAP = np.mean(aap)
    return mAP
if __name__ == '__main__':
gt_file = 'data/retinaface/val/label.txt'
submit_file = 'wout_r50_fpn_dcn_retina.txt'
check_size(submit_file)
gt = parse_gt_file(gt_file)
dts = parse_submission_file(submit_file, sorted(gt.keys()))
mAP = pedestrian_eval(dts, gt)
out = {'Average AP': mAP}
print(out)
#strings = ['{}: {}\n'.format(k, v) for k, v in out.items()]
#open(os.path.join(output_dir, 'scores.txt'), 'w').writelines(strings) | 0.38445 | 0.290402 |
from gi.repository import PeasGtk
from gi.repository import GObject
from gi.repository import GLib
from gi.repository import Gtk
from gi.repository import Gio
import os
import json
class discord_status_prefs(GObject.Object, PeasGtk.Configurable):
    """Preferences widget for the Discord status plugin.

    Settings are persisted to settings.json next to this file:
    'time_style' (0 = elapsed, 1 = remaining) and 'show_notifs'.
    """
    __gtype_name__ = "discord_status_prefs"
    object = GObject.property(type=GObject.Object)

    def __init__(self):
        # Load the persisted settings from settings.json in the plugin dir.
        path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "settings.json")
        with open(path) as file:
            self.settings = json.load(file)
        self.time_style = self.settings["time_style"]
        self.show_notifs = self.settings["show_notifs"]

    def do_create_configure_widget(self):
        """Build the preferences widget from the GtkBuilder UI file and
        sync its controls with the persisted settings."""
        path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "discord-status-prefs.ui")
        self.builder = Gtk.Builder()
        self.builder.add_from_file(path)
        self.builder.connect_signals(self)
        self.builder.get_object("show_notif_checkbox").set_active(self.settings["show_notifs"])
        if self.settings["time_style"] == 0:
            self.builder.get_object("elapsed_radio_button").set_active(True)
        elif self.settings["time_style"] == 1:
            # Bug fix: activate (True) the 'remaining' radio button when the
            # saved style is 'remaining'; the original passed False, so the
            # saved selection was never reflected in the UI.
            self.builder.get_object("remaining_radio_button").set_active(True)
        return self.builder.get_object("discord-status-prefs")

    def update_settings(self):
        """Write the current in-memory settings back to settings.json."""
        self.settings["time_style"] = self.time_style
        self.settings["show_notifs"] = self.show_notifs
        path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "settings.json")
        with open(path, "w") as file:
            json.dump(self.settings, file)

    def show_notifs_toggled(self, checkbox):
        """Signal handler: persist the notification checkbox state."""
        self.show_notifs = checkbox.get_active()
        self.update_settings()

    def elapsed_radio_button_toggled(self, toggle_button):
        """Signal handler: select and persist the 'elapsed time' style."""
        if (toggle_button.get_active()):
            self.time_style = 0
            self.update_settings()

    def remaining_radio_button_toggled(self, toggle_button):
        """Signal handler: select and persist the 'remaining time' style."""
        if (toggle_button.get_active()):
            self.time_style = 1
            self.update_settings()
from gi.repository import GObject
from gi.repository import GLib
from gi.repository import Gtk
from gi.repository import Gio
import os
import json
class discord_status_prefs(GObject.Object, PeasGtk.Configurable):
    """Preferences widget for the Discord status plugin.

    Settings are persisted to settings.json next to this file:
    'time_style' (0 = elapsed, 1 = remaining) and 'show_notifs'.
    """
    __gtype_name__ = "discord_status_prefs"
    object = GObject.property(type=GObject.Object)

    def __init__(self):
        # Load the persisted settings from settings.json in the plugin dir.
        path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "settings.json")
        with open(path) as file:
            self.settings = json.load(file)
        self.time_style = self.settings["time_style"]
        self.show_notifs = self.settings["show_notifs"]

    def do_create_configure_widget(self):
        """Build the preferences widget from the GtkBuilder UI file and
        sync its controls with the persisted settings."""
        path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "discord-status-prefs.ui")
        self.builder = Gtk.Builder()
        self.builder.add_from_file(path)
        self.builder.connect_signals(self)
        self.builder.get_object("show_notif_checkbox").set_active(self.settings["show_notifs"])
        if self.settings["time_style"] == 0:
            self.builder.get_object("elapsed_radio_button").set_active(True)
        elif self.settings["time_style"] == 1:
            # Bug fix: activate (True) the 'remaining' radio button when the
            # saved style is 'remaining'; the original passed False, so the
            # saved selection was never reflected in the UI.
            self.builder.get_object("remaining_radio_button").set_active(True)
        return self.builder.get_object("discord-status-prefs")

    def update_settings(self):
        """Write the current in-memory settings back to settings.json."""
        self.settings["time_style"] = self.time_style
        self.settings["show_notifs"] = self.show_notifs
        path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "settings.json")
        with open(path, "w") as file:
            json.dump(self.settings, file)

    def show_notifs_toggled(self, checkbox):
        """Signal handler: persist the notification checkbox state."""
        self.show_notifs = checkbox.get_active()
        self.update_settings()

    def elapsed_radio_button_toggled(self, toggle_button):
        """Signal handler: select and persist the 'elapsed time' style."""
        if (toggle_button.get_active()):
            self.time_style = 0
            self.update_settings()

    def remaining_radio_button_toggled(self, toggle_button):
        """Signal handler: select and persist the 'remaining time' style."""
        if (toggle_button.get_active()):
            self.time_style = 1
            self.update_settings()
__author__ = "<NAME>"
__email__ = "<EMAIL>"
import unittest
from netease_im import ImClient
from netease_im import components
from netease_im.constants.params import *
KEY = '271f99c2ad5a414459fc02071eb1e405'
SECRET = '<KEY>'
BASE_URI = 'https://api.netease.im/nimserver'
def suite():
    """Define all the tests of the module."""
    suite = unittest.TestSuite()
    # NOTE(review): unittest.makeSuite is deprecated (removed in Python
    # 3.13); TestLoader().loadTestsFromTestCase is the replacement.
    suite.addTest(unittest.makeSuite(CreateTestCase))
    return suite
class CreateTestCase(unittest.TestCase):
    """Integration tests for the user component of the NetEase IM API.

    These hit the live API using the module-level KEY/SECRET credentials,
    so they require network access and valid credentials to pass.
    """
    def setUp(self):
        self.component = components.user.UserComponent(
            base_uri=BASE_URI,
            config={
                'api_key': KEY,
                'api_secret': SECRET
            }
        )
    def test_can_create(self):
        client = ImClient(KEY, SECRET)
        res = client.user.create(**{
            'accid': 'jingyuxiaoban_accid',
            'name': 'jingyuxiaoban_name',
            'icon': '',
            'token': '',
            'props': '',
        }).json()
        # Fixed: `print res` is Python-2-only syntax (a SyntaxError on
        # Python 3); the call form prints identically on both versions.
        print(res)
        self.assertEqual(res['code'], 200)
    def test_can_update(self):
        client = ImClient(KEY, SECRET)
        res = client.user.update(**{
            'accid': 'jingyuxiaoban_accid',
            'token': '',
            'props': '',
        }).json()
        print(res)
        self.assertEqual(res['code'], 200)
    def test_can_refresh_token(self):
        client = ImClient(KEY, SECRET)
        res = client.user.refresh_token(**{
            'accid': 'jingyuxiaoban_accid',
        }).json()
        print(res)
        self.assertEqual(res['code'], 200)
    def test_can_block(self):
        client = ImClient(KEY, SECRET)
        res = client.user.block(**{
            'accid': 'jingyuxiaoban_accid',
        }).json()
        print(res)
        self.assertEqual(res['code'], 200)
    def test_can_unblock(self):
        client = ImClient(KEY, SECRET)
        res = client.user.unblock(**{
            'accid': 'jingyuxiaoban_accid',
        }).json()
        print(res)
        self.assertEqual(res['code'], 200)
    def test_can_update_info(self):
        client = ImClient(KEY, SECRET)
        res = client.user.update_info(**{
            'accid': 'jingyuxiaoban_accid',
        }).json()
        print(res)
        self.assertEqual(res['code'], 200)
    def test_can_get_info(self):
        client = ImClient(KEY, SECRET)
        res = client.user.get_info(**{
            'accids': ['jingyuxiaoban_accid'],
        }).json()
        print(res)
        self.assertEqual(res['code'], 200)
    def test_can_set_donnop(self):
        client = ImClient(KEY, SECRET)
        res = client.user.set_donnop(**{
            'accid': 'jingyuxiaoban_accid',
            'donnopOpen': True
        }).json()
        print(res)
        self.assertEqual(res['code'], 200)
    def test_can_set_special_relation(self):
        client = ImClient(KEY, SECRET)
        res = client.user.set_special_relation(**{
            'accid': 'jingyuxiaoban_accid',
            'targetAcc': 'jingyuxiaoban_accid1',
            'relationType': RELATION_TYPE_BLACK,
            'value': OP_VALUE_ADD
        }).json()
        print(res)
        self.assertEqual(res['code'], 200)
    def test_can_list_black_and_mute(self):
        client = ImClient(KEY, SECRET)
        res = client.user.list_black_and_mute(**{
            'accid': 'jingyuxiaoban_accid',
        }).json()
        print(res)
        self.assertEqual(res['code'], 200)
# Run the module's tests when executed as a script.
# (The original line was corrupted by dataset-extraction junk appended after
# unittest.main().)
if __name__ == '__main__':
    unittest.main()
__author__ = "<NAME>"
__email__ = "<EMAIL>"
import unittest
from netease_im import ImClient
from netease_im import components
from netease_im.constants.params import *
# NOTE(review): live API credentials are hard-coded below; they should be
# loaded from environment/config rather than committed to source control.
KEY = '271f99c2ad5a414459fc02071eb1e405'
SECRET = '<KEY>'
BASE_URI = 'https://api.netease.im/nimserver'
def suite():
    """Define all the tests of the module."""
    tests = unittest.TestSuite()
    # unittest.makeSuite was deprecated and removed in Python 3.13;
    # use the default loader instead (equivalent behavior).
    tests.addTests(unittest.defaultTestLoader.loadTestsFromTestCase(CreateTestCase))
    return tests
class CreateTestCase(unittest.TestCase):
    """Integration tests for the user endpoints of the NetEase IM API.

    NOTE(review): every test performs a real HTTP request through ImClient,
    so valid credentials and network access are required.
    """

    def setUp(self):
        # Build the user component directly; the tests below go through
        # ImClient instead and do not use this attribute.
        self.component = components.user.UserComponent(
            base_uri=BASE_URI,
            config={
                'api_key': KEY,
                'api_secret': SECRET
            }
        )

    def test_can_create(self):
        """Creating an account should answer with code 200."""
        client = ImClient(KEY, SECRET)
        res = client.user.create(
            accid='jingyuxiaoban_accid',
            name='jingyuxiaoban_name',
            icon='',
            token='',
            props='',
        ).json()
        print(res)  # print() form is valid on both Python 2 and Python 3
        self.assertEqual(res['code'], 200)

    def test_can_update(self):
        client = ImClient(KEY, SECRET)
        res = client.user.update(
            accid='jingyuxiaoban_accid',
            token='',
            props='',
        ).json()
        print(res)
        self.assertEqual(res['code'], 200)

    def test_can_refresh_token(self):
        client = ImClient(KEY, SECRET)
        res = client.user.refresh_token(accid='jingyuxiaoban_accid').json()
        print(res)
        self.assertEqual(res['code'], 200)

    def test_can_block(self):
        client = ImClient(KEY, SECRET)
        res = client.user.block(accid='jingyuxiaoban_accid').json()
        print(res)
        self.assertEqual(res['code'], 200)

    def test_can_unblock(self):
        client = ImClient(KEY, SECRET)
        res = client.user.unblock(accid='jingyuxiaoban_accid').json()
        print(res)
        self.assertEqual(res['code'], 200)

    def test_can_update_info(self):
        client = ImClient(KEY, SECRET)
        res = client.user.update_info(accid='jingyuxiaoban_accid').json()
        print(res)
        self.assertEqual(res['code'], 200)

    def test_can_get_info(self):
        client = ImClient(KEY, SECRET)
        res = client.user.get_info(accids=['jingyuxiaoban_accid']).json()
        print(res)
        self.assertEqual(res['code'], 200)

    def test_can_set_donnop(self):
        client = ImClient(KEY, SECRET)
        res = client.user.set_donnop(
            accid='jingyuxiaoban_accid',
            donnopOpen=True,
        ).json()
        print(res)
        self.assertEqual(res['code'], 200)

    def test_can_set_special_relation(self):
        client = ImClient(KEY, SECRET)
        res = client.user.set_special_relation(
            accid='jingyuxiaoban_accid',
            targetAcc='jingyuxiaoban_accid1',
            relationType=RELATION_TYPE_BLACK,
            value=OP_VALUE_ADD,
        ).json()
        print(res)
        self.assertEqual(res['code'], 200)

    def test_can_list_black_and_mute(self):
        client = ImClient(KEY, SECRET)
        res = client.user.list_black_and_mute(accid='jingyuxiaoban_accid').json()
        print(res)
        self.assertEqual(res['code'], 200)
# Run the module's tests when executed as a script.
# (The original line was corrupted by dataset-extraction junk — two quality
# scores — appended after unittest.main().)
if __name__ == '__main__':
    unittest.main()
import numpy as np
import cv2
import os
from tqdm import tqdm
import random
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import matplotlib.patches as patches
import pickle
from glob import glob
import imgaug as ia
from imgaug import augmenters as iaa
from shapely.geometry import Polygon
# Card geometry in mm (63mm x 87mm before the 'zoom' conversion below)
cardW=63
cardH=87
# Zone (in mm, relative to the top-left of the card) containing the
# value/suit symbols of the upper-left corner
cornerXmin=1
cornerXmax=8.95
cornerYmin=3
cornerYmax=23
# We convert the measures from mm to pixels: multiply by an arbitrary factor 'zoom'
# You shouldn't need to change this
zoom=4
cardW*=zoom
cardH*=zoom
cornerXmin=int(cornerXmin*zoom)
cornerXmax=int(cornerXmax*zoom)
cornerYmin=int(cornerYmin*zoom)
cornerYmax=int(cornerYmax*zoom)
# Pickle files produced by the extraction step
data_dir='../data/card_data'
cards_pck_fn=data_dir+"/cards.pkl"
backgrounds_pck_fn=data_dir+"/backgrounds.pkl"
# Size of the generated scene images
imgW=416
imgH=416
# Reference quadrilaterals (4 points each, float32 for cv2) used as targets
# of the perspective warp; refCardRot is the same rectangle rotated 90 deg
refCard=np.array([[0,0],[cardW,0],[cardW,cardH],[0,cardH]],dtype=np.float32)
refCardRot=np.array([[cardW,0],[cardW,cardH],[0,cardH],[0,0]],dtype=np.float32)
# Corner zones: HL = upper-left, LR = lower-right (point-symmetric to HL)
refCornerHL=np.array([[cornerXmin,cornerYmin],[cornerXmax,cornerYmin],[cornerXmax,cornerYmax],[cornerXmin,cornerYmax]],dtype=np.float32)
refCornerLR=np.array([[cardW-cornerXmax,cardH-cornerYmax],[cardW-cornerXmin,cardH-cornerYmax],[cardW-cornerXmin,cardH-cornerYmin],[cardW-cornerXmax,cardH-cornerYmin]],dtype=np.float32)
refCorners=np.array([refCornerHL,refCornerLR])
class Cards():
def __init__(self,cards_pck_fn=cards_pck_fn):
self._cards=pickle.load(open(cards_pck_fn,'rb'))
# self._cards is a dictionary where keys are card names (ex:'Kc') and values are lists of (img,hullHL,hullLR)
self._nb_cards_by_value={k:len(self._cards[k]) for k in self._cards}
print("cards loaded per suit/rank:", self._nb_cards_by_value) # >>>
def get_random(self, card_name=None, display=False):
if card_name is None:
card_name= random.choice(list(self._cards.keys()))
card,hull1,hull2=self._cards[card_name][random.randint(0,self._nb_cards_by_value[card_name]-1)]
if display:
if display: display_img(card,[hull1,hull2],"rgb")
return card,card_name,hull1,hull2
class Backgrounds():
def __init__(self,backgrounds_pck_fn=backgrounds_pck_fn):
self._images=pickle.load(open(backgrounds_pck_fn,'rb'))
self._nb_images=len(self._images)
print("images loaded:", self._nb_images)
def get_random(self, display=False):
bg=self._images[random.randint(0,self._nb_images-1)]
if display: plt.imshow(bg)
return bg
def display_img(img,polygons=None,channels="bgr",size=9):
    """
    Function to display an inline image, and draw optional polygons (bounding boxes, convex hulls) on it.
    Use the param 'channels' to specify the order of the channels ("bgr" for an image coming from OpenCV world)
    """
    # Avoid the mutable default argument []; accept a single polygon or a list.
    if polygons is None:
        polygons=[]
    elif not isinstance(polygons,list):
        polygons=[polygons]
    if channels=="bgr": # bgr (cv2 image): convert to RGB(A) for matplotlib
        nb_channels=img.shape[2]
        if nb_channels==4:
            img=cv2.cvtColor(img,cv2.COLOR_BGRA2RGBA)
        else:
            img=cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
    fig,ax=plt.subplots(figsize=(size,size))
    ax.set_facecolor((0,0,0))
    ax.imshow(img)
    for polygon in polygons:
        # A polygon has either shape (n,2), or (n,1,2) if it is a cv2 contour
        # (like a convex hull). In the latter case, reshape to (n,2).
        if len(polygon.shape)==3:
            polygon=polygon.reshape(-1,2)
        patch=patches.Polygon(polygon,linewidth=1,edgecolor='g',facecolor='none')
        ax.add_patch(patch)
def give_me_filename(dirname, suffixes, prefix=""):
    """
    Return a filename (or a list of filenames) in directory 'dirname' that
    does not exist yet. If 'suffixes' is a list, one filename per suffix:
        filename = dirname + "/" + prefix + random number + "." + suffix
    The same random number is shared by all the file names.
    Ex:
        > give_me_filename("dir","jpg", prefix="prefix")
        'dir/prefix408290659.jpg'
        > give_me_filename("dir",["jpg","xml"])
        ['dir/877739594.jpg', 'dir/877739594.xml']
    """
    if not isinstance(suffixes, list):
        suffixes = [suffixes]
    # Normalize every suffix so it starts with a dot
    suffixes = [s if s[0] == '.' else '.' + s for s in suffixes]
    # Draw random 9-digit stems until none of the candidate files exists
    while True:
        stem = "%09d" % random.randint(0, 999999999)
        candidates = [os.path.join(dirname, prefix + stem + s) for s in suffixes]
        if not any(os.path.isfile(c) for c in candidates):
            break
    return candidates[0] if len(candidates) == 1 else candidates
def varianceOfLaplacian(img):
    """
    Compute the Laplacian of the image and then return the focus
    measure, which is simply the variance of the Laplacian
    Source: A.Rosebrock, https://www.pyimagesearch.com/2015/09/07/blur-detection-with-opencv/
    """
    # A low variance means few sharp edges, i.e. a blurry image.
    return cv2.Laplacian(img, cv2.CV_64F).var()
def extract_card (img, alphamask, output_fn=None, min_focus=120, debug=False):
    """
    Extract the card photographed in 'img' and warp it into the reference
    (cardW,cardH) rectangle, adding an alpha channel shaped by 'alphamask'.
    Returns (valid, imgwarp): valid is False (and imgwarp None) when the
    image is too blurry or the largest contour is not rectangular enough.
    If 'output_fn' is given, the warped card is also saved to that file.
    """
    imgwarp=None
    # Check the image is not too blurry
    focus=varianceOfLaplacian(img)
    if focus < min_focus:
        if debug: print("Focus too low :", focus)
        return False,None
    # Convert in gray color
    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    # Noise-reducing and edge-preserving filter
    gray=cv2.bilateralFilter(gray,11,17,17)
    # Edge extraction
    edge=cv2.Canny(gray,30,200)
    # Find the contours in the edged image
    cnts, _ = cv2.findContours(edge.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # Guard: no contour at all -> nothing to extract (original crashed here)
    if not cnts:
        if debug: print("No contour found")
        return False,None
    # We suppose that the contour with largest area delimits the card
    cnt = max(cnts, key=cv2.contourArea)
    # We want to check that 'cnt' is the contour of a rectangular shape:
    # determine 'box', the minimum-area bounding rectangle of 'cnt', then
    # compare the areas of 'cnt' and 'box' — both should be very close.
    rect=cv2.minAreaRect(cnt)
    box=cv2.boxPoints(rect)
    # np.int0 was removed in NumPy 2.0: cast explicitly to int32 (what cv2 expects)
    box=box.astype(np.int32)
    areaCnt=cv2.contourArea(cnt)
    areaBox=cv2.contourArea(box)
    valid=areaCnt/areaBox>0.95
    if valid:
        # Transform the zone inside the contour into the reference rectangle
        # of dimensions (cardW,cardH)
        ((xr,yr),(wr,hr),thetar)=rect
        # Determine 'Mp', the transformation that maps 'box' onto the
        # reference rectangle (rotated reference for portrait orientation)
        if wr>hr:
            Mp=cv2.getPerspectiveTransform(np.float32(box),refCard)
        else:
            Mp=cv2.getPerspectiveTransform(np.float32(box),refCardRot)
        # Determine the warped image by applying the transformation to the image
        imgwarp=cv2.warpPerspective(img,Mp,(cardW,cardH))
        # Add alpha layer
        imgwarp=cv2.cvtColor(imgwarp,cv2.COLOR_BGR2BGRA)
        # Shape of 'cnt' is (n,1,2), type=int with n = number of points;
        # reshape into (1,n,2), type=float32, before perspectiveTransform
        cnta=cnt.reshape(1,-1,2).astype(np.float32)
        # Apply the transformation 'Mp' to the contour
        cntwarp=cv2.perspectiveTransform(cnta,Mp)
        # np.int was removed in NumPy >= 1.24: cast to int32 for drawContours
        cntwarp=cntwarp.astype(np.int32)
        # Build the alpha channel so the external border of the card is
        # transparent: start fully transparent...
        alphachannel=np.zeros(imgwarp.shape[:2],dtype=np.uint8)
        # ...then fill in the contour to make this zone of the card opaque
        cv2.drawContours(alphachannel,cntwarp,0,255,-1)
        # Apply the alphamask onto the alpha channel to clean it
        alphachannel=cv2.bitwise_and(alphachannel,alphamask)
        # Add the alphachannel to the warped image
        imgwarp[:,:,3]=alphachannel
        # Save the image to file
        if output_fn is not None:
            cv2.imwrite(output_fn,imgwarp)
    if debug:
        cv2.imshow("Gray",gray)
        cv2.imshow("Canny",edge)
        edge_bgr=cv2.cvtColor(edge,cv2.COLOR_GRAY2BGR)
        cv2.drawContours(edge_bgr,[box],0,(0,0,255),3)
        cv2.drawContours(edge_bgr,[cnt],0,(0,255,0),-1)
        cv2.imshow("Contour with biggest area",edge_bgr)
        if valid:
            cv2.imshow("Alphachannel",alphachannel)
            cv2.imshow("Extracted card",imgwarp)
    return valid, imgwarp
def findHull(img, corner=refCornerHL, debug="no"):
    """
    Find in the zone 'corner' of image 'img', and return, the convex hull
    delimiting the value and suit symbols.
    'corner' (shape (4,2)) is an array of 4 points delimiting a rectangular
    zone; takes one of the 2 possible values: refCornerHL or refCornerLR.
    debug: "no" for silent operation; any other value shows the intermediate
    images ("pause_always" additionally waits for a key on every image, and
    pressing Esc aborts by returning None).
    Returns the hull in image coordinates, or None on failure.
    """
    kernel = np.ones((3,3),np.uint8)
    # np.int was removed in NumPy >= 1.24: use the builtin int instead
    corner=corner.astype(int)
    # We will focus on the zone of 'img' delimited by 'corner'
    x1=int(corner[0][0])
    y1=int(corner[0][1])
    x2=int(corner[2][0])
    y2=int(corner[2][1])
    w=x2-x1
    h=y2-y1
    zone=img[y1:y2,x1:x2].copy()
    strange_cnt=np.zeros_like(zone)
    gray=cv2.cvtColor(zone,cv2.COLOR_BGR2GRAY)
    thld=cv2.Canny(gray,30,200)
    thld = cv2.dilate(thld,kernel,iterations=1)
    if debug!="no": cv2.imshow("thld",thld)
    # Find the contours
    contours,_=cv2.findContours(thld.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
    min_area=30 # We will reject contours with small area. TWEAK, 'zoom' dependant
    min_solidity=.3 # Reject contours with a low solidity. TWEAK
    concat_contour=None # We will aggregate in 'concat_contour' the contours that we want to keep
    ok=True
    for c in contours:
        area=cv2.contourArea(c)
        hull = cv2.convexHull(c)
        hull_area = cv2.contourArea(hull)
        M=cv2.moments(c)
        # Skip degenerate contours (zero hull area or zero moment) which
        # would otherwise raise ZeroDivisionError below
        if hull_area==0 or M['m00']==0:
            continue
        solidity = float(area)/hull_area
        # Determine the center of gravity (cx,cy) of the contour
        cx=int(M['m10']/M['m00'])
        cy=int(M['m01']/M['m00'])
        # abs(w/2-cx)<w*0.3 and abs(h/2-cy)<h*0.4 : TWEAK, the idea here is to
        # keep only the contours which are close to the center of the zone
        if area >= min_area and abs(w/2-cx)<w*0.3 and abs(h/2-cy)<h*0.4 and solidity>min_solidity:
            if debug != "no" :
                cv2.drawContours(zone,[c],0,(255,0,0),-1)
            if concat_contour is None:
                concat_contour=c
            else:
                concat_contour=np.concatenate((concat_contour,c))
        if debug != "no" and solidity <= min_solidity :
            print("Solidity",solidity)
            cv2.drawContours(strange_cnt,[c],0,255,2)
            cv2.imshow("Strange contours",strange_cnt)
    if concat_contour is not None:
        # 'concat_contour' should now contain only the contours of the value
        # and suit symbols: determine their common convex hull
        hull=cv2.convexHull(concat_contour)
        hull_area=cv2.contourArea(hull)
        # If the area of the hull is too small or too big, there may be a problem
        min_hull_area=950 # TWEAK, deck and 'zoom' dependant
        max_hull_area=2000 # TWEAK, deck and 'zoom' dependant
        if hull_area < min_hull_area or hull_area > max_hull_area:
            ok=False
            if debug!="no":
                print("Hull area=",hull_area,"too large or too small")
        # So far, the coordinates of the hull are relative to 'zone';
        # we need the coordinates relative to the image -> 'hull_in_img'
        hull_in_img=hull+corner[0]
    else:
        ok=False
    if debug != "no" :
        if concat_contour is not None:
            cv2.drawContours(zone,[hull],0,(0,255,0),1)
            cv2.drawContours(img,[hull_in_img],0,(0,255,0),1)
        cv2.imshow("Zone",zone)
        cv2.imshow("Image",img)
        if ok and debug!="pause_always":
            key=cv2.waitKey(1)
        else:
            key=cv2.waitKey(0)
        if key==27:
            return None
    if ok == False:
        return None
    return hull_in_img
xml_body_1="""<annotation>
<folder>FOLDER</folder>
<filename>{FILENAME}</filename>
<path>{PATH}</path>
<source>
<database>Unknown</database>
</source>
<size>
<width>{WIDTH}</width>
<height>{HEIGHT}</height>
<depth>3</depth>
</size>
"""
xml_object=""" <object>
<name>{CLASS}</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<bndbox>
<xmin>{XMIN}</xmin>
<ymin>{YMIN}</ymin>
<xmax>{XMAX}</xmax>
<ymax>{YMAX}</ymax>
</bndbox>
</object>
"""
xml_body_2="""</annotation>
"""
def create_voc_xml(xml_file, img_file,listbba,display=False):
    """Write the Pascal VOC annotation file 'xml_file' describing the
    bounding boxes 'listbba' of image 'img_file'."""
    header = xml_body_1.format(FILENAME=os.path.basename(img_file),
                               PATH=img_file, WIDTH=imgW, HEIGHT=imgH)
    objects = [xml_object.format(CLASS=bba.classname, XMIN=bba.x1, YMIN=bba.y1,
                                 XMAX=bba.x2, YMAX=bba.y2) for bba in listbba]
    with open(xml_file,"w") as f:
        f.write(header)
        f.writelines(objects)
        f.write(xml_body_2)
    if display:
        print("New xml",xml_file)
# Scenario with 2 cards:
# The original image of a card has the shape (cardH,cardW,4)
# We first paste it in a zero image of shape (imgH,imgW,4) at position decalX, decalY
# so that the original image is centerd in the zero image
decalX=int((imgW-cardW)/2)
decalY=int((imgH-cardH)/2)
# Scenario with 3 cards : decal values are different
# (different pasting offset for the 3-card layout)
decalX3=int(imgW/2)
decalY3=int(imgH/2-cardH)
def kps_to_polygon(kps):
    """Convert imgaug keypoints to a shapely polygon."""
    return Polygon([(kp.x, kp.y) for kp in kps])
def hull_to_kps(hull, decalX=decalX, decalY=decalY):
    """Convert a cv2 convex hull (shape Nx1x2) into imgaug KeypointsOnImage,
    shifted by (decalX, decalY)."""
    points = hull.reshape(-1, 2)
    keypoints = [ia.Keypoint(x=px + decalX, y=py + decalY) for (px, py) in points]
    return ia.KeypointsOnImage(keypoints, shape=(imgH, imgW, 3))
def kps_to_BB(kps):
    """Return the imgaug bounding box enclosing 'kps', slightly enlarged and
    clipped to the image, or None when the box is degenerate."""
    pad = 3  # make the bounding box a little bit bigger
    xs = [kp.x for kp in kps.keypoints]
    ys = [kp.y for kp in kps.keypoints]
    left = max(0, int(min(xs) - pad))
    right = min(imgW, int(max(xs) + pad))
    top = max(0, int(min(ys) - pad))
    bottom = min(imgH, int(max(ys) + pad))
    if left == right or top == bottom:
        return None
    return ia.BoundingBox(x1=left, y1=top, x2=right, y2=bottom)
# imgaug keypoints of the bounding box of a whole card
cardKP = ia.KeypointsOnImage([
    ia.Keypoint(x=decalX,y=decalY),
    ia.Keypoint(x=decalX+cardW,y=decalY),
    ia.Keypoint(x=decalX+cardW,y=decalY+cardH),
    ia.Keypoint(x=decalX,y=decalY+cardH)
    ], shape=(imgH,imgW,3))
# imgaug transformation for one card in scenario with 2 cards
transform_1card = iaa.Sequential([
    iaa.Affine(scale=[0.65,1]),
    iaa.Affine(rotate=(-180,180)),
    iaa.Affine(translate_percent={"x":(-0.25,0.25),"y":(-0.25,0.25)}),
])
# For the 3 cards scenario, we use 3 imgaug transforms, the first 2 are for individual cards,
# and the third one for the group of 3 cards
trans_rot1 = iaa.Sequential([
    iaa.Affine(translate_px={"x": (10, 20)}),
    iaa.Affine(rotate=(22,30))
])
trans_rot2 = iaa.Sequential([
    iaa.Affine(translate_px={"x": (0, 5)}),
    iaa.Affine(rotate=(10,15))
])
transform_3cards = iaa.Sequential([
    # NOTE(review): decalX-decalX and decalY-decalY are always 0, so this
    # Affine is a no-op — looks like leftover code (decalX3/decalY3 intended?);
    # confirm against the full source.
    iaa.Affine(translate_px={"x":decalX-decalX,"y":decalY-decalY}),
    iaa.Affine(scale=[0.65,1]),
    iaa.Affine(rotate=(-180,180)),
    iaa.Affine(translate_percent={"x":(-0.2,0.2),"y":(-0.2,0.2)})
])
# imgaug transformation for the background
scaleBg=iaa.Resize({"height": imgH, "width": imgW})
def augment(img, list_kps, seq, restart=True):
    """
    Apply augmentation 'seq' to image 'img' and keypoints 'list_kps'
    If restart is False, the augmentation has been made deterministic outside the function (used for 3 cards scenario)

    list_kps is expected to be [whole-card keypoints, hull keypoints 1,
    hull keypoints 2]: the returned bounding boxes are computed from
    elements 1 and 2 only.
    Returns (img_aug, list_kps_aug, list_bbs); img_aug is None when
    restart is False and a bounding box falls outside the image.
    """
    # Make sequence deterministic
    while True:
        if restart:
            myseq=seq.to_deterministic()
        else:
            myseq=seq
        # Augment image, keypoints and bbs
        img_aug = myseq.augment_images([img])[0]
        list_kps_aug = [myseq.augment_keypoints([kp])[0] for kp in list_kps]
        list_bbs = [kps_to_BB(list_kps_aug[1]),kps_to_BB(list_kps_aug[2])]
        valid=True
        # Check the card bounding box stays inside the image
        for bb in list_bbs:
            if bb is None or int(round(bb.x2)) >= imgW or int(round(bb.y2)) >= imgH or int(bb.x1)<=0 or int(bb.y1)<=0:
                valid=False
                break
        if valid: break
        elif not restart:
            # The caller controls the (already deterministic) transform:
            # give up instead of retrying the exact same transform forever.
            img_aug=None
            break
    return img_aug,list_kps_aug,list_bbs
class BBA: # Bounding box + annotations
    """Axis-aligned bounding box (integer pixels) plus its class name."""
    def __init__(self, bb, classname):
        # Copy the four coordinates from the imgaug box, rounded to int pixels.
        for attr in ("x1", "y1", "x2", "y2"):
            setattr(self, attr, int(round(getattr(bb, attr))))
        self.classname = classname
class Scene:
    """
    A synthetic training image: a background plus 2 or 3 transformed cards,
    together with the bounding boxes (self.listbba) of the visible corner symbols.
    """
    # NOTE(review): __init__ dispatches to create3CardsScene for the 3-card
    # case, but that method is not defined in this file as captured here —
    # confirm against the complete source.
    def __init__(self,bg,img1, class1, hulla1,hullb1,img2, class2,hulla2,hullb2,img3=None, class3=None,hulla3=None,hullb3=None):
        if img3 is not None:
            self.create3CardsScene(bg,img1, class1, hulla1,hullb1,img2, class2,hulla2,hullb2,img3, class3,hulla3,hullb3)
        else:
            self.create2CardsScene(bg,img1, class1, hulla1,hullb1,img2, class2,hulla2,hullb2)
    def create2CardsScene(self,bg,img1, class1, hulla1,hullb1,img2, class2,hulla2,hullb2):
        """Compose a 2-card scene: card 2 must either leave a corner of card 1
        (almost) fully visible or cover it (almost) completely."""
        kpsa1=hull_to_kps(hulla1)
        kpsb1=hull_to_kps(hullb1)
        kpsa2=hull_to_kps(hulla2)
        kpsb2=hull_to_kps(hullb2)
        # Randomly transform 1st card
        self.img1=np.zeros((imgH,imgW,4),dtype=np.uint8)
        self.img1[decalY:decalY+cardH,decalX:decalX+cardW,:]=img1
        self.img1,self.lkps1,self.bbs1=augment(self.img1,[cardKP,kpsa1,kpsb1],transform_1card)
        # Randomly transform 2nd card. We want that card 2 does not partially
        # cover a corner of card 1. If so, we apply a new random transform to card 2
        while True:
            self.listbba=[]
            self.img2=np.zeros((imgH,imgW,4),dtype=np.uint8)
            self.img2[decalY:decalY+cardH,decalX:decalX+cardW,:]=img2
            self.img2,self.lkps2,self.bbs2=augment(self.img2,[cardKP,kpsa2,kpsb2],transform_1card)
            # mainPoly2: shapely polygon of card 2
            mainPoly2=kps_to_polygon(self.lkps2[0].keypoints[0:4])
            invalid=False
            intersect_ratio=0.1
            for i in range(1,3):
                # smallPoly1: shapely polygon of one of the hulls of card 1
                smallPoly1=kps_to_polygon(self.lkps1[i].keypoints[:])
                a=smallPoly1.area
                # Area of the intersection of a card 1 corner with card 2
                intersect=mainPoly2.intersection(smallPoly1)
                ai=intersect.area
                # If the intersection area is small enough, the corner of card 1
                # is still visible: keep its bounding box
                if (a-ai)/a > 1-intersect_ratio:
                    self.listbba.append(BBA(self.bbs1[i-1],class1))
                # If the corner is only partially covered, retry card 2
                elif (a-ai)/a>intersect_ratio:
                    invalid=True
                    break
            if not invalid: break
        self.class1=class1
        self.class2=class2
        for bb in self.bbs2:
            self.listbba.append(BBA(bb,class2))
        # Construct final image of the scene by superimposing: bg, img1 and img2
        self.bg=scaleBg.augment_image(bg)
        mask1=self.img1[:,:,3]
        self.mask1=np.stack([mask1]*3,-1)
        self.final=np.where(self.mask1,self.img1[:,:,0:3],self.bg)
        mask2=self.img2[:,:,3]
        self.mask2=np.stack([mask2]*3,-1)
        self.final=np.where(self.mask2,self.img2[:,:,0:3],self.final)
    def display(self):
        """Show the composed scene with its bounding boxes (matplotlib)."""
        fig,ax=plt.subplots(1,figsize=(8,8))
        ax.imshow(self.final)
        for bb in self.listbba:
            rect=patches.Rectangle((bb.x1,bb.y1),bb.x2-bb.x1,bb.y2-bb.y1,linewidth=1,edgecolor='b',facecolor='none')
            ax.add_patch(rect)
    def res(self):
        """Return the final composed image."""
        return self.final
    def write_files(self,save_dir,display=False):
        """Save the scene as a jpg plus its Pascal VOC xml annotation in 'save_dir'."""
        jpg_fn, xml_fn=give_me_filename(save_dir, ["jpg","xml"])
        plt.imsave(jpg_fn,self.final)
        if display: print("New image saved in",jpg_fn)
        # Original final line was corrupted by dataset-extraction junk
        # (' | code/deck.py | ...') fused after this statement; restored:
        create_voc_xml(xml_fn,jpg_fn, self.listbba,display=display)
import cv2
import os
from tqdm import tqdm
import random
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import matplotlib.patches as patches
import pickle
from glob import glob
import imgaug as ia
from imgaug import augmenters as iaa
from shapely.geometry import Polygon
# Card geometry in mm (63mm x 87mm before the 'zoom' conversion below)
cardW=63
cardH=87
# Zone (in mm, relative to the top-left of the card) containing the
# value/suit symbols of the upper-left corner
cornerXmin=1
cornerXmax=8.95
cornerYmin=3
cornerYmax=23
# We convert the measures from mm to pixels: multiply by an arbitrary factor 'zoom'
# You shouldn't need to change this
zoom=4
cardW*=zoom
cardH*=zoom
cornerXmin=int(cornerXmin*zoom)
cornerXmax=int(cornerXmax*zoom)
cornerYmin=int(cornerYmin*zoom)
cornerYmax=int(cornerYmax*zoom)
# Pickle files produced by the extraction step
data_dir='../data/card_data'
cards_pck_fn=data_dir+"/cards.pkl"
backgrounds_pck_fn=data_dir+"/backgrounds.pkl"
# Size of the generated scene images
imgW=416
imgH=416
# Reference quadrilaterals (4 points each, float32 for cv2) used as targets
# of the perspective warp; refCardRot is the same rectangle rotated 90 deg
refCard=np.array([[0,0],[cardW,0],[cardW,cardH],[0,cardH]],dtype=np.float32)
refCardRot=np.array([[cardW,0],[cardW,cardH],[0,cardH],[0,0]],dtype=np.float32)
# Corner zones: HL = upper-left, LR = lower-right (point-symmetric to HL)
refCornerHL=np.array([[cornerXmin,cornerYmin],[cornerXmax,cornerYmin],[cornerXmax,cornerYmax],[cornerXmin,cornerYmax]],dtype=np.float32)
refCornerLR=np.array([[cardW-cornerXmax,cardH-cornerYmax],[cardW-cornerXmin,cardH-cornerYmax],[cardW-cornerXmin,cardH-cornerYmin],[cardW-cornerXmax,cardH-cornerYmin]],dtype=np.float32)
refCorners=np.array([refCornerHL,refCornerLR])
class Cards():
def __init__(self,cards_pck_fn=cards_pck_fn):
self._cards=pickle.load(open(cards_pck_fn,'rb'))
# self._cards is a dictionary where keys are card names (ex:'Kc') and values are lists of (img,hullHL,hullLR)
self._nb_cards_by_value={k:len(self._cards[k]) for k in self._cards}
print("cards loaded per suit/rank:", self._nb_cards_by_value) # >>>
def get_random(self, card_name=None, display=False):
if card_name is None:
card_name= random.choice(list(self._cards.keys()))
card,hull1,hull2=self._cards[card_name][random.randint(0,self._nb_cards_by_value[card_name]-1)]
if display:
if display: display_img(card,[hull1,hull2],"rgb")
return card,card_name,hull1,hull2
class Backgrounds():
def __init__(self,backgrounds_pck_fn=backgrounds_pck_fn):
self._images=pickle.load(open(backgrounds_pck_fn,'rb'))
self._nb_images=len(self._images)
print("images loaded:", self._nb_images)
def get_random(self, display=False):
bg=self._images[random.randint(0,self._nb_images-1)]
if display: plt.imshow(bg)
return bg
def display_img(img,polygons=None,channels="bgr",size=9):
    """
    Function to display an inline image, and draw optional polygons (bounding boxes, convex hulls) on it.
    Use the param 'channels' to specify the order of the channels ("bgr" for an image coming from OpenCV world)
    """
    # Avoid the mutable default argument []; accept a single polygon or a list.
    if polygons is None:
        polygons=[]
    elif not isinstance(polygons,list):
        polygons=[polygons]
    if channels=="bgr": # bgr (cv2 image): convert to RGB(A) for matplotlib
        nb_channels=img.shape[2]
        if nb_channels==4:
            img=cv2.cvtColor(img,cv2.COLOR_BGRA2RGBA)
        else:
            img=cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
    fig,ax=plt.subplots(figsize=(size,size))
    ax.set_facecolor((0,0,0))
    ax.imshow(img)
    for polygon in polygons:
        # A polygon has either shape (n,2), or (n,1,2) if it is a cv2 contour
        # (like a convex hull). In the latter case, reshape to (n,2).
        if len(polygon.shape)==3:
            polygon=polygon.reshape(-1,2)
        patch=patches.Polygon(polygon,linewidth=1,edgecolor='g',facecolor='none')
        ax.add_patch(patch)
def give_me_filename(dirname, suffixes, prefix=""):
    """
    Return a filename (or a list of filenames) in directory 'dirname' that
    does not exist yet. If 'suffixes' is a list, one filename per suffix:
        filename = dirname + "/" + prefix + random number + "." + suffix
    The same random number is shared by all the file names.
    Ex:
        > give_me_filename("dir","jpg", prefix="prefix")
        'dir/prefix408290659.jpg'
        > give_me_filename("dir",["jpg","xml"])
        ['dir/877739594.jpg', 'dir/877739594.xml']
    """
    if not isinstance(suffixes, list):
        suffixes = [suffixes]
    # Normalize every suffix so it starts with a dot
    suffixes = [s if s[0] == '.' else '.' + s for s in suffixes]
    # Draw random 9-digit stems until none of the candidate files exists
    while True:
        stem = "%09d" % random.randint(0, 999999999)
        candidates = [os.path.join(dirname, prefix + stem + s) for s in suffixes]
        if not any(os.path.isfile(c) for c in candidates):
            break
    return candidates[0] if len(candidates) == 1 else candidates
def varianceOfLaplacian(img):
    """
    Compute the Laplacian of the image and then return the focus
    measure, which is simply the variance of the Laplacian
    Source: A.Rosebrock, https://www.pyimagesearch.com/2015/09/07/blur-detection-with-opencv/
    """
    # A low variance means few sharp edges, i.e. a blurry image.
    return cv2.Laplacian(img, cv2.CV_64F).var()
def extract_card (img, alphamask, output_fn=None, min_focus=120, debug=False):
    """
    Extract the card photographed in 'img' and warp it into the reference
    (cardW,cardH) rectangle, adding an alpha channel shaped by 'alphamask'.
    Returns (valid, imgwarp): valid is False (and imgwarp None) when the
    image is too blurry or the largest contour is not rectangular enough.
    If 'output_fn' is given, the warped card is also saved to that file.
    """
    imgwarp=None
    # Check the image is not too blurry
    focus=varianceOfLaplacian(img)
    if focus < min_focus:
        if debug: print("Focus too low :", focus)
        return False,None
    # Convert in gray color
    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    # Noise-reducing and edge-preserving filter
    gray=cv2.bilateralFilter(gray,11,17,17)
    # Edge extraction
    edge=cv2.Canny(gray,30,200)
    # Find the contours in the edged image
    cnts, _ = cv2.findContours(edge.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # Guard: no contour at all -> nothing to extract (original crashed here)
    if not cnts:
        if debug: print("No contour found")
        return False,None
    # We suppose that the contour with largest area delimits the card
    cnt = max(cnts, key=cv2.contourArea)
    # We want to check that 'cnt' is the contour of a rectangular shape:
    # determine 'box', the minimum-area bounding rectangle of 'cnt', then
    # compare the areas of 'cnt' and 'box' — both should be very close.
    rect=cv2.minAreaRect(cnt)
    box=cv2.boxPoints(rect)
    # np.int0 was removed in NumPy 2.0: cast explicitly to int32 (what cv2 expects)
    box=box.astype(np.int32)
    areaCnt=cv2.contourArea(cnt)
    areaBox=cv2.contourArea(box)
    valid=areaCnt/areaBox>0.95
    if valid:
        # Transform the zone inside the contour into the reference rectangle
        # of dimensions (cardW,cardH)
        ((xr,yr),(wr,hr),thetar)=rect
        # Determine 'Mp', the transformation that maps 'box' onto the
        # reference rectangle (rotated reference for portrait orientation)
        if wr>hr:
            Mp=cv2.getPerspectiveTransform(np.float32(box),refCard)
        else:
            Mp=cv2.getPerspectiveTransform(np.float32(box),refCardRot)
        # Determine the warped image by applying the transformation to the image
        imgwarp=cv2.warpPerspective(img,Mp,(cardW,cardH))
        # Add alpha layer
        imgwarp=cv2.cvtColor(imgwarp,cv2.COLOR_BGR2BGRA)
        # Shape of 'cnt' is (n,1,2), type=int with n = number of points;
        # reshape into (1,n,2), type=float32, before perspectiveTransform
        cnta=cnt.reshape(1,-1,2).astype(np.float32)
        # Apply the transformation 'Mp' to the contour
        cntwarp=cv2.perspectiveTransform(cnta,Mp)
        # np.int was removed in NumPy >= 1.24: cast to int32 for drawContours
        cntwarp=cntwarp.astype(np.int32)
        # Build the alpha channel so the external border of the card is
        # transparent: start fully transparent...
        alphachannel=np.zeros(imgwarp.shape[:2],dtype=np.uint8)
        # ...then fill in the contour to make this zone of the card opaque
        cv2.drawContours(alphachannel,cntwarp,0,255,-1)
        # Apply the alphamask onto the alpha channel to clean it
        alphachannel=cv2.bitwise_and(alphachannel,alphamask)
        # Add the alphachannel to the warped image
        imgwarp[:,:,3]=alphachannel
        # Save the image to file
        if output_fn is not None:
            cv2.imwrite(output_fn,imgwarp)
    if debug:
        cv2.imshow("Gray",gray)
        cv2.imshow("Canny",edge)
        edge_bgr=cv2.cvtColor(edge,cv2.COLOR_GRAY2BGR)
        cv2.drawContours(edge_bgr,[box],0,(0,0,255),3)
        cv2.drawContours(edge_bgr,[cnt],0,(0,255,0),-1)
        cv2.imshow("Contour with biggest area",edge_bgr)
        if valid:
            cv2.imshow("Alphachannel",alphachannel)
            cv2.imshow("Extracted card",imgwarp)
    return valid, imgwarp
def findHull(img, corner=refCornerHL, debug="no"):
    """
    Find in the zone 'corner' of image 'img', and return, the convex hull
    delimiting the value and suit symbols.
    'corner' (shape (4,2)) is an array of 4 points delimiting a rectangular
    zone; takes one of the 2 possible values: refCornerHL or refCornerLR.
    debug: "no" for silent operation; any other value shows the intermediate
    images ("pause_always" additionally waits for a key on every image, and
    pressing Esc aborts by returning None).
    Returns the hull in image coordinates, or None on failure.
    """
    kernel = np.ones((3,3),np.uint8)
    # np.int was removed in NumPy >= 1.24: use the builtin int instead
    corner=corner.astype(int)
    # We will focus on the zone of 'img' delimited by 'corner'
    x1=int(corner[0][0])
    y1=int(corner[0][1])
    x2=int(corner[2][0])
    y2=int(corner[2][1])
    w=x2-x1
    h=y2-y1
    zone=img[y1:y2,x1:x2].copy()
    strange_cnt=np.zeros_like(zone)
    gray=cv2.cvtColor(zone,cv2.COLOR_BGR2GRAY)
    thld=cv2.Canny(gray,30,200)
    thld = cv2.dilate(thld,kernel,iterations=1)
    if debug!="no": cv2.imshow("thld",thld)
    # Find the contours
    contours,_=cv2.findContours(thld.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
    min_area=30 # We will reject contours with small area. TWEAK, 'zoom' dependant
    min_solidity=.3 # Reject contours with a low solidity. TWEAK
    concat_contour=None # We will aggregate in 'concat_contour' the contours that we want to keep
    ok=True
    for c in contours:
        area=cv2.contourArea(c)
        hull = cv2.convexHull(c)
        hull_area = cv2.contourArea(hull)
        M=cv2.moments(c)
        # Skip degenerate contours (zero hull area or zero moment) which
        # would otherwise raise ZeroDivisionError below
        if hull_area==0 or M['m00']==0:
            continue
        solidity = float(area)/hull_area
        # Determine the center of gravity (cx,cy) of the contour
        cx=int(M['m10']/M['m00'])
        cy=int(M['m01']/M['m00'])
        # abs(w/2-cx)<w*0.3 and abs(h/2-cy)<h*0.4 : TWEAK, the idea here is to
        # keep only the contours which are close to the center of the zone
        if area >= min_area and abs(w/2-cx)<w*0.3 and abs(h/2-cy)<h*0.4 and solidity>min_solidity:
            if debug != "no" :
                cv2.drawContours(zone,[c],0,(255,0,0),-1)
            if concat_contour is None:
                concat_contour=c
            else:
                concat_contour=np.concatenate((concat_contour,c))
        if debug != "no" and solidity <= min_solidity :
            print("Solidity",solidity)
            cv2.drawContours(strange_cnt,[c],0,255,2)
            cv2.imshow("Strange contours",strange_cnt)
    if concat_contour is not None:
        # 'concat_contour' should now contain only the contours of the value
        # and suit symbols: determine their common convex hull
        hull=cv2.convexHull(concat_contour)
        hull_area=cv2.contourArea(hull)
        # If the area of the hull is too small or too big, there may be a problem
        min_hull_area=950 # TWEAK, deck and 'zoom' dependant
        max_hull_area=2000 # TWEAK, deck and 'zoom' dependant
        if hull_area < min_hull_area or hull_area > max_hull_area:
            ok=False
            if debug!="no":
                print("Hull area=",hull_area,"too large or too small")
        # So far, the coordinates of the hull are relative to 'zone';
        # we need the coordinates relative to the image -> 'hull_in_img'
        hull_in_img=hull+corner[0]
    else:
        ok=False
    if debug != "no" :
        if concat_contour is not None:
            cv2.drawContours(zone,[hull],0,(0,255,0),1)
            cv2.drawContours(img,[hull_in_img],0,(0,255,0),1)
        cv2.imshow("Zone",zone)
        cv2.imshow("Image",img)
        if ok and debug!="pause_always":
            key=cv2.waitKey(1)
        else:
            key=cv2.waitKey(0)
        if key==27:
            return None
    if ok == False:
        return None
    return hull_in_img
xml_body_1="""<annotation>
<folder>FOLDER</folder>
<filename>{FILENAME}</filename>
<path>{PATH}</path>
<source>
<database>Unknown</database>
</source>
<size>
<width>{WIDTH}</width>
<height>{HEIGHT}</height>
<depth>3</depth>
</size>
"""
xml_object=""" <object>
<name>{CLASS}</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<bndbox>
<xmin>{XMIN}</xmin>
<ymin>{YMIN}</ymin>
<xmax>{XMAX}</xmax>
<ymax>{YMAX}</ymax>
</bndbox>
</object>
"""
xml_body_2="""</annotation>
"""
def create_voc_xml(xml_file, img_file,listbba,display=False):
    """Write the Pascal VOC annotation file 'xml_file' describing the
    bounding boxes 'listbba' of image 'img_file'."""
    header = xml_body_1.format(FILENAME=os.path.basename(img_file),
                               PATH=img_file, WIDTH=imgW, HEIGHT=imgH)
    objects = [xml_object.format(CLASS=bba.classname, XMIN=bba.x1, YMIN=bba.y1,
                                 XMAX=bba.x2, YMAX=bba.y2) for bba in listbba]
    with open(xml_file,"w") as f:
        f.write(header)
        f.writelines(objects)
        f.write(xml_body_2)
    if display:
        print("New xml",xml_file)
# Scenario with 2 cards:
# The original image of a card has the shape (cardH,cardW,4)
# We first paste it in a zero image of shape (imgH,imgW,4) at position decalX, decalY
# so that the original image is centerd in the zero image
decalX=int((imgW-cardW)/2)
decalY=int((imgH-cardH)/2)
# Scenario with 3 cards : decal values are different
# (different pasting offset for the 3-card layout)
decalX3=int(imgW/2)
decalY3=int(imgH/2-cardH)
def kps_to_polygon(kps):
    """
    Convert imgaug keypoints to a shapely Polygon (one vertex per keypoint,
    in the order given).
    """
    return Polygon([(kp.x, kp.y) for kp in kps])
def hull_to_kps(hull, decalX=decalX, decalY=decalY):
    """
    Convert a convex hull into imgaug keypoints, shifted by (decalX, decalY)
    so that the hull lands where the card was pasted in the scene image.
    """
    # hull is a cv2 contour of shape Nx1x2; flatten to N rows of (x, y)
    points = hull.reshape(-1, 2)
    keypoints = [ia.Keypoint(x=px + decalX, y=py + decalY) for px, py in points]
    return ia.KeypointsOnImage(keypoints, shape=(imgH, imgW, 3))
def kps_to_BB(kps):
    """
    Build the imgaug bounding box enclosing the given imgaug keypoints,
    padded slightly and clipped to the image; returns None if the box
    collapses to zero width or height.
    """
    extend = 3  # grow the box a few pixels on every side
    xs = [kp.x for kp in kps.keypoints]
    ys = [kp.y for kp in kps.keypoints]
    x1 = max(0, int(min(xs) - extend))
    x2 = min(imgW, int(max(xs) + extend))
    y1 = max(0, int(min(ys) - extend))
    y2 = min(imgH, int(max(ys) + extend))
    # Degenerate boxes are useless as training targets
    if x1 == x2 or y1 == y2:
        return None
    return ia.BoundingBox(x1=x1, y1=y1, x2=x2, y2=y2)
# imgaug keypoints of the bounding box of a whole card
# (the four corners of the card pasted at (decalX, decalY))
cardKP = ia.KeypointsOnImage([
    ia.Keypoint(x=decalX,y=decalY),
    ia.Keypoint(x=decalX+cardW,y=decalY),
    ia.Keypoint(x=decalX+cardW,y=decalY+cardH),
    ia.Keypoint(x=decalX,y=decalY+cardH)
    ], shape=(imgH,imgW,3))
# imgaug transformation for one card in scenario with 2 cards:
# random scale, rotation and translation applied in sequence
transform_1card = iaa.Sequential([
    iaa.Affine(scale=[0.65,1]),
    iaa.Affine(rotate=(-180,180)),
    iaa.Affine(translate_percent={"x":(-0.25,0.25),"y":(-0.25,0.25)}),
])
# For the 3 cards scenario, we use 3 imgaug transforms, the first 2 are for individual cards,
# and the third one for the group of 3 cards
trans_rot1 = iaa.Sequential([
    iaa.Affine(translate_px={"x": (10, 20)}),
    iaa.Affine(rotate=(22,30))
])
trans_rot2 = iaa.Sequential([
    iaa.Affine(translate_px={"x": (0, 5)}),
    iaa.Affine(rotate=(10,15))
])
transform_3cards = iaa.Sequential([
    # NOTE(review): decalX-decalX and decalY-decalY are always 0, so this
    # first Affine is a no-op — possibly decalX3-decalX / decalY3-decalY
    # was intended; confirm against the 3-cards scenario.
    iaa.Affine(translate_px={"x":decalX-decalX,"y":decalY-decalY}),
    iaa.Affine(scale=[0.65,1]),
    iaa.Affine(rotate=(-180,180)),
    iaa.Affine(translate_percent={"x":(-0.2,0.2),"y":(-0.2,0.2)})
])
# imgaug transformation for the background: resize to the scene size
scaleBg=iaa.Resize({"height": imgH, "width": imgW})
def augment(img, list_kps, seq, restart=True):
    """
    Apply augmentation 'seq' to image 'img' and keypoints 'list_kps'
    If restart is False, the augmentation has been made deterministic outside the function (used for 3 cards scenario)

    Returns (img_aug, list_kps_aug, list_bbs). When restart is False and the
    augmented card does not fit inside the image, img_aug is returned as None.
    """
    # Make sequence deterministic
    # Retry until the augmented card's hull boxes fit inside the image.
    # NOTE(review): with restart=True there is no retry cap, so a sequence
    # that can never produce an in-bounds card would loop forever.
    while True:
        if restart:
            myseq=seq.to_deterministic()
        else:
            myseq=seq
        # Augment image, keypoints and bbs
        img_aug = myseq.augment_images([img])[0]
        list_kps_aug = [myseq.augment_keypoints([kp])[0] for kp in list_kps]
        # Indices 1 and 2 are the two corner-hull keypoint sets
        # (index 0 is the whole-card keypoints) — assumes len(list_kps) == 3
        list_bbs = [kps_to_BB(list_kps_aug[1]),kps_to_BB(list_kps_aug[2])]
        valid=True
        # Check the card bounding box stays inside the image
        for bb in list_bbs:
            if bb is None or int(round(bb.x2)) >= imgW or int(round(bb.y2)) >= imgH or int(bb.x1)<=0 or int(bb.y1)<=0:
                valid=False
                break
        if valid: break
        elif not restart:
            # Caller controls determinism: signal failure instead of retrying
            img_aug=None
            break
    return img_aug,list_kps_aug,list_bbs
class BBA: # Bounding box + annotations
    """Integer-rounded bounding box plus its class name, ready for VOC XML."""
    def __init__(self, bb, classname):
        # Snap the (possibly float) imgaug box coordinates to integer pixels
        self.x1, self.y1 = int(round(bb.x1)), int(round(bb.y1))
        self.x2, self.y2 = int(round(bb.x2)), int(round(bb.y2))
        self.classname = classname
class Scene:
    """A synthetic training image: a background with 2 or 3 augmented cards
    pasted on it, plus the list of BBA bounding boxes for annotation.

    NOTE(review): create3CardsScene is called below but not defined in this
    chunk — confirm it is defined elsewhere in the file.
    """
    def __init__(self,bg,img1, class1, hulla1,hullb1,img2, class2,hulla2,hullb2,img3=None, class3=None,hulla3=None,hullb3=None):
        # Dispatch on whether a third card was supplied
        if img3 is not None:
            self.create3CardsScene(bg,img1, class1, hulla1,hullb1,img2, class2,hulla2,hullb2,img3, class3,hulla3,hullb3)
        else:
            self.create2CardsScene(bg,img1, class1, hulla1,hullb1,img2, class2,hulla2,hullb2)
    def create2CardsScene(self,bg,img1, class1, hulla1,hullb1,img2, class2,hulla2,hullb2):
        """Compose a scene from 2 cards; hulla*/hullb* are the two corner
        hulls of each card (the regions containing value+suit)."""
        kpsa1=hull_to_kps(hulla1)
        kpsb1=hull_to_kps(hullb1)
        kpsa2=hull_to_kps(hulla2)
        kpsb2=hull_to_kps(hullb2)
        # Randomly transform 1st card
        self.img1=np.zeros((imgH,imgW,4),dtype=np.uint8)
        self.img1[decalY:decalY+cardH,decalX:decalX+cardW,:]=img1
        self.img1,self.lkps1,self.bbs1=augment(self.img1,[cardKP,kpsa1,kpsb1],transform_1card)
        # Randomly transform 2nd card. We want that card 2 does not partially cover a corner of 1 card.
        # If so, we apply a new random transform to card 2
        while True:
            self.listbba=[]
            self.img2=np.zeros((imgH,imgW,4),dtype=np.uint8)
            self.img2[decalY:decalY+cardH,decalX:decalX+cardW,:]=img2
            self.img2,self.lkps2,self.bbs2=augment(self.img2,[cardKP,kpsa2,kpsb2],transform_1card)
            # mainPoly2: shapely polygon of card 2
            mainPoly2=kps_to_polygon(self.lkps2[0].keypoints[0:4])
            invalid=False
            intersect_ratio=0.1
            for i in range(1,3):
                # smallPoly1: shapely polygon of one of the hull of card 1
                smallPoly1=kps_to_polygon(self.lkps1[i].keypoints[:])
                a=smallPoly1.area
                # We calculate area of the intersection of card 1 corner with card 2
                intersect=mainPoly2.intersection(smallPoly1)
                ai=intersect.area
                # If intersection area is small enough, we accept card 2
                # (corner of card 1 is nearly fully visible -> keep its box)
                if (a-ai)/a > 1-intersect_ratio:
                    self.listbba.append(BBA(self.bbs1[i-1],class1))
                # If intersection area is not small, but also not big enough, we want apply new transform to card 2
                # (corner partially covered -> ambiguous annotation -> retry)
                elif (a-ai)/a>intersect_ratio:
                    invalid=True
                    break
            if not invalid: break
        self.class1=class1
        self.class2=class2
        # Card 2 is on top, so both of its corner boxes are always annotated
        for bb in self.bbs2:
            self.listbba.append(BBA(bb,class2))
        # Construct final image of the scene by superimposing: bg, img1 and img2
        self.bg=scaleBg.augment_image(bg)
        # Alpha channel acts as the paste mask for each card
        mask1=self.img1[:,:,3]
        self.mask1=np.stack([mask1]*3,-1)
        self.final=np.where(self.mask1,self.img1[:,:,0:3],self.bg)
        mask2=self.img2[:,:,3]
        self.mask2=np.stack([mask2]*3,-1)
        self.final=np.where(self.mask2,self.img2[:,:,0:3],self.final)
    def display(self):
        """Show the composed scene with its bounding boxes (matplotlib)."""
        fig,ax=plt.subplots(1,figsize=(8,8))
        ax.imshow(self.final)
        for bb in self.listbba:
            rect=patches.Rectangle((bb.x1,bb.y1),bb.x2-bb.x1,bb.y2-bb.y1,linewidth=1,edgecolor='b',facecolor='none')
            ax.add_patch(rect)
    def res(self):
        """Return the final composed image array."""
        return self.final
    def write_files(self,save_dir,display=False):
        """Save the scene as a jpg plus its Pascal VOC xml annotation."""
        jpg_fn, xml_fn=give_me_filename(save_dir, ["jpg","xml"])
        plt.imsave(jpg_fn,self.final)
        if display: print("New image saved in",jpg_fn)
        create_voc_xml(xml_fn,jpg_fn, self.listbba,display=display)
from django.db import models
class Network(models.Model):
    """Seismic monitoring network (top-level grouping of stations)."""
    # Unique short identifier of the network
    code = models.CharField(max_length=50, unique=True, verbose_name="台网代码")
    name = models.CharField(max_length=50, blank=True, verbose_name="台网名称")
    created = models.DateTimeField(auto_now_add=True)
    updated = models.DateTimeField(auto_now=True)
    remark = models.TextField(blank=True, verbose_name="备注")
    class Meta:
        """Meta definition for Network."""
        ordering = ("code",)
        verbose_name = "测震台网信息"
        verbose_name_plural = "测震台网信息"
    def __str__(self):
        """Unicode representation of Network."""
        return self.code
    def get_stations_count(self):
        """Return how many stations belong to this network (reverse FK 'stations')."""
        return self.stations.count()
class Station(models.Model):
    """Model definition for Station (seismic station master data)."""
    # NOTE(review): the next line is a bare string statement, NOT a docstring
    # (only the first statement is); kept as-is to avoid changing code.
    """定义台站信息"""
    # Lifecycle status constants
    SELECTION = "selection"
    ONLINE = "online"
    SUSPEND = "suspend"
    OFFLINE = "offline"
    # (stored value, human-readable label) pairs for the status field
    STATUS_TYPE = (
        (SELECTION, "勘选"),
        (ONLINE, "在线"),
        (SUSPEND, "暂停"),
        (OFFLINE, "下线"),
    )
    # Owning network; deleting a network cascades to its stations
    network = models.ForeignKey(
        "Network", on_delete=models.CASCADE, related_name="stations", verbose_name="台网"
    )
    # Station code; unique only together with the network (see Meta)
    code = models.CharField(max_length=50, verbose_name="台站代码")
    name = models.CharField(max_length=50, blank=True, verbose_name="台站名称")
    latitude = models.FloatField(default=0.0, verbose_name="纬度")
    longitude = models.FloatField(default=0.0, verbose_name="经度")
    altitude = models.FloatField(default=0.0, verbose_name="高程")
    status = models.CharField(
        max_length=50, choices=STATUS_TYPE, default=SELECTION, verbose_name="状态"
    )
    # Lifecycle milestone dates (site selection / establishment / removal)
    selection = models.DateField(blank=True, null=True, verbose_name="勘选时间")
    establish = models.DateField(blank=True, null=True, verbose_name="建台时间")
    removal = models.DateField(blank=True, null=True, verbose_name="撤台时间")
    remark = models.TextField(blank=True, verbose_name="备注")
    created = models.DateTimeField(auto_now_add=True)
    updated = models.DateTimeField(auto_now=True)
    class Meta:
        """Meta definition for Station."""
        unique_together = (("network", "code"),)
        ordering = (
            "network",
            "code",
        )
        verbose_name = "测震台站信息"
        verbose_name_plural = "测震台站信息"
    def __str__(self):
        """Unicode representation of Station."""
        return f"{self.network.code}-{self.code}"
    def get_count(self):
        """Return the TOTAL number of Station records across ALL networks.

        NOTE(review): despite the original doc ("count stations of the
        network"), this queries Station.objects, not self.network.stations —
        use Network.get_stations_count for the per-network count.
        """
        return Station.objects.count()
class StationMoreInfo(models.Model):
    """
    Supplementary free-text information about a station (1:1 with Station).
    """
    station = models.OneToOneField(
        "Station", on_delete=models.CASCADE, related_name="more_info"
    )
    # NOTE(review): field name has a typo ("desciription"); renaming would
    # require a DB migration and caller updates, so it is kept as-is.
    geo_desciription = models.TextField(blank=True, verbose_name="位置描述")
    lithology_description = models.TextField(blank=True, verbose_name="岩性描述")
    other_info = models.TextField(blank=True, verbose_name="其他信息")
class StationStatus(models.Model):
    """
    Audit record of each station status change and when it happened.
    """
    # Status constants, mirroring Station.STATUS_TYPE
    SELECTION = "selection"
    ONLINE = "online"
    SUSPEND = "suspend"
    OFFLINE = "offline"
    # (stored value, human-readable label) pairs for the status field
    STATUS_TYPE = (
        (SELECTION, "勘选"),
        (ONLINE, "在线"),
        (SUSPEND, "暂停"),
        (OFFLINE, "下线"),
    )
    station = models.ForeignKey("Station", on_delete=models.CASCADE)
    # Fix: was `status = status = models.CharField(...)` — a duplicated
    # assignment (harmless at runtime, but clearly a typo).
    status = models.CharField(
        max_length=50, choices=STATUS_TYPE, default=SELECTION, verbose_name="状态"
    )
    # When the status change took effect
    changed_at = models.DateTimeField(verbose_name="状态改变时间")
    remark = models.TextField(blank=True, verbose_name="说明")
    created = models.DateTimeField(auto_now_add=True)
    updated = models.DateTimeField(auto_now=True)
class Network(models.Model):
"""定义台网信息"""
code = models.CharField(max_length=50, unique=True, verbose_name="台网代码")
name = models.CharField(max_length=50, blank=True, verbose_name="台网名称")
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
remark = models.TextField(blank=True, verbose_name="备注")
class Meta:
"""Meta definition for Network."""
ordering = ("code",)
verbose_name = "测震台网信息"
verbose_name_plural = "测震台网信息"
def __str__(self):
"""Unicode representation of Network."""
return self.code
def get_stations_count(self):
"""统计台网拥有的台站数"""
return self.stations.count()
class Station(models.Model):
"""Model definition for Station."""
"""定义台站信息"""
SELECTION = "selection"
ONLINE = "online"
SUSPEND = "suspend"
OFFLINE = "offline"
STATUS_TYPE = (
(SELECTION, "勘选"),
(ONLINE, "在线"),
(SUSPEND, "暂停"),
(OFFLINE, "下线"),
)
network = models.ForeignKey(
"Network", on_delete=models.CASCADE, related_name="stations", verbose_name="台网"
)
code = models.CharField(max_length=50, verbose_name="台站代码")
name = models.CharField(max_length=50, blank=True, verbose_name="台站名称")
latitude = models.FloatField(default=0.0, verbose_name="纬度")
longitude = models.FloatField(default=0.0, verbose_name="经度")
altitude = models.FloatField(default=0.0, verbose_name="高程")
status = models.CharField(
max_length=50, choices=STATUS_TYPE, default=SELECTION, verbose_name="状态"
)
selection = models.DateField(blank=True, null=True, verbose_name="勘选时间")
establish = models.DateField(blank=True, null=True, verbose_name="建台时间")
removal = models.DateField(blank=True, null=True, verbose_name="撤台时间")
remark = models.TextField(blank=True, verbose_name="备注")
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
class Meta:
"""Meta definition for Station."""
unique_together = (("network", "code"),)
ordering = (
"network",
"code",
)
verbose_name = "测震台站信息"
verbose_name_plural = "测震台站信息"
def __str__(self):
"""Unicode representation of Station."""
return f"{self.network.code}-{self.code}"
def get_count(self):
"""统计台网拥有的台站数"""
return Station.objects.count()
class StationMoreInfo(models.Model):
"""
台站其他信息
"""
station = models.OneToOneField(
"Station", on_delete=models.CASCADE, related_name="more_info"
)
geo_desciription = models.TextField(blank=True, verbose_name="位置描述")
lithology_description = models.TextField(blank=True, verbose_name="岩性描述")
other_info = models.TextField(blank=True, verbose_name="其他信息")
class StationStatus(models.Model):
"""
记录台站每次状态变化的时间
"""
SELECTION = "selection"
ONLINE = "online"
SUSPEND = "suspend"
OFFLINE = "offline"
STATUS_TYPE = (
(SELECTION, "勘选"),
(ONLINE, "在线"),
(SUSPEND, "暂停"),
(OFFLINE, "下线"),
)
station = models.ForeignKey("Station", on_delete=models.CASCADE)
status = status = models.CharField(
max_length=50, choices=STATUS_TYPE, default=SELECTION, verbose_name="状态"
)
changed_at = models.DateTimeField(verbose_name="状态改变时间")
remark = models.TextField(blank=True, verbose_name="说明")
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True) | 0.653127 | 0.284697 |
import numpy as np
from htm import HTM
from Jos import Jo
from JRxs import JRx
from JRys import JRy
from JRzs import JRz
from Jo_dots import Jo_dot
from JRx_dots import JRx_dot
from JRy_dots import JRy_dot
from JRz_dots import JRz_dot
class Baxter(HTM, Jo, JRx, JRy, JRz, Jo_dot, JRx_dot, JRy_dot, JRz_dot):
    """Kinematics cache for the Baxter arm.

    The mixin base classes provide the per-frame functions (positions o_*,
    rotation columns r{x,y,z}_*, Jacobians j*_* and their time derivatives
    j*_*_dot). This class evaluates them at the neutral pose on construction
    and refreshes the per-link caches via update().
    """
    # Neutral pose and joint limits, degrees, as (7, 1) column vectors
    q_neutral = np.array([[0.0, -31.0, 0.0, 43.0, 0.0, 72.0, 0.0]]).T
    q_max = np.array([[51.0, 60.0, 173.0, 150.0, 175.0, 120.0, 175.0]]).T
    q_min = np.array([[-141.0, -123.0, -173.0, -3.0, -175.0, -90.0, -175.0]]).T
    def __init__(self,):
        # Zero joint velocity, used for the static *_dot evaluations below
        zero_dq = np.zeros_like(self.q_neutral)
        # Static frames (world->0 and base->right), evaluated once; they do
        # not depend on the joint configuration and are never refreshed.
        self.static_os = [self.o_W0(self.q_neutral), self.o_BR(self.q_neutral)]
        self.static_rxs = [self.rx_W0(self.q_neutral), self.rx_BR(self.q_neutral)]
        self.static_rys = [self.ry_W0(self.q_neutral), self.ry_BR(self.q_neutral)]
        self.static_rzs = [self.rz_W0(self.q_neutral), self.rz_BR(self.q_neutral)]
        self.static_jos = [self.jo_W0(self.q_neutral), self.jo_BR(self.q_neutral)]
        self.static_jrxs = [self.jrx_W0(self.q_neutral), self.jrx_BR(self.q_neutral)]
        self.static_jrys = [self.jry_W0(self.q_neutral), self.jry_BR(self.q_neutral)]
        self.static_jrzs = [self.jrz_W0(self.q_neutral), self.jrz_BR(self.q_neutral)]
        self.static_jo_dots = [self.jo_W0_dot(self.q_neutral, zero_dq), self.jo_BR_dot(self.q_neutral, zero_dq)]
        self.static_jrx_dots = [self.jrx_W0_dot(self.q_neutral, zero_dq), self.jrx_BR_dot(self.q_neutral, zero_dq)]
        self.static_jry_dots = [self.jry_W0_dot(self.q_neutral, zero_dq), self.jry_BR_dot(self.q_neutral, zero_dq)]
        self.static_jrz_dots = [self.jrz_W0_dot(self.q_neutral, zero_dq), self.jrz_BR_dot(self.q_neutral, zero_dq)]
        # Per-link function tables, ordered joint 0 .. 6 then end effector
        self.os_func = [self.o_0, self.o_1, self.o_2, self.o_3, self.o_4, self.o_5, self.o_6, self.o_ee]
        self.rxs_func = [self.rx_0, self.rx_1, self.rx_2, self.rx_3, self.rx_4, self.rx_5, self.rx_6, self.rx_ee]
        self.rys_func = [self.ry_0, self.ry_1, self.ry_2, self.ry_3, self.ry_4, self.ry_5, self.ry_6, self.ry_ee]
        self.rzs_func = [self.rz_0, self.rz_1, self.rz_2, self.rz_3, self.rz_4, self.rz_5, self.rz_6, self.rz_ee]
        self.jos_func = [self.jo_0, self.jo_1, self.jo_2, self.jo_3, self.jo_4, self.jo_5, self.jo_6, self.jo_ee]
        self.jrxs_func = [self.jrx_0, self.jrx_1, self.jrx_2, self.jrx_3, self.jrx_4, self.jrx_5, self.jrx_6, self.jrx_ee]
        self.jrys_func = [self.jry_0, self.jry_1, self.jry_2, self.jry_3, self.jry_4, self.jry_5, self.jry_6, self.jry_ee]
        self.jrzs_func = [self.jrz_0, self.jrz_1, self.jrz_2, self.jrz_3, self.jrz_4, self.jrz_5, self.jrz_6, self.jrz_ee]
        self.jo_dots_func = [self.jo_0_dot, self.jo_1_dot, self.jo_2_dot, self.jo_3_dot, self.jo_4_dot, self.jo_5_dot, self.jo_6_dot, self.jo_ee_dot]
        self.jrx_dots_func = [self.jrx_0_dot, self.jrx_1_dot, self.jrx_2_dot, self.jrx_3_dot, self.jrx_4_dot, self.jrx_5_dot, self.jrx_6_dot, self.jrx_ee_dot]
        self.jry_dots_func = [self.jry_0_dot, self.jry_1_dot, self.jry_2_dot, self.jry_3_dot, self.jry_4_dot, self.jry_5_dot, self.jry_6_dot, self.jry_ee_dot]
        self.jrz_dots_func = [self.jrz_0_dot, self.jrz_1_dot, self.jrz_2_dot, self.jrz_3_dot, self.jrz_4_dot, self.jrz_5_dot, self.jrz_6_dot, self.jrz_ee_dot]
        # Initial cached values, all evaluated at the neutral pose
        self.os = [f(self.q_neutral) for f in self.os_func]
        self.rxs = [f(self.q_neutral) for f in self.rxs_func]
        self.rys = [f(self.q_neutral) for f in self.rys_func]
        self.rzs = [f(self.q_neutral) for f in self.rzs_func]
        self.jos = [f(self.q_neutral) for f in self.jos_func]
        self.jrxs = [f(self.q_neutral) for f in self.jrxs_func]
        self.jrys = [f(self.q_neutral) for f in self.jrys_func]
        self.jrzs = [f(self.q_neutral) for f in self.jrzs_func]
        self.jo_dots = [f(self.q_neutral, zero_dq) for f in self.jo_dots_func]
        self.jrx_dots = [f(self.q_neutral, zero_dq) for f in self.jrx_dots_func]
        self.jry_dots = [f(self.q_neutral, zero_dq) for f in self.jry_dots_func]
        self.jrz_dots = [f(self.q_neutral, zero_dq) for f in self.jrz_dots_func]
    def update(self, q, q_dot):
        """Refresh every cached quantity at joint position q and velocity q_dot.

        Bug fix: the original body repeated the os_func loop four times
        verbatim, never refreshing the other 11 caches and never using q_dot.
        """
        for i, f in enumerate(self.os_func):
            self.os[i] = f(q)
        for i, f in enumerate(self.rxs_func):
            self.rxs[i] = f(q)
        for i, f in enumerate(self.rys_func):
            self.rys[i] = f(q)
        for i, f in enumerate(self.rzs_func):
            self.rzs[i] = f(q)
        for i, f in enumerate(self.jos_func):
            self.jos[i] = f(q)
        for i, f in enumerate(self.jrxs_func):
            self.jrxs[i] = f(q)
        for i, f in enumerate(self.jrys_func):
            self.jrys[i] = f(q)
        for i, f in enumerate(self.jrzs_func):
            self.jrzs[i] = f(q)
        # Time-derivative Jacobians additionally need the joint velocity
        for i, f in enumerate(self.jo_dots_func):
            self.jo_dots[i] = f(q, q_dot)
        for i, f in enumerate(self.jrx_dots_func):
            self.jrx_dots[i] = f(q, q_dot)
        for i, f in enumerate(self.jry_dots_func):
            self.jry_dots[i] = f(q, q_dot)
        for i, f in enumerate(self.jrz_dots_func):
            self.jrz_dots[i] = f(q, q_dot)
if __name__ == "__main__":
    # Smoke test: build the kinematics cache and evaluate the static
    # world->0 transform at the zero joint configuration.
    hoge = Baxter()
    q = np.array([[0, 0, 0, 0, 0, 0, 0]]).T
    hoge.o_W0(q)
from htm import HTM
from Jos import Jo
from JRxs import JRx
from JRys import JRy
from JRzs import JRz
from Jo_dots import Jo_dot
from JRx_dots import JRx_dot
from JRy_dots import JRy_dot
from JRz_dots import JRz_dot
class Baxter(HTM, Jo, JRx, JRy, JRz, Jo_dot, JRx_dot, JRy_dot, JRz_dot):
q_neutral = np.array([[0.0, -31.0, 0.0, 43.0, 0.0, 72.0, 0.0]]).T
q_max = np.array([[51.0, 60.0, 173.0, 150.0, 175.0, 120.0, 175.0]]).T
q_min = np.array([[-141.0, -123.0, -173.0, -3.0, -175.0, -90.0, -175.0]]).T
def __init__(self,):
self.static_os = [self.o_W0(self.q_neutral), self.o_BR(self.q_neutral)]
self.static_rxs = [self.rx_W0(self.q_neutral), self.rx_BR(self.q_neutral)]
self.static_rys = [self.ry_W0(self.q_neutral), self.ry_BR(self.q_neutral)]
self.static_rzs = [self.rz_W0(self.q_neutral), self.rz_BR(self.q_neutral)]
self.static_jos = [self.jo_W0(self.q_neutral), self.jo_BR(self.q_neutral)]
self.static_jrxs = [self.jrx_W0(self.q_neutral), self.jrx_BR(self.q_neutral)]
self.static_jrys = [self.jry_W0(self.q_neutral), self.jry_BR(self.q_neutral)]
self.static_jrzs = [self.jrz_W0(self.q_neutral), self.jrz_BR(self.q_neutral)]
self.static_jo_dots = [self.jo_W0_dot(self.q_neutral, np.zeros_like(self.q_neutral)), self.jo_BR_dot(self.q_neutral, np.zeros_like(self.q_neutral))]
self.static_jrx_dots = [self.jrx_W0_dot(self.q_neutral, np.zeros_like(self.q_neutral)), self.jrx_BR_dot(self.q_neutral, np.zeros_like(self.q_neutral))]
self.static_jry_dots = [self.jry_W0_dot(self.q_neutral, np.zeros_like(self.q_neutral)), self.jry_BR_dot(self.q_neutral, np.zeros_like(self.q_neutral))]
self.static_jrz_dots = [self.jrz_W0_dot(self.q_neutral, np.zeros_like(self.q_neutral)), self.jrz_BR_dot(self.q_neutral, np.zeros_like(self.q_neutral))]
self.os_func = [self.o_0, self.o_1, self.o_2, self.o_3, self.o_4, self.o_5, self.o_6, self.o_ee]
self.rxs_func = [self.rx_0, self.rx_1, self.rx_2, self.rx_3, self.rx_4, self.rx_5, self.rx_6, self.rx_ee]
self.rys_func = [self.ry_0, self.ry_1, self.ry_2, self.ry_3, self.ry_4, self.ry_5, self.ry_6, self.ry_ee]
self.rzs_func = [self.rz_0, self.rz_1, self.rz_2, self.rz_3, self.rz_4, self.rz_5, self.rz_6, self.rz_ee]
self.jos_func = [self.jo_0, self.jo_1, self.jo_2, self.jo_3, self.jo_4, self.jo_5, self.jo_6, self.jo_ee]
self.jrxs_func = [self.jrx_0, self.jrx_1, self.jrx_2, self.jrx_3, self.jrx_4, self.jrx_5, self.jrx_6, self.jrx_ee]
self.jrys_func = [self.jry_0, self.jry_1, self.jry_2, self.jry_3, self.jry_4, self.jry_5, self.jry_6, self.jry_ee]
self.jrzs_func = [self.jrz_0, self.jrz_1, self.jrz_2, self.jrz_3, self.jrz_4, self.jrz_5, self.jrz_6, self.jrz_ee]
self.jo_dots_func = [self.jo_0_dot, self.jo_1_dot, self.jo_2_dot, self.jo_3_dot, self.jo_4_dot, self.jo_5_dot, self.jo_6_dot, self.jo_ee_dot]
self.jrx_dots_func = [self.jrx_0_dot, self.jrx_1_dot, self.jrx_2_dot, self.jrx_3_dot, self.jrx_4_dot, self.jrx_5_dot, self.jrx_6_dot, self.jrx_ee_dot]
self.jry_dots_func = [self.jry_0_dot, self.jry_1_dot, self.jry_2_dot, self.jry_3_dot, self.jry_4_dot, self.jry_5_dot, self.jry_6_dot, self.jry_ee_dot]
self.jrz_dots_func = [self.jrz_0_dot, self.jrz_1_dot, self.jrz_2_dot, self.jrz_3_dot, self.jrz_4_dot, self.jrz_5_dot, self.jrz_6_dot, self.jrz_ee_dot]
self.os = [f(self.q_neutral) for f in self.os_func]
self.rxs = [f(self.q_neutral) for f in self.rxs_func]
self.rys = [f(self.q_neutral) for f in self.rys_func]
self.rzs = [f(self.q_neutral) for f in self.rzs_func]
self.jos = [f(self.q_neutral) for f in self.jos_func]
self.jrxs = [f(self.q_neutral) for f in self.jrxs_func]
self.jrys = [f(self.q_neutral) for f in self.jrys_func]
self.jrzs = [f(self.q_neutral) for f in self.jrzs_func]
self.jo_dots = [f(self.q_neutral, np.zeros_like(self.q_neutral)) for f in self.jo_dots_func]
self.jrx_dots = [f(self.q_neutral, np.zeros_like(self.q_neutral)) for f in self.jrx_dots_func]
self.jry_dots = [f(self.q_neutral, np.zeros_like(self.q_neutral)) for f in self.jry_dots_func]
self.jrz_dots = [f(self.q_neutral, np.zeros_like(self.q_neutral)) for f in self.jrz_dots_func]
def update(self, q, q_dot):
for i, f in enumerate(self.os_func):
self.os[i] = f(q)
for i, f in enumerate(self.os_func):
self.os[i] = f(q)
for i, f in enumerate(self.os_func):
self.os[i] = f(q)
for i, f in enumerate(self.os_func):
self.os[i] = f(q)
if __name__ == "__main__":
hoge = Baxter()
q = np.array([[0, 0, 0, 0, 0, 0, 0]]).T
hoge.o_W0(q) | 0.270866 | 0.117876 |
import argparse
import os
import numpy as np
import torch as t
from torch.optim import Adam
from utils.batch_loader import BatchLoader
from utils.parameters import Parameters
from model.rvae_dilated import RVAE_dilated
if __name__ == "__main__":
if not os.path.exists('data/word_embeddings.npy'):
raise FileNotFoundError("word embeddings file was't found")
parser = argparse.ArgumentParser(description='RVAE_dilated')
parser.add_argument('--num-iterations', type=int, default=25000, metavar='NI',
help='num iterations (default: 25000)')
parser.add_argument('--batch-size', type=int, default=45, metavar='BS',
help='batch size (default: 45)')
parser.add_argument('--use-cuda', type=bool, default=True, metavar='CUDA',
help='use cuda (default: True)')
parser.add_argument('--learning-rate', type=float, default=0.0005, metavar='LR',
help='learning rate (default: 0.0005)')
parser.add_argument('--dropout', type=float, default=0.3, metavar='DR',
help='dropout (default: 0.3)')
parser.add_argument('--use-trained', type=bool, default=False, metavar='UT',
help='load pretrained model (default: False)')
parser.add_argument('--ppl-result', default='', metavar='CE',
help='ce result path (default: '')')
parser.add_argument('--kld-result', default='', metavar='KLD',
help='ce result path (default: '')')
args = parser.parse_args()
batch_loader = BatchLoader('')
parameters = Parameters(batch_loader.max_word_len,
batch_loader.max_seq_len,
batch_loader.words_vocab_size,
batch_loader.chars_vocab_size)
rvae = RVAE_dilated(parameters)
if args.use_trained:
rvae.load_state_dict(t.load('trained_RVAE'))
if args.use_cuda:
rvae = rvae.cuda()
optimizer = Adam(rvae.learnable_parameters(), args.learning_rate)
train_step = rvae.trainer(optimizer, batch_loader)
validate = rvae.validater(batch_loader)
ppl_result = []
kld_result = []
for iteration in range(args.num_iterations):
ppl, kld = train_step(iteration, args.batch_size, args.use_cuda, args.dropout)
if iteration % 10 == 0:
print('\n')
print('------------TRAIN-------------')
print('----------ITERATION-----------')
print(iteration)
print('---------PERPLEXITY-----------')
print(ppl.data.cpu().numpy()[0])
print('-------------KLD--------------')
print(kld.data.cpu().numpy()[0])
print('------------------------------')
if iteration % 10 == 0:
ppl, kld = validate(args.batch_size, args.use_cuda)
ppl = ppl.data.cpu().numpy()[0]
kld = kld.data.cpu().numpy()[0]
print('\n')
print('------------VALID-------------')
print('---------PERPLEXITY-----------')
print(ppl)
print('-------------KLD--------------')
print(kld)
print('------------------------------')
ppl_result += [ppl]
kld_result += [kld]
if iteration % 20 == 0:
seed = np.random.normal(size=[1, parameters.latent_variable_size])
sample = rvae.sample(batch_loader, 50, seed, args.use_cuda)
print('\n')
print('------------SAMPLE------------')
print(sample)
print('------------------------------')
t.save(rvae.state_dict(), 'trained_RVAE')
np.save('ppl_result_{}.npy'.format(args.ppl_result), np.array(ppl_result))
np.save('kld_result_npy_{}'.format(args.kld_result), np.array(kld_result)) | train.py | import argparse
import os
import numpy as np
import torch as t
from torch.optim import Adam
from utils.batch_loader import BatchLoader
from utils.parameters import Parameters
from model.rvae_dilated import RVAE_dilated
if __name__ == "__main__":
if not os.path.exists('data/word_embeddings.npy'):
raise FileNotFoundError("word embeddings file was't found")
parser = argparse.ArgumentParser(description='RVAE_dilated')
parser.add_argument('--num-iterations', type=int, default=25000, metavar='NI',
help='num iterations (default: 25000)')
parser.add_argument('--batch-size', type=int, default=45, metavar='BS',
help='batch size (default: 45)')
parser.add_argument('--use-cuda', type=bool, default=True, metavar='CUDA',
help='use cuda (default: True)')
parser.add_argument('--learning-rate', type=float, default=0.0005, metavar='LR',
help='learning rate (default: 0.0005)')
parser.add_argument('--dropout', type=float, default=0.3, metavar='DR',
help='dropout (default: 0.3)')
parser.add_argument('--use-trained', type=bool, default=False, metavar='UT',
help='load pretrained model (default: False)')
parser.add_argument('--ppl-result', default='', metavar='CE',
help='ce result path (default: '')')
parser.add_argument('--kld-result', default='', metavar='KLD',
help='ce result path (default: '')')
args = parser.parse_args()
batch_loader = BatchLoader('')
parameters = Parameters(batch_loader.max_word_len,
batch_loader.max_seq_len,
batch_loader.words_vocab_size,
batch_loader.chars_vocab_size)
rvae = RVAE_dilated(parameters)
if args.use_trained:
rvae.load_state_dict(t.load('trained_RVAE'))
if args.use_cuda:
rvae = rvae.cuda()
optimizer = Adam(rvae.learnable_parameters(), args.learning_rate)
train_step = rvae.trainer(optimizer, batch_loader)
validate = rvae.validater(batch_loader)
ppl_result = []
kld_result = []
for iteration in range(args.num_iterations):
ppl, kld = train_step(iteration, args.batch_size, args.use_cuda, args.dropout)
if iteration % 10 == 0:
print('\n')
print('------------TRAIN-------------')
print('----------ITERATION-----------')
print(iteration)
print('---------PERPLEXITY-----------')
print(ppl.data.cpu().numpy()[0])
print('-------------KLD--------------')
print(kld.data.cpu().numpy()[0])
print('------------------------------')
if iteration % 10 == 0:
ppl, kld = validate(args.batch_size, args.use_cuda)
ppl = ppl.data.cpu().numpy()[0]
kld = kld.data.cpu().numpy()[0]
print('\n')
print('------------VALID-------------')
print('---------PERPLEXITY-----------')
print(ppl)
print('-------------KLD--------------')
print(kld)
print('------------------------------')
ppl_result += [ppl]
kld_result += [kld]
if iteration % 20 == 0:
seed = np.random.normal(size=[1, parameters.latent_variable_size])
sample = rvae.sample(batch_loader, 50, seed, args.use_cuda)
print('\n')
print('------------SAMPLE------------')
print(sample)
print('------------------------------')
t.save(rvae.state_dict(), 'trained_RVAE')
np.save('ppl_result_{}.npy'.format(args.ppl_result), np.array(ppl_result))
np.save('kld_result_npy_{}'.format(args.kld_result), np.array(kld_result)) | 0.566019 | 0.092155 |
import math
from . import gan_layer_architecture_shapes
from . import image_masks
def get_generator_config():
"""Gets generator config.
Returns:
Dictionary of generator configs.
"""
generator_dict = dict()
# Which paper to use for generator architecture: "berg", "GANomaly".
generator_dict["architecture"] = "GANomaly"
# Whether generator will be trained or not.
generator_dict["train"] = True
# Number of steps to train generator for per cycle.
generator_dict["train_steps"] = 1
# The latent size of the berg input noise vector or the GANomaly
# generator's encoder logits vector.
generator_dict["latent_size"] = 512
# Whether to normalize latent vector before projection.
generator_dict["normalize_latents"] = True
# Whether to use pixel norm op after each convolution.
generator_dict["use_pixel_norm"] = True
# Small value to add to denominator for numerical stability.
generator_dict["pixel_norm_epsilon"] = 1e-8
# The 3D dimensions to project latent noise vector into.
generator_dict["projection_dims"] = [4, 4, 512]
# The amount of leakyness of generator's leaky relus.
generator_dict["leaky_relu_alpha"] = 0.2
# The final activation function of generator: None, sigmoid, tanh, relu.
generator_dict["final_activation"] = "None"
# Whether to add uniform noise to fake images.
generator_dict["add_uniform_noise_to_fake_images"] = True
# Scale factor for L1 regularization for generator.
generator_dict["l1_regularization_scale"] = 0.
# Scale factor for L2 regularization for generator.
generator_dict["l2_regularization_scale"] = 0.
# Name of optimizer to use for generator.
generator_dict["optimizer"] = "Adam"
# How quickly we train model by scaling the gradient for generator.
generator_dict["learning_rate"] = 0.001
# Adam optimizer's beta1 hyperparameter for first moment.
generator_dict["adam_beta1"] = 0.0
# Adam optimizer's beta2 hyperparameter for second moment.
generator_dict["adam_beta2"] = 0.99
# Adam optimizer's epsilon hyperparameter for numerical stability.
generator_dict["adam_epsilon"] = 1e-8
# Global clipping to prevent gradient norm to exceed this value for generator.
generator_dict["clip_gradients"] = None
generator_berg_dict = dict()
generator_ganomaly_dict = dict()
generator_berg_losses_dict = dict()
generator_ganomaly_losses_dict = dict()
if generator_dict["architecture"] == "berg":
# The latent vector's random normal mean.
generator_berg_dict["latent_mean"] = 0.0
# The latent vector's random normal standard deviation.
generator_berg_dict["latent_stddev"] = 1.0
# These are just example values, yours will vary.
# Weights to multiply loss of D(G(z))
generator_berg_losses_dict["D_of_G_of_z_loss_weight"] = 1.0
# Weights to multiply loss of D(G(E(x)))
generator_berg_losses_dict["D_of_G_of_E_of_x_loss_weight"] = 0.0
# Weights to multiply loss of D(G(E(G(z)))
generator_berg_losses_dict["D_of_G_of_E_of_G_of_z_loss_weight"] = 0.0
# Weights to multiply loss of z - E(G(z))
generator_berg_losses_dict["z_minus_E_of_G_of_z_l1_loss_weight"] = 0.0
generator_berg_losses_dict["z_minus_E_of_G_of_z_l2_loss_weight"] = 0.0
# Weights to multiply loss of G(z) - G(E(G(z))
generator_berg_losses_dict["G_of_z_minus_G_of_E_of_G_of_z_l1_loss_weight"] = 0.0
generator_berg_losses_dict["G_of_z_minus_G_of_E_of_G_of_z_l2_loss_weight"] = 0.0
# Weights to multiply loss of E(x) - E(G(E(x)))
generator_berg_losses_dict["E_of_x_minus_E_of_G_of_E_of_x_l1_loss_weight"] = 1.0
generator_berg_losses_dict["E_of_x_minus_E_of_G_of_E_of_x_l2_loss_weight"] = 0.0
# Weights to multiply loss of x - G(E(x))
generator_berg_losses_dict["x_minus_G_of_E_of_x_l1_loss_weight"] = 0.0
generator_berg_losses_dict["x_minus_G_of_E_of_x_l2_loss_weight"] = 0.0
# GANomaly parameters to zero.
# Weights to multiply loss of D(G(x))
generator_ganomaly_losses_dict["D_of_G_of_x_loss_weight"] = 0.0
# Weights to multiply loss of x - G(x)
generator_ganomaly_losses_dict["x_minus_G_of_x_l1_loss_weight"] = 0.0
generator_ganomaly_losses_dict["x_minus_G_of_x_l2_loss_weight"] = 0.0
# Weights to multiply loss of Ge(x) - E(G(x))
generator_ganomaly_losses_dict["Ge_of_x_minus_E_of_G_of_x_l1_loss_weight"] = 0.0
generator_ganomaly_losses_dict["Ge_of_x_minus_E_of_G_of_x_l2_loss_weight"] = 0.0
else: # GANomaly
# Whether generator GANomaly architecture uses U-net skip connection for each block.
generator_ganomaly_dict["use_unet_skip_connections"] = [True] * 9
# Percent of masking image inputs to generator.
generator_ganomaly_dict["mask_generator_input_images_percent"] = 0.2
# Integer amount to randomly shift image mask block sizes.
generator_ganomaly_dict["image_mask_block_random_shift_amount"] = 0
# Whether to use shuffle or dead image block masking.
generator_ganomaly_dict["use_shuffle_image_masks"] = True
# Whether to add uniform noise to GANomaly Z vector.
generator_ganomaly_dict["add_uniform_noise_to_z"] = True
# These are just example values, yours will vary.
# Weights to multiply loss of D(G(x))
generator_ganomaly_losses_dict["D_of_G_of_x_loss_weight"] = 1.0
# Weights to multiply loss of x - G(x)
generator_ganomaly_losses_dict["x_minus_G_of_x_l1_loss_weight"] = 0.0
generator_ganomaly_losses_dict["x_minus_G_of_x_l2_loss_weight"] = 100.0
# Weights to multiply loss of Ge(x) - E(G(x))
generator_ganomaly_losses_dict["Ge_of_x_minus_E_of_G_of_x_l1_loss_weight"] = 0.0
generator_ganomaly_losses_dict["Ge_of_x_minus_E_of_G_of_x_l2_loss_weight"] = 0.0
# Berg parameters to zero.
# Weights to multiply loss of D(G(z))
generator_berg_losses_dict["D_of_G_of_z_loss_weight"] = 0.0
# Weights to multiply loss of D(G(E(x)))
generator_berg_losses_dict["D_of_G_of_E_of_x_loss_weight"] = 0.0
# Weights to multiply loss of D(G(E(G(z)))
generator_berg_losses_dict["D_of_G_of_E_of_G_of_z_loss_weight"] = 0.0
# Weights to multiply loss of z - E(G(z))
generator_berg_losses_dict["z_minus_E_of_G_of_z_l1_loss_weight"] = 0.0
generator_berg_losses_dict["z_minus_E_of_G_of_z_l2_loss_weight"] = 0.0
# Weights to multiply loss of G(z) - G(E(G(z))
generator_berg_losses_dict["G_of_z_minus_G_of_E_of_G_of_z_l1_loss_weight"] = 0.0
generator_berg_losses_dict["G_of_z_minus_G_of_E_of_G_of_z_l2_loss_weight"] = 0.0
# Weights to multiply loss of E(x) - E(G(E(x)))
generator_berg_losses_dict["E_of_x_minus_E_of_G_of_E_of_x_l1_loss_weight"] = 0.0
generator_berg_losses_dict["E_of_x_minus_E_of_G_of_E_of_x_l2_loss_weight"] = 0.0
# Weights to multiply loss of x - G(E(x))
generator_berg_losses_dict["x_minus_G_of_E_of_x_l1_loss_weight"] = 0.0
generator_berg_losses_dict["x_minus_G_of_E_of_x_l2_loss_weight"] = 0.0
generator_dict["berg"] = generator_berg_dict
generator_dict["GANomaly"] = generator_ganomaly_dict
generator_dict["losses"] = {}
generator_dict["losses"]["berg"] = generator_berg_losses_dict
generator_dict["losses"]["GANomaly"] = generator_ganomaly_losses_dict
return generator_dict
def get_encoder_config():
    """Gets encoder config.

    Returns:
        Dictionary of encoder configs.
    """
    # Berg loss weights. Each entry scales one term of the berg objective;
    # all default to zero (disabled).
    berg_loss_weights = {
        # Weights to multiply loss of D(G(E(x))).
        "D_of_G_of_E_of_x_loss_weight": 0.0,
        # Weights to multiply loss of D(G(E(G(z)))).
        "D_of_G_of_E_of_G_of_z_loss_weight": 0.0,
        # Weights to multiply loss of z - E(G(z)).
        "z_minus_E_of_G_of_z_l1_loss_weight": 0.0,
        "z_minus_E_of_G_of_z_l2_loss_weight": 0.0,
        # Weights to multiply loss of G(z) - G(E(G(z))).
        "G_of_z_minus_G_of_E_of_G_of_z_l1_loss_weight": 0.0,
        "G_of_z_minus_G_of_E_of_G_of_z_l2_loss_weight": 0.0,
        # Weights to multiply loss of E(x) - E(G(E(x))).
        "E_of_x_minus_E_of_G_of_E_of_x_l1_loss_weight": 0.0,
        "E_of_x_minus_E_of_G_of_E_of_x_l2_loss_weight": 0.0,
        # Weights to multiply loss of x - G(E(x)).
        "x_minus_G_of_E_of_x_l1_loss_weight": 0.0,
        "x_minus_G_of_E_of_x_l2_loss_weight": 0.0,
    }
    # GANomaly loss weights: only the L2 latent-consistency term is active.
    ganomaly_loss_weights = {
        # Weights to multiply loss of Ge(x) - E(G(x)).
        "Ge_of_x_minus_E_of_G_of_x_l1_loss_weight": 0.0,
        "Ge_of_x_minus_E_of_G_of_x_l2_loss_weight": 1.0,
    }
    # These settings are optional when using the GANomaly architecture and
    # required for berg.
    return {
        # Whether encoder will be created or not.
        "create": True,
        # Whether encoder will be trained or not.
        "train": True,
        # Whether to use minibatch stddev op before first base conv layer.
        "use_minibatch_stddev": True,
        # The size of groups to split minibatch examples into.
        "minibatch_stddev_group_size": 4,
        # Whether to average across feature maps and pixels for minibatch stddev.
        "minibatch_stddev_use_averaging": True,
        # The amount of leakyness of encoder's leaky relus.
        "leaky_relu_alpha": 0.2,
        # Scale factors for L1/L2 regularization for encoder.
        "l1_regularization_scale": 0.,
        "l2_regularization_scale": 0.,
        # Optimizer settings for encoder.
        "optimizer": "Adam",
        "learning_rate": 0.001,
        "adam_beta1": 0.0,
        "adam_beta2": 0.99,
        "adam_epsilon": 1e-8,
        # Global clipping to prevent gradient norm to exceed this value.
        "clip_gradients": None,
        # Loss weights keyed by architecture.
        "losses": {
            "berg": berg_loss_weights,
            "GANomaly": ganomaly_loss_weights,
        },
    }
def get_discriminator_config():
    """Gets discriminator config.

    Returns:
        Dictionary of discriminator configs.
    """
    # Berg adversarial loss weights; all disabled by default.
    berg_loss_weights = {
        # Weight to multiply loss of D(G(z)).
        "D_of_G_of_z_loss_weight": 0.0,
        # Weight to multiply loss of D(G(E(x))).
        "D_of_G_of_E_of_x_loss_weight": 0.0,
        # Weight to multiply loss of D(G(E(G(z)))).
        "D_of_G_of_E_of_G_of_z_loss_weight": 0.0,
    }
    # GANomaly adversarial loss weight on D(G(x)); enabled by default.
    ganomaly_loss_weights = {
        "D_of_G_of_x_loss_weight": 1.0,
    }
    return {
        # Whether discriminator will be created or not.
        "create": True,
        # Whether discriminator will be trained or not.
        "train": True,
        # Number of steps to train discriminator for per cycle.
        "train_steps": 1,
        # Whether to use minibatch stddev op before first base conv layer.
        "use_minibatch_stddev": True,
        # The size of groups to split minibatch examples into.
        "minibatch_stddev_group_size": 4,
        # Whether to average across feature maps and pixels for minibatch stddev.
        "minibatch_stddev_use_averaging": True,
        # The amount of leakyness of discriminator's leaky relus.
        "leaky_relu_alpha": 0.2,
        # Scale factors for L1/L2 regularization for discriminator.
        "l1_regularization_scale": 0.,
        "l2_regularization_scale": 0.,
        # Optimizer settings for discriminator.
        "optimizer": "Adam",
        "learning_rate": 0.001,
        "adam_beta1": 0.0,
        "adam_beta2": 0.99,
        "adam_epsilon": 1e-8,
        # Global clipping to prevent gradient norm to exceed this value.
        "clip_gradients": None,
        # Coefficient of gradient penalty for discriminator.
        "gradient_penalty_coefficient": 10.0,
        # Target value of gradient magnitudes for gradient penalty.
        "gradient_penalty_target": 1.0,
        # Coefficient of epsilon drift penalty for discriminator.
        "epsilon_drift": 0.001,
        # Loss weights: the real-image term plus per-architecture fake terms.
        "losses": {
            # Weight to multiply loss of D(x).
            "D_of_x_loss_weight": 1.0,
            "berg": berg_loss_weights,
            "GANomaly": ganomaly_loss_weights,
        },
    }
def get_reconstruction_config():
    """Gets reconstruction config.

    Returns:
        Dictionary of reconstruction configs.
    """
    # Progressive-growing resolutions: 4x4, 8x8, 16x16, 32x32.
    resolutions = [4 * 2 ** i for i in range(4)]
    # Schedule list of number of epochs to train for reconstruction.
    num_epochs_schedule = [1] * 9
    # Number of examples in one epoch of reconstruction training set.
    train_dataset_length = 400
    # Schedule lists of batch sizes per resolution block.
    train_batch_size_schedule = [4] * 9
    eval_batch_size_schedule = [4] * 9
    # List of number of examples until block added to networks.
    num_examples_until_growth_schedule = [
        num_epochs * train_dataset_length
        for num_epochs in num_epochs_schedule
    ]
    return {
        # Whether using multiple resolutions across a list of TF Records.
        "use_multiple_resolution_records": True,
        # GCS locations to read reconstruction training data.
        "train_file_patterns": [
            "data/cifar10_car/train_{0}x{0}_*.tfrecord".format(res)
            for res in resolutions
        ],
        # GCS locations to read reconstruction evaluation data.
        "eval_file_patterns": [
            "data/cifar10_car/test_{0}x{0}_*.tfrecord".format(res)
            for res in resolutions
        ],
        # Which dataset to use for reconstruction training:
        # "mnist", "cifar10", "cifar10_car", "tf_record".
        "dataset": "tf_record",
        # TF Record Example feature schema for reconstruction.
        "tf_record_example_schema": [
            {"name": "image_raw", "type": "FixedLen", "shape": [], "dtype": "str"},
            {"name": "label", "type": "FixedLen", "shape": [], "dtype": "int"},
        ],
        # Name of image feature within schema dictionary.
        "image_feature_name": "image_raw",
        # Encoding of image: raw, png, or jpeg.
        "image_encoding": "raw",
        # Dimensions of predownscaled image if NOT using multiple resolution
        # records, plus number of channels.
        "image_predownscaled_height": 32,
        "image_predownscaled_width": 32,
        "image_depth": 3,
        # Name of label feature within schema dictionary.
        "label_feature_name": "label",
        "num_epochs_schedule": num_epochs_schedule,
        "train_dataset_length": train_dataset_length,
        "train_batch_size_schedule": train_batch_size_schedule,
        "eval_batch_size_schedule": eval_batch_size_schedule,
        # Number of steps/batches to evaluate for reconstruction.
        "eval_steps": 1,
        "num_examples_until_growth_schedule": num_examples_until_growth_schedule,
        # List of number of steps/batches until block added to networks.
        "num_steps_until_growth_schedule": [
            num_examples // batch_size
            for num_examples, batch_size in zip(
                num_examples_until_growth_schedule, train_batch_size_schedule
            )
        ],
        # Whether to autotune input function performance for datasets.
        "input_fn_autotune": True,
        # How many steps to train before writing steps and loss to log.
        "log_step_count_steps": 10,
        # How many steps to train before saving a summary.
        "save_summary_steps": 10,
        # TensorBoard summary toggles.
        "write_loss_summaries": False,
        "write_generator_image_summaries": False,
        "write_encoder_image_summaries": False,
        "write_variable_histogram_summaries": False,
        "write_gradient_histogram_summaries": False,
        # How many steps to train reconstruction before saving a checkpoint.
        "save_checkpoints_steps": 10000,
        # Max number of reconstruction checkpoints to keep.
        "keep_checkpoint_max": 10,
        # Whether to save checkpoint every growth phase / every epoch.
        "checkpoint_every_growth_phase": True,
        "checkpoint_every_epoch": True,
        # Checkpoint growth/epoch indices to restore checkpoint.
        "checkpoint_growth_idx": 0,
        "checkpoint_epoch_idx": 0,
        # The checkpoint save path for saving and restoring.
        "checkpoint_save_path": "",
        # Whether to store loss logs and whether to normalize them.
        "store_loss_logs": True,
        "normalized_loss_logs": True,
        # Whether to print model summaries.
        "print_training_model_summaries": False,
        # Initial growth/epoch indices to resume training midway.
        "initial_growth_idx": 0,
        "initial_epoch_idx": 0,
        # Max number of times training loop can be restarted such as for NaN
        # losses.
        "max_training_loop_restarts": 10,
        # Whether to scale layer weights to equalize learning rate each
        # forward pass.
        "use_equalized_learning_rate": True,
        # Whether to normalize reconstruction losses by number of pixels.
        "normalize_reconstruction_losses": True,
    }
def get_error_distribution_config():
    """Gets error_distribution config.

    Returns:
        Dictionary of error_distribution configs.
    """
    # Both phases read the same 32x32 training shards.
    file_pattern = "data/cifar10_car/train_32x32_*.tfrecord"
    return {
        # Whether using multiple resolutions across a list of TF Records.
        "use_multiple_resolution_records": False,
        # GCS locations to read error distribution training/eval data.
        "train_file_pattern": file_pattern,
        "eval_file_pattern": file_pattern,
        # Which dataset to use for error distribution training:
        # "mnist", "cifar10", "cifar10_car", "tf_record".
        "dataset": "tf_record",
        # TF Record Example feature schema for error distribution.
        "tf_record_example_schema": [
            {"name": "image_raw", "type": "FixedLen", "shape": [], "dtype": "str"},
            {"name": "label", "type": "FixedLen", "shape": [], "dtype": "int"},
        ],
        # Name of image feature within schema dictionary.
        "image_feature_name": "image_raw",
        # Encoding of image: raw, png, or jpeg.
        "image_encoding": "raw",
        # Dimensions of predownscaled image if NOT using multiple resolution
        # records, plus number of channels.
        "image_predownscaled_height": 32,
        "image_predownscaled_width": 32,
        "image_depth": 3,
        # Name of label feature within schema dictionary.
        "label_feature_name": "label",
        # Number of examples in one epoch of error distribution training set.
        "train_dataset_length": 400,
        # Number of examples in error distribution training batch.
        "train_batch_size": 32,
        # Number of steps/batches to evaluate for error distribution.
        "eval_steps": 10,
        # Whether to autotune input function performance for datasets.
        "input_fn_autotune": True,
        # How many steps to train before saving a checkpoint.
        "save_checkpoints_steps": 10000,
        # Max number of error distribution checkpoints to keep.
        "keep_checkpoint_max": 10,
        # The checkpoint save path for saving and restoring.
        "checkpoint_save_path": "",
        # Max number of times training loop can be restarted.
        "max_training_loop_restarts": 10,
        # Whether using sample or population covariance for error distribution.
        "use_sample_covariance": True,
    }
def get_dynamic_threshold_config():
    """Gets dynamic_threshold config.

    Returns:
        Dictionary of dynamic_threshold configs.
    """
    # Both phases read the same 32x32 training shards.
    file_pattern = "data/cifar10_car/train_32x32_*.tfrecord"
    return {
        # Whether using multiple resolutions across a list of TF Records.
        "use_multiple_resolution_records": False,
        # GCS locations to read dynamic threshold training/eval data.
        "train_file_pattern": file_pattern,
        "eval_file_pattern": file_pattern,
        # Which dataset to use for dynamic threshold training:
        # "mnist", "cifar10", "cifar10_car", "tf_record".
        "dataset": "tf_record",
        # TF Record Example feature schema for dynamic threshold.
        "tf_record_example_schema": [
            {"name": "image_raw", "type": "FixedLen", "shape": [], "dtype": "str"},
            {"name": "label", "type": "FixedLen", "shape": [], "dtype": "int"},
        ],
        # Name of image feature within schema dictionary.
        "image_feature_name": "image_raw",
        # Encoding of image: raw, png, or jpeg.
        "image_encoding": "raw",
        # Dimensions of predownscaled image if NOT using multiple resolution
        # records, plus number of channels.
        "image_predownscaled_height": 32,
        "image_predownscaled_width": 32,
        "image_depth": 3,
        # Name of label feature within schema dictionary.
        "label_feature_name": "label",
        # Number of examples in one epoch of dynamic threshold training set.
        "train_dataset_length": 400,
        # Number of examples in dynamic threshold training batch.
        "train_batch_size": 32,
        # Number of steps/batches to evaluate for dynamic threshold.
        "eval_steps": 10,
        # Whether to autotune input function performance for datasets.
        "input_fn_autotune": True,
        # How many steps to train before saving a checkpoint.
        "save_checkpoints_steps": 10000,
        # Max number of dynamic threshold checkpoints to keep.
        "keep_checkpoint_max": 10,
        # The checkpoint save path for saving and restoring.
        "checkpoint_save_path": "",
        # Max number of times training loop can be restarted.
        "max_training_loop_restarts": 10,
        # Whether using supervised dynamic thresholding or unsupervised.
        "use_supervised": False,
        # Supervised thresholding parameters.
        "supervised_dict": {
            # Beta value for supervised F-beta score.
            "f_score_beta": 0.05,
        },
        # Unsupervised thresholding parameters.
        "unsupervised_dict": {
            # Whether using sample or population covariance.
            "use_sample_covariance": True,
            # Max standard deviations of Mahalanobis distance to flag as
            # outlier.
            "max_mahalanobis_stddevs": 3.0,
        },
    }
def get_training_config():
    """Gets training config.

    Returns:
        Dictionary of training configs, including the nested per-phase
        configs for reconstruction, error distribution, and dynamic
        threshold.
    """
    return {
        # GCS location to write checkpoints, loss logs, and export models.
        "output_dir": "trained_models/experiment_0",
        # Version of TensorFlow.
        "tf_version": 2.3,
        # Whether to use graph mode or not (eager).
        "use_graph_mode": True,
        # Which distribution strategy to use, if any.
        "distribution_strategy": "Mirrored",
        # Whether we subclass models or use Functional API.
        "subclass_models": True,
        # Whether performing training phases 1, 2, and 3.
        "train_reconstruction": True,
        "train_error_distribution": True,
        "train_dynamic_threshold": True,
        # Nested per-phase configs.
        "reconstruction": get_reconstruction_config(),
        "error_distribution": get_error_distribution_config(),
        "dynamic_threshold": get_dynamic_threshold_config(),
    }
def get_export_config():
    """Gets export config.

    Returns:
        Dictionary of export configs.
    """
    return {
        # Most recent export's growth/epoch indices so that there are no
        # repeat exports.
        "most_recent_export_growth_idx": -1,
        "most_recent_export_epoch_idx": -1,
        # Whether to export SavedModel every growth phase / every epoch.
        "export_every_growth_phase": True,
        "export_every_epoch": True,
        # Whether to export all growth phases or just current.
        "export_all_growth_phases": True,
        # Using a random noise vector Z with shape
        # (batch_size, generator_latent_size) for berg.
        # Whether to export Z.
        "export_Z": True,
        # Whether to export generated images, G(z).
        "export_generated_images": True,
        # Whether to export encoded generated logits, E(G(z)).
        "export_encoded_generated_logits": True,
        # Whether to export encoded generated images, G(E(G(z))).
        "export_encoded_generated_images": True,
        # Whether to export Z generated images, Gd(z).
        "export_Z_generated_images": True,
        # Using a query image with shape (batch_size, height, width, depth).
        # Whether to export query images.
        "export_query_images": True,
        # Berg encoded exports.
        # Whether to export encoded query logits, E(x).
        "export_query_encoded_logits": True,
        # Whether to export encoded query images, G(E(x)).
        "export_query_encoded_images": True,
        # GANomaly encoded exports.
        # Whether to export generator encoded query logits, Ge(x).
        "export_query_gen_encoded_logits": True,
        # Whether to export generator encoded query images, G(x) = Gd(Ge(x)).
        "export_query_gen_encoded_images": True,
        # Whether to export encoder encoded query logits, E(G(x)).
        "export_query_enc_encoded_logits": True,
        # Whether to export encoder encoded query images, Gd(E(G(x))).
        "export_query_enc_encoded_images": True,
        # Anomaly exports.
        # Whether to export query anomaly images using sigmoid/linear scaling.
        "export_query_anomaly_images_sigmoid": True,
        "export_query_anomaly_images_linear": True,
        # Whether to export query Mahalanobis distances.
        "export_query_mahalanobis_distances": True,
        # Whether to export query Mahalanobis distance images using
        # sigmoid/linear scaling.
        "export_query_mahalanobis_distance_images_sigmoid": True,
        "export_query_mahalanobis_distance_images_linear": True,
        # Whether to export query pixel anomaly flag binary images.
        "export_query_pixel_anomaly_flag_images": True,
        # Whether to export query pixel anomaly flag counts.
        "export_query_pixel_anomaly_flag_counts": True,
        # Whether to export query pixel anomaly flag percentages.
        "export_query_pixel_anomaly_flag_percentages": True,
        # Whether to export query anomaly scores/flags, only for Berg.
        "export_query_anomaly_scores": False,
        "export_query_anomaly_flags": False,
        # Anomaly parameters.
        # The threshold value at which above flags scores images as anomalous.
        "anomaly_threshold": 5.0,
        # The anomaly convex combination factor for weighting the two anomaly
        # losses.
        "anom_convex_combo_factor": 0.05,
        # Whether to print model summaries.
        "print_serving_model_summaries": False,
    }
def get_default_config():
    """Gets default config.

    Assembles the per-component configs (generator, encoder, discriminator,
    training, export), derives the progressive-growing convolutional layer
    shapes for the chosen image size, and attaches them to the generator and
    discriminator configs.

    Returns:
        Dictionary of all default configs.
    """
    arguments = dict()
    arguments["generator"] = get_generator_config()
    arguments["encoder"] = get_encoder_config()
    arguments["discriminator"] = get_discriminator_config()
    arguments["training"] = get_training_config()
    arguments["export"] = get_export_config()
    # Full lists for full 1024x1024 network growth.
    full_conv_num_filters = [[512, 512], [512, 512], [512, 512], [512, 512], [256, 256], [128, 128], [64, 64], [32, 32], [16, 16]]
    full_conv_kernel_sizes = [[4, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3]]
    full_conv_strides = [[1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1]]
    # Set final image size as a multiple of 2, starting at 4.
    image_size = 1024
    # Each growth block doubles resolution starting from 4x4, so the block
    # count is log2(image_size) - 1, clamped to the available definitions
    # and to at least one block.
    num_conv_blocks = max(
        min(int(math.log(image_size, 2) - 1), len(full_conv_num_filters)), 1
    )
    arguments["conv_num_filters"] = full_conv_num_filters[0:num_conv_blocks]
    arguments["conv_kernel_sizes"] = full_conv_kernel_sizes[0:num_conv_blocks]
    arguments["conv_strides"] = full_conv_strides[0:num_conv_blocks]
    # Get conv layer properties for generator and discriminator.
    (generator,
     discriminator) = (
        gan_layer_architecture_shapes.calc_generator_discriminator_conv_layer_properties(
            arguments["conv_num_filters"],
            arguments["conv_kernel_sizes"],
            arguments["conv_strides"],
            arguments["training"]["reconstruction"]["image_depth"]
        )
    )
    # Split up generator properties into separate lists.
    (generator_base_conv_blocks,
     generator_growth_conv_blocks,
     generator_to_rgb_layers) = (
        gan_layer_architecture_shapes.split_up_generator_conv_layer_properties(
            generator,
            arguments["conv_num_filters"],
            arguments["conv_strides"],
            arguments["training"]["reconstruction"]["image_depth"]
        )
    )
    # Generator list of list of lists of base conv block layer shapes.
    arguments["generator"]["base_conv_blocks"] = generator_base_conv_blocks
    # Generator list of list of lists of growth conv block layer shapes.
    arguments["generator"]["growth_conv_blocks"] = (
        generator_growth_conv_blocks
    )
    # Generator list of list of lists of to_RGB layer shapes.
    arguments["generator"]["to_rgb_layers"] = generator_to_rgb_layers
    # Split up discriminator properties into separate lists.
    (discriminator_from_rgb_layers,
     discriminator_base_conv_blocks,
     discriminator_growth_conv_blocks) = (
        gan_layer_architecture_shapes.split_up_discriminator_conv_layer_properties(
            discriminator,
            arguments["conv_num_filters"],
            arguments["conv_strides"],
            arguments["training"]["reconstruction"]["image_depth"]
        )
    )
    # Discriminator list of list of lists of from_RGB layer shapes.
    arguments["discriminator"]["from_rgb_layers"] = (
        discriminator_from_rgb_layers
    )
    # Discriminator list of list of lists of base conv block layer shapes.
    arguments["discriminator"]["base_conv_blocks"] = (
        discriminator_base_conv_blocks
    )
    # Discriminator list of list of lists of growth conv block layer shapes.
    arguments["discriminator"]["growth_conv_blocks"] = (
        discriminator_growth_conv_blocks
    )
    # Image masking only applies to the GANomaly architecture and only when
    # a nonzero masking percentage was requested.
    if (arguments["generator"]["architecture"] == "GANomaly" and
            arguments["generator"]["GANomaly"]["mask_generator_input_images_percent"] > 0.):
        # Image mask block pixel sizes list of lists.
        arguments["generator"]["image_mask_block_sizes"] = (
            image_masks.calculate_image_mask_block_sizes_per_resolution(
                num_resolutions=num_conv_blocks,
                min_height=arguments["generator"]["projection_dims"][0],
                min_width=arguments["generator"]["projection_dims"][1],
                pixel_mask_percent=(
                    arguments["generator"]["GANomaly"][
                        "mask_generator_input_images_percent"]
                )
            )
        )
    # NOTE: the original line here had a stray repo-path artifact fused onto
    # the return statement ("| proganomaly_modules/..."), which would raise
    # NameError at runtime; it has been removed.
    return arguments
import math
from . import gan_layer_architecture_shapes
from . import image_masks
def get_generator_config():
    """Gets generator config.

    Returns:
        Dictionary of generator configs.
    """
    generator_dict = {
        # Which paper to use for generator architecture: "berg", "GANomaly".
        "architecture": "GANomaly",
        # Whether generator will be trained or not.
        "train": True,
        # Number of steps to train generator for per cycle.
        "train_steps": 1,
        # The latent size of the berg input noise vector or the GANomaly
        # generator's encoder logits vector.
        "latent_size": 512,
        # Whether to normalize latent vector before projection.
        "normalize_latents": True,
        # Whether to use pixel norm op after each convolution.
        "use_pixel_norm": True,
        # Small value to add to denominator for numerical stability.
        "pixel_norm_epsilon": 1e-8,
        # The 3D dimensions to project latent noise vector into.
        "projection_dims": [4, 4, 512],
        # The amount of leakyness of generator's leaky relus.
        "leaky_relu_alpha": 0.2,
        # The final activation function of generator:
        # None, sigmoid, tanh, relu.
        "final_activation": "None",
        # Whether to add uniform noise to fake images.
        "add_uniform_noise_to_fake_images": True,
        # Scale factors for L1/L2 regularization for generator.
        "l1_regularization_scale": 0.,
        "l2_regularization_scale": 0.,
        # Optimizer settings for generator.
        "optimizer": "Adam",
        "learning_rate": 0.001,
        "adam_beta1": 0.0,
        "adam_beta2": 0.99,
        "adam_epsilon": 1e-8,
        # Global clipping to prevent gradient norm to exceed this value.
        "clip_gradients": None,
    }
    # Start every loss weight at zero; the selected architecture's branch
    # below turns on its own terms. Key order matches the original config.
    berg_losses = dict.fromkeys(
        (
            # Weights to multiply loss of D(G(z)).
            "D_of_G_of_z_loss_weight",
            # Weights to multiply loss of D(G(E(x))).
            "D_of_G_of_E_of_x_loss_weight",
            # Weights to multiply loss of D(G(E(G(z)))).
            "D_of_G_of_E_of_G_of_z_loss_weight",
            # Weights to multiply loss of z - E(G(z)).
            "z_minus_E_of_G_of_z_l1_loss_weight",
            "z_minus_E_of_G_of_z_l2_loss_weight",
            # Weights to multiply loss of G(z) - G(E(G(z))).
            "G_of_z_minus_G_of_E_of_G_of_z_l1_loss_weight",
            "G_of_z_minus_G_of_E_of_G_of_z_l2_loss_weight",
            # Weights to multiply loss of E(x) - E(G(E(x))).
            "E_of_x_minus_E_of_G_of_E_of_x_l1_loss_weight",
            "E_of_x_minus_E_of_G_of_E_of_x_l2_loss_weight",
            # Weights to multiply loss of x - G(E(x)).
            "x_minus_G_of_E_of_x_l1_loss_weight",
            "x_minus_G_of_E_of_x_l2_loss_weight",
        ),
        0.0,
    )
    ganomaly_losses = dict.fromkeys(
        (
            # Weights to multiply loss of D(G(x)).
            "D_of_G_of_x_loss_weight",
            # Weights to multiply loss of x - G(x).
            "x_minus_G_of_x_l1_loss_weight",
            "x_minus_G_of_x_l2_loss_weight",
            # Weights to multiply loss of Ge(x) - E(G(x)).
            "Ge_of_x_minus_E_of_G_of_x_l1_loss_weight",
            "Ge_of_x_minus_E_of_G_of_x_l2_loss_weight",
        ),
        0.0,
    )
    if generator_dict["architecture"] == "berg":
        berg_params = {
            # The latent vector's random normal mean.
            "latent_mean": 0.0,
            # The latent vector's random normal standard deviation.
            "latent_stddev": 1.0,
        }
        ganomaly_params = {}
        # These are just example values, yours will vary.
        berg_losses["D_of_G_of_z_loss_weight"] = 1.0
        berg_losses["E_of_x_minus_E_of_G_of_E_of_x_l1_loss_weight"] = 1.0
    else:  # GANomaly
        berg_params = {}
        ganomaly_params = {
            # Whether generator GANomaly architecture uses U-net skip
            # connection for each block.
            "use_unet_skip_connections": [True] * 9,
            # Percent of masking image inputs to generator.
            "mask_generator_input_images_percent": 0.2,
            # Integer amount to randomly shift image mask block sizes.
            "image_mask_block_random_shift_amount": 0,
            # Whether to use shuffle or dead image block masking.
            "use_shuffle_image_masks": True,
            # Whether to add uniform noise to GANomaly Z vector.
            "add_uniform_noise_to_z": True,
        }
        # These are just example values, yours will vary.
        ganomaly_losses["D_of_G_of_x_loss_weight"] = 1.0
        ganomaly_losses["x_minus_G_of_x_l2_loss_weight"] = 100.0
    generator_dict["berg"] = berg_params
    generator_dict["GANomaly"] = ganomaly_params
    generator_dict["losses"] = {
        "berg": berg_losses,
        "GANomaly": ganomaly_losses,
    }
    return generator_dict
def get_encoder_config():
    """Builds the configuration dictionary for the encoder network.

    Returns:
        dict: Encoder hyperparameters, including a nested "losses" dict
        with per-architecture ("berg" / "GANomaly") loss weights.
    """
    # Berg-architecture loss weights: every term disabled by default.
    berg_losses = {
        # Weights to multiply loss of D(G(E(x)))
        "D_of_G_of_E_of_x_loss_weight": 0.0,
        # Weights to multiply loss of D(G(E(G(z)))
        "D_of_G_of_E_of_G_of_z_loss_weight": 0.0,
        # Weights to multiply loss of z - E(G(z))
        "z_minus_E_of_G_of_z_l1_loss_weight": 0.0,
        "z_minus_E_of_G_of_z_l2_loss_weight": 0.0,
        # Weights to multiply loss of G(z) - G(E(G(z))
        "G_of_z_minus_G_of_E_of_G_of_z_l1_loss_weight": 0.0,
        "G_of_z_minus_G_of_E_of_G_of_z_l2_loss_weight": 0.0,
        # Weights to multiply loss of E(x) - E(G(E(x)))
        "E_of_x_minus_E_of_G_of_E_of_x_l1_loss_weight": 0.0,
        "E_of_x_minus_E_of_G_of_E_of_x_l2_loss_weight": 0.0,
        # Weights to multiply loss of x - G(E(x))
        "x_minus_G_of_E_of_x_l1_loss_weight": 0.0,
        "x_minus_G_of_E_of_x_l2_loss_weight": 0.0,
    }
    # GANomaly-architecture loss weights: only the L2 latent
    # reconstruction term Ge(x) - E(G(x)) is active.
    ganomaly_losses = {
        "Ge_of_x_minus_E_of_G_of_x_l1_loss_weight": 0.0,
        "Ge_of_x_minus_E_of_G_of_x_l2_loss_weight": 1.0,
    }
    return {
        # Optional when using the GANomaly architecture, required for berg.
        # Whether encoder will be created / trained.
        "create": True,
        "train": True,
        # Minibatch stddev op settings (applied before first base conv layer).
        "use_minibatch_stddev": True,
        "minibatch_stddev_group_size": 4,
        "minibatch_stddev_use_averaging": True,
        # The amount of leakyness of encoder's leaky relus.
        "leaky_relu_alpha": 0.2,
        # L1/L2 regularization scale factors.
        "l1_regularization_scale": 0.,
        "l2_regularization_scale": 0.,
        # Optimizer settings.
        "optimizer": "Adam",
        "learning_rate": 0.001,
        "adam_beta1": 0.0,
        "adam_beta2": 0.99,
        "adam_epsilon": 1e-8,
        # Global gradient-norm clipping; None disables clipping.
        "clip_gradients": None,
        "losses": {"berg": berg_losses, "GANomaly": ganomaly_losses},
    }
def get_discriminator_config():
    """Builds the configuration dictionary for the discriminator network.

    Returns:
        dict: Discriminator hyperparameters, including a nested "losses"
        dict with the shared D(x) weight and per-architecture weights.
    """
    # Berg-architecture loss weights: all terms disabled by default.
    berg_losses = {
        # Weight to multiply loss of D(G(z))
        "D_of_G_of_z_loss_weight": 0.0,
        # Weight to multiply loss of D(G(E(x)))
        "D_of_G_of_E_of_x_loss_weight": 0.0,
        # Weight to multiply loss of D(G(E(G(z)))
        "D_of_G_of_E_of_G_of_z_loss_weight": 0.0,
    }
    # GANomaly-architecture loss weights: D(G(x)) enabled.
    ganomaly_losses = {
        "D_of_G_of_x_loss_weight": 1.0,
    }
    losses = {
        # Weight to multiply loss of D(x)
        "D_of_x_loss_weight": 1.0,
        "berg": berg_losses,
        "GANomaly": ganomaly_losses,
    }
    return {
        # Whether discriminator will be created / trained.
        "create": True,
        "train": True,
        # Number of steps to train discriminator for per cycle.
        "train_steps": 1,
        # Minibatch stddev op settings (applied before first base conv layer).
        "use_minibatch_stddev": True,
        "minibatch_stddev_group_size": 4,
        "minibatch_stddev_use_averaging": True,
        # The amount of leakyness of discriminator's leaky relus.
        "leaky_relu_alpha": 0.2,
        # L1/L2 regularization scale factors.
        "l1_regularization_scale": 0.,
        "l2_regularization_scale": 0.,
        # Optimizer settings.
        "optimizer": "Adam",
        "learning_rate": 0.001,
        "adam_beta1": 0.0,
        "adam_beta2": 0.99,
        "adam_epsilon": 1e-8,
        # Global gradient-norm clipping; None disables clipping.
        "clip_gradients": None,
        # WGAN-GP style gradient penalty settings.
        "gradient_penalty_coefficient": 10.0,
        "gradient_penalty_target": 1.0,
        # Coefficient of epsilon drift penalty.
        "epsilon_drift": 0.001,
        "losses": losses,
    }
def get_reconstruction_config():
    """Builds the configuration for training phase 1 (reconstruction).

    Returns:
        dict: Dataset, schedule, logging, and checkpoint settings for the
        progressive-growing reconstruction phase.
    """
    # TF Record Example feature schema shared by train and eval.
    schema = [
        {"name": "image_raw", "type": "FixedLen", "shape": [], "dtype": "str"},
        {"name": "label", "type": "FixedLen", "shape": [], "dtype": "int"},
    ]
    # Per-resolution-block schedules (9 growth blocks).
    epochs_per_block = [1] * 9
    train_batch_sizes = [4] * 9
    dataset_length = 400
    # Examples seen before each growth step, then converted to step counts.
    examples_until_growth = [e * dataset_length for e in epochs_per_block]
    steps_until_growth = [
        ex // bs for ex, bs in zip(examples_until_growth, train_batch_sizes)
    ]
    return {
        # Whether using multiple resolutions across a list of TF Records.
        "use_multiple_resolution_records": True,
        # GCS locations of train/eval data, one pattern per resolution.
        "train_file_patterns": [
            "data/cifar10_car/train_{0}x{0}_*.tfrecord".format(4 * 2 ** i)
            for i in range(4)
        ],
        "eval_file_patterns": [
            "data/cifar10_car/test_{0}x{0}_*.tfrecord".format(4 * 2 ** i)
            for i in range(4)
        ],
        # "mnist", "cifar10", "cifar10_car", or "tf_record".
        "dataset": "tf_record",
        "tf_record_example_schema": schema,
        # Image decoding settings.
        "image_feature_name": "image_raw",
        "image_encoding": "raw",
        "image_predownscaled_height": 32,
        "image_predownscaled_width": 32,
        "image_depth": 3,
        "label_feature_name": "label",
        # Training schedules.
        "num_epochs_schedule": epochs_per_block,
        "train_dataset_length": dataset_length,
        "train_batch_size_schedule": train_batch_sizes,
        "eval_batch_size_schedule": [4] * 9,
        "eval_steps": 1,
        "num_examples_until_growth_schedule": examples_until_growth,
        "num_steps_until_growth_schedule": steps_until_growth,
        "input_fn_autotune": True,
        # Logging / summary cadence.
        "log_step_count_steps": 10,
        "save_summary_steps": 10,
        "write_loss_summaries": False,
        "write_generator_image_summaries": False,
        "write_encoder_image_summaries": False,
        "write_variable_histogram_summaries": False,
        "write_gradient_histogram_summaries": False,
        # Checkpoint settings.
        "save_checkpoints_steps": 10000,
        "keep_checkpoint_max": 10,
        "checkpoint_every_growth_phase": True,
        "checkpoint_every_epoch": True,
        "checkpoint_growth_idx": 0,
        "checkpoint_epoch_idx": 0,
        "checkpoint_save_path": "",
        # Loss log settings.
        "store_loss_logs": True,
        "normalized_loss_logs": True,
        "print_training_model_summaries": False,
        # Resume-midway indices.
        "initial_growth_idx": 0,
        "initial_epoch_idx": 0,
        # Max restarts of the training loop (e.g. for NaN losses).
        "max_training_loop_restarts": 10,
        # Equalized learning-rate weight scaling per forward pass.
        "use_equalized_learning_rate": True,
        # Normalize reconstruction losses by number of pixels.
        "normalize_reconstruction_losses": True,
    }
def get_error_distribution_config():
    """Builds the configuration for training phase 2 (error distribution).

    Returns:
        dict: Dataset and checkpoint settings for fitting the error
        distribution over reconstruction residuals.
    """
    # Both train and eval read the training records for this phase.
    file_pattern = "data/cifar10_car/train_32x32_*.tfrecord"
    return {
        # Whether using multiple resolutions across a list of TF Records.
        "use_multiple_resolution_records": False,
        "train_file_pattern": file_pattern,
        "eval_file_pattern": file_pattern,
        # "mnist", "cifar10", "cifar10_car", or "tf_record".
        "dataset": "tf_record",
        # TF Record Example feature schema.
        "tf_record_example_schema": [
            {"name": "image_raw", "type": "FixedLen", "shape": [], "dtype": "str"},
            {"name": "label", "type": "FixedLen", "shape": [], "dtype": "int"},
        ],
        # Image decoding settings.
        "image_feature_name": "image_raw",
        "image_encoding": "raw",
        "image_predownscaled_height": 32,
        "image_predownscaled_width": 32,
        "image_depth": 3,
        "label_feature_name": "label",
        # Training settings.
        "train_dataset_length": 400,
        "train_batch_size": 32,
        "eval_steps": 10,
        "input_fn_autotune": True,
        # Checkpoint settings.
        "save_checkpoints_steps": 10000,
        "keep_checkpoint_max": 10,
        "checkpoint_save_path": "",
        "max_training_loop_restarts": 10,
        # Sample (True) vs. population (False) covariance estimate.
        "use_sample_covariance": True,
    }
def get_dynamic_threshold_config():
    """Builds the configuration for training phase 3 (dynamic threshold).

    Returns:
        dict: Dataset, checkpoint, and supervised/unsupervised threshold
        settings for anomaly flagging.
    """
    # Both train and eval read the training records for this phase.
    file_pattern = "data/cifar10_car/train_32x32_*.tfrecord"
    config = {
        # Whether using multiple resolutions across a list of TF Records.
        "use_multiple_resolution_records": False,
        "train_file_pattern": file_pattern,
        "eval_file_pattern": file_pattern,
        # "mnist", "cifar10", "cifar10_car", or "tf_record".
        "dataset": "tf_record",
        # TF Record Example feature schema.
        "tf_record_example_schema": [
            {"name": "image_raw", "type": "FixedLen", "shape": [], "dtype": "str"},
            {"name": "label", "type": "FixedLen", "shape": [], "dtype": "int"},
        ],
        # Image decoding settings.
        "image_feature_name": "image_raw",
        "image_encoding": "raw",
        "image_predownscaled_height": 32,
        "image_predownscaled_width": 32,
        "image_depth": 3,
        "label_feature_name": "label",
        # Training settings.
        "train_dataset_length": 400,
        "train_batch_size": 32,
        "eval_steps": 10,
        "input_fn_autotune": True,
        # Checkpoint settings.
        "save_checkpoints_steps": 10000,
        "keep_checkpoint_max": 10,
        "checkpoint_save_path": "",
        "max_training_loop_restarts": 10,
        # Supervised (True) vs. unsupervised (False) thresholding.
        "use_supervised": False,
    }
    # Beta value for the supervised F-beta score.
    config["supervised_dict"] = {"f_score_beta": 0.05}
    config["unsupervised_dict"] = {
        # Sample vs. population covariance estimate.
        "use_sample_covariance": True,
        # Max standard deviations of Mahalanobis distance before flagging.
        "max_mahalanobis_stddevs": 3.0,
    }
    return config
def get_training_config():
    """Builds the top-level training configuration.

    Returns:
        dict: Global training settings plus the nested configs for the
        three training phases (reconstruction, error distribution,
        dynamic threshold).
    """
    config = {
        # GCS location to write checkpoints, loss logs, and export models.
        "output_dir": "trained_models/experiment_0",
        # Version of TensorFlow.
        "tf_version": 2.3,
        # Graph mode (True) vs. eager execution (False).
        "use_graph_mode": True,
        # Which distribution strategy to use, if any.
        "distribution_strategy": "Mirrored",
        # Subclassed models (True) vs. Functional API (False).
        "subclass_models": True,
        # Which training phases to run.
        "train_reconstruction": True,
        "train_error_distribution": True,
        "train_dynamic_threshold": True,
    }
    # Nested per-phase configurations.
    config["reconstruction"] = get_reconstruction_config()
    config["error_distribution"] = get_error_distribution_config()
    config["dynamic_threshold"] = get_dynamic_threshold_config()
    return config
def get_export_config():
    """Builds the SavedModel export configuration.

    Returns:
        dict: Flags controlling which tensors get exported plus anomaly
        scoring parameters.
    """
    return {
        # Most recent export indices, so that there are no repeat exports.
        "most_recent_export_growth_idx": -1,
        "most_recent_export_epoch_idx": -1,
        # When to export SavedModels.
        "export_every_growth_phase": True,
        "export_every_epoch": True,
        "export_all_growth_phases": True,
        # Random-noise-vector exports (Z has shape
        # (batch_size, generator_latent_size) for berg).
        "export_Z": True,
        # Generated images G(z).
        "export_generated_images": True,
        # Encoded generated logits E(G(z)).
        "export_encoded_generated_logits": True,
        # Encoded generated images G(E(G(z))).
        "export_encoded_generated_images": True,
        # Z generated images Gd(z).
        "export_Z_generated_images": True,
        # Query-image exports (shape (batch_size, height, width, depth)).
        "export_query_images": True,
        # Berg encoded exports: E(x) and G(E(x)).
        "export_query_encoded_logits": True,
        "export_query_encoded_images": True,
        # GANomaly encoded exports: Ge(x), G(x) = Gd(Ge(x)),
        # E(G(x)), and Gd(E(G(x))).
        "export_query_gen_encoded_logits": True,
        "export_query_gen_encoded_images": True,
        "export_query_enc_encoded_logits": True,
        "export_query_enc_encoded_images": True,
        # Anomaly exports (sigmoid / linear scaled images, distances, flags).
        "export_query_anomaly_images_sigmoid": True,
        "export_query_anomaly_images_linear": True,
        "export_query_mahalanobis_distances": True,
        "export_query_mahalanobis_distance_images_sigmoid": True,
        "export_query_mahalanobis_distance_images_linear": True,
        "export_query_pixel_anomaly_flag_images": True,
        "export_query_pixel_anomaly_flag_counts": True,
        "export_query_pixel_anomaly_flag_percentages": True,
        # Anomaly scores/flags apply only to the Berg architecture.
        "export_query_anomaly_scores": False,
        "export_query_anomaly_flags": False,
        # Anomaly parameters.
        # Scores above this threshold flag an image as anomalous.
        "anomaly_threshold": 5.0,
        # Convex combination factor weighting the two anomaly losses.
        "anom_convex_combo_factor": 0.05,
        # Whether to print model summaries at serving time.
        "print_serving_model_summaries": False,
    }
def get_default_config():
    """Assembles the complete default configuration.

    Combines the per-network configs, derives the per-resolution conv
    layer shapes for the generator and discriminator, and (for GANomaly
    with input masking) the image mask block sizes.

    Returns:
        dict: The full argument dictionary for model building/training.
    """
    config = {
        "generator": get_generator_config(),
        "encoder": get_encoder_config(),
        "discriminator": get_discriminator_config(),
        "training": get_training_config(),
        "export": get_export_config(),
    }
    # Full schedules for growth all the way to 1024x1024.
    full_filters = [
        [512, 512], [512, 512], [512, 512], [512, 512],
        [256, 256], [128, 128], [64, 64], [32, 32], [16, 16],
    ]
    full_kernels = [[4, 3]] + [[3, 3] for _ in range(8)]
    full_strides = [[1, 1] for _ in range(9)]
    # Final image size: a power of 2, starting at 4. Each doubling adds
    # one conv block (4x4 -> 1 block, 8x8 -> 2 blocks, ...).
    image_size = 1024
    num_conv_blocks = max(
        min(int(math.log(image_size, 2) - 1), len(full_filters)), 1
    )
    config["conv_num_filters"] = full_filters[:num_conv_blocks]
    config["conv_kernel_sizes"] = full_kernels[:num_conv_blocks]
    config["conv_strides"] = full_strides[:num_conv_blocks]
    image_depth = config["training"]["reconstruction"]["image_depth"]
    # Conv layer properties for generator and discriminator.
    generator_props, discriminator_props = (
        gan_layer_architecture_shapes.calc_generator_discriminator_conv_layer_properties(
            config["conv_num_filters"],
            config["conv_kernel_sizes"],
            config["conv_strides"],
            image_depth
        )
    )
    # Generator: base blocks, growth blocks, and to_RGB layer shapes.
    gen_base, gen_growth, gen_to_rgb = (
        gan_layer_architecture_shapes.split_up_generator_conv_layer_properties(
            generator_props,
            config["conv_num_filters"],
            config["conv_strides"],
            image_depth
        )
    )
    config["generator"]["base_conv_blocks"] = gen_base
    config["generator"]["growth_conv_blocks"] = gen_growth
    config["generator"]["to_rgb_layers"] = gen_to_rgb
    # Discriminator: from_RGB layers, base blocks, and growth blocks.
    disc_from_rgb, disc_base, disc_growth = (
        gan_layer_architecture_shapes.split_up_discriminator_conv_layer_properties(
            discriminator_props,
            config["conv_num_filters"],
            config["conv_strides"],
            image_depth
        )
    )
    config["discriminator"]["from_rgb_layers"] = disc_from_rgb
    config["discriminator"]["base_conv_blocks"] = disc_base
    config["discriminator"]["growth_conv_blocks"] = disc_growth
    # For GANomaly with input masking enabled, precompute the per-resolution
    # image mask block pixel sizes.
    gen_cfg = config["generator"]
    mask_percent = gen_cfg["GANomaly"]["mask_generator_input_images_percent"]
    if gen_cfg["architecture"] == "GANomaly" and mask_percent > 0.:
        gen_cfg["image_mask_block_sizes"] = (
            image_masks.calculate_image_mask_block_sizes_per_resolution(
                num_resolutions=num_conv_blocks,
                min_height=gen_cfg["projection_dims"][0],
                min_width=gen_cfg["projection_dims"][1],
                pixel_mask_percent=mask_percent
            )
        )
    return config
import os
import sys
import traceback
import pathlib
import json
from dynamicmethod import dynamicmethod
from .file_utils import FileWrapper
__all__ = ['TNode', 'is_file_path', 'open_file']
def get_traceback(exc=None):
    """Return the traceback of *exc*, or the active system traceback.

    Args:
        exc (BaseException | None): Exception whose ``__traceback__`` is
            wanted. When None (or traceback-less), fall back to the
            traceback of the exception currently being handled.

    Returns:
        A traceback object, or None when neither source has one.
    """
    # Capture the system traceback first: it must be read before another
    # exception is raised, or it would be replaced.
    _, _, sys_tb = sys.exc_info()
    try:
        return exc.__traceback__
    except AttributeError:
        # Only AttributeError is expected (exc is None or lacks the
        # attribute); the original broad `(AttributeError, Exception)`
        # tuple was redundant and could hide real bugs.
        return sys_tb
def is_file_path(filename):
    """Return True when *filename* looks like a filesystem path.

    Accepts str, bytes, pathlib.Path, and any object implementing the
    os.fspath protocol (``__fspath__``).
    """
    if isinstance(filename, (str, bytes, pathlib.Path)):
        return True
    return hasattr(filename, '__fspath__')
open_file = FileWrapper
class TNode(object):
    """Tree node with a title, optional data payload, and parent/children links."""
    # Separator used when joining titles into a full hierarchical title.
    DELIM = ' > '
    @dynamicmethod
    def get_delimiter(cls_self):
        """Return the title delimiter (class- or instance-level via dynamicmethod)."""
        return cls_self.DELIM
    @dynamicmethod
    def set_delimiter(cls_self, delim):
        """Set the title delimiter (on the class, or on a single instance)."""
        cls_self.DELIM = delim
def __init__(self, title='', *child, children=None, parent=None, data=None, **kwargs):
    """Initialize the node.

    Args:
        title (str): Title of this node.
        *child: Child nodes given positionally.
        children (list): Child nodes given as a keyword; added before
            the positional ones. The list is not modified.
        parent (TNode): Parent to attach this node to.
        data (object): Payload stored via set_data().
        **kwargs: Extra attributes set on the node before children are added.
    """
    self._title = title
    self._parent = None
    self._children = []
    # Set given keyword arguments as attributes first, so add_child/parent
    # hooks below can rely on them.
    for k, v in kwargs.items():
        setattr(self, k, v)
    # Combine keyword children with positional ones without mutating the
    # caller's list (the original extend() modified the argument in place)
    # and without shadowing the *child parameter.
    all_children = list(children) if children is not None else []
    all_children.extend(child)
    for node in all_children:
        self.add_child(node)
    # Add parent
    if parent is not None:
        self.parent = parent
    if data is not None:
        self.set_data(data)
def validate_parent(self, parent):
    """Validate that this parent object is allowed to be a parent.

    Subclass hook: raise an error when this parent must not be set.
    The base implementation accepts any parent.
    """
    pass
def get_parent(self):
    """Return the parent node (or None when detached)."""
    return self._parent
def set_parent(self, parent):
    """Detach from the current parent (if any) and attach to *parent*.

    Passing None simply detaches the node. Raises whatever
    validate_parent() raises for a disallowed parent.
    """
    # Detach from the current parent; failures (no parent, parent without
    # remove_child, or child already absent) are intentionally ignored.
    try:
        self._parent.remove_child(self)
    except (AttributeError, ValueError, TypeError):
        pass
    if parent is not None:
        self.validate_parent(parent)
    self._parent = parent
    # Register with the new parent. add_child sees child.parent == self
    # already set, so this does not recurse back into set_parent.
    try:
        self._parent.add_child(self)
    except (AttributeError, ValueError, TypeError):
        pass
@property
def parent(self):
    """Return the parent."""
    return self.get_parent()
@parent.setter
def parent(self, parent):
    """Set the parent. This property calls set_parent, so inheritance can just override set_parent()."""
    self.set_parent(parent)
def get_parents(self, require_title=False):
    """Yield ancestors from nearest to farthest.

    Args:
        require_title (bool): Stop as soon as an ancestor lacks a
            non-empty string title.
    """
    node = self.parent
    while node is not None:
        if require_title:
            name = getattr(node, 'title', None)
            if not name or not isinstance(name, str):
                return
        yield node
        node = getattr(node, 'parent', None)
@property
def title(self):
    """Return the title of this Node"""
    return self._title
@title.setter
def title(self, title):
    """Set the title of this Node.

    Raises:
        ValueError: If a sibling under the same parent already uses *title*.
    """
    if title is None:
        title = ''
    # Only check siblings when the title actually changes; otherwise
    # re-assigning the current title would match this very node inside
    # the parent's containment check and raise spuriously.
    if title != self._title and self._parent and title in self._parent:
        raise ValueError('Title already exists in parent!')
    self._title = title
@property
def full_title(self):
    """Return this node's title prefixed by every ancestor title, joined by the delimiter."""
    chain = [p.title for p in self.get_parents(require_title=True)]
    chain.reverse()
    chain.append(self.title)
    return self.get_delimiter().join(chain)
key = full_title
def depth(self):
    """Return the number of ancestors above this node."""
    return sum(1 for _ in self.get_parents())
def validate_child(self, child):
    """Validate that this child object is allowed to be a child.

    Subclass hook: raise an error when this child must not be added.
    The base implementation accepts any child.
    """
    pass
def add_child(self, child):
    """Add the given child and return it.

    Also points child.parent at this node. add_child and set_parent are
    mutually recursive; the parent check and the membership check below
    keep that recursion from looping or double-inserting.
    """
    self.validate_child(child)
    try:
        # Only reassign the parent when it actually changes; set_parent
        # will call back into add_child, which finds the child already
        # appended and returns without duplicating it.
        if getattr(child, 'parent', None) != self:
            child.parent = self
    except AttributeError:
        pass
    if child not in self._children:
        self._children.append(child)
    return child
def remove_child(self, child):
    """Remove the given child and return it.

    Raises:
        ValueError: If *child* is not among this node's children.
    """
    self._children.remove(child)
    # Clear the back-reference. set_parent(None)'s own remove_child call
    # fails harmlessly since the child was already removed above.
    try:
        if getattr(child, 'parent', None):
            child.parent = None
    except AttributeError:
        pass
    return child
def clear(self):
    """Detach and discard every child."""
    while self._children:
        node = self._children.pop()
        try:
            # Clear the back-reference; ignore children that refuse.
            node.parent = None
        except (AttributeError, Exception):
            pass
def exists(self, child):
    """Return True when *child* can be resolved from this node (see __contains__)."""
    return self.__contains__(child)
def update(self, d=None, **kwargs):
    """Update the values of this node.

    Args:
        d (dict): Mapping of attribute names to values. A 'children' key
            is treated specially: its items are added via add_child().
            The caller's dict is not modified.
        **kwargs: Additional attribute overrides; they win over *d*.

    Raises:
        TypeError: If *d* is neither None nor a dict.
    """
    if d is None:
        merged = dict(kwargs)
    elif not isinstance(d, dict):
        raise TypeError('Update requires a dictionary or keyword arguments.')
    else:
        # Copy so the caller's dict is not mutated by pop()/update()
        # (the original modified the argument in place).
        merged = dict(d)
        merged.update(kwargs)
    children = merged.pop('children', None)
    if children:
        for child in children:
            self.add_child(child)
    for k, v in merged.items():
        setattr(self, k, v)
def find_parent(self, full_title, create_missing=False):
    """Find the full_title's parent and base title.

    Args:
        full_title (str or TNode): Delimited path such as 'a > b > c'
            (or a node, whose full_title is used).
        create_missing (bool): Create missing intermediate nodes instead
            of raising KeyError.

    Returns:
        tuple: (parent_node, base_title) where base_title is the last
        path segment ('' when the path names this node itself).

    Raises:
        TypeError: If full_title is neither a str nor node-like.
        KeyError: If an intermediate segment is missing and
            create_missing is False.
    """
    if not isinstance(full_title, str):
        try:
            full_title = full_title.full_title
        except (AttributeError, Exception) as err:
            raise TypeError('Invalid full_title given! This must be a str or TNode') from err
    split = full_title.split(self.get_delimiter())
    # Drop a leading segment naming this node, making the lookup relative.
    if split[0] == self.title:
        split = split[1:]
    parent = self
    # Walk down every segment except the last, which is the base title.
    for t in split[:-1]:
        for child in getattr(parent, 'children', []):
            if child.title == t:
                parent = child
                break
        else:
            if create_missing:
                parent = parent.add_child(self.__class__(t))
            else:
                raise KeyError('"{}" not found in {}'.format(t, parent))
    try:
        return parent, split[-1]
    except IndexError:
        # split became empty: full_title named this node exactly.
        return parent, ''
def find(self, full_title):
    """Find and return the child that may be several levels deep.

    Raises:
        KeyError: When no matching child exists.
    """
    parent, title = self.find_parent(full_title)
    match = next(
        (c for c in getattr(parent, 'children', []) if c.title == title),
        None)
    if match is None:
        raise KeyError('"{}" not found in {}'.format(title, parent))
    return match
def iter_children(self):
    """Iterate over the direct children only."""
    yield from self._children
@property
def children(self):
    """Return a shallow list copy of the direct children."""
    return self._children[:]
def iter(self):
    """Depth-first (pre-order) iteration over all descendants."""
    for node in self.iter_children():
        yield node
        if not len(node):
            continue
        try:
            yield from node.iter()
        except (AttributeError, TypeError):
            # Child does not support recursive iteration; skip its subtree.
            pass
def iter_nearest(self):
    """Breadth-first iteration: nearest descendants first, level by level."""
    level = self.children
    while level:
        next_level = []
        for node in level:
            yield node
            next_level.extend(getattr(node, 'children', []))
        level = next_level
def __iter__(self):
    """Iterate depth-first over all descendants (see iter())."""
    return self.iter()
def __len__(self):
    """Return the number of direct children."""
    return len(self._children)
def __bool__(self):
    # Defined explicitly: without it, __len__ would make childless nodes falsy.
    return True  # This is not None. Do not return True or False based on empty children
def __contains__(self, item):
    """Return True when *item* resolves to a child via __getitem__."""
    # `except Exception` already covers IndexError/KeyError; the original
    # tuple was redundant and bound an exception variable it never used.
    try:
        self.__getitem__(item)
        return True
    except Exception:
        return False
def __getitem__(self, full_title):
    """Look up a child by index, node equality, or (full) title path.

    Args:
        full_title (int, TNode, or str): An index into the direct
            children, a node to match against direct children, or a
            delimited title path resolved relative to this node.

    Raises:
        IndexError: For an out-of-range integer index.
        KeyError: When no child matches the title path.
    """
    if isinstance(full_title, int):
        return self._children[full_title]
    elif isinstance(full_title, TNode):
        for child in self._children:
            if child == full_title:
                return child
        # Not a direct child: fall back to a title-path lookup.
        # Get the full title
        full_title = full_title.full_title
    # Get the lowest level parent
    parent, title = self.find_parent(full_title)
    # Find if there is a child with the same title
    for ch in getattr(parent, 'children', []):
        if getattr(ch, 'title', None) == title:
            return ch
    raise KeyError('"{}" not found in {}'.format(title, parent))
def __setitem__(self, full_title, child):
    """Assign a child by integer index or by (possibly nested) full title.

    Missing intermediate parents are created when setting by title.
    """
    parent = self
    if isinstance(full_title, int):
        index = full_title
        try:
            parent._children[index] = child
        except IndexError:
            # Index past the end: fall back to appending.
            parent._children.append(child)
        except AttributeError:
            pass
        try:
            # Re-run add_child so the parent/child back-links stay consistent.
            parent.add_child(child)
        except (AttributeError, Exception):
            pass
        return
    # Get the lowest level parent
    parent, title = self.find_parent(full_title, create_missing=True)
    # Find if there is a child with the same title
    for i, ch in enumerate(getattr(parent, 'children', [])):
        if getattr(ch, 'title', None) == title:
            try:
                parent[i] = child  # This is a questionable way to set the child to the parent at the index.
            except (TypeError, Exception):
                pass
            try:
                parent.add_child(child)
            except (AttributeError, Exception):
                pass
            return
    # Add the child
    try:
        # Keep the child's title in sync with the requested leaf title.
        if title != child.title:
            child.title = title
    except (AttributeError, Exception):
        pass
    try:
        parent.add_child(child)
    except (AttributeError, Exception):
        pass
def __eq__(self, other):
    # Strings compare against either the short title or the full title;
    # anything else falls back to object identity (object.__eq__).
    if isinstance(other, str):
        return other == self.title or other == self.full_title
    return super(TNode, self).__eq__(other)

def __hash__(self):
    # Hash follows the full title so a node can be used as a dict/set key
    # that matches its full-title string.
    return hash(self.full_title)

def __str__(self):
    d = {'cls': self.__class__.__name__, 'full_title': self.full_title, 'title': self.title}
    return '{cls}(full_title={full_title!r})'.format(**d)

def __repr__(self):
    return '<{} at 0x{:016X}>'.format(self.__str__(), id(self))  # "<TNode(full_title=) at 0x0000000000000000>"
def has_data(self):
    """Return True when data has been stored on this node."""
    # None doubles as "no data"; storing None is indistinguishable from unset.
    return getattr(self, '_data', None) is not None

def get_data(self):
    """Return the data stored."""
    return getattr(self, '_data', None)

def set_data(self, data):
    """Set the stored data."""
    setattr(self, '_data', data)

# Expose get/set through a conventional attribute-style property.
data = property(get_data, set_data)
def to_dict(self, exclude=None, **kwargs):
    """Return this tree as a dictionary of data.
    Args:
        exclude (list): List of full_title's to exclude. This can also exclude a parent and everything below it.
    Returns:
        tree (dict): Ex {'title': title, 'data': data if data, 'children': [{'title': title, 'data': data}]}
    """
    if exclude is None:
        exclude = []
    tree = {}
    if self.full_title not in exclude:
        tree = {'title': self.title}
        # 'children' stays detached (not in the dict) unless this node has
        # children and no data of its own.
        children = []  # detached
        if self.has_data():
            tree['data'] = self.get_data()
        elif len(self) > 0:
            children = tree['children'] = []  # Only attach if children
        subparents = []
        for child in self.iter_children():
            if child.has_data():
                children.append(child.to_dict(exclude=exclude, **kwargs))
            else:
                subparents.append(child.to_dict(exclude=exclude, **kwargs))
        # Add parents after children
        children.extend(subparents)
    return tree

# Dict-style alias for to_dict.
asdict = to_dict
@classmethod
def from_dict(cls, d, tree=None, **kwargs):
    """Create a tree from the given dictionary.
    Args:
        d (dict): Dictionary of tree items.
            Example: {'title': title, 'data': data if data, 'children': [{'title': title, 'data': data}]}
        tree (TNode)[None]: Parent tree node to add items to. If None create a top level parent.
    Returns:
        tree (TNode): Tree (TNode) object that was created.
    """
    # NOTE(review): pop mutates the caller's dict — confirm intended.
    children = d.pop('children', [])
    if tree is None:
        tree = cls()  # self is the class and this was called as a classmethod
    # Set all d items as attributes
    for attr, val in d.items():
        try:
            setattr(tree, attr, val)
        except (AttributeError, TypeError, Exception):
            pass
    for child_d in children:
        child = cls.from_dict(child_d, **kwargs)
        # Attaching via the parent property also registers the child.
        child.parent = tree
    return tree

# Dict-style alias for from_dict.
fromdict = from_dict
@classmethod
def serialize(cls, value):
    """Convert a value to a string that can be saved and loaded.

    Tries JSON first, falls back to str(); prints the original error (and
    returns None) only when both fail.
    """
    try:
        return json.dumps(value)
    except Exception as err:
        # json.dumps raises TypeError/ValueError for unsupported objects;
        # json.JSONDecodeError (a decode-side error, as the original caught)
        # never occurs here, so the broad Exception is what actually applies.
        try:
            return str(value)
        except Exception:
            cls.print_exception(err, msg='Cannot serialize value "{}"!'.format(value))
@classmethod
def deserialize(cls, value):
    """Convert a string or bytes value to a Python object.

    Returns the value unchanged when it is not valid JSON (the original's
    inner try could never raise, so its error-printing branch was dead code).
    """
    try:
        return json.loads(value)
    except Exception:
        # Not JSON (or not str/bytes at all): hand the raw value back.
        return value
@staticmethod
def print_exception(exc, msg=None, error_cls=None):
    """Print the given exception. If a message is given it will be prepended to the exception message with a \n.
    Args:
        exc (Exception): Exception that was raised.
        msg (str)[None]: Additional message to prepend to the exception.
        error_cls (Exception)[None]: New Exception class to print the exception as.
    """
    if error_cls is None:
        if isinstance(exc, BaseException):
            # Preserve the concrete class of the raised exception; the
            # original collapsed everything to BaseException, losing the type.
            error_cls = type(exc)
        else:
            error_cls = ValueError
    # Prepend the message to the exception if given
    if msg:
        msg = "\n".join((msg, str(exc)))
    else:
        msg = str(exc)
    exc_tb = get_traceback(exc)
    try:
        new_err = error_cls(msg)
    except (TypeError, ValueError, Exception):
        # Error class does not accept a single string message argument.
        new_err = ValueError(msg)
    traceback.print_exception(error_cls, new_err, exc_tb)
# Registry of save/load handlers keyed by lowercase filename extension.
SAVE_EXT = {}
LOAD_EXT = {}
# Re-export the module-level helpers as static methods for convenience.
is_file_path = staticmethod(is_file_path)
open_file = staticmethod(open_file)
@classmethod
def register_saver(cls, ext, func=None):
    """Register a save handler for a filename extension; usable as a decorator."""
    if not isinstance(ext, str):
        raise TypeError('Invalid filename extension given to register!')
    if func is None:
        # Called as @register_saver('.ext'): return a decorator that re-enters.
        return lambda f: cls.register_saver(ext, f)
    cls.SAVE_EXT[str(ext).lower()] = func
    return func
@classmethod
def register_loader(cls, ext, func=None):
    """Register a load handler for a filename extension; usable as a decorator."""
    if not isinstance(ext, str):
        raise TypeError('Invalid filename extension given to register!')
    if func is None:
        # Called as @register_loader('.ext'): return a decorator that re-enters.
        return lambda f: cls.register_loader(ext, f)
    if hasattr(func, '__func__'):
        # Unwrap bound/class methods so the stored function can be re-bound later.
        func = func.__func__
    cls.LOAD_EXT[str(ext).lower()] = func
    return func
def save(self, filename, ext=None, **kwargs):
    """Save this tree to a file.
    Args:
        filename (str): Filename or opened file object to save this tree node to.
        ext (str)[None]: File extension (Example: '.ini', '.json', ...). Must give if filename is file object.
        **kwargs (object/dict): Save function keyword arguments.
    """
    if ext is None:
        if not self.is_file_path(filename):
            raise TypeError('Missing "ext" argument when "filename" was not a path!')
        ext = os.path.splitext(str(filename))[-1]
    func = self.SAVE_EXT.get(ext.lower())
    if not callable(func):
        raise ValueError('Invalid filename extension given!')
    return func(self, filename, **kwargs)
@dynamicmethod
def load(self, filename, ext=None, **kwargs):
    """load a tree from a file.
    Args:
        filename (str/TextIoWrapper): Filename or opened file object to read and load the tree from.
        ext (str)[None]: File extension (Example: '.ini', '.json', ...). Must give if filename is file object.
        **kwargs (object/dict): load function keyword arguments.
    """
    # dynamicmethod: self may be the class itself or an instance.
    cls = self.__class__ if isinstance(self, TNode) else self
    if ext is None:
        if not self.is_file_path(filename):
            raise TypeError('Missing "ext" argument when "filename" was not a path!')
        ext = os.path.splitext(str(filename))[-1]
    func = self.LOAD_EXT.get(ext.lower())
    if not callable(func):
        raise ValueError('Invalid filename extension given!')
    # Bind the stored plain function to this class/instance before calling.
    return func.__get__(self, cls)(filename, **kwargs)
def to_json(self, filename, **kwargs):
    """Save this tree to *filename* as indented JSON (via to_dict).

    Returns the filename for chaining. The catch-all parameter was renamed
    from the typo "kwars" to the conventional "kwargs" (internal name only,
    invisible to callers).
    """
    d = self.to_dict()
    with self.open_file(filename, 'w') as file:
        json.dump(d, file, indent=2)
    return filename
@dynamicmethod
def from_json(self, filename, **kwargs):
    """Load a tree from a JSON file produced by to_json."""
    with self.open_file(filename, 'r') as file:
        d = json.load(file)
    # NOTE(review): caller-supplied **kwargs are discarded here; only the
    # optional parent tree is forwarded to from_dict — confirm intended.
    kwargs = {}
    if isinstance(self, TNode):
        # Called on an instance: load the data into this node.
        kwargs['tree'] = self
    return self.from_dict(d, **kwargs)
# Built-in format handlers: JSON save/load keyed by the '.json' extension.
TNode.register_saver('.json', TNode.to_json)
TNode.register_loader('.json', TNode.from_json)
import sys
import traceback
import pathlib
import json
from dynamicmethod import dynamicmethod
from .file_utils import FileWrapper
__all__ = ['TNode', 'is_file_path', 'open_file']
def get_traceback(exc=None):
    """Return *exc*'s traceback, falling back to the active system traceback."""
    _, _, sys_tb = sys.exc_info()  # Capture before anything else can raise.
    # Objects without __traceback__ (including None) fall back to sys_tb.
    return getattr(exc, '__traceback__', sys_tb)
def is_file_path(filename):
    """Return True for str/bytes/Path or any object implementing __fspath__."""
    if isinstance(filename, (str, bytes, pathlib.Path)):
        return True
    return hasattr(filename, '__fspath__')
open_file = FileWrapper
class TNode(object):
DELIM = ' > '
@dynamicmethod
def get_delimiter(cls_self):
return cls_self.DELIM
@dynamicmethod
def set_delimiter(cls_self, delim):
cls_self.DELIM = delim
def __init__(self, title='', *child, children=None, parent=None, data=None, **kwargs):
self._title = title
self._parent = None
self._children = []
# Set given keyword arguments as attributes
for k, v in kwargs.items():
setattr(self, k, v)
# Add children
if children is None:
children = []
children.extend(child)
for child in children:
self.add_child(child)
# Add parent
if parent is not None:
self.parent = parent
if data is not None:
self.set_data(data)
def validate_parent(self, parent):
"""Validate that this parent object is allowed to be a parent.
Raises and error when this parent is not allowed to be set.
"""
pass
def get_parent(self):
"""Return the parent"""
return self._parent
def set_parent(self, parent):
try:
self._parent.remove_child(self)
except (AttributeError, ValueError, TypeError):
pass
if parent is not None:
self.validate_parent(parent)
self._parent = parent
try:
self._parent.add_child(self)
except (AttributeError, ValueError, TypeError):
pass
@property
def parent(self):
"""Return the parent."""
return self.get_parent()
@parent.setter
def parent(self, parent):
"""Set the parent. This property calls set_parent, so inheritance can just override set_parent()."""
self.set_parent(parent)
def get_parents(self, require_title=False):
"""Iterate through the parents"""
p = self.parent
while True:
t = getattr(p, 'title', None)
if p is None or (require_title and (not t or not isinstance(t, str))):
break
yield p
p = getattr(p, 'parent', None)
@property
def title(self):
"""Return the title of this Node"""
return self._title
@title.setter
def title(self, title):
"""Set the title of this Node"""
if title is None:
title = ''
if self._parent and title in self._parent:
raise ValueError('Title already exists in parent!')
self._title = title
@property
def full_title(self):
"""Return the full title with the parent title's separated by the delimiter."""
tt = [self.title] + [p.title for p in self.get_parents(require_title=True)]
return self.get_delimiter().join(reversed(tt))
key = full_title
def depth(self):
"""Return the depth of this node."""
return len(list(self.get_parents()))
def validate_child(self, child):
"""Validate that this child object is allowed to be a child.
Raises and error when this child is not allowed to be added.
"""
pass
def add_child(self, child):
"""Add the given child"""
self.validate_child(child)
try:
if getattr(child, 'parent', None) != self:
child.parent = self
except AttributeError:
pass
if child not in self._children:
self._children.append(child)
return child
def remove_child(self, child):
"""Remove the given child"""
self._children.remove(child)
try:
if getattr(child, 'parent', None):
child.parent = None
except AttributeError:
pass
return child
def clear(self):
"""Clear all children."""
for i in reversed(range(len(self._children))):
try:
child = self._children.pop(i)
child.parent = None
except (AttributeError, Exception):
pass
def exists(self, child):
"""Return if the child exists."""
return child in self
def update(self, d=None, **kwargs):
"""Update the values of this node."""
if d is None:
d = kwargs
elif not isinstance(d, dict):
raise TypeError('Update requires a dictionary or keyword arguments.')
else:
d.update(kwargs)
children = d.pop('children', None)
if children:
for child in children:
self.add_child(child)
for k, v in d.items():
setattr(self, k, v)
def find_parent(self, full_title, create_missing=False):
"""Find the full_title's parent and base title."""
if not isinstance(full_title, str):
try:
full_title = full_title.full_title
except (AttributeError, Exception) as err:
raise TypeError('Invalid full_title given! This must be a str or TNode') from err
split = full_title.split(self.get_delimiter())
if split[0] == self.title:
split = split[1:]
parent = self
for t in split[:-1]:
for child in getattr(parent, 'children', []):
if child.title == t:
parent = child
break
else:
if create_missing:
parent = parent.add_child(self.__class__(t))
else:
raise KeyError('"{}" not found in {}'.format(t, parent))
try:
return parent, split[-1]
except IndexError:
return parent, ''
def find(self, full_title):
"""Find and return the child that may be several levels deep."""
parent, title = self.find_parent(full_title)
for child in getattr(parent, 'children', []):
if child.title == title:
return child
raise KeyError('"{}" not found in {}'.format(title, parent))
def iter_children(self):
"""Iterate through my direct children only."""
for child in self._children:
yield child
@property
def children(self):
"""Return a list of child objects."""
return list(self._children)
def iter(self):
"""Iterate through each child and their children."""
for child in self.iter_children():
yield child
if len(child) > 0:
try:
yield from child.iter()
except (AttributeError, TypeError):
pass
def iter_nearest(self):
"""Iterate the nearest children first."""
children = self.children
while children:
sub = []
for child in children:
yield child
for ch in getattr(child, 'children', []):
sub.append(ch)
children = sub
def __iter__(self):
return self.iter()
def __len__(self):
return len(self._children)
def __bool__(self):
return True # This is not None. Do not return True or False based on empty children
def __contains__(self, item):
try:
self.__getitem__(item)
return True
except (IndexError, KeyError, Exception) as err:
return False
def __getitem__(self, full_title):
if isinstance(full_title, int):
return self._children[full_title]
elif isinstance(full_title, TNode):
for child in self._children:
if child == full_title:
return child
# Get the full title
full_title = full_title.full_title
# Get the lowest level parent
parent, title = self.find_parent(full_title)
# Find if there is a child with the same title
for ch in getattr(parent, 'children', []):
if getattr(ch, 'title', None) == title:
return ch
raise KeyError('"{}" not found in {}'.format(title, parent))
def __setitem__(self, full_title, child):
parent = self
if isinstance(full_title, int):
index = full_title
try:
parent._children[index] = child
except IndexError:
parent._children.append(child)
except AttributeError:
pass
try:
parent.add_child(child)
except (AttributeError, Exception):
pass
return
# Get the lowest level parent
parent, title = self.find_parent(full_title, create_missing=True)
# Find if there is a child with the same title
for i, ch in enumerate(getattr(parent, 'children', [])):
if getattr(ch, 'title', None) == title:
try:
parent[i] = child # This is a questionable way to set the child to the parent at the index.
except (TypeError, Exception):
pass
try:
parent.add_child(child)
except (AttributeError, Exception):
pass
return
# Add the child
try:
if title != child.title:
child.title = title
except (AttributeError, Exception):
pass
try:
parent.add_child(child)
except (AttributeError, Exception):
pass
def __eq__(self, other):
if isinstance(other, str):
return other == self.title or other == self.full_title
return super(TNode, self).__eq__(other)
def __hash__(self):
return hash(self.full_title)
def __str__(self):
d = {'cls': self.__class__.__name__, 'full_title': self.full_title, 'title': self.title}
return '{cls}(full_title={full_title!r})'.format(**d)
def __repr__(self):
return '<{} at 0x{:016X}>'.format(self.__str__(), id(self)) # "<TNode(full_title=) at 0x0000000000000000>"
def has_data(self):
"""Helper to return if this function has data."""
return getattr(self, '_data', None) is not None
def get_data(self):
"""Return the data stored."""
return getattr(self, '_data', None)
def set_data(self, data):
"""Set the stored data."""
setattr(self, '_data', data)
data = property(get_data, set_data)
def to_dict(self, exclude=None, **kwargs):
"""Return this tree as a dictionary of data.
Args:
exclude (list): List of full_title's to exclude. This can also exclude a parent and everything below it.
Returns:
tree (dict): Ex {'title': title, 'data': data if data, 'children': [{'title': title, 'data': data}]}
"""
if exclude is None:
exclude = []
tree = {}
if self.full_title not in exclude:
tree = {'title': self.title}
children = [] # detached
if self.has_data():
tree['data'] = self.get_data()
elif len(self) > 0:
children = tree['children'] = [] # Only attach if children
subparents = []
for child in self.iter_children():
if child.has_data():
children.append(child.to_dict(exclude=exclude, **kwargs))
else:
subparents.append(child.to_dict(exclude=exclude, **kwargs))
# Add parents after children
children.extend(subparents)
return tree
asdict = to_dict
@classmethod
def from_dict(cls, d, tree=None, **kwargs):
"""Create a tree from the given dictionary.
Args:
d (dict): Dictionary of tree items.
Example: {'title': title, 'data': data if data, 'children': [{'title': title, 'data': data}]}
tree (TNode)[None]: Parent tree node to add items to. If None create a top level parent.
Returns:
tree (TNode): Tree (TNode) object that was created.
"""
children = d.pop('children', [])
if tree is None:
tree = cls() # self is the class and this was called as a classmethod
# Set all d items as attributes
for attr, val in d.items():
try:
setattr(tree, attr, val)
except (AttributeError, TypeError, Exception):
pass
for child_d in children:
child = cls.from_dict(child_d, **kwargs)
child.parent = tree
return tree
fromdict = from_dict
@classmethod
def serialize(cls, value):
"""Convert a value to a string or bytes value that can be saved and loaded."""
try:
return json.dumps(value)
except (json.JSONDecodeError, Exception) as err:
try:
return str(value)
except (json.JSONDecodeError, Exception):
cls.print_exception(err, msg='Cannot serialize value "{}"!'.format(value))
@classmethod
def deserialize(cls, value):
"""Convert a string or bytes value to a Python object."""
try:
return json.loads(value)
except (json.JSONDecodeError, Exception) as err:
try:
return value
except (json.JSONDecodeError, Exception):
cls.print_exception(err, msg='Cannot deserialize value "{}"!'.format(value))
@staticmethod
def print_exception(exc, msg=None, error_cls=None):
"""Print the given exception. If a message is given it will be prepended to the exception message with a \n.
Args:
exc (Exception): Exception that was raised.
msg (str)[None]: Additional message to prepend to the exception.
error_cls (Exception)[None]: New Exception class to print the exception as.
"""
if error_cls is None:
if isinstance(exc, BaseException):
error_cls = BaseException
else:
error_cls = ValueError
# Prepend the message to the exception if given
if msg:
msg = "\n".join((msg, str(exc)))
else:
msg = str(exc)
exc_tb = get_traceback(exc)
try:
new_err = error_cls(msg) # Error class does not accept a string message argument
except (TypeError, ValueError, Exception):
new_err = ValueError(msg)
traceback.print_exception(error_cls, new_err, exc_tb)
SAVE_EXT = {}
LOAD_EXT = {}
is_file_path = staticmethod(is_file_path)
open_file = staticmethod(open_file)
@classmethod
def register_saver(cls, ext, func=None):
if not isinstance(ext, str):
raise TypeError('Invalid filename extension given to register!')
if func is None:
def decorator(func):
return cls.register_saver(ext, func)
return decorator
cls.SAVE_EXT[str(ext).lower()] = func
return func
@classmethod
def register_loader(cls, ext, func=None):
if not isinstance(ext, str):
raise TypeError('Invalid filename extension given to register!')
if func is None:
def decorator(func):
return cls.register_loader(ext, func)
return decorator
if hasattr(func, '__func__'):
func = func.__func__
cls.LOAD_EXT[str(ext).lower()] = func
return func
def save(self, filename, ext=None, **kwargs):
"""Save this tree to a file.
Args:
filename (str): Filename or opened file object to save this tree node to.
ext (str)[None]: File extension (Example: '.ini', '.json', ...). Must give if filename is file object.
**kwargs (object/dict): Save function keyword arguments.
"""
if ext is None:
if self.is_file_path(filename):
ext = os.path.splitext(str(filename))[-1]
else:
raise TypeError('Missing "ext" argument when "filename" was not a path!')
func = self.SAVE_EXT.get(ext.lower(), None)
if callable(func):
return func(self, filename, **kwargs)
raise ValueError('Invalid filename extension given!')
@dynamicmethod
def load(self, filename, ext=None, **kwargs):
"""load a tree from a file.
Args:
filename (str/TextIoWrapper): Filename or opened file object to read and load the tree from.
ext (str)[None]: File extension (Example: '.ini', '.json', ...). Must give if filename is file object.
**kwargs (object/dict): load function keyword arguments.
"""
cls = self
if isinstance(self, TNode):
cls = self.__class__
if ext is None:
if self.is_file_path(filename):
ext = os.path.splitext(str(filename))[-1]
else:
raise TypeError('Missing "ext" argument when "filename" was not a path!')
func = self.LOAD_EXT.get(ext.lower(), None)
if callable(func):
bound = func.__get__(self, cls)
return bound(filename, **kwargs)
raise ValueError('Invalid filename extension given!')
def to_json(self, filename, **kwars):
d = self.to_dict()
with self.open_file(filename, 'w') as file:
json.dump(d, file, indent=2)
return filename
@dynamicmethod
def from_json(self, filename, **kwargs):
with self.open_file(filename, 'r') as file:
d = json.load(file)
kwargs = {}
if isinstance(self, TNode):
kwargs['tree'] = self
return self.from_dict(d, **kwargs)
TNode.register_saver('.json', TNode.to_json)
TNode.register_loader('.json', TNode.from_json) | 0.530966 | 0.126273 |
from six import text_type
from typing import Union
from zerver.lib.test_classes import WebhookTestCase
class BitbucketHookTests(WebhookTestCase):
    """Webhook tests for the legacy Bitbucket payload integration.

    Each test loads a JSON fixture from FIXTURE_DIR_NAME, posts it via
    URL_TEMPLATE (payload travels in the query string), and asserts the
    rendered stream message and subject.
    """
    STREAM_NAME = 'bitbucket'
    URL_TEMPLATE = "/api/v1/external/bitbucket?payload={payload}&stream={stream}"
    FIXTURE_DIR_NAME = 'bitbucket'
    EXPECTED_SUBJECT = u"Repository name"
    EXPECTED_SUBJECT_BRANCH_EVENTS = u"Repository name / master"

    def test_bitbucket_on_push_event(self):
        # type: () -> None
        fixture_name = 'push'
        self.url = self.build_url(fixture_name)
        commit_info = u'* [25f93d2](https://bitbucket.org/kolaszek/repository-name/commits/25f93d22b719e2d678a7ad5ee0ef0d1fcdf39c12): c'
        expected_message = u"kolaszek pushed to branch master\n\n{}".format(commit_info)
        self.send_and_test_stream_message(fixture_name, self.EXPECTED_SUBJECT_BRANCH_EVENTS, expected_message, **self.api_auth(self.TEST_USER_EMAIL))

    def test_bitbucket_on_push_commits_above_limit_event(self):
        # type: () -> None
        fixture_name = 'push_commits_above_limit'
        self.url = self.build_url(fixture_name)
        commit_info = u'* [25f93d2](https://bitbucket.org/kolaszek/repository-name/commits/25f93d22b719e2d678a7ad5ee0ef0d1fcdf39c12): c\n'
        # Ten rendered commits followed by a "[and 40 more commit(s)]" trailer.
        expected_message = u"kolaszek pushed to branch master\n\n{}[and 40 more commit(s)]".format(commit_info * 10)
        self.send_and_test_stream_message(fixture_name, self.EXPECTED_SUBJECT_BRANCH_EVENTS, expected_message, **self.api_auth(self.TEST_USER_EMAIL))

    def test_bitbucket_on_force_push_event(self):
        # type: () -> None
        fixture_name = 'force_push'
        self.url = self.build_url(fixture_name)
        expected_message = u"kolaszek [force pushed](https://bitbucket.org/kolaszek/repository-name)"
        self.send_and_test_stream_message(fixture_name, self.EXPECTED_SUBJECT, expected_message, **self.api_auth(self.TEST_USER_EMAIL))

    def get_body(self, fixture_name):
        # type: (text_type) -> Union[text_type, Dict[str, text_type]]
        # Payload travels in the URL, so the POST body is empty.
        # NOTE(review): Dict in these type comments is not imported from typing.
        return {}

    def get_payload(self, fixture_name):
        # type: (text_type) -> Union[text_type, Dict[str, text_type]]
        return self.fixture_data(self.FIXTURE_DIR_NAME, fixture_name)

    def build_webhook_url(self):
        # type: () -> text_type
        # Unused here; per-fixture URLs are built by build_url() instead.
        return ''

    def build_url(self, fixture_name):
        # type: (text_type) -> text_type
        return self.URL_TEMPLATE.format(payload=self.get_payload(fixture_name), stream=self.STREAM_NAME)
from typing import Union
from zerver.lib.test_classes import WebhookTestCase
class BitbucketHookTests(WebhookTestCase):
STREAM_NAME = 'bitbucket'
URL_TEMPLATE = "/api/v1/external/bitbucket?payload={payload}&stream={stream}"
FIXTURE_DIR_NAME = 'bitbucket'
EXPECTED_SUBJECT = u"Repository name"
EXPECTED_SUBJECT_BRANCH_EVENTS = u"Repository name / master"
def test_bitbucket_on_push_event(self):
# type: () -> None
fixture_name = 'push'
self.url = self.build_url(fixture_name)
commit_info = u'* [25f93d2](https://bitbucket.org/kolaszek/repository-name/commits/25f93d22b719e2d678a7ad5ee0ef0d1fcdf39c12): c'
expected_message = u"kolaszek pushed to branch master\n\n{}".format(commit_info)
self.send_and_test_stream_message(fixture_name, self.EXPECTED_SUBJECT_BRANCH_EVENTS, expected_message, **self.api_auth(self.TEST_USER_EMAIL))
def test_bitbucket_on_push_commits_above_limit_event(self):
# type: () -> None
fixture_name = 'push_commits_above_limit'
self.url = self.build_url(fixture_name)
commit_info = u'* [25f93d2](https://bitbucket.org/kolaszek/repository-name/commits/25f93d22b719e2d678a7ad5ee0ef0d1fcdf39c12): c\n'
expected_message = u"kolaszek pushed to branch master\n\n{}[and 40 more commit(s)]".format(commit_info * 10)
self.send_and_test_stream_message(fixture_name, self.EXPECTED_SUBJECT_BRANCH_EVENTS, expected_message, **self.api_auth(self.TEST_USER_EMAIL))
def test_bitbucket_on_force_push_event(self):
# type: () -> None
fixture_name = 'force_push'
self.url = self.build_url(fixture_name)
expected_message = u"kolaszek [force pushed](https://bitbucket.org/kolaszek/repository-name)"
self.send_and_test_stream_message(fixture_name, self.EXPECTED_SUBJECT, expected_message, **self.api_auth(self.TEST_USER_EMAIL))
def get_body(self, fixture_name):
# type: (text_type) -> Union[text_type, Dict[str, text_type]]
return {}
def get_payload(self, fixture_name):
# type: (text_type) -> Union[text_type, Dict[str, text_type]]
return self.fixture_data(self.FIXTURE_DIR_NAME, fixture_name)
def build_webhook_url(self):
# type: () -> text_type
return ''
def build_url(self, fixture_name):
# type: (text_type) -> text_type
return self.URL_TEMPLATE.format(payload=self.get_payload(fixture_name), stream=self.STREAM_NAME) | 0.615666 | 0.205675 |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from scipy.stats import pearsonr, spearmanr
from seqeval.metrics import classification_report, precision_score, recall_score, f1_score
from sklearn.metrics import f1_score as classification_f1_score
def get_conll_scores(predictions, y, y_lex, unk='O'):
    """Get Conll style scores (precision, recall, f1)

    Args:
        predictions: model outputs; when a list is given, the last element
            is assumed to hold the final predictions.
        y: gold labels; argmax'ed over the last axis when one-hot (3-D).
        y_lex (dict): index -> tag lexicon used to decode both sequences.
        unk (str): tag used for predicted positions whose index is missing
            from y_lex.

    Returns:
        str: seqeval classification report (3 decimal digits).
    """
    if isinstance(predictions, list):
        predictions = predictions[-1]
    test_p = predictions
    if len(test_p.shape) > 2:
        # One-hot / probability output: reduce to label indices.
        test_p = test_p.argmax(2)
    test_y = y
    if len(test_y.shape) > 2:
        test_y = test_y.argmax(2)
    prediction_data = []
    for n in range(test_y.shape[0]):
        test_yval = []
        for i in list(test_y[n]):
            try:
                test_yval.append(y_lex[i])
            except KeyError:
                # Padding / unknown gold index: skipped, shrinking the sequence.
                pass
        # Predictions are truncated to the decoded gold length; positions
        # that fail lexicon lookup keep the `unk` tag.
        test_pval = [unk] * len(test_yval)
        for e, i in enumerate(list(test_p[n])[:len(test_pval)]):
            try:
                test_pval[e] = y_lex[i]
            except KeyError:
                pass
        prediction_data.append((test_yval, test_pval))
    y_true, y_pred = list(zip(*prediction_data))
    return classification_report(y_true, y_pred, digits=3)
def simple_accuracy(preds, labels):
    """Return the element-wise accuracy (mean of the equality mask)."""
    matches = (labels == preds)
    return matches.mean()
def accuracy(preds, labels):
    """Return simple accuracy wrapped in the expected metrics-dict format."""
    return {"acc": simple_accuracy(preds, labels)}
def acc_and_f1(preds, labels):
    """Return accuracy, F1 score, and their average as a metrics dict."""
    acc = simple_accuracy(preds, labels)
    f1 = classification_f1_score(y_true=labels, y_pred=preds)
    return {"acc": acc, "f1": f1, "acc_and_f1": (acc + f1) / 2}
def pearson_and_spearman(preds, labels):
    """Return Pearson and Spearman correlations plus their average."""
    pearson_corr, _ = pearsonr(preds, labels)
    spearman_corr, _ = spearmanr(preds, labels)
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }
def tagging(preds, labels):
    """Return (precision, recall, f1) for sequence tagging via seqeval."""
    return (
        precision_score(labels, preds),
        recall_score(labels, preds),
        f1_score(labels, preds),
    )
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from scipy.stats import pearsonr, spearmanr
from seqeval.metrics import classification_report, precision_score, recall_score, f1_score
from sklearn.metrics import f1_score as classification_f1_score
def get_conll_scores(predictions, y, y_lex, unk='O'):
"""Get Conll style scores (precision, recall, f1)
"""
if isinstance(predictions, list):
predictions = predictions[-1]
test_p = predictions
if len(test_p.shape) > 2:
test_p = test_p.argmax(2)
test_y = y
if len(test_y.shape) > 2:
test_y = test_y.argmax(2)
prediction_data = []
for n in range(test_y.shape[0]):
test_yval = []
for i in list(test_y[n]):
try:
test_yval.append(y_lex[i])
except KeyError:
pass
test_pval = [unk] * len(test_yval)
for e, i in enumerate(list(test_p[n])[:len(test_pval)]):
try:
test_pval[e] = y_lex[i]
except KeyError:
pass
prediction_data.append((test_yval, test_pval))
y_true, y_pred = list(zip(*prediction_data))
return classification_report(y_true, y_pred, digits=3)
def simple_accuracy(preds, labels):
"""return simple accuracy
"""
return (preds == labels).mean()
def accuracy(preds, labels):
"""return simple accuracy in expected dict format
"""
acc = simple_accuracy(preds, labels)
return {
"acc": acc
}
def acc_and_f1(preds, labels):
"""return accuracy and f1 score
"""
acc = simple_accuracy(preds, labels)
f1 = classification_f1_score(y_true=labels, y_pred=preds)
return {
"acc": acc,
"f1": f1,
"acc_and_f1": (acc + f1) / 2,
}
def pearson_and_spearman(preds, labels):
"""get pearson and spearman correlation
"""
pearson_corr = pearsonr(preds, labels)[0]
spearman_corr = spearmanr(preds, labels)[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def tagging(preds, labels):
p = precision_score(labels, preds)
r = recall_score(labels, preds)
f1 = f1_score(labels, preds)
return p, r, f1 | 0.674694 | 0.606935 |
import os
import uuid
import hashlib
import json
import tempfile
import secrets
import zipfile
from glob import glob
from django.core.exceptions import ValidationError
from django.utils.module_loading import import_string
from django.conf import settings
from django.db import models
from django.utils.translation import gettext_lazy as _
from django.utils import timezone
from django_walletpass import crypto
from django_walletpass.storage import WalletPassStorage
from django_walletpass.files import WalletpassContentFile
from django_walletpass.settings import dwpconfig as WALLETPASS_CONF
class PassBuilder:
# WARNING(review): these are *class-level* mutable defaults shared by every
# PassBuilder instance until an instance assigns its own — confirm intended.
pass_data = {}
pass_data_required = {
    "passTypeIdentifier": WALLETPASS_CONF['PASS_TYPE_ID'],
    "serialNumber": None,  # filled per-builder in __init__
    "teamIdentifier": WALLETPASS_CONF['TEAM_ID'],
    "webServiceURL": WALLETPASS_CONF['SERVICE_URL'],
    "authenticationToken": None,  # filled per-builder in __init__
}
directory = None  # optional template dir whose files are copied into the pass
extra_files = {}  # relative path -> bytes content added at build time
manifest_dict = {}  # relative path -> SHA-1 hex digest (pass manifest)
builded_pass_content = None  # final .pkpass bytes after building
def __init__(self, directory=None):
    """Create a builder, optionally seeded from an existing pass directory.

    Args:
        directory (str)[None]: Path of a template pass; its pass.json (if
            present) is loaded as the starting pass data.
    """
    # Shadow the mutable class-level defaults with per-instance state so
    # concurrent builders don't share/clobber each other's data (the
    # original updated the class-level dicts in place).
    self.pass_data = dict(self.pass_data)
    self.pass_data_required = dict(self.pass_data_required)
    self.extra_files = {}
    self.manifest_dict = {}
    self.builded_pass_content = None
    self.directory = directory
    if directory is not None:
        self._load_pass_json_file_if_exists(directory)
    self.pass_data_required.update({
        "serialNumber": secrets.token_urlsafe(20),
        "authenticationToken": crypto.gen_random_token(),
    })
def _copy_dir_files(self, tmp_pass_dir):
    """Copy files from provided base dir to temporal dir.

    Side effect: records each copied file's SHA-1 hex digest in
    self.manifest_dict keyed by its path relative to self.directory.

    Args:
        tmp_pass_dir (str): temporal dir path
    """
    for absolute_filepath in glob(os.path.join(self.directory, '**'), recursive=True):
        filename = os.path.basename(absolute_filepath)
        relative_file_path = os.path.relpath(absolute_filepath, self.directory)
        # Skip macOS metadata and anything that isn't a regular file.
        if filename == '.DS_Store' or not os.path.isfile(absolute_filepath):
            continue
        # with-blocks close the handles even on error (the original leaked
        # the read handle by never closing it).
        with open(absolute_filepath, 'rb') as src:
            filecontent = src.read()
        # Add files to manifest
        self.manifest_dict[relative_file_path] = hashlib.sha1(filecontent).hexdigest()
        dest_abs_filepath = os.path.join(tmp_pass_dir, relative_file_path)
        # exist_ok avoids the check-then-create race of the original.
        os.makedirs(os.path.dirname(dest_abs_filepath), exist_ok=True)
        with open(dest_abs_filepath, 'wb') as dest:
            dest.write(filecontent)
def _write_extra_files(self, tmp_pass_dir):
"""Write extra files contained in self.extra_files into tmp dir
Args:
tmp_pass_dir (str): temporal dir path
"""
for relative_file_path, filecontent in self.extra_files.items():
# Add files to manifest
self.manifest_dict[relative_file_path] = hashlib.sha1(filecontent).hexdigest()
dest_abs_filepath = os.path.join(tmp_pass_dir, relative_file_path)
dest_abs_dirpath = os.path.dirname(dest_abs_filepath)
if not os.path.exists(dest_abs_dirpath):
os.makedirs(dest_abs_dirpath)
ff = open(dest_abs_filepath, 'wb')
ff.write(filecontent)
ff.close()
def _write_pass_json(self, tmp_pass_dir):
"""Write content of self.pass_data to pass.json (in JSON format)
Args:
tmp_pass_dir (str): temporal dir path where pass.json will be saved
"""
pass_json = json.dumps(self.pass_data)
pass_json_bytes = bytes(pass_json, 'utf8')
# Add pass.json to manifest
self.manifest_dict['pass.json'] = hashlib.sha1(pass_json_bytes).hexdigest()
ff = open(os.path.join(tmp_pass_dir, 'pass.json'), 'wb')
ff.write(pass_json_bytes)
ff.close()
def _write_manifest_json_and_signature(self, tmp_pass_dir):
"""Write the content of self.manifest_dict into manifest.json
Args:
tmp_pass_dir (str): temporal dir path
"""
manifest_json = json.dumps(self.manifest_dict)
manifest_json_bytes = bytes(manifest_json, 'utf8')
ff = open(os.path.join(tmp_pass_dir, 'manifest.json'), 'wb')
ff.write(manifest_json_bytes)
ff.close()
signature_content = crypto.pkcs7_sign(
certcontent=WALLETPASS_CONF['CERT_CONTENT'],
keycontent=WALLETPASS_CONF['KEY_CONTENT'],
wwdr_certificate=WALLETPASS_CONF['WWDRCA_CONTENT'],
data=manifest_json_bytes,
key_password=<PASSWORD>ASS_CONF['KEY_PASSWORD'],
)
ff = open(os.path.join(tmp_pass_dir, 'signature'), 'wb')
ff.write(signature_content)
ff.close()
def _zip_all(self, directory):
zip_file_path = os.path.join(directory, '..', 'walletcard.pkpass')
zip_pkpass = zipfile.ZipFile(zip_file_path, 'w', zipfile.ZIP_DEFLATED)
for filepath in glob(os.path.join(directory, '**'), recursive=True):
relative_file_path = os.path.relpath(filepath, directory)
zip_pkpass.write(filepath, arcname=relative_file_path)
zip_pkpass.close()
return open(zip_file_path, 'rb').read()
def _load_pass_json_file_if_exists(self, directory):
"""Call self.load_pass_json_file if pass.json exist
Args:
directory (str): directory where pass.json resides
"""
if os.path.isfile(os.path.join(directory, 'pass.json')):
self.load_pass_json_file(directory)
def _clean_manifest(self):
self.manifest_dict = {}
def _clean_builded_pass_content(self):
self.builded_pass_content = None
def validate(self):
"""Some validations before build the .pkpass file
Raises:
ValidationError: on validation error
"""
if not self.pass_data:
raise ValidationError(_("Cannot obtain data for pass.json."))
def clean(self):
self._clean_manifest()
self._clean_builded_pass_content()
self.validate()
def load_pass_json_file(self, dir):
"""Load json file without test if exists.
Args:
dir (str): path where resides the pass.json
"""
json_data = open(os.path.join(dir, 'pass.json'), 'r').read()
self.pass_data = json.loads(json_data)
def pre_build_pass_data(self):
"""Update self.pass_data with self.pass_data_required content
"""
self.pass_data.update(self.pass_data_required)
def build(self):
"""Build .pkpass file
"""
self.clean()
with tempfile.TemporaryDirectory() as tmpdirname:
os.mkdir(os.path.join(tmpdirname, 'data.pass'))
tmp_pass_dir = os.path.join(tmpdirname, 'data.pass')
if self.directory:
self._copy_dir_files(tmp_pass_dir)
self._write_extra_files(tmp_pass_dir)
self.pre_build_pass_data()
self._write_pass_json(tmp_pass_dir)
self._write_manifest_json_and_signature(tmp_pass_dir)
self.builded_pass_content = self._zip_all(tmp_pass_dir)
return self.builded_pass_content
def write_to_model(self, instance=None):
"""Saves the content of builded and zipped pass into Pass model.
Args:
instance (Pass, optional): Pass instance, a new one will be created
if none provided. Defaults to None.
Returns:
Pass: instance of Pass (already saved)
"""
if instance is None:
instance = Pass()
setattr(instance, 'pass_type_identifier', WALLETPASS_CONF['PASS_TYPE_ID'])
setattr(instance, 'serial_number', self.pass_data_required.get('serialNumber'))
setattr(instance, 'authentication_token', self.pass_data_required.get('authenticationToken'))
if instance.data.name:
filename = os.path.basename(instance.data.name)
else:
filename = f"{uuid.uuid1()}.pkpass"
content = WalletpassContentFile(self.builded_pass_content)
instance.data.delete()
instance.data.save(filename, content)
return instance
def add_file(self, path, content):
self.extra_files[path] = content
class Pass(models.Model):
    """
    Pass instance

    One issued Wallet pass: its identity fields plus the stored, signed
    .pkpass payload. A pass is unique per
    (pass_type_identifier, serial_number) — see Meta.unique_together.
    """
    pass_type_identifier = models.CharField(max_length=150)
    serial_number = models.CharField(max_length=150)
    # Shared secret a device must present to the pass web service.
    authentication_token = models.CharField(max_length=150)
    data = models.FileField(
        upload_to=WALLETPASS_CONF['UPLOAD_TO'],
        storage=WalletPassStorage(),
    )
    updated_at = models.DateTimeField(auto_now=True)
    def push_notification(self):
        """Send a push notification to every device registered to this pass.

        The push implementation is pluggable via
        WALLETPASS_CONF['WALLETPASS_PUSH_CLASS'].
        """
        klass = import_string(WALLETPASS_CONF['WALLETPASS_PUSH_CLASS'])
        push_module = klass()
        for registration in self.registrations.all():
            push_module.push_notification_from_instance(registration)
    def new_pass_builder(self, directory=None):
        """Create a PassBuilder pre-seeded with this pass's identity fields."""
        builder = PassBuilder(directory)
        builder.pass_data_required.update({
            "passTypeIdentifier": self.pass_type_identifier,
            "serialNumber": self.serial_number,
            "authenticationToken": self.authentication_token,
        })
        return builder
    def get_pass_builder(self):
        """Rebuild a PassBuilder from the stored .pkpass archive.

        Unzips self.data into a temp dir, loads pass.json, re-adds each
        asset file (signature and manifest.json are skipped — they are
        regenerated on build), then restores the identity fields, which
        pass.json loading ignores.
        """
        builder = PassBuilder()
        with tempfile.TemporaryDirectory() as tmpdirname:
            os.mkdir(os.path.join(tmpdirname, 'data.pass'))
            tmp_pass_dir = os.path.join(tmpdirname, 'data.pass')
            # Put zip file into tmp dir
            zip_path = os.path.join(tmpdirname, 'walletcard.pkpass')
            zip_pkpass = open(zip_path, 'wb')
            zip_pkpass.write(self.data.read())
            zip_pkpass.close()
            # Extract zip file to tmp dir
            with zipfile.ZipFile(zip_path, "r") as zip_ref:
                zip_ref.extractall(tmp_pass_dir)
            # Populate builder with zip content
            for filepath in glob(os.path.join(tmp_pass_dir, '**'), recursive=True):
                filename = os.path.basename(filepath)
                relative_file_path = os.path.relpath(filepath, tmp_pass_dir)
                if filename == 'pass.json':
                    builder.load_pass_json_file(tmp_pass_dir)
                    continue
                if relative_file_path in ['signature', 'manifest.json', '.', '..']:
                    continue
                if not os.path.isfile(filepath):
                    continue
                builder.add_file(relative_file_path, open(filepath, 'rb').read())
            # Load of these fields due to that those fields are ignored
            # on pass.json loading
            builder.pass_data_required.update({
                "passTypeIdentifier": self.pass_type_identifier,
                "serialNumber": self.serial_number,
                "authenticationToken": self.authentication_token,
            })
        return builder
    def __unicode__(self):
        """Human-readable identifier (legacy Python 2 str hook)."""
        return self.serial_number
    class Meta:
        verbose_name_plural = "passes"
        # NOTE(review): the trailing comma wraps this in an extra tuple,
        # i.e. ((...),). Django accepts a tuple of tuples so behavior is
        # correct, but the nesting looks accidental — confirm intent.
        unique_together = (
            'pass_type_identifier',
            'serial_number',
        ),
class Registration(models.Model):
    """
    Registration of a Pass on a device

    Links one device (device_library_identifier plus its push token) to
    the pass it displays, so updates can be pushed to it.
    """
    device_library_identifier = models.CharField(max_length=150)
    push_token = models.CharField(max_length=150)
    # Named 'pazz' because 'pass' is a reserved word in Python.
    pazz = models.ForeignKey(
        Pass,
        on_delete=models.CASCADE,
        related_name='registrations',
    )
    def __unicode__(self):
        """Human-readable identifier (legacy Python 2 str hook)."""
        return self.device_library_identifier
class Log(models.Model):
    """
    Log message sent by a device

    Raw log line a device posted to the wallet web-service log endpoint.
    """
    message = models.TextField()
    def __unicode__(self):
        """Return the raw log message.

        Fixed: stray dataset text appended to the return line made it a
        SyntaxError.
        """
        return self.message
import uuid
import hashlib
import json
import tempfile
import secrets
import zipfile
from glob import glob
from django.core.exceptions import ValidationError
from django.utils.module_loading import import_string
from django.conf import settings
from django.db import models
from django.utils.translation import gettext_lazy as _
from django.utils import timezone
from django_walletpass import crypto
from django_walletpass.storage import WalletPassStorage
from django_walletpass.files import WalletpassContentFile
from django_walletpass.settings import dwpconfig as WALLETPASS_CONF
class PassBuilder:
    """Builder for Apple Wallet ``.pkpass`` packages.

    Collects pass.json data, on-disk asset files and in-memory extra files,
    then produces the zipped pass: pass.json + assets + manifest.json
    (SHA-1 digests of every file) + a PKCS#7 detached signature.
    """

    def __init__(self, directory=None):
        """Initialize a builder.

        Args:
            directory (str, optional): base directory whose files (and
                pass.json, if present) seed the pass. Defaults to None.
        """
        # All mutable state is per-instance. (Originally these were
        # class-level dicts, so every PassBuilder instance shared and
        # leaked pass_data/extra_files/manifest between builds.)
        self.pass_data = {}
        self.pass_data_required = {
            "passTypeIdentifier": WALLETPASS_CONF['PASS_TYPE_ID'],
            "serialNumber": secrets.token_urlsafe(20),
            "teamIdentifier": WALLETPASS_CONF['TEAM_ID'],
            "webServiceURL": WALLETPASS_CONF['SERVICE_URL'],
            "authenticationToken": crypto.gen_random_token(),
        }
        self.extra_files = {}
        self.manifest_dict = {}
        self.builded_pass_content = None
        self.directory = directory
        if directory is not None:
            self._load_pass_json_file_if_exists(directory)

    def _add_manifest_entry_and_write(self, tmp_pass_dir, relative_file_path,
                                      filecontent):
        """Record *filecontent* in the manifest and write it to disk.

        The pass format requires a SHA-1 digest per file in manifest.json.

        Args:
            tmp_pass_dir (str): temporal dir path
            relative_file_path (str): path of the file inside the pass
            filecontent (bytes): raw file content
        """
        self.manifest_dict[relative_file_path] = hashlib.sha1(filecontent).hexdigest()
        dest_abs_filepath = os.path.join(tmp_pass_dir, relative_file_path)
        os.makedirs(os.path.dirname(dest_abs_filepath), exist_ok=True)
        with open(dest_abs_filepath, 'wb') as ff:
            ff.write(filecontent)

    def _copy_dir_files(self, tmp_pass_dir):
        """Copy files from provided base dir to temporal dir.

        Args:
            tmp_pass_dir (str): temporal dir path
        """
        for absolute_filepath in glob(os.path.join(self.directory, '**'),
                                      recursive=True):
            # Finder metadata must never end up inside the pass archive.
            if os.path.basename(absolute_filepath) == '.DS_Store':
                continue
            if not os.path.isfile(absolute_filepath):
                continue
            relative_file_path = os.path.relpath(absolute_filepath, self.directory)
            with open(absolute_filepath, 'rb') as src:
                filecontent = src.read()
            self._add_manifest_entry_and_write(
                tmp_pass_dir, relative_file_path, filecontent)

    def _write_extra_files(self, tmp_pass_dir):
        """Write extra files contained in self.extra_files into tmp dir.

        Args:
            tmp_pass_dir (str): temporal dir path
        """
        for relative_file_path, filecontent in self.extra_files.items():
            self._add_manifest_entry_and_write(
                tmp_pass_dir, relative_file_path, filecontent)

    def _write_pass_json(self, tmp_pass_dir):
        """Write content of self.pass_data to pass.json (in JSON format).

        Args:
            tmp_pass_dir (str): temporal dir path where pass.json will be saved
        """
        pass_json_bytes = json.dumps(self.pass_data).encode('utf8')
        self._add_manifest_entry_and_write(
            tmp_pass_dir, 'pass.json', pass_json_bytes)

    def _write_manifest_json_and_signature(self, tmp_pass_dir):
        """Write manifest.json and its PKCS#7 detached signature.

        Args:
            tmp_pass_dir (str): temporal dir path
        """
        manifest_json_bytes = json.dumps(self.manifest_dict).encode('utf8')
        with open(os.path.join(tmp_pass_dir, 'manifest.json'), 'wb') as ff:
            ff.write(manifest_json_bytes)
        signature_content = crypto.pkcs7_sign(
            certcontent=WALLETPASS_CONF['CERT_CONTENT'],
            keycontent=WALLETPASS_CONF['KEY_CONTENT'],
            wwdr_certificate=WALLETPASS_CONF['WWDRCA_CONTENT'],
            data=manifest_json_bytes,
            # Fixed: this argument was corrupted to '<PASSWORD>ASS_CONF'
            # (a redaction artifact) which is a SyntaxError.
            key_password=WALLETPASS_CONF['KEY_PASSWORD'],
        )
        with open(os.path.join(tmp_pass_dir, 'signature'), 'wb') as ff:
            ff.write(signature_content)

    def _zip_all(self, directory):
        """Zip the whole *directory* and return the archive's bytes."""
        zip_file_path = os.path.join(directory, '..', 'walletcard.pkpass')
        with zipfile.ZipFile(zip_file_path, 'w', zipfile.ZIP_DEFLATED) as zip_pkpass:
            for filepath in glob(os.path.join(directory, '**'), recursive=True):
                zip_pkpass.write(
                    filepath, arcname=os.path.relpath(filepath, directory))
        with open(zip_file_path, 'rb') as ff:
            return ff.read()

    def _load_pass_json_file_if_exists(self, directory):
        """Call self.load_pass_json_file if pass.json exists.

        Args:
            directory (str): directory where pass.json resides
        """
        if os.path.isfile(os.path.join(directory, 'pass.json')):
            self.load_pass_json_file(directory)

    def _clean_manifest(self):
        """Drop manifest entries accumulated by a previous build."""
        self.manifest_dict = {}

    def _clean_builded_pass_content(self):
        """Drop the zipped output of a previous build."""
        self.builded_pass_content = None

    def validate(self):
        """Some validations before building the .pkpass file.

        Raises:
            ValidationError: when no pass.json data is available at all.
        """
        if not self.pass_data:
            raise ValidationError(_("Cannot obtain data for pass.json."))

    def clean(self):
        """Reset per-build state, then validate the input data."""
        self._clean_manifest()
        self._clean_builded_pass_content()
        self.validate()

    def load_pass_json_file(self, dir):
        """Load json file without testing if it exists.

        Args:
            dir (str): path where the pass.json resides
        """
        with open(os.path.join(dir, 'pass.json'), 'r') as ff:
            self.pass_data = json.loads(ff.read())

    def pre_build_pass_data(self):
        """Overlay self.pass_data with the required identity fields."""
        self.pass_data.update(self.pass_data_required)

    def build(self):
        """Build the .pkpass file and return its raw bytes."""
        self.clean()
        with tempfile.TemporaryDirectory() as tmpdirname:
            tmp_pass_dir = os.path.join(tmpdirname, 'data.pass')
            os.mkdir(tmp_pass_dir)
            if self.directory:
                self._copy_dir_files(tmp_pass_dir)
            self._write_extra_files(tmp_pass_dir)
            self.pre_build_pass_data()
            self._write_pass_json(tmp_pass_dir)
            # Manifest and signature must be written last so they cover
            # every file added above.
            self._write_manifest_json_and_signature(tmp_pass_dir)
            self.builded_pass_content = self._zip_all(tmp_pass_dir)
        return self.builded_pass_content

    def write_to_model(self, instance=None):
        """Saves the content of the built and zipped pass into a Pass model.

        Args:
            instance (Pass, optional): Pass instance, a new one will be
                created if none provided. Defaults to None.

        Returns:
            Pass: instance of Pass (already saved)
        """
        if instance is None:
            instance = Pass()
        instance.pass_type_identifier = WALLETPASS_CONF['PASS_TYPE_ID']
        instance.serial_number = self.pass_data_required.get('serialNumber')
        instance.authentication_token = self.pass_data_required.get('authenticationToken')
        if instance.data.name:
            filename = os.path.basename(instance.data.name)
        else:
            filename = f"{uuid.uuid1()}.pkpass"
        content = WalletpassContentFile(self.builded_pass_content)
        # Remove any previously stored payload before saving the new one.
        instance.data.delete()
        instance.data.save(filename, content)
        return instance

    def add_file(self, path, content):
        """Register an in-memory file (bytes) to include in the pass."""
        self.extra_files[path] = content
class Pass(models.Model):
    """
    Pass instance

    One issued Wallet pass: its identity fields plus the stored, signed
    .pkpass payload. A pass is unique per
    (pass_type_identifier, serial_number) — see Meta.unique_together.
    """
    pass_type_identifier = models.CharField(max_length=150)
    serial_number = models.CharField(max_length=150)
    # Shared secret a device must present to the pass web service.
    authentication_token = models.CharField(max_length=150)
    data = models.FileField(
        upload_to=WALLETPASS_CONF['UPLOAD_TO'],
        storage=WalletPassStorage(),
    )
    updated_at = models.DateTimeField(auto_now=True)
    def push_notification(self):
        """Send a push notification to every device registered to this pass.

        The push implementation is pluggable via
        WALLETPASS_CONF['WALLETPASS_PUSH_CLASS'].
        """
        klass = import_string(WALLETPASS_CONF['WALLETPASS_PUSH_CLASS'])
        push_module = klass()
        for registration in self.registrations.all():
            push_module.push_notification_from_instance(registration)
    def new_pass_builder(self, directory=None):
        """Create a PassBuilder pre-seeded with this pass's identity fields."""
        builder = PassBuilder(directory)
        builder.pass_data_required.update({
            "passTypeIdentifier": self.pass_type_identifier,
            "serialNumber": self.serial_number,
            "authenticationToken": self.authentication_token,
        })
        return builder
    def get_pass_builder(self):
        """Rebuild a PassBuilder from the stored .pkpass archive.

        Unzips self.data into a temp dir, loads pass.json, re-adds each
        asset file (signature and manifest.json are skipped — they are
        regenerated on build), then restores the identity fields, which
        pass.json loading ignores.
        """
        builder = PassBuilder()
        with tempfile.TemporaryDirectory() as tmpdirname:
            os.mkdir(os.path.join(tmpdirname, 'data.pass'))
            tmp_pass_dir = os.path.join(tmpdirname, 'data.pass')
            # Put zip file into tmp dir
            zip_path = os.path.join(tmpdirname, 'walletcard.pkpass')
            zip_pkpass = open(zip_path, 'wb')
            zip_pkpass.write(self.data.read())
            zip_pkpass.close()
            # Extract zip file to tmp dir
            with zipfile.ZipFile(zip_path, "r") as zip_ref:
                zip_ref.extractall(tmp_pass_dir)
            # Populate builder with zip content
            for filepath in glob(os.path.join(tmp_pass_dir, '**'), recursive=True):
                filename = os.path.basename(filepath)
                relative_file_path = os.path.relpath(filepath, tmp_pass_dir)
                if filename == 'pass.json':
                    builder.load_pass_json_file(tmp_pass_dir)
                    continue
                if relative_file_path in ['signature', 'manifest.json', '.', '..']:
                    continue
                if not os.path.isfile(filepath):
                    continue
                builder.add_file(relative_file_path, open(filepath, 'rb').read())
            # Load of these fields due to that those fields are ignored
            # on pass.json loading
            builder.pass_data_required.update({
                "passTypeIdentifier": self.pass_type_identifier,
                "serialNumber": self.serial_number,
                "authenticationToken": self.authentication_token,
            })
        return builder
    def __unicode__(self):
        """Human-readable identifier (legacy Python 2 str hook)."""
        return self.serial_number
    class Meta:
        verbose_name_plural = "passes"
        # NOTE(review): the trailing comma wraps this in an extra tuple,
        # i.e. ((...),). Django accepts a tuple of tuples so behavior is
        # correct, but the nesting looks accidental — confirm intent.
        unique_together = (
            'pass_type_identifier',
            'serial_number',
        ),
class Registration(models.Model):
    """
    Registration of a Pass on a device

    Links one device (device_library_identifier plus its push token) to
    the pass it displays, so updates can be pushed to it.
    """
    device_library_identifier = models.CharField(max_length=150)
    push_token = models.CharField(max_length=150)
    # Named 'pazz' because 'pass' is a reserved word in Python.
    pazz = models.ForeignKey(
        Pass,
        on_delete=models.CASCADE,
        related_name='registrations',
    )
    def __unicode__(self):
        """Human-readable identifier (legacy Python 2 str hook)."""
        return self.device_library_identifier
class Log(models.Model):
    """
    Log message sent by a device

    Raw log line a device posted to the wallet web-service log endpoint.
    """
    message = models.TextField()
    def __unicode__(self):
        """Return the raw log message.

        Fixed: stray dataset text appended to the return line made it a
        SyntaxError.
        """
        return self.message
import datetime
from typing import Sequence
import requests
from bs4 import BeautifulSoup
from bot.currency import Currency
from .base import BaseParser
import logging
logger = logging.getLogger("bot.parsers.belgazprombank_parser")
class BelgazpromParser(BaseParser):
    """Exchange-rate parser for Belgazprombank's public rates page.

    Fetches BASE_URL for a given date and converts the HTML rates table
    into Currency objects.
    """
    is_active = True
    BASE_URL = 'http://belgazprombank.by/about/kursi_valjut/'
    DATE_FORMAT = "%d.%m.%Y"  # format of the page's ?date= query parameter
    name = 'Белгазпромбанк'
    short_name = 'bgp'
    # assumes the site publishes no rates earlier than this — TODO confirm
    MINIMAL_DATE = datetime.datetime(year=2004, month=5, day=1)
    allowed_currencies = ('USD', 'EUR', 'RUB', 'BYR',
                          'GBP', 'UAH', 'CHF', 'PLN', 'BYN')

    def __init__(self, parser="lxml", *args, **kwargs):
        """Args:
            parser (str): BeautifulSoup backend (e.g. "lxml", "html.parser").
        """
        self.name = BelgazpromParser.name
        self.short_name = BelgazpromParser.short_name
        self._parser = parser

    def __get_response_for_the_date(self,
                                    d: datetime.date) -> requests.models.Response:
        """Gets page with currency rates for the given date (today if None)."""
        supplied_date = d
        if supplied_date is None:
            supplied_date = datetime.date.today()
        assert isinstance(supplied_date, datetime.date), "Incorrect date type"
        str_date = datetime.date.strftime(supplied_date,
                                          BelgazpromParser.DATE_FORMAT)
        return requests.get(BelgazpromParser.BASE_URL,
                            params={"date": str_date})

    def __soup_from_response(self,
                             resp: requests.models.Response) -> BeautifulSoup:
        """Create soup object from the supplied requests response."""
        return BeautifulSoup(resp.text, self._parser)

    def __get_currency_table(self,
                             soup: BeautifulSoup) -> BeautifulSoup:
        """Returns the element containing the exchange-rates table."""
        return soup.find(id="courses_tab1_form").parent

    def __get_currency_objects(self,
                               cur_table: BeautifulSoup,
                               days_since_now=None) -> Sequence[Currency]:
        """Parse the rates-table rows and extract currency data.

        Rows that fail to parse are logged and replaced with an empty
        currency placeholder.

        NOTE(review): when days_since_now is truthy this method falls
        through and returns None — looks unintended; confirm callers
        before changing.
        """
        if not days_since_now:
            currencies = []
            exchange_rows = cur_table.find('table').find('tbody').find_all('tr')
            for row in exchange_rows:
                try:
                    currencies.append(
                        BelgazpromParser.__currency_object_from_row(row))
                except ValueError:
                    logger.error("Error obtaining currency object from {}".format(row))
                    currencies.append(Currency.empty_currency())
            return currencies

    @classmethod
    def __currency_object_from_row(cls,
                                   row_object: BeautifulSoup) -> Currency:
        """Build a Currency from one <tr>.

        Raises:
            ValueError: when buy/sell cells do not contain numbers.
        """
        table_cols = row_object.find_all('td')
        # Rates may contain thousands-separator spaces; drop them.
        buy = table_cols[3].find_all("span")[0].text.strip().replace(" ", "")
        sell = table_cols[4].find_all("span")[0].text.strip().replace(" ", "")
        return Currency(name=table_cols[0].text.strip(),
                        iso=table_cols[2].text,
                        sell=float(sell),
                        buy=float(buy))

    def get_all_currencies(self,
                           date: datetime.date=None) -> Sequence[Currency]:
        """Return all currencies listed for *date* (today when None)."""
        logger.info("Belgazprom: getting all currencies "
                    "for the {}".format(date))
        if date is None:
            date = datetime.date.today()
        assert isinstance(date, datetime.date), "Incorrect date supplied"
        response = self.__get_response_for_the_date(date)
        soup = self.__soup_from_response(response)
        currency_table = self.__get_currency_table(soup)
        return self.__get_currency_objects(currency_table)

    def get_currency_for_diff_date(self,
                                   diff_days: int,
                                   currency: str="USD") -> Currency:
        """Return *currency* as of *diff_days* days before today."""
        former_date = datetime.date.today() - datetime.timedelta(days=diff_days)
        return self.get_currency(currency, date=former_date)

    def get_currency(self,
                     currency_name: str="USD",
                     date: datetime.date=None) -> Currency:
        """Return the named currency for *date*, or an empty placeholder
        when the bank does not list it."""
        logger.info("Belgazprom: getting {}"
                    "for the {}".format(currency_name, date))
        if date is None:
            date = datetime.date.today()
        assert isinstance(date, datetime.date), "Incorrect date supplied"
        for cur in self.get_all_currencies(date):
            if currency_name.upper() == cur.iso:
                return cur
        # Fixed: this return line was corrupted by stray appended text.
        return Currency.empty_currency()
import datetime
from typing import Sequence
import requests
from bs4 import BeautifulSoup
from bot.currency import Currency
from .base import BaseParser
import logging
logger = logging.getLogger("bot.parsers.belgazprombank_parser")
class BelgazpromParser(BaseParser):
    """Exchange-rate parser for Belgazprombank's public rates page.

    Fetches BASE_URL for a given date and converts the HTML rates table
    into Currency objects.
    """
    is_active = True
    BASE_URL = 'http://belgazprombank.by/about/kursi_valjut/'
    DATE_FORMAT = "%d.%m.%Y"  # format of the page's ?date= query parameter
    name = 'Белгазпромбанк'
    short_name = 'bgp'
    # assumes the site publishes no rates earlier than this — TODO confirm
    MINIMAL_DATE = datetime.datetime(year=2004, month=5, day=1)
    allowed_currencies = ('USD', 'EUR', 'RUB', 'BYR',
                          'GBP', 'UAH', 'CHF', 'PLN', 'BYN')

    def __init__(self, parser="lxml", *args, **kwargs):
        """Args:
            parser (str): BeautifulSoup backend (e.g. "lxml", "html.parser").
        """
        self.name = BelgazpromParser.name
        self.short_name = BelgazpromParser.short_name
        self._parser = parser

    def __get_response_for_the_date(self,
                                    d: datetime.date) -> requests.models.Response:
        """Gets page with currency rates for the given date (today if None)."""
        supplied_date = d
        if supplied_date is None:
            supplied_date = datetime.date.today()
        assert isinstance(supplied_date, datetime.date), "Incorrect date type"
        str_date = datetime.date.strftime(supplied_date,
                                          BelgazpromParser.DATE_FORMAT)
        return requests.get(BelgazpromParser.BASE_URL,
                            params={"date": str_date})

    def __soup_from_response(self,
                             resp: requests.models.Response) -> BeautifulSoup:
        """Create soup object from the supplied requests response."""
        return BeautifulSoup(resp.text, self._parser)

    def __get_currency_table(self,
                             soup: BeautifulSoup) -> BeautifulSoup:
        """Returns the element containing the exchange-rates table."""
        return soup.find(id="courses_tab1_form").parent

    def __get_currency_objects(self,
                               cur_table: BeautifulSoup,
                               days_since_now=None) -> Sequence[Currency]:
        """Parse the rates-table rows and extract currency data.

        Rows that fail to parse are logged and replaced with an empty
        currency placeholder.

        NOTE(review): when days_since_now is truthy this method falls
        through and returns None — looks unintended; confirm callers
        before changing.
        """
        if not days_since_now:
            currencies = []
            exchange_rows = cur_table.find('table').find('tbody').find_all('tr')
            for row in exchange_rows:
                try:
                    currencies.append(
                        BelgazpromParser.__currency_object_from_row(row))
                except ValueError:
                    logger.error("Error obtaining currency object from {}".format(row))
                    currencies.append(Currency.empty_currency())
            return currencies

    @classmethod
    def __currency_object_from_row(cls,
                                   row_object: BeautifulSoup) -> Currency:
        """Build a Currency from one <tr>.

        Raises:
            ValueError: when buy/sell cells do not contain numbers.
        """
        table_cols = row_object.find_all('td')
        # Rates may contain thousands-separator spaces; drop them.
        buy = table_cols[3].find_all("span")[0].text.strip().replace(" ", "")
        sell = table_cols[4].find_all("span")[0].text.strip().replace(" ", "")
        return Currency(name=table_cols[0].text.strip(),
                        iso=table_cols[2].text,
                        sell=float(sell),
                        buy=float(buy))

    def get_all_currencies(self,
                           date: datetime.date=None) -> Sequence[Currency]:
        """Return all currencies listed for *date* (today when None)."""
        logger.info("Belgazprom: getting all currencies "
                    "for the {}".format(date))
        if date is None:
            date = datetime.date.today()
        assert isinstance(date, datetime.date), "Incorrect date supplied"
        response = self.__get_response_for_the_date(date)
        soup = self.__soup_from_response(response)
        currency_table = self.__get_currency_table(soup)
        return self.__get_currency_objects(currency_table)

    def get_currency_for_diff_date(self,
                                   diff_days: int,
                                   currency: str="USD") -> Currency:
        """Return *currency* as of *diff_days* days before today."""
        former_date = datetime.date.today() - datetime.timedelta(days=diff_days)
        return self.get_currency(currency, date=former_date)

    def get_currency(self,
                     currency_name: str="USD",
                     date: datetime.date=None) -> Currency:
        """Return the named currency for *date*, or an empty placeholder
        when the bank does not list it."""
        logger.info("Belgazprom: getting {}"
                    "for the {}".format(currency_name, date))
        if date is None:
            date = datetime.date.today()
        assert isinstance(date, datetime.date), "Incorrect date supplied"
        for cur in self.get_all_currencies(date):
            if currency_name.upper() == cur.iso:
                return cur
        # Fixed: this return line was corrupted by stray appended text.
        return Currency.empty_currency()
import jsonlines
import spacy
import numpy as np
from sklearn import svm
from sklearn.model_selection import GridSearchCV
from embedding import WordEmbeddingFeature
from sentiment_features import SentimentFeature
from bag_of_words import BagOfWordsFeature
from utils import WSCProblem
import argparse
def load_file(filename, model):
    """Read WSC problems from a jsonlines file.

    Args:
        filename: path to the jsonlines input file.
        model: spaCy language model handed to each WSCProblem.

    Returns:
        list of WSCProblem, one per input record.
    """
    with jsonlines.open(filename) as reader:
        return [
            WSCProblem(
                record['sentence'],
                record['option1'],
                record['option2'],
                record['answer'],
                model,
            )
            for record in reader
        ]
def max_length_sentence(problems):
    """Return the largest per-problem max_length(); 0 when empty."""
    return max((problem.max_length() for problem in problems), default=0)
def apply_word2vec_features(problems):
    """Build (train, labels) arrays from each problem's SVM-rank feature.

    Each problem yields a (sample, label) pair; the pairs are transposed
    into one sample matrix and one label vector.
    """
    pairs = [problem.to_svm_rank_feature() for problem in problems]
    train, labels = (np.array(list(group)) for group in zip(*pairs))
    return train, labels
def apply_features(problems, processors):
    """Concatenate every processor's feature vector for each problem.

    Returns:
        (data, labels): 2-D float array of per-problem features and the
        corresponding 1-D label array.
    """
    rows, labels = [], []
    for problem in problems:
        labels.append(problem.label())
        # Seed with an empty float64 array so dtype promotion matches
        # the original np.append-based accumulation.
        parts = [np.array([])]
        parts.extend(np.ravel(processor.process(problem))
                     for processor in processors)
        rows.append(np.concatenate(parts))
    return np.array(rows), np.array(labels)
def main(train_filename, test_filename, data_dir):
    """Train and evaluate a C-grid-searched SVM on WSC data files."""
    nlp = spacy.load('en_core_web_md')
    print('SPACY model loaded')
    # Prepare data
    train_data = load_file(data_dir + train_filename, nlp)
    test_data = load_file(data_dir + test_filename, nlp)
    max_length = max_length_sentence(train_data)
    features = [
        BagOfWordsFeature(train_data),
        SentimentFeature(max_length),
        # WordEmbeddingFeature(max_length) is available but disabled.
    ]
    train, train_labels = apply_features(train_data, features)
    test, test_labels = apply_features(test_data, features)
    print(
        f'Training shape is {train.shape} and labels is {train_labels.shape}')
    print(f'Testing shape is {test.shape} and labels is {test_labels.shape}')
    # Train classifier: grid-search C over 2^-2 .. 2^1
    grid = {'C': [2 ** k for k in range(-2, 2)]}
    model = GridSearchCV(svm.SVC(), grid).fit(train, train_labels)
    # Evaluate model.
    test_accuracy = model.score(test, test_labels)
    train_accuracy = model.score(train, train_labels)
    print(f'Parameters used are {model.best_params_}')
    print('Scores:')
    print(f'Accuracy on test set: {test_accuracy}')
    print(f'Accuracy on train set: {train_accuracy}')
if __name__ == '__main__':
    # CLI entry point: file names are relative to --data_dir.
    parser = argparse.ArgumentParser(description='Train an SVM model for WSC.')
    parser.add_argument(
        '--train',
        default='pdp.jsonl',
        help='The name of the input file for training')
    parser.add_argument(
        '--test',
        default='pdp-test.jsonl',
        help='The name of the input file for evaluation data.')
    parser.add_argument(
        '--data_dir',
        default='../data/',
        help='The path to the data directory.')
    args = parser.parse_args()
    # Fixed: stray dataset text appended to this call made it a SyntaxError.
    main(args.train, args.test, args.data_dir)
import spacy
import numpy as np
from sklearn import svm
from sklearn.model_selection import GridSearchCV
from embedding import WordEmbeddingFeature
from sentiment_features import SentimentFeature
from bag_of_words import BagOfWordsFeature
from utils import WSCProblem
import argparse
def load_file(filename, model):
    """Read WSC problems from a jsonlines file.

    Args:
        filename: path to the jsonlines input file.
        model: spaCy language model handed to each WSCProblem.

    Returns:
        list of WSCProblem, one per input record.
    """
    with jsonlines.open(filename) as reader:
        return [
            WSCProblem(
                record['sentence'],
                record['option1'],
                record['option2'],
                record['answer'],
                model,
            )
            for record in reader
        ]
def max_length_sentence(problems):
    """Return the largest per-problem max_length(); 0 when empty."""
    return max((problem.max_length() for problem in problems), default=0)
def apply_word2vec_features(problems):
    """Build (train, labels) arrays from each problem's SVM-rank feature.

    Each problem yields a (sample, label) pair; the pairs are transposed
    into one sample matrix and one label vector.
    """
    pairs = [problem.to_svm_rank_feature() for problem in problems]
    train, labels = (np.array(list(group)) for group in zip(*pairs))
    return train, labels
def apply_features(problems, processors):
    """Concatenate every processor's feature vector for each problem.

    Returns:
        (data, labels): 2-D float array of per-problem features and the
        corresponding 1-D label array.
    """
    rows, labels = [], []
    for problem in problems:
        labels.append(problem.label())
        # Seed with an empty float64 array so dtype promotion matches
        # the original np.append-based accumulation.
        parts = [np.array([])]
        parts.extend(np.ravel(processor.process(problem))
                     for processor in processors)
        rows.append(np.concatenate(parts))
    return np.array(rows), np.array(labels)
return np.array(data), np.array(labels)
def main(train_filename, test_filename, data_dir):
model = spacy.load('en_core_web_md')
print('SPACY model loaded')
# Prepare data
train_data = load_file(data_dir + train_filename, model)
test_data = load_file(data_dir + test_filename, model)
max_length = max_length_sentence(train_data)
features = []
features.append(BagOfWordsFeature(train_data))
features.append(SentimentFeature(max_length))
# features.append(WordEmbeddingFeature(max_length))
train, train_labels = apply_features(train_data, features)
test, test_labels = apply_features(test_data, features)
print(
f'Training shape is {train.shape} and labels is {train_labels.shape}')
print(f'Testing shape is {test.shape} and labels is {test_labels.shape}')
# Train classifier
svc = svm.SVC()
Cs = [2**k for k in range(-2, 2)]
params = {'C': Cs}
clf = GridSearchCV(svc, params)
model = clf.fit(train, train_labels)
# Evaluate model.
test_accuracy = model.score(test, test_labels)
train_accuracy = model.score(train, train_labels)
print(f'Parameters used are {model.best_params_}')
print('Scores:')
print(f'Accuracy on test set: {test_accuracy}')
print(f'Accuracy on train set: {train_accuracy}')
if __name__ == '__main__':
    # CLI entry point: file names are relative to --data_dir.
    parser = argparse.ArgumentParser(description='Train an SVM model for WSC.')
    parser.add_argument(
        '--train',
        default='pdp.jsonl',
        help='The name of the input file for training')
    parser.add_argument(
        '--test',
        default='pdp-test.jsonl',
        help='The name of the input file for evaluation data.')
    parser.add_argument(
        '--data_dir',
        default='../data/',
        help='The path to the data directory.')
    args = parser.parse_args()
    # Fixed: stray dataset text appended to this call made it a SyntaxError.
    main(args.train, args.test, args.data_dir)
import argparse, textwrap

# Command-line interface for the PRAW crawler.  The simple options are
# declared in a table and registered in a single loop; the save-type
# help keeps its multi-line text via textwrap.dedent.
parser = argparse.ArgumentParser(
    description=textwrap.dedent(
        """\
        A python script to fetch submissions and comments using PRAW API
        """
    ),
    usage='Use "python3 %(prog)s -h" for more information',
    formatter_class=argparse.RawTextHelpFormatter,
)

_SAVE_TYPE_HELP = textwrap.dedent(
    """\
    Save mode, can be csv, db, dbwi. Defaults to csv.
    csv - csv file
    db - db mode with no initialization(tables are expected to exist)
    dbwi - db mode with initialization, tables are created as per the statements in `db_tables["init"] arg variable`"""
)

# (short flag, long flag, type, default, help)
_OPTIONS = (
    ("-sc", "--submissions_count", int, 10,
     "The number of submissions to crawl in the subreddits"),
    ("-st", "--submissions_type", str, "hot",
     "The submissions type to crawl in the subreddits"),
    ("-tf", "--time_filter", str, "day",
     "The submissions type to crawl in the subreddits"),
    ("-cc", "--comments_count", str, "32",
     "The number of MoreComments to crawl in the comments section"),
    ("-op", "--output_path", str, "./output/",
     "Output path for the processed files"),
    ("-ip", "--input_path", str, "./input/",
     "Input path for the subreddits_to_crawl file"),
    ("-ifn", "--input_file_name", str, "subreddits_to_crawl.csv",
     "File containing csv of subreddits to crawl"),
    ("-svt", "--save_type", str, "csv", _SAVE_TYPE_HELP),
)
for short_flag, long_flag, kind, default, text in _OPTIONS:
    parser.add_argument(short_flag, long_flag, type=kind, default=default, help=text)

# --comments / --no-comments toggle a single boolean destination
# (default True via set_defaults below).
feature_parser = parser.add_mutually_exclusive_group(required=False)
feature_parser.add_argument(
    "-c", "--comments", dest="comments", action="store_true",
    help="Flag to switch on the crawling of comments",
)
feature_parser.add_argument(
    "-nc", "--no-comments", dest="comments", action="store_false",
    help="Flag to switch off the crawling of comments",
)
parser.set_defaults(comments=True)

args = parser.parse_args()

# --comments_count is declared as str so the literal "None" can disable
# the limit entirely; anything else must parse as an integer.
if args.comments_count == "None":
    args.comments_count = None
else:
    try:
        args.comments_count = int(args.comments_count)
    except ValueError:
        print("Please pass a number or None for the --comments_count (-cc) option")
        raise
parser = argparse.ArgumentParser(
description=textwrap.dedent(
"""\
A python script to fetch submissions and comments using PRAW API
"""
),
usage='Use "python3 %(prog)s -h" for more information',
formatter_class=argparse.RawTextHelpFormatter,
)
parser.add_argument(
"-sc",
"--submissions_count",
type=int,
default=10,
help="The number of submissions to crawl in the subreddits",
)
parser.add_argument(
"-st",
"--submissions_type",
type=str,
default="hot",
help="The submissions type to crawl in the subreddits",
)
parser.add_argument(
"-tf",
"--time_filter",
type=str,
default="day",
help="The submissions type to crawl in the subreddits",
)
parser.add_argument(
"-cc",
"--comments_count",
type=str,
default="32",
help="The number of MoreComments to crawl in the comments section",
)
parser.add_argument(
"-op",
"--output_path",
type=str,
default="./output/",
help="Output path for the processed files",
)
parser.add_argument(
"-ip",
"--input_path",
type=str,
default="./input/",
help="Input path for the subreddits_to_crawl file",
)
parser.add_argument(
"-ifn",
"--input_file_name",
type=str,
default="subreddits_to_crawl.csv",
help="File containing csv of subreddits to crawl",
)
parser.add_argument(
"-svt",
"--save_type",
type=str,
default="csv",
help=textwrap.dedent(
"""\
Save mode, can be csv, db, dbwi. Defaults to csv.
csv - csv file
db - db mode with no initialization(tables are expected to exist)
dbwi - db mode with initialization, tables are created as per the statements in `db_tables["init"] arg variable`"""
),
)
feature_parser = parser.add_mutually_exclusive_group(required=False)
feature_parser.add_argument(
"-c",
"--comments",
dest="comments",
action="store_true",
help="Flag to switch on the crawling of comments",
)
feature_parser.add_argument(
"-nc",
"--no-comments",
dest="comments",
action="store_false",
help="Flag to switch off the crawling of comments",
)
parser.set_defaults(comments=True)
args = parser.parse_args()
if args.comments_count == "None":
args.comments_count = None
else:
try:
args.comments_count = int(args.comments_count)
except ValueError:
print("Please pass a number or None for the --comments_count (-cc) option")
        raise
import numpy as np
import matplotlib.pyplot as plt
import visa
import time
import math
class DSO6012A(object):
    """Driver for an Agilent DSO6012A oscilloscope over USB via PyVISA.

    Wraps the SCPI commands needed to autoscale the channels, digitize
    and read back waveforms, and query/set vertical range and offset.
    """

    def __init__(self):
        scopeID = "USB0::0x0957::0x1722::MY45002264::INSTR"  # For DSO6012A
        #scopeID = "USB0::0x0957::0x1798::MY54231293::INSTR" # For DSO-X-2014A
        rm = visa.ResourceManager()
        self.inst = rm.open_resource(scopeID, read_termination='\n')

    def write(self, command):
        """Forward a raw SCPI command string to the instrument."""
        # print(command)
        return self.inst.write(command)

    def getChanData(self, channel):
        """Return the last acquired waveform of *channel* as a float array."""
        self.write(":WAVEFORM:SOURCE CHAN" + str(int(channel)))
        self.write(":WAVEFORM:FORMAT ASCii")
        self.write(":WAVEFORM:DATA?")
        data = self.inst.read()
        # Skip the ASCII block header preceding the comma-separated
        # samples (assumes '#<n><length>'-style framing -- TODO confirm
        # against an actual instrument response).
        numberOfDigit = int(data[1])
        data = data[numberOfDigit + 3:]
        # np.float was removed from recent NumPy releases; the builtin
        # float is the documented replacement for this dtype alias.
        return np.array(data.split(',')).astype(float)

    def getWaveForm(self, channel):
        """Digitize *channel* once, return its samples, then resume RUN."""
        self.write(":DIGITIZE CHANNEL" + str(int(channel)))
        data = self.getChanData(channel)
        self.write("RUN")
        return data

    def getAllChanWF(self):
        """Digitize both channels in one shot and return their samples."""
        self.write(":VIEW CHANNEL1;:VIEW CHANNEL2;:DIGITIZE")
        data1 = self.getChanData(1)
        data2 = self.getChanData(2)
        self.write("RUN")
        return data1, data2

    def getPointNumber(self):
        """Return the number of points in the current waveform record."""
        self.inst.write(":WAVEFORM:POINTS?")
        return int(self.inst.read())

    def acquire(self, channel=None, plot=False, autoscale=True):
        """Acquire one (or both) channels and return a time/voltage table.

        The first column is the time axis; one voltage column follows
        per acquired channel.  With ``channel=None`` both channels are
        digitized together.
        """
        if autoscale:
            if channel:
                self.myAutoScale(channel)
            else:
                self.myAutoScale(1)
                self.myAutoScale(2)
        x = self.getTimeRangeArray()
        if channel:
            y1 = self.getWaveForm(channel)
        else:
            y1, y2 = self.getAllChanWF()
        if plot:
            plt.plot(x, y1)
            if not channel:
                plt.plot(x, y2)
            plt.show(block=False)
        # Every column is overwritten below, so a plain zeros allocation
        # is clearer than the identity matrix np.eye used previously.
        table = np.zeros((len(x), 2 if channel else 3))
        table[:, 0] = x
        table[:, 1] = y1
        if not channel:
            table[:, 2] = y2
        return table

    def getTimeRange(self):
        """Return the full horizontal range of the timebase in seconds."""
        self.inst.write(":TIMEBASE:RANGE?")
        return float(self.inst.read())

    def getTimeRangeArray(self):
        """Return the time axis, centered on the trigger position."""
        pointNumber = self.getPointNumber()
        timerange = self.getTimeRange()
        return np.linspace(-timerange / 2., timerange / 2., pointNumber)

    def getRange(self, channel):
        """Query and return the vertical range of *channel* in volts."""
        self.inst.write(":CHANNEL" + str(int(channel)) + ":RANGE?")
        value = float(self.inst.read())
        print("getRange: " + str(value))
        return value

    def setRange(self, range, channel):
        """Set the vertical range of *channel* in volts.

        The parameter name shadows the builtin but is kept for backward
        compatibility with existing keyword callers.
        """
        print("Chan" + str(channel) + " setRange: " + str(range))
        self.inst.write(":CHANNEL" + str(int(channel)) + ":RANGE " + str(range))
        self.getRange(channel)
        return

    def getOffset(self, channel):
        """Query and return the vertical offset of *channel* in volts."""
        self.inst.write(":CHANNEL" + str(int(channel)) + ":OFFSET?")
        offset = float(self.inst.read())
        print("getOffset: " + str(offset))
        return offset

    def setOffset(self, offset, channel):
        """Set the vertical offset of *channel* in volts."""
        print("Chan" + str(channel) + " setOffset: " + str(offset))
        self.inst.write(":CHANNEL" + str(int(channel)) + ":OFFSET " + str(offset))
        return

    def getMinMax(self, channel):
        """Digitize *channel* and return the extrema of the samples."""
        data = self.getWaveForm(channel)
        sigMin = min(data)
        sigMax = max(data)
        print("min: " + str(sigMin) + " max: " + str(sigMax) + " ampl: " + str(sigMax - sigMin))
        return sigMin, sigMax

    def getAverage(self, channel, autoscale=False):
        """Digitize *channel* and return the mean of the samples."""
        if autoscale:
            self.myAutoScale(channel)
        data = self.getWaveForm(channel)
        avg = np.mean(data)
        print("avg: " + str(avg))
        return avg

    def myAutoScale(self, channel):
        """Iteratively fit range/offset of *channel* around the signal.

        Starts from a wide 4 V window, then narrows in three passes so
        the signal fills the screen without clipping.
        """
        self.setRange(4, channel)
        self.setOffset(0, channel)
        sigMin, sigMax = self.getMinMax(channel)
        # Prevent from narrowing the range too soon.
        self.setRange(max(0.1, sigMax - sigMin), channel)
        self.setOffset((sigMax + sigMin) / 2, channel)
        sigMin, sigMax = self.getMinMax(channel)
        # Get the minimum range that fits the signal (8 mV granularity).
        self.setRange(1.2 * math.ceil(1.2 * (sigMax - sigMin) / 0.008) * 0.008, channel)
        self.setOffset((sigMax + sigMin) / 2, channel)
        sigMin, sigMax = self.getMinMax(channel)
        self.setOffset((sigMax + sigMin) / 2, channel)
        return
if __name__=='__main__':
    scope = DSO6012A()
import numpy as np
import matplotlib.pyplot as plt
import visa
import time
import math
class DSO6012A(object):
def __init__(self):
scopeID = "USB0::0x0957::0x1722::MY45002264::INSTR" # For DSO6012A
#scopeID = "USB0::0x0957::0x1798::MY54231293::INSTR" # For DSO-X-2014A
rm = visa.ResourceManager()
self.inst = rm.open_resource(scopeID,read_termination='\n')
def write(self,command):
# print command
return self.inst.write(command)
def getChanData(self,channel):
self.write(":WAVEFORM:SOURCE CHAN"+str(int(channel)))
self.write(":WAVEFORM:FORMAT ASCii")
self.write(":WAVEFORM:DATA?")
data = self.inst.read()
numberOfDigit=int(data[1])
data=data[numberOfDigit+3:]
data = data.split(',')
data = np.array(data)
data = data.astype(np.float)
return data
def getWaveForm(self, channel):
self.write(":DIGITIZE CHANNEL"+str(int(channel)))
data = self.getChanData(channel)
self.write("RUN")
return data
def getAllChanWF(self):
self.write(":VIEW CHANNEL1;:VIEW CHANNEL2;:DIGITIZE")
data1 = self.getChanData(1)
data2 = self.getChanData(2)
self.write("RUN")
return data1,data2
def getPointNumber(self):
self.inst.write(":WAVEFORM:POINTS?")
pointNumber = self.inst.read()
pointNumber = int(pointNumber)
return pointNumber
def acquire(self,channel=None,plot=False,autoscale=True):
if autoscale:
if channel: self.myAutoScale(channel)
else :
self.myAutoScale(1)
self.myAutoScale(2)
x = self.getTimeRangeArray()
if channel:
y1 = self.getWaveForm(channel)
else:
y1,y2 = self.getAllChanWF()
if plot:
plt.plot(x,y1)
if not channel:
plt.plot(x,y2)
plt.show(block=False)
if channel:
table = np.eye(len(x),2)
else: table = np.eye(len(x),3)
table[:,0] = x
table[:,1] = y1
if not channel:
table[:,2] = y2
return table
def getTimeRange(self):
self.inst.write(":TIMEBASE:RANGE?")
timerange = self.inst.read()
timerange = float(timerange)
return timerange
def getTimeRangeArray(self):
pointNumber = self.getPointNumber()
timerange = self.getTimeRange()
x = np.linspace(-timerange/2.,timerange/2.,pointNumber)
return x
def getRange(self, channel):
self.inst.write(":CHANNEL"+str(int(channel))+":RANGE?")
range = self.inst.read()
range = float(range)
print "getRange: "+str(range)
return range
def setRange(self,range,channel):
print "Chan"+str(channel)+" setRange: "+str(range)
self.inst.write(":CHANNEL"+str(int(channel))+":RANGE "+str(range))
self.getRange(channel)
return
def getOffset(self, channel):
self.inst.write(":CHANNEL"+str(int(channel))+":OFFSET?")
offset = self.inst.read()
offset = float(offset)
print "getOffset: "+str(offset)
return offset
def setOffset(self,offset,channel):
print "Chan"+str(channel)+" setOffset: "+str(offset)
self.inst.write(":CHANNEL"+str(int(channel))+":OFFSET "+str(offset))
return
def getMinMax(self,channel):
data = self.getWaveForm(channel)
sigMin = min(data)
sigMax = max(data)
print "min: "+str(sigMin)+" max: "+str(sigMax)+" ampl: "+str(sigMax-sigMin)
return sigMin, sigMax
def getAverage(self,channel,autoscale=False):
if autoscale: self.myAutoScale(channel)
data = self.getWaveForm(channel)
avg = np.mean(data)
print "avg: "+str(avg)
return avg
def myAutoScale(self,channel):
range = 4
offset = 0
self.setRange(range,channel)
self.setOffset(offset,channel)
sigMin, sigMax = self.getMinMax(channel)
range = max(0.1,sigMax-sigMin) #Prevent from narrowing the range too soon
offset = (sigMax+sigMin)/2
self.setRange(range,channel)
self.setOffset(offset,channel)
sigMin, sigMax = self.getMinMax(channel)
range = 1.2*math.ceil(1.2*(sigMax-sigMin)/0.008)*0.008 #Get the minimum range that fits the signal
offset = (sigMax+sigMin)/2
self.setRange(range,channel)
self.setOffset(offset,channel)
sigMin, sigMax = self.getMinMax(channel)
offset = (sigMax+sigMin)/2
self.setOffset(offset,channel)
return
if __name__=='__main__':
    scope = DSO6012A()
from pyrpc import serializers
import json
from django.test import TestCase
class AppTestCase(TestCase):
    """Unit tests for the pyrpc serializer helpers and serializers."""

    def test_is_description_line(self):
        # Plain text counts as a description; tags and blanks do not.
        assert serializers.is_description_line('Test')
        assert not serializers.is_description_line('@param')
        assert not serializers.is_description_line('@return')
        assert not serializers.is_description_line('')

    def test_is_param_line(self):
        assert not serializers.is_param_line('Test')
        assert serializers.is_param_line('@param')

    def test_is_return_line(self):
        assert not serializers.is_return_line('Test')
        assert serializers.is_return_line('@returns')

    def test_method_serializer(self):
        # The docstring below is the fixture: the serializer parses it.
        def placeholder_method():
            """
            test
            @param test: test
            @returns: pass
            """
            pass

        expected = {
            "name": "placeholder_method",
            "kwargs": {
                "test": "test"
            },
            "description": [
                "test"
            ],
            "returns": "pass"
        }
        assert serializers.MethodSerializer(placeholder_method).data == expected

    def test_error_serializer(self):
        fixture = {"id": 1, "error": "test"}
        expected = {"id": 1, "jsonrpc": "2.0", "error": "test"}
        assert serializers.ErrorSerializer(fixture).data == expected

    def test_result_serializer(self):
        fixture = {"id": 1, "result": "test"}
        expected = {"id": 1, "jsonrpc": "2.0", "result": "test"}
        assert serializers.ResultSerializer(fixture).data == expected
import json
from django.test import TestCase
class AppTestCase(TestCase):
def test_is_description_line(self):
line = 'Test'
assert(serializers.is_description_line(line))
line = '@param'
assert(not serializers.is_description_line(line))
line = '@return'
assert(not serializers.is_description_line(line))
line = ''
assert(not serializers.is_description_line(line))
def test_is_param_line(self):
line = 'Test'
assert(not serializers.is_param_line(line))
line = '@param'
assert(serializers.is_param_line(line))
def test_is_return_line(self):
line = 'Test'
assert(not serializers.is_return_line(line))
line = '@returns'
assert(serializers.is_return_line(line))
def test_method_serializer(self):
def placeholder_method():
"""
test
@param test: test
@returns: pass
"""
pass
should_return = {
"name": "placeholder_method",
"kwargs": {
"test": "test"
},
"description": [
"test"
],
"returns": "pass"
}
serializer = serializers.MethodSerializer(placeholder_method).data
assert(serializer == should_return)
def test_error_serializer(self):
fixture = {
"id": 1,
"error": "test"
}
should_return = {
"id": 1,
"jsonrpc": "2.0",
"error": "test"
}
serializer = serializers.ErrorSerializer(fixture).data
assert(serializer == should_return)
def test_result_serializer(self):
fixture = {
"id": 1,
"result": "test"
}
should_return = {
"id": 1,
"jsonrpc": "2.0",
"result": "test"
}
serializer = serializers.ResultSerializer(fixture).data
        assert(serializer == should_return)
import io
import re
from pathlib import Path
from typing import List, Tuple, Union
import h5py
import numpy
from mlxtk.tools.wave_function import get_spfs, load_wave_function
RE_TIME = re.compile(r"^\s+(.+)\s+\[au\]$")
RE_ELEMENT = re.compile(r"^\s*\((.+)\,(.+)\)$")
def read_first_frame(path: str) -> str:
    """Return everything up to (excluding) the second ``$time`` marker.

    If the file contains at most one ``$time`` line, the whole file
    content is returned.
    """
    collected: List[str] = []
    seen_time = False
    with open(path) as stream:
        for raw in stream:
            if raw.startswith("$time"):
                if seen_time:
                    # A second frame starts here; stop before it.
                    break
                seen_time = True
            collected.append(raw)
    return "".join(collected)
def read_spfs(path: str) -> Tuple[numpy.ndarray, numpy.ndarray]:
    """Compute the SPFs for every time step stored in an ASCII psi file."""
    # The first frame defines the wave-function layout; parse it alone.
    with io.StringIO(read_first_frame(path)) as buffer:
        wfn = load_wave_function(buffer)
    _, times, coefficients = read_psi_ascii(path)
    per_step = []
    for coeff in coefficients:
        wfn.PSI = coeff
        per_step.append(numpy.array(get_spfs(wfn)))
    # Move the SPF axis to the front: (spf, time, ...)
    return times, numpy.moveaxis(numpy.array(per_step), 1, 0)
def read_psi_ascii(
    path: Union[str, Path]
) -> Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]:
    """Parse an ASCII psi file into ``(tape, times, coefficients)``.

    The file starts with a ``$tape`` section of integers; every frame
    then consists of a ``$time`` header line followed by a ``$psi``
    block of ``(re,im)`` entries.
    """
    path = str(path)
    times: List[float] = []
    frames: List[List[complex]] = []
    tape: List[int] = []
    in_frames = False
    with open(path) as handle:
        for line in handle:
            if line.startswith("$time"):
                in_frames = True
                matched = RE_TIME.match(handle.readline())
                if not matched:
                    raise RuntimeError(
                        f"Error extracting time point from label: {line}"
                    )
                times.append(float(matched.group(1)))
            elif line.startswith("$psi"):
                in_frames = True
                frames.append([])
                entry = RE_ELEMENT.match(handle.readline())
                while entry:
                    frames[-1].append(
                        float(entry.group(1)) + 1j * float(entry.group(2))
                    )
                    entry = RE_ELEMENT.match(handle.readline())
            if not in_frames:
                # Everything before the first frame (except the "$tape"
                # marker and blank lines) is a tape entry.
                if line.startswith("$tape") or not line.strip():
                    continue
                tape.append(int(line))
    return (
        numpy.array(tape, dtype=numpy.int64),
        numpy.array(times),
        numpy.array(
            [numpy.array(frame).transpose() for frame in frames],
            dtype=numpy.complex128,
        ),
    )
def read_psi_frame_ascii(
    path: Union[str, Path], index: int
) -> Tuple[numpy.ndarray, float, numpy.ndarray]:
    """Extract the tape, time and coefficients of frame *index*.

    Raises
    ------
    KeyError
        If the file holds fewer than ``index + 1`` frames.
    """
    path = str(path)
    frame_no = -1
    times: List[float] = []
    coeffs: List[complex] = []
    tape: List[int] = []
    in_frames = False
    with open(path) as handle:
        for line in handle:
            if line.startswith("$time"):
                in_frames = True
                matched = RE_TIME.match(handle.readline())
                if not matched:
                    raise RuntimeError(
                        f"Error extracting time point from label: {line}"
                    )
                times.append(float(matched.group(1)))
            elif line.startswith("$psi"):
                frame_no += 1
                entry = RE_ELEMENT.match(handle.readline())
                while entry:
                    # Only the requested frame's entries are retained.
                    if frame_no == index:
                        coeffs.append(
                            float(entry.group(1)) + 1j * float(entry.group(2))
                        )
                    entry = RE_ELEMENT.match(handle.readline())
            if not in_frames:
                if line.startswith("$tape") or not line.strip():
                    continue
                tape.append(int(line))
    if not coeffs:
        raise KeyError(f"index {index} is out of bounds")
    return (
        numpy.array(tape, dtype=numpy.int64),
        numpy.array(times[index]),
        numpy.array(coeffs, dtype=numpy.complex128),
    )
def read_psi_hdf5(path):
    """Load tape, times and coefficient matrix from an HDF5 psi file."""
    with h5py.File(path, "r") as handle:
        return [handle["tape"][:], handle["time"][:], handle["psis"][:, :]]
def write_psi_hdf5(path, data):
    """Write ``(tape, time, psis)`` to *path* as gzip-compressed datasets."""
    tape, time, psis = data
    with h5py.File(path, "w") as handle:
        # One dataset per component, all gzip-compressed.
        for name, values, dtype in (
            ("tape", tape, numpy.int64),
            ("time", time, numpy.float64),
            ("psis", psis, numpy.complex128),
        ):
            dset = handle.create_dataset(
                name, values.shape, dtype=dtype, compression="gzip"
            )
            dset[...] = values
def write_psi_ascii(path, data):
    """Write ``(tape, time, psis)`` to *path* in the ASCII psi format."""
    tape, times, psis = data
    with open(path, "w") as handle:
        handle.write("$tape\n")
        for entry in tape:
            handle.write(f"\t{entry}\n")
        # One "$time"/"$psi" section per frame, entries as "(re,im)".
        for index, stamp in enumerate(times):
            handle.write(f"\n$time\n\t{stamp} [au]\n$psi\n")
            for entry in psis[index]:
                handle.write(f" ({numpy.real(entry)},{numpy.imag(entry)})\n")
import re
from pathlib import Path
from typing import List, Tuple, Union
import h5py
import numpy
from mlxtk.tools.wave_function import get_spfs, load_wave_function
RE_TIME = re.compile(r"^\s+(.+)\s+\[au\]$")
RE_ELEMENT = re.compile(r"^\s*\((.+)\,(.+)\)$")
def read_first_frame(path: str) -> str:
frame = [] # type: List[str]
encountered_time = False
with open(path) as fhandle:
for line in fhandle:
if line.startswith("$time"):
if encountered_time:
return "".join(frame)
encountered_time = True
frame.append(line)
continue
frame.append(line)
return "".join(frame)
def read_spfs(path: str) -> Tuple[numpy.ndarray, numpy.ndarray]:
with io.StringIO(read_first_frame(path)) as sio:
wfn = load_wave_function(sio)
_, times, psis = read_psi_ascii(path)
spfs = []
for psi in psis:
wfn.PSI = psi
spfs.append(numpy.array(get_spfs(wfn)))
return times, numpy.moveaxis(numpy.array(spfs), 1, 0)
def read_psi_ascii(
path: Union[str, Path]
) -> Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]:
path = str(path)
times: List[float] = []
psis: List[List[complex]] = []
tape: List[int] = []
tape_finished = False
with open(path) as fhandle:
for line in fhandle:
if line.startswith("$time"):
tape_finished = True
m = RE_TIME.match(fhandle.readline())
if not m:
raise RuntimeError(
f"Error extracting time point from label: {line}"
)
time = float(m.group(1))
times.append(time)
elif line.startswith("$psi"):
tape_finished = True
psis.append([])
match = RE_ELEMENT.match(fhandle.readline())
while match:
psis[-1].append(float(match.group(1)) + 1j * float(match.group(2)))
match = RE_ELEMENT.match(fhandle.readline())
if not tape_finished:
if line.startswith("$tape"):
continue
if not line.strip():
continue
tape.append(int(line))
return (
numpy.array(tape, dtype=numpy.int64),
numpy.array(times),
numpy.array(
[numpy.array(psi).transpose() for psi in psis], dtype=numpy.complex128
),
)
def read_psi_frame_ascii(
path: Union[str, Path], index: int
) -> Tuple[numpy.ndarray, float, numpy.ndarray]:
path = str(path)
counter = -1
times: List[float] = []
psi: List[complex] = []
tape: List[int] = []
tape_finished = False
with open(path) as fhandle:
for line in fhandle:
if line.startswith("$time"):
tape_finished = True
m = RE_TIME.match(fhandle.readline())
if not m:
raise RuntimeError(
f"Error extracting time point from label: {line}"
)
time = float(m.group(1))
times.append(time)
elif line.startswith("$psi"):
counter += 1
match = RE_ELEMENT.match(fhandle.readline())
while match:
if counter == index:
psi.append(float(match.group(1)) + 1j * float(match.group(2)))
match = RE_ELEMENT.match(fhandle.readline())
if not tape_finished:
if line.startswith("$tape"):
continue
if not line.strip():
continue
tape.append(int(line))
if not psi:
raise KeyError(f"index {index} is out of bounds")
return (
numpy.array(tape, dtype=numpy.int64),
numpy.array(times[index]),
numpy.array(psi, dtype=numpy.complex128),
)
def read_psi_hdf5(path):
with h5py.File(path, "r") as fptr:
tape = fptr["tape"][:]
time = fptr["time"][:]
psis = fptr["psis"][:, :]
return [tape, time, psis]
def write_psi_hdf5(path, data):
tape, time, psis = data
with h5py.File(path, "w") as fptr:
dset = fptr.create_dataset(
"tape", tape.shape, dtype=numpy.int64, compression="gzip"
)
dset[:] = tape
dset = fptr.create_dataset(
"time", time.shape, dtype=numpy.float64, compression="gzip"
)
dset[:] = time
dset = fptr.create_dataset(
"psis", psis.shape, dtype=numpy.complex128, compression="gzip"
)
dset[:, :] = psis[:, :]
def write_psi_ascii(path, data):
tape, time, psis = data
with open(path, "w") as fptr:
fptr.write("$tape\n")
fptr.writelines(f"\t{entry}\n" for entry in tape)
for i, time in enumerate(time):
fptr.write(f"\n$time\n\t{time} [au]\n$psi\n")
fptr.writelines(
f" ({numpy.real(entry)},{numpy.imag(entry)})\n" for entry in psis[i]
            )
# coding: utf-8
"""
Submarine Experiment API
The Submarine REST API allows you to create, list, and get experiments. TheAPI is hosted under the /v1/jobs route on the Submarine server. For example,to list experiments on a server hosted at http://localhost:8080, accesshttp://localhost:8080/api/v1/jobs/ # noqa: E501
OpenAPI spec version: 0.4.0-SNAPSHOT
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class JobSpec(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'namespace': 'str',
'library_spec': 'JobLibrarySpec',
'task_specs': 'dict(str, JobTaskSpec)',
'projects': 'str'
}
attribute_map = {
'name': 'name',
'namespace': 'namespace',
'library_spec': 'librarySpec',
'task_specs': 'taskSpecs',
'projects': 'projects'
}
def __init__(self, name=None, namespace=None, library_spec=None, task_specs=None, projects=None): # noqa: E501
"""JobSpec - a model defined in Swagger""" # noqa: E501
self._name = None
self._namespace = None
self._library_spec = None
self._task_specs = None
self._projects = None
self.discriminator = None
if name is not None:
self.name = name
if namespace is not None:
self.namespace = namespace
if library_spec is not None:
self.library_spec = library_spec
if task_specs is not None:
self.task_specs = task_specs
if projects is not None:
self.projects = projects
@property
def name(self):
"""Gets the name of this JobSpec. # noqa: E501
:return: The name of this JobSpec. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this JobSpec.
:param name: The name of this JobSpec. # noqa: E501
:type: str
"""
self._name = name
@property
def namespace(self):
"""Gets the namespace of this JobSpec. # noqa: E501
:return: The namespace of this JobSpec. # noqa: E501
:rtype: str
"""
return self._namespace
@namespace.setter
def namespace(self, namespace):
"""Sets the namespace of this JobSpec.
:param namespace: The namespace of this JobSpec. # noqa: E501
:type: str
"""
self._namespace = namespace
@property
def library_spec(self):
"""Gets the library_spec of this JobSpec. # noqa: E501
:return: The library_spec of this JobSpec. # noqa: E501
:rtype: JobLibrarySpec
"""
return self._library_spec
@library_spec.setter
def library_spec(self, library_spec):
"""Sets the library_spec of this JobSpec.
:param library_spec: The library_spec of this JobSpec. # noqa: E501
:type: JobLibrarySpec
"""
self._library_spec = library_spec
@property
def task_specs(self):
"""Gets the task_specs of this JobSpec. # noqa: E501
:return: The task_specs of this JobSpec. # noqa: E501
:rtype: dict(str, JobTaskSpec)
"""
return self._task_specs
@task_specs.setter
def task_specs(self, task_specs):
"""Sets the task_specs of this JobSpec.
:param task_specs: The task_specs of this JobSpec. # noqa: E501
:type: dict(str, JobTaskSpec)
"""
self._task_specs = task_specs
@property
def projects(self):
"""Gets the projects of this JobSpec. # noqa: E501
:return: The projects of this JobSpec. # noqa: E501
:rtype: str
"""
return self._projects
@projects.setter
def projects(self, projects):
"""Sets the projects of this JobSpec.
:param projects: The projects of this JobSpec. # noqa: E501
:type: str
"""
self._projects = projects
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(JobSpec, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, JobSpec):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
        return not self == other
# coding: utf-8
"""
Submarine Experiment API
The Submarine REST API allows you to create, list, and get experiments. TheAPI is hosted under the /v1/jobs route on the Submarine server. For example,to list experiments on a server hosted at http://localhost:8080, accesshttp://localhost:8080/api/v1/jobs/ # noqa: E501
OpenAPI spec version: 0.4.0-SNAPSHOT
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class JobSpec(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'namespace': 'str',
'library_spec': 'JobLibrarySpec',
'task_specs': 'dict(str, JobTaskSpec)',
'projects': 'str'
}
attribute_map = {
'name': 'name',
'namespace': 'namespace',
'library_spec': 'librarySpec',
'task_specs': 'taskSpecs',
'projects': 'projects'
}
def __init__(self, name=None, namespace=None, library_spec=None, task_specs=None, projects=None): # noqa: E501
"""JobSpec - a model defined in Swagger""" # noqa: E501
self._name = None
self._namespace = None
self._library_spec = None
self._task_specs = None
self._projects = None
self.discriminator = None
if name is not None:
self.name = name
if namespace is not None:
self.namespace = namespace
if library_spec is not None:
self.library_spec = library_spec
if task_specs is not None:
self.task_specs = task_specs
if projects is not None:
self.projects = projects
@property
def name(self):
"""Gets the name of this JobSpec. # noqa: E501
:return: The name of this JobSpec. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this JobSpec.
:param name: The name of this JobSpec. # noqa: E501
:type: str
"""
self._name = name
@property
def namespace(self):
"""Gets the namespace of this JobSpec. # noqa: E501
:return: The namespace of this JobSpec. # noqa: E501
:rtype: str
"""
return self._namespace
@namespace.setter
def namespace(self, namespace):
"""Sets the namespace of this JobSpec.
:param namespace: The namespace of this JobSpec. # noqa: E501
:type: str
"""
self._namespace = namespace
@property
def library_spec(self):
"""Gets the library_spec of this JobSpec. # noqa: E501
:return: The library_spec of this JobSpec. # noqa: E501
:rtype: JobLibrarySpec
"""
return self._library_spec
@library_spec.setter
def library_spec(self, library_spec):
"""Sets the library_spec of this JobSpec.
:param library_spec: The library_spec of this JobSpec. # noqa: E501
:type: JobLibrarySpec
"""
self._library_spec = library_spec
@property
def task_specs(self):
"""Gets the task_specs of this JobSpec. # noqa: E501
:return: The task_specs of this JobSpec. # noqa: E501
:rtype: dict(str, JobTaskSpec)
"""
return self._task_specs
@task_specs.setter
def task_specs(self, task_specs):
"""Sets the task_specs of this JobSpec.
:param task_specs: The task_specs of this JobSpec. # noqa: E501
:type: dict(str, JobTaskSpec)
"""
self._task_specs = task_specs
@property
def projects(self):
"""Gets the projects of this JobSpec. # noqa: E501
:return: The projects of this JobSpec. # noqa: E501
:rtype: str
"""
return self._projects
@projects.setter
def projects(self, projects):
"""Sets the projects of this JobSpec.
:param projects: The projects of this JobSpec. # noqa: E501
:type: str
"""
self._projects = projects
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(JobSpec, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, JobSpec):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
        return not self == other
import ast
import numpy as np
def listify(obj):
    """Wrap all non-list or tuple objects in a list.

    Provides a simple way to accept flexible arguments: ``None`` becomes an
    empty list, lists and tuples pass through unchanged, and any other value
    is wrapped in a single-element list.
    """
    if obj is None:
        return []
    # The original also tested type(None) here, but None is already handled
    # by the explicit check above, so that member was dead.
    return obj if isinstance(obj, (list, tuple)) else [obj]
def spacify(string, n=2):
    """Indent each line of a multi-line string by ``n`` spaces."""
    prefix = " " * n
    lines = string.splitlines(True)
    return prefix + prefix.join(lines)
def multilinify(sequence, sep=","):
"""Make a multi-line string out of a sequence of strings."""
sep += "\n"
return "\n" + sep.join(sequence)
def c(*args):  # pylint: disable=invalid-name
    """Concatenate columns into a 2D NumPy Array.

    Each positional argument is treated as one column (R-style ``c``).
    """
    return np.column_stack(args)
def extract_argument_names(expr, accepted_funcs):
    """Extract the names of the arguments passed to a function.

    This is used to extract the labels from function calls such as
    ``c(y1, y2, y3)``.

    Parameters
    ----------
    expr : str
        An expression that is parsed to extract the components of the call.
    accepted_funcs : list
        A list with the names of the functions that we accept to parse.

    Returns
    -------
    list or None
        If all criteria are met, the names of the arguments. Otherwise None.
    """
    # Extract the first statement; it must be a bare expression.
    parsed_expr = ast.parse(expr).body[0]
    if not isinstance(parsed_expr, ast.Expr):
        return None
    # The expression must be a function call.
    value = parsed_expr.value
    if not isinstance(value, ast.Call):
        return None
    # The callee must be a plain name: previously `value.func.id` raised
    # AttributeError for attribute calls such as `np.c(x)`.
    if not isinstance(value.func, ast.Name):
        return None
    # The call name must be one of the expected functions.
    if value.func.id not in accepted_funcs:
        return None
    # Every argument must itself be a plain name (no constants/expressions).
    args = value.args
    if not all(isinstance(arg, ast.Name) for arg in args):
        return None
    # We can safely build labels now; an empty call yields None.
    labels = [arg.id for arg in args]
    return labels if labels else None
extra_namespace = {"c": c} | bambi/utils.py | import ast
import numpy as np
def listify(obj):
"""Wrap all non-list or tuple objects in a list.
Provides a simple way to accept flexible arguments.
"""
if obj is None:
return []
else:
return obj if isinstance(obj, (list, tuple, type(None))) else [obj]
def spacify(string, n=2):
"""Add spaces to the beginning of each line in a multi-line string."""
space = n * " "
return space + space.join(string.splitlines(True))
def multilinify(sequence, sep=","):
"""Make a multi-line string out of a sequence of strings."""
sep += "\n"
return "\n" + sep.join(sequence)
def c(*args): # pylint: disable=invalid-name
"""Concatenate columns into a 2D NumPy Array"""
return np.column_stack(args)
def extract_argument_names(expr, accepted_funcs):
"""Extract the names of the arguments passed to a function.
This is used to extract the labels from function calls such as `c(y1, y2, y3, y3)`.
Parameters
----------
expr : str
An expression that is parsed to extract the components of the call.
accepted_funcs : list
A list with the names of the functions that we accept to parse.
Returns
-------
list
If all criteria are met, the names of the arguments. Otherwise it returns None.
"""
# Extract the first thing in the body
parsed_expr = ast.parse(expr).body[0]
if not isinstance(parsed_expr, ast.Expr):
return None
# Check the value is a call
value = parsed_expr.value
if not isinstance(value, ast.Call):
return None
# Check call name is the name of an exepcted function
if value.func.id not in accepted_funcs:
return None
# Check all arguments are either names or constants
args = value.args
if not all(isinstance(arg, ast.Name) for arg in args):
return None
# We can safely build labels now
labels = [arg.id for arg in args]
if labels:
return labels
return None
extra_namespace = {"c": c} | 0.775945 | 0.445228 |
import logging
import os
import codecs
import random
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union, Dict
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available, RobertaModel, BertPreTrainedModel, XLMRobertaConfig
logger = logging.getLogger(__name__)
@dataclass
class InputExample:
    """
    A single training/test example for semantic role labeling.

    Args:
        guid: `str` Unique id for the example.
        predicate_indicator: `List[int]` Per-word indicator (1 at the predicate, 0 elsewhere).
        words: `List[str]` The words of the sequence.
        tags: (Optional) `List[str]` The labels for each word of the sequence. This should be
            specified for train and dev examples, but not for test examples.
    """
    guid: str
    predicate_indicator: List[int]
    words: List[str]
    tags: Optional[List[str]]
@dataclass
class InputFeatures:
    """
    A single set of features of data.
    Property names are the same names as the corresponding inputs to a model.
    """
    # Token ids of the encoded sequence.
    input_ids: List[int]
    # 1 for real tokens, 0 for padding.
    attention_mask: List[int]
    # Per-token label ids; padding positions carry the ignore index.
    labels: Optional[List[int]] = None
    # Segment ids; None for models without token_type_ids.
    token_type_ids: Optional[List[int]] = None
if is_torch_available():
    import torch
    from torch import nn
    from torch.utils.data.dataset import Dataset

    class SRLDataset(Dataset):
        """
        Dataset for reading SRL data.

        Features are built eagerly at construction time from raw
        {"sentence", "index"} prediction dicts.
        """

        features: List[InputFeatures]
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index

        def __init__(
            self,
            data: List[Dict],
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
        ):
            # Load data features
            # NOTE this is kind of hacky, but it works for now.
            examples = read_prediction_input(data)
            self.features = convert_examples_to_append_features(
                examples,
                labels,
                max_seq_length,
                tokenizer,
                cls_token_at_end = bool(model_type in ["xlnet"]),  # xlnet has a cls token at the end
                cls_token = tokenizer.cls_token,
                cls_token_segment_id = 2 if model_type in ["xlnet"] else 0,
                sep_token = tokenizer.sep_token,
                sep_token_extra = False,  # roberta uses an extra separator b/w pairs of sentences, cf. github.com/pytorch/fairseq/commit/1684e166e3da03f5b600dbb7855cb98ddfcd0805
                pad_on_left = bool(tokenizer.padding_side == "left"),
                pad_token = tokenizer.pad_token_id,
                pad_token_segment_id = tokenizer.pad_token_type_id,
                pad_token_label_id = self.pad_token_label_id,
            )
            return

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
def read_prediction_input(data) -> List[InputExample]:
    """Build InputExamples from raw {"sentence", "index"} prediction dicts.

    Entries whose predicate index is outside the sentence are skipped; the
    guid counter only advances for accepted entries.
    """
    examples = []
    guid = 1
    for entry in data:
        words = entry["sentence"]  # .strip().split()
        pred_idx = entry["index"]
        if pred_idx not in range(len(words)):
            continue
        # One-hot predicate indicator plus a "B-V" tag at the predicate.
        indicator = [1 if pos == pred_idx else 0 for pos in range(len(words))]
        tags = ["O"] * len(words)
        tags[pred_idx] = "B-V"
        examples.append(
            InputExample(
                guid=f"input-{guid}",
                words=words,
                predicate_indicator=indicator,
                tags=tags,
            )
        )
        guid += 1
    return examples
def convert_examples_to_append_features(
    examples: List[InputExample],
    label_list: List[str],
    max_seq_length: int,
    tokenizer: PreTrainedTokenizer,
    cls_token_at_end = False,
    cls_token = "[CLS]",
    cls_token_segment_id = 1,
    sep_token = "[SEP]",
    sep_token_extra = False,
    pad_on_left = False,
    pad_token = 0,
    pad_token_segment_id = 0,
    pad_token_label_id = -100,
    sequence_a_segment_id = 0,
    sequence_b_segment_id = 1,
    mask_padding_with_zero = True,
) -> List[InputFeatures]:
    """
    Loads a list of input examples from read_better_examples_from_file into a list of `InputFeatures`.

    The predicate's subtokens are appended after the sentence as a second
    "sentence" segment (hence "append" features), i.e. the encoding is
    [CLS] sentence [SEP] predicate [SEP] (CLS position depends on
    ``cls_token_at_end``).
    """
    label_map = {label: i for i, label in enumerate(label_list)}
    features = []
    for (ex_index, example) in enumerate(examples):
        if ex_index % 10_000 == 0:
            logger.info("Writing example %d of %d", ex_index, len(examples))
        tokens = []
        label_ids = []
        predicate_ids = []
        predicate = []
        predicate_label = ""
        for word, label, pred_ind in zip(example.words, example.tags, example.predicate_indicator):
            word_tokens = tokenizer.tokenize(word)
            if pred_ind == 1:
                # Remember the predicate's subtokens; they are appended again
                # after the sentence as the second segment below.
                predicate = word_tokens
                predicate_label = label
            if len(word_tokens) > 0:
                tokens.extend(word_tokens)
                # Use the real label id for the first token of the word, and padding ids for the remaining
                label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens)-1))
                predicate_ids.extend([pred_ind] * len(word_tokens))
        # Account for [CLS] and [SEP] with "- 2" and "- 3" for RoBERTa then additional for the predicate as the second sentence
        special_tokens_count = tokenizer.num_special_tokens_to_add() + len(predicate) + 1
        if len(tokens) > max_seq_length - special_tokens_count:
            tokens = tokens[: (max_seq_length-special_tokens_count)]
            label_ids = label_ids[: (max_seq_length - special_tokens_count)]
            predicate_ids = predicate_ids[:(max_seq_length - special_tokens_count)]
        tokens += [sep_token]
        label_ids += [pad_token_label_id]
        predicate_ids += [0]
        if sep_token_extra:
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            predicate_ids += [0]
        segment_ids = [sequence_a_segment_id] * len(tokens)
        # Append the predicate as the second segment.
        tokens.extend(predicate)
        label_ids.extend([label_map[predicate_label]] + [pad_token_label_id]*(len(predicate)-1))  # TODO what should the label id for the second sentence (the predicate) be?
        predicate_ids.extend([0] * len(predicate))  # TODO or should it be 1?
        segment_ids.extend([sequence_b_segment_id] * len(predicate))
        tokens += [sep_token]
        label_ids += [pad_token_label_id]
        predicate_ids += [0]
        segment_ids += [sequence_b_segment_id]
        if cls_token_at_end:
            tokens += [cls_token]
            label_ids += [pad_token_label_id]
            segment_ids += [cls_token_segment_id]
            predicate_ids += [0]
        else:
            tokens = [cls_token] + tokens
            label_ids = [pad_token_label_id] + label_ids
            segment_ids = [cls_token_segment_id] + segment_ids
            predicate_ids = [0] + predicate_ids
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        # The mask has 1 for real tokens and 0 for padding tokens.
        input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
        # Zero-pad up to the sequence length
        padding_length = max_seq_length - len(input_ids)
        if pad_on_left:
            input_ids = ([pad_token] * padding_length) + input_ids
            predicate_ids = ([0] * padding_length) + predicate_ids
            input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
            segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
            label_ids = ([pad_token_label_id] * padding_length) + label_ids
        else:
            input_ids += [pad_token] * padding_length
            predicate_ids += [0] * padding_length
            input_mask += [0 if mask_padding_with_zero else 1] * padding_length
            segment_ids += [pad_token_segment_id] * padding_length
            label_ids += [pad_token_label_id] * padding_length
        assert len(input_ids) == max_seq_length
        assert len(predicate_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length
        assert len(label_ids) == max_seq_length
        if ex_index % 1000 == 0:
            logger.info("*** Example ***")
            logger.info("guid: %s", example.guid)
            logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
            logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
            # logger.info("predicate_ids: %s", " ".join([str(x) for x in predicate_ids]))
            logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
            logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
            logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))
        # Models without token_type_ids (e.g. RoBERTa-style) get None segments.
        if "token_type_ids" not in tokenizer.model_input_names:
            segment_ids = None
            # predicate_ids = None
        features.append(
            InputFeatures(
                input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, labels=label_ids
            )
        )
    return features
def get_labels(path: str) -> List[str]:
    """Read one label per line from ``path``; fall back to the default SRL set.

    Ensures the "O" (outside) label is always present.  (The original final
    line was contaminated with dataset-dump junk.)
    """
    if path:
        with open(path, "r") as f:
            labels = f.read().splitlines()
        if "O" not in labels:
            labels = ["O"] + labels
        return labels
    return ['O', 'B-A1', 'I-A1', 'B-A0', 'I-A0', 'B-V', 'I-V']
import os
import codecs
import random
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union, Dict
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available, RobertaModel, BertPreTrainedModel, XLMRobertaConfig
logger = logging.getLogger(__name__)
@dataclass
class InputExample:
"""
A single training/test example for semantic role labeling.
Args:
guid: `str` Unique id for the example.
predicate_indicator: `List[int]` The predicate indicator for the examples.
words: `List[str]` The words of the sequence.
labels: (Optional) `List[str]` The labels for each word of the sequence. This should be specified for train and dev examples, but not for test examples.
"""
guid: str
predicate_indicator: List[int]
words: List[str]
tags: Optional[List[str]]
@dataclass
class InputFeatures:
"""
A single set of features of data.
Property names are the same names as the corresponding inputs to a model.
"""
input_ids: List[int]
attention_mask: List[int]
labels: Optional[List[int]] = None
token_type_ids: Optional[List[int]] = None
if is_torch_available():
import torch
from torch import nn
from torch.utils.data.dataset import Dataset
class SRLDataset(Dataset):
"""
Dataset for reading SRL data.
"""
features: List[InputFeatures]
pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
# Use cross entropy ignore_index as padding label id so that only real labe ids contribute to loss later.
def __init__(
self,
data: List[Dict],
tokenizer: PreTrainedTokenizer,
labels: List[str],
model_type: str,
max_seq_length: Optional[int] = None,
):
# Load data features
# NOTE this is kind of hacky, but it works for now.
examples = read_prediction_input(data)
self.features = convert_examples_to_append_features(
examples,
labels,
max_seq_length,
tokenizer,
cls_token_at_end = bool(model_type in ["xlnet"]), # xlnet has a cls token at the end
cls_token = tokenizer.cls_token,
cls_token_segment_id = 2 if model_type in ["xlnet"] else 0,
sep_token = tokenizer.sep_token,
sep_token_extra = False, # roberta uses an extra separator b/w pairs of sentences, cf. github.com/pytorch/fairseq/commit/1684e166e3da03f5b600dbb7855cb98ddfcd0805
pad_on_left = bool(tokenizer.padding_side == "left"),
pad_token = tokenizer.pad_token_id,
pad_token_segment_id = tokenizer.pad_token_type_id,
pad_token_label_id = self.pad_token_label_id,
)
return
def __len__(self):
return len(self.features)
def __getitem__(self, i) -> InputFeatures:
return self.features[i]
def read_prediction_input(data) -> List[InputExample]:
guid_index = 1
examples = []
for entry in data:
sentence = entry["sentence"] # .strip().split()
predicate_index = entry["index"]
if predicate_index not in range(len(sentence)):
continue
predicate = [0 if index != predicate_index else 1 for index in range(len(sentence))]
one_hot_tags = ["O" for _ in sentence]
one_hot_tags[predicate_index] = "B-V"
examples.append(InputExample(guid=f"input-{guid_index}", words=sentence, predicate_indicator=predicate, tags=one_hot_tags))
guid_index += 1
return examples
def convert_examples_to_append_features(
examples: List[InputExample],
label_list: List[str],
max_seq_length: int,
tokenizer: PreTrainedTokenizer,
cls_token_at_end = False,
cls_token = "[CLS]",
cls_token_segment_id = 1,
sep_token = "[SEP]",
sep_token_extra = False,
pad_on_left = False,
pad_token = 0,
pad_token_segment_id = 0,
pad_token_label_id = -100,
sequence_a_segment_id = 0,
sequence_b_segment_id = 1,
mask_padding_with_zero = True,
) -> List[InputFeatures]:
"""
Loads a list of input examples from read_better_examples_from_file into a list of `InputFeatures`
"""
label_map = {label: i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10_000 == 0:
logger.info("Writing example %d of %d", ex_index, len(examples))
tokens = []
label_ids = []
predicate_ids = []
predicate = []
predicate_label = ""
for word, label, pred_ind in zip(example.words, example.tags, example.predicate_indicator):
word_tokens = tokenizer.tokenize(word)
if pred_ind == 1:
predicate = word_tokens
predicate_label = label
if len(word_tokens) > 0:
tokens.extend(word_tokens)
# Use the real label id for the first token of the word, and padding ids for the remaining
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens)-1))
predicate_ids.extend([pred_ind] * len(word_tokens))
# Account for [CLS] and [SEP] with "- 2" and "- 3" for RoBERTa then additional for the predicate as the second sentence
special_tokens_count = tokenizer.num_special_tokens_to_add() + len(predicate) + 1
if len(tokens) > max_seq_length - special_tokens_count:
tokens = tokens[: (max_seq_length-special_tokens_count)]
label_ids = label_ids[: (max_seq_length - special_tokens_count)]
predicate_ids = predicate_ids[:(max_seq_length - special_tokens_count)]
tokens += [sep_token]
label_ids += [pad_token_label_id]
predicate_ids += [0]
if sep_token_extra:
tokens += [sep_token]
label_ids += [pad_token_label_id]
predicate_ids += [0]
segment_ids = [sequence_a_segment_id] * len(tokens)
tokens.extend(predicate)
label_ids.extend([label_map[predicate_label]] + [pad_token_label_id]*(len(predicate)-1)) # TODO what should the label id for the second sentence (the predicate) be?
predicate_ids.extend([0] * len(predicate)) # TODO or should it be 1?
segment_ids.extend([sequence_b_segment_id] * len(predicate))
tokens += [sep_token]
label_ids += [pad_token_label_id]
predicate_ids += [0]
segment_ids += [sequence_b_segment_id]
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
predicate_ids += [0]
else:
tokens = [cls_token] + tokens
label_ids = [pad_token_label_id] + label_ids
segment_ids = [cls_token_segment_id] + segment_ids
predicate_ids = [0] + predicate_ids
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length
padding_length = max_seq_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
predicate_ids = ([0] * padding_length) + predicate_ids
input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
label_ids = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
predicate_ids += [0] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(input_ids) == max_seq_length
assert len(predicate_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
assert len(label_ids) == max_seq_length
if ex_index % 1000 == 0:
logger.info("*** Example ***")
logger.info("guid: %s", example.guid)
logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
# logger.info("predicate_ids: %s", " ".join([str(x) for x in predicate_ids]))
logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))
if "token_type_ids" not in tokenizer.model_input_names:
segment_ids = None
# predicate_ids = None
features.append(
InputFeatures(
input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, labels=label_ids
)
)
return features
def get_labels(path: str) -> List[str]:
if path:
with open(path, "r") as f:
labels = f.read().splitlines()
if "O" not in labels:
labels = ["O"] + labels
return labels
else:
return ['O', 'B-A1', 'I-A1', 'B-A0', 'I-A0', 'B-V', 'I-V'] | 0.759448 | 0.439928 |
import logging
import os
import time
from typing import Dict, Optional
from eth_typing.evm import ChecksumAddress
from web3 import Web3
# Keys of the shared rate-limit state dict passed between calls.
CONFIG_KEY_WEB3_INTERVAL = "web3_interval"
CONFIG_KEY_WEB3_LAST_CALL = "web3_last_call"

logger = logging.getLogger("moonworm.deployment")

# Verbose logging is opt-in via the MOONWORM_VERBOSE environment variable.
VERBOSE = os.environ.get("MOONWORM_VERBOSE", "f").lower() in {
    "y",
    "yes",
    "t",
    "true",
    "1",
}
logger.setLevel(logging.INFO if VERBOSE else logging.WARNING)
def was_deployed_at_block(
    web3_client: Web3,
    contract_address: ChecksumAddress,
    block_number: int,
    config: Optional[Dict[str, float]],
) -> bool:
    """Return True if the address held contract code at the given block.

    When ``config`` carries rate-limit state, sleep long enough to respect
    the configured minimum interval between web3 calls, and record the time
    of this call back into ``config``.
    """
    if config is not None:
        min_interval = config.get(CONFIG_KEY_WEB3_INTERVAL)
        if min_interval is not None:
            previous_call = config.get(CONFIG_KEY_WEB3_LAST_CALL)
            now = time.time()
            if previous_call is not None and now < previous_call + min_interval:
                # Sleep past the window (plus one second of slack).
                time.sleep(previous_call + min_interval - now + 1)
    code = web3_client.eth.get_code(contract_address, block_identifier=block_number)
    if config is not None:
        config[CONFIG_KEY_WEB3_LAST_CALL] = time.time()
    # Empty bytecode means no contract was deployed at that block.
    return code.hex() not in ("0x", "0x0", "")
def find_deployment_block(
    web3_client: Web3,
    contract_address: ChecksumAddress,
    web3_interval: float,
) -> Optional[int]:
    """Binary-search for the block at which a contract was deployed.

    Note: We will assume no selfdestruct for now.
    This means that, if the address does not currently contain code, we will
    assume it never contained code and is therefore not a smart contract
    address, and None is returned.

    :param web3_client: Connected Web3 client used for ``eth.get_code`` calls.
    :param contract_address: Checksummed address to probe.
    :param web3_interval: Minimum seconds between web3 calls (rate limiting).
    :return: The first block at which code exists, or None.
    """
    log_prefix = f"find_deployment_block(web3_client, contract_address={contract_address}, web3_interval={web3_interval}) -- "
    logger.info(f"{log_prefix}Function invoked")
    config = {CONFIG_KEY_WEB3_INTERVAL: web3_interval}
    max_block = int(web3_client.eth.block_number)
    min_block = 0
    middle_block = int((min_block + max_block) / 2)
    was_deployed_at_max_block = was_deployed_at_block(
        web3_client, contract_address, max_block, config=config
    )
    if not was_deployed_at_max_block:
        # logger.warn is deprecated; use logger.warning.
        logger.warning(f"{log_prefix}Address is not a smart contract")
        return None
    # Memoize per-block results so each block is probed at most once.
    was_deployed: Dict[int, bool] = {
        max_block: was_deployed_at_max_block,
        min_block: was_deployed_at_block(
            web3_client, contract_address, min_block, config=config
        ),
        middle_block: was_deployed_at_block(
            web3_client, contract_address, middle_block, config=config
        ),
    }
    # Invariant: code exists at max_block; search for the earliest such block.
    while max_block - min_block >= 2:
        logger.info(
            f"{log_prefix}Binary search -- max_block={max_block}, min_block={min_block}, middle_block={middle_block}"
        )
        if not was_deployed[min_block] and not was_deployed[middle_block]:
            min_block = middle_block
        else:
            max_block = middle_block
        middle_block = int((min_block + max_block) / 2)
        was_deployed[middle_block] = was_deployed_at_block(
            web3_client, contract_address, middle_block, config=config
        )
    if was_deployed[min_block]:
        return min_block
    return max_block
import os
import time
from typing import Dict, Optional
from eth_typing.evm import ChecksumAddress
from web3 import Web3
CONFIG_KEY_WEB3_INTERVAL = "web3_interval"
CONFIG_KEY_WEB3_LAST_CALL = "web3_last_call"
logger = logging.getLogger("moonworm.deployment")
VERBOSE = os.environ.get("MOONWORM_VERBOSE", "f").lower() in {
"y",
"yes",
"t",
"true",
"1",
}
logger.setLevel(logging.INFO if VERBOSE else logging.WARNING)
def was_deployed_at_block(
web3_client: Web3,
contract_address: ChecksumAddress,
block_number: int,
config: Optional[Dict[str, float]],
) -> bool:
if config is not None:
interval = config.get(CONFIG_KEY_WEB3_INTERVAL)
if interval is not None:
last_call = config.get(CONFIG_KEY_WEB3_LAST_CALL)
current_time = time.time()
if last_call is not None and current_time < last_call + interval:
time.sleep(last_call + interval - current_time + 1)
code = web3_client.eth.get_code(contract_address, block_identifier=block_number)
if config is not None:
config[CONFIG_KEY_WEB3_LAST_CALL] = time.time()
code_hex = code.hex()
was_deployed = not (code_hex == "0x" or code_hex == "0x0" or code_hex == "")
return was_deployed
def find_deployment_block(
web3_client: Web3,
contract_address: ChecksumAddress,
web3_interval: float,
) -> Optional[int]:
"""
Note: We will assume no selfdestruct for now.
This means that, if the address does not currently contain code, we will assume it never contained
code and is therefore not a smart contract address.
"""
log_prefix = f"find_deployment_block(web3_client, contract_address={contract_address}, web3_interval={web3_interval}) -- "
logger.info(f"{log_prefix}Function invoked")
config = {CONFIG_KEY_WEB3_INTERVAL: web3_interval}
max_block = int(web3_client.eth.block_number)
min_block = 0
middle_block = int((min_block + max_block) / 2)
was_deployed_at_max_block = was_deployed_at_block(
web3_client, contract_address, max_block, config=config
)
if not was_deployed_at_max_block:
logger.warn(f"{log_prefix}Address is not a smart contract")
return None
was_deployed: Dict[int, bool] = {
max_block: was_deployed_at_max_block,
min_block: was_deployed_at_block(
web3_client, contract_address, min_block, config=config
),
middle_block: was_deployed_at_block(
web3_client, contract_address, middle_block, config=config
),
}
while max_block - min_block >= 2:
logger.info(
f"{log_prefix}Binary search -- max_block={max_block}, min_block={min_block}, middle_block={middle_block}"
)
if not was_deployed[min_block] and not was_deployed[middle_block]:
min_block = middle_block
else:
max_block = middle_block
middle_block = int((min_block + max_block) / 2)
was_deployed[middle_block] = was_deployed_at_block(
web3_client, contract_address, middle_block, config=config
)
if was_deployed[min_block]:
return min_block
return max_block | 0.804636 | 0.088505 |
import pandas as pd
import numpy as np
import plotly.graph_objects as go
from optbinning import OptimalBinning
import math
from plotly.subplots import make_subplots
import plotly.figure_factory as ff
import plotly.express as px
def DistributionPlot(Df, PlotVar):
    '''
    Plots the distribution of a given variable in a dataframe, split by the
    binary "Target" column.  (Removed the unused `Labels` local.)
    '''
    # Bin width scaled from the variable's standard deviation.
    BinSize = Df[PlotVar].describe().loc["std"] / 20
    fig = ff.create_distplot(
        hist_data = [Df[Df["Target"] == 0][PlotVar].values, Df[Df["Target"] == 1][PlotVar].values]
        , group_labels = [0, 1]
        , bin_size=BinSize
        , show_hist=True)
    fig.update_xaxes(
        zeroline = True
        , showgrid = True
        , title=PlotVar)
    fig.update_yaxes(
        zeroline=True
        , showgrid=True
        , title="Distribution")
    fig.update_layout(
        title = dict(text=str(PlotVar) + " Distribution"
                     , font=dict(color="Black", size=20))
        , font = dict(color="Black", size=10)
        , height = 700
        , width = 1100
        , legend_title='Target')
    fig.show(renderer='png', height=700, width=1100)
def Scatter(Df, PlotVar, Hue, Y, Title):
    '''
    Produces a scatter plot of data pulled from the specified dataframe,
    split by a binary population.
    PlotVar defines the independent variable;
    Hue defines the population by which to split the plot;
    Y is the dependent variable;
    Title is the title of the plot.
    '''
    fig = go.Figure()
    # One trace per hue value: hue == 1 in red first, then hue == 0 in blue.
    for hue_value, colour in ((1, "red"), (0, "blue")):
        subset = Df[Df[Hue] == hue_value]
        label = f"{Hue} = {hue_value}"
        fig.add_trace(
            go.Scatter(
                x=subset[PlotVar],
                y=subset[Y],
                legendgroup=label,
                name=label,
                mode="markers",
                line=dict(color=colour),
                marker=dict(size=10, opacity=0.1),
                showlegend=True,
            )
        )
    fig.update_xaxes(zeroline=True, showgrid=True, title=PlotVar)
    fig.update_yaxes(zeroline=True, showgrid=True, title=Y)
    fig.update_layout(title=dict(text=Title, font=dict(size=17)))
    fig.update_annotations(font=dict(size=14))
    fig.show(renderer="png", height=600, width=1000)
def Distribution(Df, Target, Variable):
    '''
    Plots the class distribution of `Variable` as stacked bars with the mean
    `Target` rate overlaid on a secondary axis.
    '''
    # assumes Df has a "Track" column usable as a row counter -- TODO confirm
    Graph = pd.pivot_table(Df, index=Variable, columns=Target, values="Track", aggfunc=len)
    # FIX: sort by the Target parameter, not the hard-coded "Target" column,
    # so the function works for any target column name.
    Graph1 = pd.pivot_table(Df, index=Variable, values=Target, aggfunc="mean").sort_values(by=Target, ascending=False)
    Graph = Graph.reindex(Graph1.index)
    fig = make_subplots(specs=[[{"secondary_y": True}]])
    # FIX: `type(Col) == "str"` compared a type to a string and was always
    # False; isinstance makes the intended title-casing of labels work.
    category_labels = [Col.title() if isinstance(Col, str) else Col for Col in Graph1.index.values]
    fig.add_trace(
        go.Scatter(
            y=Graph1[Target]*100
            , x=category_labels
            , name=Target
            , mode="lines"
            , showlegend= True)
        , secondary_y = True)
    fig.add_trace(
        go.Bar(
            y=Graph[0]
            , x=category_labels
            , name="Not"+str(Target)
            , showlegend= True)
        , secondary_y = False)
    fig.add_trace(
        go.Bar(
            y=Graph[1]
            , x=category_labels
            , name=Target
            , showlegend= True)
        , secondary_y = False)
    fig.update_xaxes(
        zeroline = True
        , showgrid = True
        , title = Variable
        , type="category")
    fig.update_yaxes(
        zeroline=True
        , showgrid=True
        , title="Frequency"
        , secondary_y = False)
    fig.update_yaxes(
        zeroline=True
        , showgrid=False
        , title=Target
        , ticksuffix="%"
        , range=[0, 100]
        , secondary_y = True)
    fig.update_layout(
        title = dict(text= str(Variable) +" Distribution vs. " + str(Target), font=dict(color="Black", size=20))
        , font = dict(color="Black", size=10)
        , height = 600
        , width = 900
        , barmode='stack')
    fig.update_annotations(
        font = dict(color="Black", size=14))
    fig.show(renderer="png", width=900, height=600)
class VariableBinning():
    '''
    Class to bin a variable according to a selection of metrics.

    :Attribute BinPlot: Plots Distribution vs. Event rate for a DataFrame with class Count columns and Event rate column.
    :Attribute BinVariable: Fits OptBinning algorithm and prints summary plot along with BinPlot (For visualising)
    :Attribute Transform: Fits and transforms variable, returns transformed series.
    '''
    def __init__(self, Df, Variable, Target, DType = "numerical"):
        # assumes Df carries a "Track" column used as a row counter -- TODO confirm
        self.Temp = Df.copy()[[Variable, Target, "Track"]]
        self.Variable = Variable
        self.Target = Target
        self.Mod = None
        self.DType = DType

    def BinPlot(self, Graph):
        '''
        :Param Graph: Dataframe containing class count columns and an "EventRate" column.
        '''
        fig = make_subplots(specs=[[{"secondary_y": True}]])
        # FIX: `type(Col) == "str"` compared a type with a string literal and
        # was always False, so labels were never title-cased; isinstance fixes
        # the intent.  Also computed once instead of per trace.
        category_labels = [Col.title() if isinstance(Col, str) else Col for Col in Graph.index.values]
        fig.add_trace(
            go.Scatter(
                y=Graph["EventRate"]*100
                , x=category_labels
                , name=self.Target
                , mode="lines+markers"
                , showlegend= True)
            , secondary_y = True)
        # Renamed the loop variable (was `Col`, shadowing the comprehension
        # variable above) for clarity.
        for ClassCol in [str(self.Target)+" == 0", str(self.Target)+" == 1"]:
            fig.add_trace(
                go.Bar(
                    y=Graph[ClassCol]
                    , x=category_labels
                    , name=ClassCol
                    , showlegend= True)
                , secondary_y = False)
        fig.update_xaxes(
            zeroline = True
            , showgrid = True
            , title = self.Variable
            , type='category' if self.DType == "categorical" else "-")
        fig.update_yaxes(
            zeroline=True
            , showgrid=True
            , title="Count"
            , secondary_y = False)
        fig.update_yaxes(
            zeroline=True
            , showgrid=False
            , title=self.Target
            , ticksuffix="%"
            , range=[0, 100]
            , secondary_y = True)
        fig.update_layout(
            title = dict(text= str(self.Variable) +" Distribution vs. " + str(self.Target), font=dict(color="Black", size=20))
            , font = dict(color="Black", size=10)
            , height = 600
            , width = 900
            , barmode='stack')
        fig.show()

    def BinVariable(self, Trend = "auto_asc_desc", Method = "bins", ShowTable = False):
        '''
        :Param Trend: Default = "auto_asc_desc", sets the assumed trend for binning.
        :Param Method: Default = "bins", sets the desired transformation method.
        :Param ShowTable: Default = False, whether to print the solver status and binning analysis.
        '''
        self.Mod = OptimalBinning(name=self.Variable, dtype=self.DType, solver="cp", max_n_prebins=100, monotonic_trend=Trend,
                                  min_prebin_size=0.01, time_limit=30)
        # Fit and record
        self.Mod.fit(self.Temp[self.Variable], self.Temp[self.Target])
        BinningTable = self.Mod.binning_table
        Table = BinningTable.build()
        BinsValues = self.Mod.splits
        if ShowTable:
            # Print status and summary
            print(self.Mod.status)
            print(BinningTable.analysis())
        self.Temp["Transformed"] = self.Mod.transform(self.Temp[self.Variable], metric = Method)
        if ((self.DType == "numerical") & (Method != "woe")):
            # Bin labels come back as interval strings like "[a, b)"; map each
            # to a representative numeric edge (upper edge, or lower+1 for the
            # open-ended last bin).
            self.Temp["Transformed"] = self.Temp["Transformed"].apply(lambda s: tuple(float(x) for x in s.replace('[', '').replace(')', '').split(',')))
            self.Temp["Transformed"] = self.Temp["Transformed"].apply(lambda x: x[0] + 1 if math.isinf(x[1]) else x[1])
        Graph = pd.pivot_table(self.Temp, index="Transformed", columns=self.Target, values = "Track", aggfunc="count")
        Graph = Graph.rename({0: str(self.Target)+" == 0", 1: str(self.Target)+" == 1"}, axis=1)
        Graph1 = pd.pivot_table(self.Temp, index="Transformed", values=self.Target, aggfunc="mean")
        Graph["EventRate"] = Graph1[self.Target]
        self.BinPlot(Graph)

    def Transform(self, Df = None, Method = 'woe'):
        '''
        :Param Df: Default = None; when given, transform this dataframe's column and return it. When None, transform the stored training frame, plot it, and return the transformed series.
        :Param Method: Default = "woe", sets the desired transformation metric.
        '''
        if Df is not None:
            DataFrame = Df.copy()
            DataFrame = DataFrame[[self.Variable]]
            DataFrame["Transformed"] = self.Mod.transform(DataFrame[self.Variable], metric = Method)
            return DataFrame["Transformed"]
        else:
            self.Temp["Transformed"] = self.Mod.transform(self.Temp[self.Variable], metric = Method)
            Graph = pd.pivot_table(self.Temp, index="Transformed", columns=self.Target, values = "Track", aggfunc="count")
            Graph = Graph.rename({0: str(self.Target)+" == 0", 1: str(self.Target)+" == 1"}, axis=1)
            Graph1 = pd.pivot_table(self.Temp, index="Transformed", values=self.Target, aggfunc="mean").sort_values(by=self.Target, ascending=False)
            Graph = Graph.reindex(Graph1.index)
            Graph["EventRate"] = Graph1[self.Target]
            self.BinPlot(Graph)
            return self.Temp["Transformed"]
def Correlation(Df, PlotVars, Title):
'''
Provides a correlation matrix heatmap for data pulled from a specified dataframe
PlotVars define all features in question
Title is title of the plot
'''
Correlations = Df[PlotVars].corr()
fig = go.Figure()
fig.add_trace(
go.Heatmap(
z=Correlations
, x=Correlations.index
, y=Correlations.index
, zmax=1
, zmin=-1
, hoverongaps = False
, colorscale=[(0, "blue"), (0.5, "white"), (1, "red")]))
fig.update_layout(
title = dict(text=Title, font=dict(color="Black", size=20))
, font = dict(color="Black", size=10)
, height = 1000
, width = 1000
, legend_title='Period')
fig.update_annotations(
font = dict(color="Black", size=14))
fig.show(renderer="png", height=900, width=900)
def BarPlot(DataFrame, Title):
fig = go.Figure()
for Column in DataFrame.columns.values:
fig.add_trace(
go.Bar(
y=DataFrame[Column]
, x=DataFrame.columns.values
, name=str(Column)
, showlegend= True))
fig.update_xaxes(
zeroline = True
, showgrid = True
, title = "Features"
, showticklabels=False)
fig.update_yaxes(
zeroline=True
, showgrid=True
, title="Importance")
fig.update_layout(
title = dict(text= Title, font=dict(color="Black", size=20))
, font = dict(color="Black", size=10)
, height = 600
, width = 900)
fig.update_annotations(
font = dict(color="Black", size=14))
fig.show(renderer="png", width=900, height=600)
def InformationValue(Df, Variable, Target):
'''
Computes the information for a given dataframe feature w.r.t a target/dependent variable.
'''
Pivot = pd.pivot_table(Df, index=Variable, values="Track", columns=Target, aggfunc="count").reset_index()
Pivot = Pivot.rename({0:"Flops", 1:"Hits"}, axis=1)
Pivot["Flops"] = Pivot["Flops"] / Pivot["Flops"].sum()
Pivot["Hits"] = Pivot["Hits"] / Pivot["Hits"].sum()
Pivot["IV"] = Pivot["Flops"] - Pivot["Hits"]
Pivot["IV"] = Pivot["IV"]*Pivot[Variable]
return Pivot["IV"].sum() | SpotifyFunctions.py | import pandas as pd
import numpy as np
import plotly.graph_objects as go
from optbinning import OptimalBinning
import math
from plotly.subplots import make_subplots
import plotly.figure_factory as ff
import plotly.express as px
def DistributionPlot(Df, PlotVar):
'''
Plots the distribution of a given variable in a dataframe
'''
Labels = [i for i in range(0, 100, 10)]
BinSize = Df[PlotVar].describe().loc["std"] / 20
fig = ff.create_distplot(
hist_data = [Df[Df["Target"] == 0][PlotVar].values, Df[Df["Target"] == 1][PlotVar].values]
, group_labels = [0, 1]
, bin_size=BinSize
, show_hist=True)
fig.update_xaxes(
zeroline = True
, showgrid = True
, title=PlotVar)
fig.update_yaxes(
zeroline=True
, showgrid=True
, title="Distribution")
fig.update_layout(
title = dict(text=str(PlotVar) + " Distribution"
, font=dict(color="Black", size=20))
, font = dict(color="Black", size=10)
, height = 700
, width = 1100
, legend_title='Target')
fig.show(renderer='png', height=700, width=1100)
def Scatter(Df, PlotVar, Hue, Y, Title):
'''
Produces a plot of data pulled from specified dataframe split by a certain binary population
PlotVars defines the independent variable
Hue defines the population for which to split the plots
Y is the dependent variable
Title is title of the plot
'''
fig = go.Figure()
fig.add_trace(
go.Scatter(
x = Df[Df[Hue] == 1][PlotVar]
, y=Df[Df[Hue] == 1][Y]
, legendgroup=Hue + " = 1"
, name=Hue + " = 1"
, mode='markers'
, line=dict(color='red')
, marker=dict(size=10, opacity=0.1)
, showlegend= True))
fig.add_trace(
go.Scatter(
x = Df[Df[Hue] == 0][PlotVar]
, y=Df[Df[Hue] == 0][Y]
, legendgroup=Hue + " = 0"
, name=Hue + " = 0"
, mode='markers'
, line=dict(color='blue')
, marker=dict(size=10, opacity=0.1)
, showlegend= True))
fig.update_xaxes(
zeroline = True
, showgrid = True
, title = PlotVar
)
fig.update_yaxes(
zeroline=True
, showgrid=True
, title = Y)
fig.update_layout(
title = dict(text=Title, font=dict(size=17)))
fig.update_annotations(
font = dict(size=14))
fig.show(renderer="png", height=600, width=1000)
def Distribution(Df, Target, Variable):
Graph = pd.pivot_table(Df, index=Variable, columns=Target, values="Track", aggfunc=len)
Graph1 = pd.pivot_table(Df, index=Variable, values=Target, aggfunc="mean").sort_values(by="Target", ascending=False)
Graph = Graph.reindex(Graph1.index)
fig = make_subplots(specs=[[{"secondary_y": True}]])
fig.add_trace(
go.Scatter(
y=Graph1[Target]*100
, x=[Col.title() if type(Col) == "str" else Col for Col in Graph1.index.values]
, name=Target
, mode="lines"
, showlegend= True)
, secondary_y = True)
fig.add_trace(
go.Bar(
y=Graph[0]
, x=[Col.title() if type(Col) == "str" else Col for Col in Graph1.index.values]
, name="Not"+str(Target)
, showlegend= True)
, secondary_y = False)
fig.add_trace(
go.Bar(
y=Graph[1]
, x=[Col.title() if type(Col) == "str" else Col for Col in Graph1.index.values]
, name=Target
, showlegend= True)
, secondary_y = False)
fig.update_xaxes(
zeroline = True
, showgrid = True
, title = Variable
, type="category")
fig.update_yaxes(
zeroline=True
, showgrid=True
, title="Frequency"
, secondary_y = False)
fig.update_yaxes(
zeroline=True
, showgrid=False
, title=Target
, ticksuffix="%"
, range=[0, 100]
, secondary_y = True)
fig.update_layout(
title = dict(text= str(Variable) +" Distribution vs. " + str(Target), font=dict(color="Black", size=20))
, font = dict(color="Black", size=10)
, height = 600
, width = 900
, barmode='stack')
fig.update_annotations(
font = dict(color="Black", size=14))
fig.show(renderer="png", width=900, height=600)
class VariableBinning():
'''
Class to bin a variable according to a selection of metrics.
:Attribute BinPlot: Plots Distribution vs. Event rate for a DataFrame with class Count columns and Event rate column.
:Attribute BinVariable: Fits OptBinning algorithm and prints summary plot along with BinPlot (For visualising)
:Attribute Transform: Fits and transforms variable, returns transformed series.
'''
def __init__(self, Df, Variable, Target, DType = "numerical"):
self.Temp = Df.copy()[[Variable, Target, "Track"]]
self.Variable = Variable
self.Target = Target
self.Mod = None
self.DType = DType
def BinPlot(self, Graph):
'''
:Param Graph: Dataframe containing Class count columns and event rate column
'''
fig = make_subplots(specs=[[{"secondary_y": True}]])
fig.add_trace(
go.Scatter(
y=Graph["EventRate"]*100
, x=[Col.title() if type(Col) == "str" else Col for Col in Graph.index.values]
, name=self.Target
, mode="lines+markers"
, showlegend= True)
, secondary_y = True)
for Col in [str(self.Target)+" == 0", str(self.Target)+" == 1"]:
fig.add_trace(
go.Bar(
y=Graph[Col]
, x=[Col.title() if type(Col) == "str" else Col for Col in Graph.index.values]
, name=Col
, showlegend= True)
, secondary_y = False)
fig.update_xaxes(
zeroline = True
, showgrid = True
, title = self.Variable
, type='category' if self.DType == "categorical" else "-")
fig.update_yaxes(
zeroline=True
, showgrid=True
, title="Count"
, secondary_y = False)
fig.update_yaxes(
zeroline=True
, showgrid=False
, title=self.Target
, ticksuffix="%"
, range=[0, 100]
, secondary_y = True)
fig.update_layout(
title = dict(text= str(self.Variable) +" Distribution vs. " + str(self.Target), font=dict(color="Black", size=20))
, font = dict(color="Black", size=10)
, height = 600
, width = 900
, barmode='stack')
fig.show()
def BinVariable(self, Trend = "auto_asc_desc", Method = "bins", ShowTable = False):
'''
:Param Trend: Default = "auto_asc_desc", sets the assumed trend for binning
:Param Method: Default = "bins", sets the desired transformation method.
'''
self.Mod = OptimalBinning(name=self.Variable, dtype=self.DType, solver="cp", max_n_prebins=100, monotonic_trend=Trend,
min_prebin_size=0.01, time_limit=30)
#Fit and record
self.Mod.fit(self.Temp[self.Variable], self.Temp[self.Target])
BinningTable = self.Mod.binning_table
Table = BinningTable.build()
BinsValues = self.Mod.splits
if ShowTable == True:
#Print Status and Summary
print(self.Mod.status)
print(BinningTable.analysis())
self.Temp["Transformed"] = self.Mod.transform(self.Temp[self.Variable], metric = Method)
if ((self.DType == "numerical") & (Method != "woe")):
self.Temp["Transformed"] = self.Temp["Transformed"].apply(lambda s: tuple(float(x) for x in s.replace('[', '').replace(')', '').split(',')))
self.Temp["Transformed"] = self.Temp["Transformed"].apply(lambda x: x[0] + 1 if math.isinf(x[1]) else x[1])
Graph = pd.pivot_table(self.Temp, index="Transformed", columns=self.Target, values = "Track", aggfunc="count")
Graph = Graph.rename({0: str(self.Target)+" == 0", 1: str(self.Target)+" == 1"}, axis=1)
Graph1 = pd.pivot_table(self.Temp, index="Transformed", values=self.Target, aggfunc="mean")
Graph["EventRate"] = Graph1[self.Target]
self.BinPlot(Graph)
def Transform(self, Df = None, Method = 'woe'):
'''
:Param Trend: Default = "auto_asc_desc", sets the assumed trend for binning
:Param Method: Default = "bins", sets the desired transformation method.
'''
if Df is not None:
DataFrame = Df.copy()
DataFrame = DataFrame[[self.Variable]]
DataFrame["Transformed"] = self.Mod.transform(DataFrame[self.Variable], metric = Method)
return DataFrame["Transformed"]
else:
self.Temp["Transformed"] = self.Mod.transform(self.Temp[self.Variable], metric = Method)
Graph = pd.pivot_table(self.Temp, index="Transformed", columns=self.Target, values = "Track", aggfunc="count")
Graph = Graph.rename({0: str(self.Target)+" == 0", 1: str(self.Target)+" == 1"}, axis=1)
Graph1 = pd.pivot_table(self.Temp, index="Transformed", values=self.Target, aggfunc="mean").sort_values(by=self.Target, ascending=False)
Graph = Graph.reindex(Graph1.index)
Graph["EventRate"] = Graph1[self.Target]
self.BinPlot(Graph)
return self.Temp["Transformed"]
def Correlation(Df, PlotVars, Title):
'''
Provides a correlation matrix heatmap for data pulled from a specified dataframe
PlotVars define all features in question
Title is title of the plot
'''
Correlations = Df[PlotVars].corr()
fig = go.Figure()
fig.add_trace(
go.Heatmap(
z=Correlations
, x=Correlations.index
, y=Correlations.index
, zmax=1
, zmin=-1
, hoverongaps = False
, colorscale=[(0, "blue"), (0.5, "white"), (1, "red")]))
fig.update_layout(
title = dict(text=Title, font=dict(color="Black", size=20))
, font = dict(color="Black", size=10)
, height = 1000
, width = 1000
, legend_title='Period')
fig.update_annotations(
font = dict(color="Black", size=14))
fig.show(renderer="png", height=900, width=900)
def BarPlot(DataFrame, Title):
fig = go.Figure()
for Column in DataFrame.columns.values:
fig.add_trace(
go.Bar(
y=DataFrame[Column]
, x=DataFrame.columns.values
, name=str(Column)
, showlegend= True))
fig.update_xaxes(
zeroline = True
, showgrid = True
, title = "Features"
, showticklabels=False)
fig.update_yaxes(
zeroline=True
, showgrid=True
, title="Importance")
fig.update_layout(
title = dict(text= Title, font=dict(color="Black", size=20))
, font = dict(color="Black", size=10)
, height = 600
, width = 900)
fig.update_annotations(
font = dict(color="Black", size=14))
fig.show(renderer="png", width=900, height=600)
def InformationValue(Df, Variable, Target):
'''
Computes the information for a given dataframe feature w.r.t a target/dependent variable.
'''
Pivot = pd.pivot_table(Df, index=Variable, values="Track", columns=Target, aggfunc="count").reset_index()
Pivot = Pivot.rename({0:"Flops", 1:"Hits"}, axis=1)
Pivot["Flops"] = Pivot["Flops"] / Pivot["Flops"].sum()
Pivot["Hits"] = Pivot["Hits"] / Pivot["Hits"].sum()
Pivot["IV"] = Pivot["Flops"] - Pivot["Hits"]
Pivot["IV"] = Pivot["IV"]*Pivot[Variable]
return Pivot["IV"].sum() | 0.703142 | 0.486941 |
from time import sleep
import numpy as np
from json import loads
from .constant import CLOSE, SIZELAYERONE
from .qfunction import Qfunction
from .state import State
from .toolbox import Toolbox
from .dataset import Dataset
from .communication import Communication
import torch
__all__ = ["Agent"]
class Agent():
def __init__(self, dataset: Dataset, state: State, toolbox: Toolbox, qfunction: Qfunction, communication: Communication, myId: int, classType: str):
self.dataset: Dataset = dataset
self.state: State = state
self.toolbox: Toolbox = toolbox
self.qfunction: Qfunction = qfunction
self.communication: Communication = communication
self.myId: int = myId
self.queue: int = 0
self.forbidenQueue: int = 0
self.otherAgents: list = []
self.forbidenAgents: list = []
self.classType: str = classType
self.nbIteration: int = 10
def _setAgents(self, agents: list):
newAgents: list = list(self.otherAgents)
nbOtherAgents: int = 0
for agentId in agents:
newAgents.append({agentId: self.queue})
nbOtherAgents += 1
self.queue += 1
self.state._setNbOtherAgents(nbOtherAgents)
self.otherAgents = list(newAgents)
def _setForbidenAgents(self, forbidenIds:list):
otherAgents:list = list(self.otherAgents)
newForbidenAgents: list = list(self.forbidenAgents)
for _id in forbidenIds:
for dictData in otherAgents:
for key in dictData:
if key == _id:
newForbidenAgents.append({_id: self.forbidenQueue})
self.forbidenQueue += 1
self.forbidenAgents = list(newForbidenAgents)
def _managementCycleLife(self, timeSleep: float):
print(f"Stream initialization, Ready to listen on \"{self.communication.managerTopic}\".\nSend information to agent on \"{self.communication.clusterTopic}\".\n")
self.communication._broadcastInit(self.otherAgents)
i = 0
while True:
fromWho = -2
msg = self.communication.consumer.poll(1.0)
if msg is None:
continue
if msg.error():
print("Consumer error: {}".format(msg.error()))
continue
jsonData = loads(msg.value().decode('utf-8'))
#print(jsonData)
#self.toolbox._progbar(i, self.nbIteration, 30)
print(i)
sleep(timeSleep)
fromWho = self.communication._managementDataSending(jsonData)
if i > self.nbIteration and self.nbIteration != -1:
print("KILL INFLUENCER MANAGER")
self.communication.consumer.close()
self.communication._killInfluencer(fromWho)
break
if (self.communication._killConsume(jsonData) == CLOSE):
self.communication.consumer.close()
print("KILL FOLLOWER MANAGER")
break
i += 1
def _managementCycleLifeDemo(self, timeSleep: float) :
print(f"Stream initialization, Ready to listen on \"{self.communication.managerTopic}\".\nSend information to agent on \"{self.communication.clusterTopic}\".\n")
self.communication._broadcastInit(self.otherAgents)
i = 0
while True:
fromWho = -2
msg = self.communication.consumer.poll(1.0)
if msg is None:
continue
if msg.error():
print("Consumer error: {}".format(msg.error()))
continue
jsonData = loads(msg.value().decode('utf-8'))
#print(jsonData)
#self.toolbox._progbar(i, self.nbIteration, 30)
print(i)
sleep(timeSleep)
fromWho = self.communication._managementDataSending(jsonData)
#self.communication._sendToDisplay(jsonData, i, self.nbIteration)
if i > self.nbIteration and self.nbIteration != -1:
print("KILL INFLUENCER MANAGER")
self.communication.consumer.close()
self.communication._killInfluencer(fromWho)
self.communication._killDisplay(self.myId)
break
if (self.communication._killConsume(jsonData) == CLOSE):
self.communication.consumer.close()
self.communication._killDisplay(self.myId)
print("KILL FOLLOWER MANAGER")
break
i += 1
def _followerCycleLife(self):
print(f"Stream initialization, Ready to listen on \"{self.communication.clusterTopic}\".\nSend information to manager on \"{self.communication.managerTopic}\".\n")
while True:
msg = self.communication.consumer.poll(1.0)
if msg is None:
continue
if msg.error():
print("Consumer error: {}".format(msg.error()))
continue
jsonData = loads(msg.value().decode('utf-8'))
if (self.communication._killConsume(jsonData) == CLOSE):
self.communication.consumer.close()
break
self.communication._updateEnv(jsonData, self.otherAgents, self.state, self.forbidenAgents)
print(self.state.ownState)
self.communication._checkFromAndSendManager(jsonData, self.state)
if ((np.array_equal(self.state.saveCars, self.state.nCars) == False) or (np.array_equal(self.state.savePedestrian, self.state.nPedestrian) == False)) :
self.state._getGlobalScore()
self.communication._broadcastMyState(self.otherAgents, self.state, self.forbidenAgents)
else:
self.state._getGlobalScore()
self.state._setSave([self.state._getnCars()], [self.state._getnPedestrian()], list(self.state._getLight()))
def _initDataset(self, _type: str, eps: float = 1.0):
print(f"Stream initialization, Ready to listen on \"{self.communication.clusterTopic}\".\nSend information to manager on \"{self.communication.managerTopic}\".\n")
while True:
msg = self.communication.consumer.poll(1.0)
if msg is None:
continue
if msg.error():
print("Consumer error: {}".format(msg.error()))
continue
jsonData = loads(msg.value().decode('utf-8'))
if (self.communication._killConsume(jsonData) == CLOSE):
self.communication.consumer.close()
self.communication._killManager(self.forbidenAgents)
self.communication._killFollower(self.forbidenAgents)
break
if _type == "demo":
self.dataset._influencerDataProcess(jsonData, self.otherAgents, self.forbidenAgents, eps)
else:
eps = self.dataset._influencerDataProcess(jsonData, self.otherAgents, self.forbidenAgents, eps)
def _start(self, timeSleep: float = 1.0):
if (self.classType == "influencer"):
self._initDataset("train", 0.9)
return
if (self.classType == "follower"):
self._followerCycleLife()
return
if (self.classType == "manager"):
self._managementCycleLife(timeSleep)
return
print(f"Error() : Unknow classType : {self.classType}")
def _startDemo(self, timeSleep: float = 1.0):
if (self.classType == "influencer"):
self._initDataset("demo", 0.1)
return
if (self.classType == "follower"):
self._followerCycleLife()
return
if (self.classType == "manager"):
self._managementCycleLifeDemo(timeSleep)
return
print(f"Error() : Unknow classType : {self.classType}")
def _save(self):
torch.save(self.qfunction.state_dict(), "./saves/save_" + self.classType + str(self.myId))
def _restore(self, path: str):
print(f"Load State : {path}")
self.qfunction.load_state_dict(torch.load(path)) | regularflow/utils_regularflow/agent.py | from time import sleep
import numpy as np
from json import loads
from .constant import CLOSE, SIZELAYERONE
from .qfunction import Qfunction
from .state import State
from .toolbox import Toolbox
from .dataset import Dataset
from .communication import Communication
import torch
__all__ = ["Agent"]
class Agent():
def __init__(self, dataset: Dataset, state: State, toolbox: Toolbox, qfunction: Qfunction, communication: Communication, myId: int, classType: str):
self.dataset: Dataset = dataset
self.state: State = state
self.toolbox: Toolbox = toolbox
self.qfunction: Qfunction = qfunction
self.communication: Communication = communication
self.myId: int = myId
self.queue: int = 0
self.forbidenQueue: int = 0
self.otherAgents: list = []
self.forbidenAgents: list = []
self.classType: str = classType
self.nbIteration: int = 10
def _setAgents(self, agents: list):
newAgents: list = list(self.otherAgents)
nbOtherAgents: int = 0
for agentId in agents:
newAgents.append({agentId: self.queue})
nbOtherAgents += 1
self.queue += 1
self.state._setNbOtherAgents(nbOtherAgents)
self.otherAgents = list(newAgents)
def _setForbidenAgents(self, forbidenIds:list):
otherAgents:list = list(self.otherAgents)
newForbidenAgents: list = list(self.forbidenAgents)
for _id in forbidenIds:
for dictData in otherAgents:
for key in dictData:
if key == _id:
newForbidenAgents.append({_id: self.forbidenQueue})
self.forbidenQueue += 1
self.forbidenAgents = list(newForbidenAgents)
def _managementCycleLife(self, timeSleep: float):
print(f"Stream initialization, Ready to listen on \"{self.communication.managerTopic}\".\nSend information to agent on \"{self.communication.clusterTopic}\".\n")
self.communication._broadcastInit(self.otherAgents)
i = 0
while True:
fromWho = -2
msg = self.communication.consumer.poll(1.0)
if msg is None:
continue
if msg.error():
print("Consumer error: {}".format(msg.error()))
continue
jsonData = loads(msg.value().decode('utf-8'))
#print(jsonData)
#self.toolbox._progbar(i, self.nbIteration, 30)
print(i)
sleep(timeSleep)
fromWho = self.communication._managementDataSending(jsonData)
if i > self.nbIteration and self.nbIteration != -1:
print("KILL INFLUENCER MANAGER")
self.communication.consumer.close()
self.communication._killInfluencer(fromWho)
break
if (self.communication._killConsume(jsonData) == CLOSE):
self.communication.consumer.close()
print("KILL FOLLOWER MANAGER")
break
i += 1
def _managementCycleLifeDemo(self, timeSleep: float) :
print(f"Stream initialization, Ready to listen on \"{self.communication.managerTopic}\".\nSend information to agent on \"{self.communication.clusterTopic}\".\n")
self.communication._broadcastInit(self.otherAgents)
i = 0
while True:
fromWho = -2
msg = self.communication.consumer.poll(1.0)
if msg is None:
continue
if msg.error():
print("Consumer error: {}".format(msg.error()))
continue
jsonData = loads(msg.value().decode('utf-8'))
#print(jsonData)
#self.toolbox._progbar(i, self.nbIteration, 30)
print(i)
sleep(timeSleep)
fromWho = self.communication._managementDataSending(jsonData)
#self.communication._sendToDisplay(jsonData, i, self.nbIteration)
if i > self.nbIteration and self.nbIteration != -1:
print("KILL INFLUENCER MANAGER")
self.communication.consumer.close()
self.communication._killInfluencer(fromWho)
self.communication._killDisplay(self.myId)
break
if (self.communication._killConsume(jsonData) == CLOSE):
self.communication.consumer.close()
self.communication._killDisplay(self.myId)
print("KILL FOLLOWER MANAGER")
break
i += 1
def _followerCycleLife(self):
print(f"Stream initialization, Ready to listen on \"{self.communication.clusterTopic}\".\nSend information to manager on \"{self.communication.managerTopic}\".\n")
while True:
msg = self.communication.consumer.poll(1.0)
if msg is None:
continue
if msg.error():
print("Consumer error: {}".format(msg.error()))
continue
jsonData = loads(msg.value().decode('utf-8'))
if (self.communication._killConsume(jsonData) == CLOSE):
self.communication.consumer.close()
break
self.communication._updateEnv(jsonData, self.otherAgents, self.state, self.forbidenAgents)
print(self.state.ownState)
self.communication._checkFromAndSendManager(jsonData, self.state)
if ((np.array_equal(self.state.saveCars, self.state.nCars) == False) or (np.array_equal(self.state.savePedestrian, self.state.nPedestrian) == False)) :
self.state._getGlobalScore()
self.communication._broadcastMyState(self.otherAgents, self.state, self.forbidenAgents)
else:
self.state._getGlobalScore()
self.state._setSave([self.state._getnCars()], [self.state._getnPedestrian()], list(self.state._getLight()))
def _initDataset(self, _type: str, eps: float = 1.0):
print(f"Stream initialization, Ready to listen on \"{self.communication.clusterTopic}\".\nSend information to manager on \"{self.communication.managerTopic}\".\n")
while True:
msg = self.communication.consumer.poll(1.0)
if msg is None:
continue
if msg.error():
print("Consumer error: {}".format(msg.error()))
continue
jsonData = loads(msg.value().decode('utf-8'))
if (self.communication._killConsume(jsonData) == CLOSE):
self.communication.consumer.close()
self.communication._killManager(self.forbidenAgents)
self.communication._killFollower(self.forbidenAgents)
break
if _type == "demo":
self.dataset._influencerDataProcess(jsonData, self.otherAgents, self.forbidenAgents, eps)
else:
eps = self.dataset._influencerDataProcess(jsonData, self.otherAgents, self.forbidenAgents, eps)
def _start(self, timeSleep: float = 1.0):
if (self.classType == "influencer"):
self._initDataset("train", 0.9)
return
if (self.classType == "follower"):
self._followerCycleLife()
return
if (self.classType == "manager"):
self._managementCycleLife(timeSleep)
return
print(f"Error() : Unknow classType : {self.classType}")
def _startDemo(self, timeSleep: float = 1.0):
if (self.classType == "influencer"):
self._initDataset("demo", 0.1)
return
if (self.classType == "follower"):
self._followerCycleLife()
return
if (self.classType == "manager"):
self._managementCycleLifeDemo(timeSleep)
return
print(f"Error() : Unknow classType : {self.classType}")
def _save(self):
torch.save(self.qfunction.state_dict(), "./saves/save_" + self.classType + str(self.myId))
def _restore(self, path: str):
print(f"Load State : {path}")
self.qfunction.load_state_dict(torch.load(path)) | 0.249539 | 0.102484 |
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Plantain',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64)),
('descr', models.CharField(max_length=120)),
('price', models.IntegerField()),
],
),
migrations.CreateModel(
name='Potatoe',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64)),
('descr', models.CharField(max_length=120)),
('price', models.IntegerField()),
],
),
migrations.CreateModel(
name='Purchased',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user', models.CharField(max_length=25, null=True)),
('products', models.CharField(max_length=600, null=True)),
('total', models.CharField(max_length=6, null=True)),
('deliverTo', models.CharField(max_length=60, null=True)),
('address', models.CharField(max_length=150, null=True)),
('phone', models.CharField(max_length=15, null=True)),
('statusValue', models.CharField(max_length=15, null=True)),
('time', models.CharField(max_length=100, null=True)),
('comment', models.CharField(max_length=150, null=True)),
],
),
migrations.CreateModel(
name='Yam',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64)),
('descr', models.CharField(max_length=120)),
('price', models.IntegerField()),
],
),
] | debolemix/dbolemix/bolemix/migrations/0001_initial.py |
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Plantain',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64)),
('descr', models.CharField(max_length=120)),
('price', models.IntegerField()),
],
),
migrations.CreateModel(
name='Potatoe',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64)),
('descr', models.CharField(max_length=120)),
('price', models.IntegerField()),
],
),
migrations.CreateModel(
name='Purchased',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user', models.CharField(max_length=25, null=True)),
('products', models.CharField(max_length=600, null=True)),
('total', models.CharField(max_length=6, null=True)),
('deliverTo', models.CharField(max_length=60, null=True)),
('address', models.CharField(max_length=150, null=True)),
('phone', models.CharField(max_length=15, null=True)),
('statusValue', models.CharField(max_length=15, null=True)),
('time', models.CharField(max_length=100, null=True)),
('comment', models.CharField(max_length=150, null=True)),
],
),
migrations.CreateModel(
name='Yam',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64)),
('descr', models.CharField(max_length=120)),
('price', models.IntegerField()),
],
),
] | 0.661267 | 0.197425 |
from __future__ import division
import math
import random
import collections
import numpy as np
import scipy.stats as ss
__all__ = [
'DiscreteDist',
'TruncatedZipfDist',
'means_confidence_interval',
'proportions_confidence_interval',
'cdf',
'pdf',
]
class DiscreteDist(object):
"""Implements a discrete distribution with finite population.
The support must be a finite discrete set of contiguous integers
{1, ..., N}. This definition of discrete distribution.
"""
def __init__(self, pdf, seed=None):
"""
Constructor
Parameters
----------
pdf : array-like
The probability density function
seed : any hashable type (optional)
The seed to be used for random number generation
"""
if np.abs(sum(pdf) - 1.0) > 0.001:
raise ValueError('The sum of pdf values must be equal to 1')
random.seed(seed)
self._pdf = np.asarray(pdf)
self._cdf = np.cumsum(self._pdf)
# set last element of the CDF to 1.0 to avoid rounding errors
self._cdf[-1] = 1.0
def __len__(self):
"""Return the cardinality of the support
Returns
-------
len : int
The cardinality of the support
"""
return len(self._pdf)
@property
def pdf(self):
"""
Return the Probability Density Function (PDF)
Returns
-------
pdf : Numpy array
Array representing the probability density function of the
distribution
"""
return self._pdf
@property
def cdf(self):
"""
Return the Cumulative Density Function (CDF)
Returns
-------
cdf : Numpy array
Array representing cdf
"""
return self._cdf
def rv(self):
"""Get rand value from the distribution
"""
rv = random.random()
# This operation performs binary search over the CDF to return the
# random value. Worst case time complexity is O(log2(n))
return int(np.searchsorted(self._cdf, rv) + 1)
class TruncatedZipfDist(DiscreteDist):
"""Implements a truncated Zipf distribution, i.e. a Zipf distribution with
a finite population, which can hence take values of alpha > 0.
"""
def __init__(self, alpha=1.0, n=1000, seed=None):
"""Constructor
Parameters
----------
alpha : float
The value of the alpha parameter (it must be positive)
n : int
The size of population
seed : any hashable type, optional
The seed to be used for random number generation
"""
# Validate parameters
if alpha <= 0:
raise ValueError('alpha must be positive')
if n < 0:
raise ValueError('n must be positive')
# This is the PDF i. e. the array that contains the probability that
# content i + 1 is picked
pdf = np.arange(1.0, n + 1.0) ** -alpha
pdf /= np.sum(pdf)
self._alpha = alpha
super(TruncatedZipfDist, self).__init__(pdf, seed)
@property
def alpha(self):
return self._alpha
def means_confidence_interval(data, confidence=0.95):
"""Computes the confidence interval for a given set of means.
Parameters
----------
data : array-like
The set of samples whose confidence interval is calculated
confidence : float, optional
The confidence level. It must be a value in the interval (0, 1)
Returns
-------
mean : float
The mean of the sample
err : float
The standard error of the sample
References
----------
[1] <NAME>, From Algorithms to Z-Scores: Probabilistic and Statistical
Modeling in Computer Science.
Available: http://heather.cs.ucdavis.edu/probstatbook
"""
if confidence <= 0 or confidence >= 1:
raise ValueError('The confidence parameter must be greater than 0 and '
'smaller than 1')
n = len(data)
w = np.mean(data)
s = np.std(data)
err = ss.norm.interval(confidence)[1]
return w, err * s / math.sqrt(n)
def proportions_confidence_interval(data, confidence):
    """Computes the confidence interval of a proportion.

    Parameters
    ----------
    data : array-like of bool
        The sample of data whose proportion of True values needs to be
        estimated
    confidence : float
        The confidence level. It must be a value in the interval (0, 1)

    Returns
    -------
    p : float
        The proportion of truthy values in the sample
    err : float
        The half-width of the confidence interval around the proportion
    """
    if confidence <= 0 or confidence >= 1:
        raise ValueError('The confidence parameter must be greater than 0 and '
                         'smaller than 1')
    n = float(len(data))
    # BUG FIX: the original called len() on a generator expression, which
    # raises TypeError; count instead.  Testing truthiness (rather than
    # 'is True') also counts numpy bools and 1s correctly.
    m = sum(1 for i in data if i)
    p = m / n
    err = ss.norm.interval(confidence)[1]
    return p, err * math.sqrt(p * (1 - p) / n)
def cdf(data):
    """Return the empirical CDF of a set of 1D data.

    Parameters
    ----------
    data : array-like
        Array of data

    Returns
    -------
    x : array
        The sorted unique values occurring in data
    cdf : array
        The CDF of data; cdf[i] is the empirical probability of observing
        a value <= x[i]
    """
    if len(data) < 1:
        raise TypeError("data must have at least one element")
    occurrences = collections.Counter(data)
    x = np.sort(list(occurrences.keys()))
    counts = np.array([occurrences[value] for value in x])
    cumulative = np.cumsum(counts).astype(float)
    cumulative /= cumulative[-1]   # normalize to [0, 1]
    cumulative[-1] = 1.0           # guard against rounding errors
    return x, cumulative
def pdf(data, n_bins):
    """Return the empirical PDF of a set of 1D data

    Parameters
    ----------
    data : array-like
        Array of data
    n_bins : int
        The number of bins

    Returns
    -------
    x : array
        The center point of all bins
    pdf : array
        The PDF of data.
    """
    # Validate input parameters
    if len(data) < 1:
        raise TypeError("data must have at least one element")
    if not isinstance(n_bins, int):
        raise TypeError("intervals parameter must be an integer")
    if n_bins < 1:
        raise TypeError("Intervals must be >= 1")
    # Sort data and divide it in sections
    data = np.sort(data)
    data_min = data[0]
    data_max = data[-1]
    # n_bins equally sized bins spanning [data_min, data_max]; x holds each
    # bin's midpoint.
    # NOTE(review): if all samples are equal (data_max == data_min) the
    # normalization below divides by zero -- confirm intended behavior.
    boundaries = np.linspace(data_min, data_max, n_bins + 1)
    x = boundaries[:-1] + ((boundaries[1] - boundaries[0]) / 2.0)
    # Count number of samples in each section; data is sorted, so a single
    # forward sweep with a moving section index suffices.
    pdf = np.zeros(n_bins)
    section = 0
    for entry in data:
        if entry <= boundaries[section + 1]:
            pdf[section] += 1
        else:
            # Advance past any empty bins until the entry's bin is reached.
            section += 1
            while entry > boundaries[section + 1]:
                section += 1
            pdf[section] += 1
    # Normalize pdf so the histogram integrates to 1 over the data range
    pdf = (pdf * n_bins) / (np.sum(pdf) * (data_max - data_min))
    return x, pdf | icarus/tools/stats.py | from __future__ import division
import math
import random
import collections
import numpy as np
import scipy.stats as ss
__all__ = [
'DiscreteDist',
'TruncatedZipfDist',
'means_confidence_interval',
'proportions_confidence_interval',
'cdf',
'pdf',
]
class DiscreteDist(object):
    """A discrete distribution with a finite population.

    The support must be a finite discrete set of contiguous integers
    {1, ..., N}.
    """

    def __init__(self, pdf, seed=None):
        """Constructor.

        Parameters
        ----------
        pdf : array-like
            The probability density function
        seed : any hashable type (optional)
            The seed to be used for random number generation
        """
        # Accept small numerical error in the normalization of the pdf.
        if np.abs(sum(pdf) - 1.0) > 0.001:
            raise ValueError('The sum of pdf values must be equal to 1')
        random.seed(seed)
        self._pdf = np.asarray(pdf)
        self._cdf = np.cumsum(self._pdf)
        # Force the last CDF entry to exactly 1.0 to absorb rounding errors.
        self._cdf[-1] = 1.0

    def __len__(self):
        """Return the cardinality of the support.

        Returns
        -------
        len : int
            The cardinality of the support
        """
        return len(self._pdf)

    @property
    def pdf(self):
        """Numpy array representing the probability density function."""
        return self._pdf

    @property
    def cdf(self):
        """Numpy array representing the cumulative density function."""
        return self._cdf

    def rv(self):
        """Draw one random value from the distribution."""
        u = random.random()
        # Invert the CDF with a binary search: worst case O(log2(n)).
        return int(np.searchsorted(self._cdf, u) + 1)
class TruncatedZipfDist(DiscreteDist):
    """Implements a truncated Zipf distribution, i.e. a Zipf distribution with
    a finite population, which can hence take values of alpha > 0.
    """

    def __init__(self, alpha=1.0, n=1000, seed=None):
        """Constructor

        Parameters
        ----------
        alpha : float
            The value of the alpha parameter (it must be positive)
        n : int
            The size of population (it must be at least 1)
        seed : any hashable type, optional
            The seed to be used for random number generation
        """
        # Validate parameters
        if alpha <= 0:
            raise ValueError('alpha must be positive')
        # BUG FIX: the previous check (n < 0) let n == 0 through even though
        # the message says "positive" and an empty population makes the
        # normalization below divide by zero.
        if n < 1:
            raise ValueError('n must be positive')
        # This is the PDF i. e. the array that contains the probability that
        # content i + 1 is picked: p(i) proportional to i ** -alpha (Zipf law)
        pdf = np.arange(1.0, n + 1.0) ** -alpha
        pdf /= np.sum(pdf)
        self._alpha = alpha
        super(TruncatedZipfDist, self).__init__(pdf, seed)

    @property
    def alpha(self):
        """The alpha (skewness) parameter of the distribution."""
        return self._alpha
def means_confidence_interval(data, confidence=0.95):
    """Computes the confidence interval for a given set of means.

    Parameters
    ----------
    data : array-like
        The set of samples whose confidence interval is calculated
    confidence : float, optional
        The confidence level. It must be a value in the interval (0, 1)

    Returns
    -------
    mean : float
        The mean of the sample
    err : float
        The half-width of the confidence interval around the mean
    """
    if not 0 < confidence < 1:
        raise ValueError('The confidence parameter must be greater than 0 and '
                         'smaller than 1')
    sample_size = len(data)
    sample_mean = np.mean(data)
    sample_std = np.std(data)
    # Upper quantile of the standard normal for the requested confidence.
    z_score = ss.norm.interval(confidence)[1]
    return sample_mean, z_score * sample_std / math.sqrt(sample_size)
def proportions_confidence_interval(data, confidence):
    """Computes the confidence interval of a proportion.

    Parameters
    ----------
    data : array-like of bool
        The sample of data whose proportion of True values needs to be
        estimated
    confidence : float
        The confidence level. It must be a value in the interval (0, 1)

    Returns
    -------
    p : float
        The proportion of truthy values in the sample
    err : float
        The half-width of the confidence interval around the proportion
    """
    if confidence <= 0 or confidence >= 1:
        raise ValueError('The confidence parameter must be greater than 0 and '
                         'smaller than 1')
    n = float(len(data))
    # BUG FIX: the original called len() on a generator expression, which
    # raises TypeError; count instead.  Testing truthiness (rather than
    # 'is True') also counts numpy bools and 1s correctly.
    m = sum(1 for i in data if i)
    p = m / n
    err = ss.norm.interval(confidence)[1]
    return p, err * math.sqrt(p * (1 - p) / n)
def cdf(data):
    """Return the empirical CDF of a set of 1D data.

    Parameters
    ----------
    data : array-like
        Array of data

    Returns
    -------
    x : array
        The sorted unique values occurring in data
    cdf : array
        The CDF of data; cdf[i] is the empirical probability of observing
        a value <= x[i]
    """
    if len(data) < 1:
        raise TypeError("data must have at least one element")
    occurrences = collections.Counter(data)
    x = np.sort(list(occurrences.keys()))
    counts = np.array([occurrences[value] for value in x])
    cumulative = np.cumsum(counts).astype(float)
    cumulative /= cumulative[-1]   # normalize to [0, 1]
    cumulative[-1] = 1.0           # guard against rounding errors
    return x, cumulative
def pdf(data, n_bins):
    """Return the empirical PDF of a set of 1D data

    Parameters
    ----------
    data : array-like
        Array of data
    n_bins : int
        The number of bins

    Returns
    -------
    x : array
        The center point of all bins
    pdf : array
        The PDF of data.
    """
    # Validate input parameters
    if len(data) < 1:
        raise TypeError("data must have at least one element")
    if not isinstance(n_bins, int):
        raise TypeError("intervals parameter must be an integer")
    if n_bins < 1:
        raise TypeError("Intervals must be >= 1")
    # Sort data and divide it in sections
    data = np.sort(data)
    data_min = data[0]
    data_max = data[-1]
    # n_bins equally sized bins spanning [data_min, data_max]; x holds each
    # bin's midpoint.
    # NOTE(review): if all samples are equal (data_max == data_min) the
    # normalization below divides by zero -- confirm intended behavior.
    boundaries = np.linspace(data_min, data_max, n_bins + 1)
    x = boundaries[:-1] + ((boundaries[1] - boundaries[0]) / 2.0)
    # Count number of samples in each section; data is sorted, so a single
    # forward sweep with a moving section index suffices.
    pdf = np.zeros(n_bins)
    section = 0
    for entry in data:
        if entry <= boundaries[section + 1]:
            pdf[section] += 1
        else:
            # Advance past any empty bins until the entry's bin is reached.
            section += 1
            while entry > boundaries[section + 1]:
                section += 1
            pdf[section] += 1
    # Normalize pdf so the histogram integrates to 1 over the data range
    pdf = (pdf * n_bins) / (np.sum(pdf) * (data_max - data_min))
    return x, pdf | 0.913638 | 0.585931 |
import unittest
import numpy as np
from trajectory import ParallelTrajectory
class TestParallelTrajectory(unittest.TestCase):
    """Unit tests for ParallelTrajectory with two parallel environments."""

    def setUp(self):
        # Fresh two-environment trajectory before every test.
        self.trajectory = ParallelTrajectory(2)

    def test_discounted_returns(self):
        # Episode boundaries differ per environment: env 0 finishes at steps
        # 3 and 6, env 1 at step 4; returns must reset after each 'done'.
        self.add_rewards(step=1, parallel_rewards=[1, 1], parallel_dones=[False, False])
        self.add_rewards(step=2, parallel_rewards=[1, 1], parallel_dones=[False, False])
        self.add_rewards(step=3, parallel_rewards=[1, 1], parallel_dones=[True, False])
        self.add_rewards(step=4, parallel_rewards=[1, 1], parallel_dones=[False, True])
        self.add_rewards(step=5, parallel_rewards=[1, 1], parallel_dones=[False, False])
        self.add_rewards(step=6, parallel_rewards=[1, 1], parallel_dones=[True, False])
        actual_returns = self.trajectory.discounted_returns(discount=0.9)
        # Hand-computed, e.g. 2.71 = 1 + 0.9 * (1 + 0.9 * 1).
        expected_returns = [[2.71, 3.439],
                            [1.9, 2.71],
                            [1., 1.9],
                            [2.71, 1.],
                            [1.9, 1.9],
                            [1., 1.]]
        np.testing.assert_array_equal(actual_returns, np.array(expected_returns))

    def add_rewards(self, step, parallel_rewards, parallel_dones):
        # Helper: record one step where only rewards/dones vary per test.
        i = step
        self.trajectory.add(
            parallel_states=np.array([i, i]),
            parallel_actions=np.array([i, i]),
            parallel_action_probs=np.array([0.5, 0.5]),
            parallel_rewards=np.array(parallel_rewards),
            parallel_next_states=np.array([i+1, i+1]),
            parallel_dones=np.array(parallel_dones))

    def test_rewards(self):
        # NOTE(review): this test only exercises plain numpy arithmetic and
        # never touches self.trajectory -- confirm it is intentional.
        a = [1, 2, 3, 4]
        b = a[2:] + a[:2]
        c = (np.array(a) + np.array(b))/2.0
        np.testing.assert_array_equal(c, np.array([2., 3., 2., 3.]))

    def test_action_probs(self):
        # Per-environment streams must come back column-wise in step order.
        self.add_action_probs(step=1, parallel_states=[10, 20], parallel_action_probs=[0.10, 0.20])
        self.add_action_probs(step=2, parallel_states=[11, 21], parallel_action_probs=[0.11, 0.21])
        self.add_action_probs(step=3, parallel_states=[12, 22], parallel_action_probs=[0.12, 0.22])
        self.add_action_probs(step=4, parallel_states=[13, 23], parallel_action_probs=[0.13, 0.23])
        self.add_action_probs(step=5, parallel_states=[14, 24], parallel_action_probs=[0.14, 0.24])
        self.add_action_probs(step=6, parallel_states=[15, 25], parallel_action_probs=[0.15, 0.25])
        states, full_states, actions, action_probs, rewards, next_states, dones = self.trajectory.numpy()
        np.testing.assert_array_equal(states[:, 0], np.array([10, 11, 12, 13, 14, 15]))
        np.testing.assert_array_equal(action_probs[:, 0], np.array([0.10, 0.11, 0.12, 0.13, 0.14, 0.15]))
        np.testing.assert_array_equal(states[:, 1], np.array([20, 21, 22, 23, 24, 25]))
        np.testing.assert_array_equal(action_probs[:, 1], np.array([0.20, 0.21, 0.22, 0.23, 0.24, 0.25]))

    def add_action_probs(self, step, parallel_states, parallel_action_probs):
        # Helper: record one step where only states/action-probs vary.
        i = step
        self.trajectory.add(
            parallel_states=np.array(parallel_states),
            parallel_actions=np.array([i, i]),
            parallel_action_probs=np.array(parallel_action_probs),
            parallel_rewards=np.array([i, i]),
            parallel_next_states=np.array([i+1, i+1]),
            parallel_dones=np.array([False, False])) | soccer-twos-ppo/test_trajectory.py | import unittest
import numpy as np
from trajectory import ParallelTrajectory
class TestParallelTrajectory(unittest.TestCase):
    """Unit tests for ParallelTrajectory with two parallel environments."""

    def setUp(self):
        # Fresh two-environment trajectory before every test.
        self.trajectory = ParallelTrajectory(2)

    def test_discounted_returns(self):
        # Episode boundaries differ per environment: env 0 finishes at steps
        # 3 and 6, env 1 at step 4; returns must reset after each 'done'.
        self.add_rewards(step=1, parallel_rewards=[1, 1], parallel_dones=[False, False])
        self.add_rewards(step=2, parallel_rewards=[1, 1], parallel_dones=[False, False])
        self.add_rewards(step=3, parallel_rewards=[1, 1], parallel_dones=[True, False])
        self.add_rewards(step=4, parallel_rewards=[1, 1], parallel_dones=[False, True])
        self.add_rewards(step=5, parallel_rewards=[1, 1], parallel_dones=[False, False])
        self.add_rewards(step=6, parallel_rewards=[1, 1], parallel_dones=[True, False])
        actual_returns = self.trajectory.discounted_returns(discount=0.9)
        # Hand-computed, e.g. 2.71 = 1 + 0.9 * (1 + 0.9 * 1).
        expected_returns = [[2.71, 3.439],
                            [1.9, 2.71],
                            [1., 1.9],
                            [2.71, 1.],
                            [1.9, 1.9],
                            [1., 1.]]
        np.testing.assert_array_equal(actual_returns, np.array(expected_returns))

    def add_rewards(self, step, parallel_rewards, parallel_dones):
        # Helper: record one step where only rewards/dones vary per test.
        i = step
        self.trajectory.add(
            parallel_states=np.array([i, i]),
            parallel_actions=np.array([i, i]),
            parallel_action_probs=np.array([0.5, 0.5]),
            parallel_rewards=np.array(parallel_rewards),
            parallel_next_states=np.array([i+1, i+1]),
            parallel_dones=np.array(parallel_dones))

    def test_rewards(self):
        # NOTE(review): this test only exercises plain numpy arithmetic and
        # never touches self.trajectory -- confirm it is intentional.
        a = [1, 2, 3, 4]
        b = a[2:] + a[:2]
        c = (np.array(a) + np.array(b))/2.0
        np.testing.assert_array_equal(c, np.array([2., 3., 2., 3.]))

    def test_action_probs(self):
        # Per-environment streams must come back column-wise in step order.
        self.add_action_probs(step=1, parallel_states=[10, 20], parallel_action_probs=[0.10, 0.20])
        self.add_action_probs(step=2, parallel_states=[11, 21], parallel_action_probs=[0.11, 0.21])
        self.add_action_probs(step=3, parallel_states=[12, 22], parallel_action_probs=[0.12, 0.22])
        self.add_action_probs(step=4, parallel_states=[13, 23], parallel_action_probs=[0.13, 0.23])
        self.add_action_probs(step=5, parallel_states=[14, 24], parallel_action_probs=[0.14, 0.24])
        self.add_action_probs(step=6, parallel_states=[15, 25], parallel_action_probs=[0.15, 0.25])
        states, full_states, actions, action_probs, rewards, next_states, dones = self.trajectory.numpy()
        np.testing.assert_array_equal(states[:, 0], np.array([10, 11, 12, 13, 14, 15]))
        np.testing.assert_array_equal(action_probs[:, 0], np.array([0.10, 0.11, 0.12, 0.13, 0.14, 0.15]))
        np.testing.assert_array_equal(states[:, 1], np.array([20, 21, 22, 23, 24, 25]))
        np.testing.assert_array_equal(action_probs[:, 1], np.array([0.20, 0.21, 0.22, 0.23, 0.24, 0.25]))

    def add_action_probs(self, step, parallel_states, parallel_action_probs):
        # Helper: record one step where only states/action-probs vary.
        i = step
        self.trajectory.add(
            parallel_states=np.array(parallel_states),
            parallel_actions=np.array([i, i]),
            parallel_action_probs=np.array(parallel_action_probs),
            parallel_rewards=np.array([i, i]),
            parallel_next_states=np.array([i+1, i+1]),
            parallel_dones=np.array([False, False])) | 0.604516 | 0.692746 |
import threading
from oslo_log import log as logging
from kingbird.common import consts
from kingbird.common import exceptions
from kingbird.db.sqlalchemy import api as db_api
from kingbird.drivers.openstack import glance_adapter
from kingbird.drivers.openstack.glance_v2 import GlanceClient
from kingbird.drivers.openstack.glance_v2 import GlanceUpload
LOG = logging.getLogger(__name__)
class ImageSyncManager(object):
    """Manages tasks related to resource management."""
    def __init__(self, *args, **kwargs):
        # Stateless; extra args are accepted for interface compatibility
        # with the engine's manager construction and ignored.
        super(ImageSyncManager, self).__init__()
def create_resources_in_region(self, job_id, target_regions,
                               source_region, context, resource, force):
    """Create Region Specific threads.

    Syncs one resource into every target region, one thread per region,
    and blocks until all region threads have finished.
    """
    regions_thread = list()
    for region in target_regions:
        thread = threading.Thread(target=self.create_resources,
                                  args=(job_id, region, source_region,
                                        context, resource, force))
        regions_thread.append(thread)
        thread.start()
    # Wait for every region before returning so callers can read results.
    for region_thread in regions_thread:
        region_thread.join()
def create_resources(self, job_id, region, source_region, context,
                     resource, force):
    """Check dependent images and create resources in target regions."""
    source_glance_client = GlanceClient(source_region, context)
    target_glance_client = GlanceClient(region, context)
    # The adapter returns kernel/ramdisk dependencies for AMI-style images,
    # or None for self-contained images; pick the sync path accordingly.
    dependent_images = glance_adapter.check_dependent_images(
        context, source_region, resource)
    if dependent_images is not None:
        result = self.create_dependent_image(
            resource, dependent_images, target_glance_client,
            source_glance_client, region, force)
        self.update_result_in_database(context, job_id, region, resource,
                                       result)
    else:
        result = self.create_independent_image(
            resource, target_glance_client, source_glance_client,
            region, force)
        self.update_result_in_database(context, job_id, region, resource,
                                       result)
def update_result_in_database(self, context, job_id, region, resource,
                              result):
    """Record the outcome of a sync operation for one resource/region.

    :param context: request context object.
    :param job_id: ID of the job which triggered image_sync.
    :param region: target region the resource was synced to.
    :param resource: resource that was synced.
    :param result: True if the sync succeeded, False otherwise.
    """
    job_result = consts.JOB_SUCCESS if result else consts.JOB_FAILURE
    try:
        db_api.resource_sync_update(context, job_id, region,
                                    resource, job_result)
    # BUG FIX: the except clause previously *called* the exception class
    # ('except exceptions.JobNotFound():'), which never matches the raised
    # type; also removed the unreachable 'pass' after 'raise'.
    except exceptions.JobNotFound:
        raise
def create_dependent_image(self, resource, dependent_images,
                           target_client, source_client, region, force):
    """Create dependent images along with base image.

    Base image here is Amazon Machine Image(AMI) and
    Dependent images are Amazon Kernel Image(AKI),
    Amazon Ramdisk Image(ARI).

    :param resource: Resource to be synced.
    :param dependent_images: Dependent images for the base image.
    :param target_client: Glance client object for the target_region.
    :param source_client: Glance client object for source_region.
    :param region: Target region in which resource has to be synced.
    :param force: Default force option is False. If '--force'
        is given then force is set to True.
    :returns: True if every create/upload step succeeded, False otherwise
        (errors are logged, not propagated).
    """
    try:
        kernel_image = dependent_images['kernel_image']
        ramdisk_image = dependent_images['ramdisk_image']
        source_image = source_client.get_image(resource)
        # Create images in target regions.  The AMI must reference the ids
        # of the kernel/ramdisk images just created in the target region.
        target_kernel_image = target_client.\
            create_image(kernel_image, force)
        target_ramdisk_image = target_client.\
            create_image(ramdisk_image, force)
        target_source_image = target_client.\
            create_image(source_image, force, target_kernel_image.id,
                         target_ramdisk_image.id)
        # Fetch and Upload image into glance.
        # Kernel Image upload.
        kernel_image_data = source_client.\
            get_image_data(kernel_image.id)
        upload_kernel_image = GlanceUpload(kernel_image_data)
        target_client.image_upload(target_kernel_image.id,
                                   upload_kernel_image)
        LOG.info('Kernel_image %(image)s uploaded in %(region)s'
                 % {'image': kernel_image.id, 'region': region})
        # Ramdisk image upload.
        ramdisk_image_data = source_client.\
            get_image_data(ramdisk_image.id)
        upload_ram_disk_image = GlanceUpload(ramdisk_image_data)
        target_client.image_upload(target_ramdisk_image.id,
                                   upload_ram_disk_image)
        LOG.info('ramdisk_image %(image)s uploaded in %(region)s'
                 % {'image': ramdisk_image.id, 'region': region})
        # Base 'AMI' image upload.
        source_image_data = source_client.get_image_data(source_image.id)
        upload_source_image = GlanceUpload(source_image_data)
        target_client.image_upload(target_source_image.id,
                                   upload_source_image)
        LOG.info('source_image %(image)s uploaded in %(region)s'
                 % {'image': source_image.id, 'region': region})
        return True
    except Exception as exc:
        # NOTE(review): exc.message is Python-2 only (AttributeError on
        # Python 3); formatting exc itself would be version-safe.
        LOG.error('Exception Occurred: %(msg)s in %(region)s'
                  % {'msg': exc.message, 'region': region})
        return False
def create_independent_image(self, resource, target_client,
                             source_client, region, force):
    """Create independent images.

    Base image here is Qcow2.

    :param resource: Resource to be synced.
    :param target_client: Glance client object for the target_region.
    :param source_client: Glance client object for source_region.
    :param region: Target region in which resource has to be synced.
    :param force: Default force option is False. If '--force'
        is given then force is set to True.
    :returns: True on success, False on any failure (errors are logged,
        not propagated, so one region's failure cannot abort the others).
    """
    try:
        source_image = source_client.get_image(resource)
        target_source_image = target_client.create_image(source_image,
                                                         force)
        source_image_data = source_client.get_image_data(source_image.id)
        upload_source_image = GlanceUpload(source_image_data)
        target_client.image_upload(target_source_image.id,
                                   upload_source_image)
        LOG.info('source_image %(image)s uploaded in %(region)s'
                 % {'image': source_image.id, 'region': region})
        return True
    except Exception as exc:
        # BUG FIX: Exception.message does not exist on Python 3; format the
        # exception object itself instead (equivalent under %s).
        LOG.error('Exception Occurred: %(msg)s in %(region)s'
                  % {'msg': exc, 'region': region})
        return False
def resource_sync(self, context, job_id, payload):
    """Create resources in target regions.

    Image with same id is created in target_regions and therefore
    if a user wants to sync the same resource, as the ID is already
    used, glance throws a 409 error; '--force' sets the force flag to
    true and thereby creates the resource without failing.

    :param context: request context object.
    :param job_id: ID of the job which triggered image_sync.
    :param payload: request payload.
    """
    LOG.info('Triggered image sync.')
    images_thread = list()
    target_regions = payload['target']
    # SECURITY NOTE(review): eval() on request-derived data can execute
    # arbitrary code; a strict boolean parse should be used instead.
    force = eval(str(payload.get('force', False)))
    resource_ids = payload.get('resources')
    source_region = payload['source']
    # One thread per resource; each fans out further per target region.
    for resource in resource_ids:
        thread = threading.Thread(target=self.create_resources_in_region,
                                  args=(job_id, target_regions,
                                        source_region, context,
                                        resource, force))
        images_thread.append(thread)
        thread.start()
    for image_thread in images_thread:
        image_thread.join()
    try:
        resource_sync_details = db_api.\
            resource_sync_status(context, job_id)
    except exceptions.JobNotFound:
        raise
    # The job fails as a whole if any single resource/region sync failed.
    result = consts.JOB_SUCCESS
    if consts.JOB_FAILURE in resource_sync_details:
        result = consts.JOB_FAILURE
    try:
        db_api.sync_job_update(context, job_id, result)
    except exceptions.JobNotFound:
        raise | kingbird/engine/image_sync_manager.py |
import threading
from oslo_log import log as logging
from kingbird.common import consts
from kingbird.common import exceptions
from kingbird.db.sqlalchemy import api as db_api
from kingbird.drivers.openstack import glance_adapter
from kingbird.drivers.openstack.glance_v2 import GlanceClient
from kingbird.drivers.openstack.glance_v2 import GlanceUpload
LOG = logging.getLogger(__name__)
class ImageSyncManager(object):
    """Manages tasks related to resource management."""
    def __init__(self, *args, **kwargs):
        # Stateless; extra args are accepted for interface compatibility
        # with the engine's manager construction and ignored.
        super(ImageSyncManager, self).__init__()
def create_resources_in_region(self, job_id, target_regions,
                               source_region, context, resource, force):
    """Create Region Specific threads.

    Syncs one resource into every target region, one thread per region,
    and blocks until all region threads have finished.
    """
    regions_thread = list()
    for region in target_regions:
        thread = threading.Thread(target=self.create_resources,
                                  args=(job_id, region, source_region,
                                        context, resource, force))
        regions_thread.append(thread)
        thread.start()
    # Wait for every region before returning so callers can read results.
    for region_thread in regions_thread:
        region_thread.join()
def create_resources(self, job_id, region, source_region, context,
                     resource, force):
    """Check dependent images and create resources in target regions."""
    source_glance_client = GlanceClient(source_region, context)
    target_glance_client = GlanceClient(region, context)
    # The adapter returns kernel/ramdisk dependencies for AMI-style images,
    # or None for self-contained images; pick the sync path accordingly.
    dependent_images = glance_adapter.check_dependent_images(
        context, source_region, resource)
    if dependent_images is not None:
        result = self.create_dependent_image(
            resource, dependent_images, target_glance_client,
            source_glance_client, region, force)
        self.update_result_in_database(context, job_id, region, resource,
                                       result)
    else:
        result = self.create_independent_image(
            resource, target_glance_client, source_glance_client,
            region, force)
        self.update_result_in_database(context, job_id, region, resource,
                                       result)
def update_result_in_database(self, context, job_id, region, resource,
                              result):
    """Record the outcome of a sync operation for one resource/region.

    :param context: request context object.
    :param job_id: ID of the job which triggered image_sync.
    :param region: target region the resource was synced to.
    :param resource: resource that was synced.
    :param result: True if the sync succeeded, False otherwise.
    """
    job_result = consts.JOB_SUCCESS if result else consts.JOB_FAILURE
    try:
        db_api.resource_sync_update(context, job_id, region,
                                    resource, job_result)
    # BUG FIX: the except clause previously *called* the exception class
    # ('except exceptions.JobNotFound():'), which never matches the raised
    # type; also removed the unreachable 'pass' after 'raise'.
    except exceptions.JobNotFound:
        raise
def create_dependent_image(self, resource, dependent_images,
                           target_client, source_client, region, force):
    """Create dependent images along with base image.

    Base image here is Amazon Machine Image(AMI) and
    Dependent images are Amazon Kernel Image(AKI),
    Amazon Ramdisk Image(ARI).

    :param resource: Resource to be synced.
    :param dependent_images: Dependent images for the base image.
    :param target_client: Glance client object for the target_region.
    :param source_client: Glance client object for source_region.
    :param region: Target region in which resource has to be synced.
    :param force: Default force option is False. If '--force'
        is given then force is set to True.
    :returns: True if every create/upload step succeeded, False otherwise
        (errors are logged, not propagated).
    """
    try:
        kernel_image = dependent_images['kernel_image']
        ramdisk_image = dependent_images['ramdisk_image']
        source_image = source_client.get_image(resource)
        # Create images in target regions.  The AMI must reference the ids
        # of the kernel/ramdisk images just created in the target region.
        target_kernel_image = target_client.\
            create_image(kernel_image, force)
        target_ramdisk_image = target_client.\
            create_image(ramdisk_image, force)
        target_source_image = target_client.\
            create_image(source_image, force, target_kernel_image.id,
                         target_ramdisk_image.id)
        # Fetch and Upload image into glance.
        # Kernel Image upload.
        kernel_image_data = source_client.\
            get_image_data(kernel_image.id)
        upload_kernel_image = GlanceUpload(kernel_image_data)
        target_client.image_upload(target_kernel_image.id,
                                   upload_kernel_image)
        LOG.info('Kernel_image %(image)s uploaded in %(region)s'
                 % {'image': kernel_image.id, 'region': region})
        # Ramdisk image upload.
        ramdisk_image_data = source_client.\
            get_image_data(ramdisk_image.id)
        upload_ram_disk_image = GlanceUpload(ramdisk_image_data)
        target_client.image_upload(target_ramdisk_image.id,
                                   upload_ram_disk_image)
        LOG.info('ramdisk_image %(image)s uploaded in %(region)s'
                 % {'image': ramdisk_image.id, 'region': region})
        # Base 'AMI' image upload.
        source_image_data = source_client.get_image_data(source_image.id)
        upload_source_image = GlanceUpload(source_image_data)
        target_client.image_upload(target_source_image.id,
                                   upload_source_image)
        LOG.info('source_image %(image)s uploaded in %(region)s'
                 % {'image': source_image.id, 'region': region})
        return True
    except Exception as exc:
        # NOTE(review): exc.message is Python-2 only (AttributeError on
        # Python 3); formatting exc itself would be version-safe.
        LOG.error('Exception Occurred: %(msg)s in %(region)s'
                  % {'msg': exc.message, 'region': region})
        return False
def create_independent_image(self, resource, target_client,
                             source_client, region, force):
    """Create independent images.

    Base image here is Qcow2.

    :param resource: Resource to be synced.
    :param target_client: Glance client object for the target_region.
    :param source_client: Glance client object for source_region.
    :param region: Target region in which resource has to be synced.
    :param force: Default force option is False. If '--force'
        is given then force is set to True.
    :returns: True on success, False on any failure (errors are logged,
        not propagated, so one region's failure cannot abort the others).
    """
    try:
        source_image = source_client.get_image(resource)
        target_source_image = target_client.create_image(source_image,
                                                         force)
        source_image_data = source_client.get_image_data(source_image.id)
        upload_source_image = GlanceUpload(source_image_data)
        target_client.image_upload(target_source_image.id,
                                   upload_source_image)
        LOG.info('source_image %(image)s uploaded in %(region)s'
                 % {'image': source_image.id, 'region': region})
        return True
    except Exception as exc:
        # BUG FIX: Exception.message does not exist on Python 3; format the
        # exception object itself instead (equivalent under %s).
        LOG.error('Exception Occurred: %(msg)s in %(region)s'
                  % {'msg': exc, 'region': region})
        return False
def resource_sync(self, context, job_id, payload):
    """Create resources in target regions.

    Image with same id is created in target_regions and therefore
    if a user wants to sync the same resource, as the ID is already
    used, glance throws a 409 error; '--force' sets the force flag to
    true and thereby creates the resource without failing.

    :param context: request context object.
    :param job_id: ID of the job which triggered image_sync.
    :param payload: request payload.
    """
    LOG.info('Triggered image sync.')
    images_thread = list()
    target_regions = payload['target']
    # SECURITY NOTE(review): eval() on request-derived data can execute
    # arbitrary code; a strict boolean parse should be used instead.
    force = eval(str(payload.get('force', False)))
    resource_ids = payload.get('resources')
    source_region = payload['source']
    # One thread per resource; each fans out further per target region.
    for resource in resource_ids:
        thread = threading.Thread(target=self.create_resources_in_region,
                                  args=(job_id, target_regions,
                                        source_region, context,
                                        resource, force))
        images_thread.append(thread)
        thread.start()
    for image_thread in images_thread:
        image_thread.join()
    try:
        resource_sync_details = db_api.\
            resource_sync_status(context, job_id)
    except exceptions.JobNotFound:
        raise
    # The job fails as a whole if any single resource/region sync failed.
    result = consts.JOB_SUCCESS
    if consts.JOB_FAILURE in resource_sync_details:
        result = consts.JOB_FAILURE
    try:
        db_api.sync_job_update(context, job_id, result)
    except exceptions.JobNotFound:
        raise | 0.598195 | 0.095476 |
import threading
import time
import getpass
import rpyc
import urwid
# TODO: global-scope variables are evil
screen = None
class Service(rpyc.Service):
    """RPyC service that mirrors watcher connection events and reported
    errors onto the global Screen instance."""

    def on_connect(self):
        global screen
        # The screen may not exist yet during startup; drop the event then.
        if not screen:
            return
        screen.addSysMessage("Watcher connected")

    def on_disconnect(self):
        global screen
        if not screen:
            return
        screen.addSysMessage("Watcher disconnected")

    def add_err(self, obj):
        global screen
        # NOTE(review): unlike the connection hooks, this does not guard
        # against screen being None -- confirm errors cannot arrive early.
        screen.addSysMessage("Error Detected:")
        screen.addPlainMessage(obj)

    def exposed_add_err(self, err):
        # The 'exposed_' prefix makes this callable by remote RPyC clients.
        self.add_err(err)
class ChatInput(urwid.Edit):
    '''Custom edit for chat-like input field: 'enter' submits the current
    text via the 'done' signal, 'esc' clears the field.'''
    # BUG FIX: the attribute was misspelled '_metaclass_' (single
    # underscores), which Python ignores entirely.  Note this Python-2
    # '__metaclass__' hook is itself a no-op on Python 3; urwid widgets
    # already use the MetaSignals metaclass, which registers 'signals'.
    __metaclass__ = urwid.signals.MetaSignals
    signals = ['done']

    def keypress(self, size, key):
        """Handle a keypress; emits 'done' with the text on 'enter'."""
        if key == 'enter':
            # Hand the current text to listeners, then reset the field.
            urwid.emit_signal(self, 'done', self, self.get_edit_text())
            super(ChatInput, self).set_edit_text('')
        elif key == 'esc':
            super(ChatInput, self).set_edit_text('')
        else:
            urwid.Edit.keypress(self, size, key)
class Screen():
    """urwid-based chat screen: scrolling message list plus input footer."""

    # Display attributes; the 6-tuple form also carries high-color values.
    palette = [
        ('sysmsg', 'black', 'light gray', 'standout,underline', 'black,underline', '#88a')
    ]
    # NOTE(review): class-level mutable walker is shared by all Screen
    # instances -- fine for a single-screen app, confirm otherwise.
    listWalker = urwid.SimpleFocusListWalker([])
    loop = None

    def __init__(self, username):
        # Name shown as the prompt/prefix for locally entered messages.
        self.user = username

    def run(self):
        """Build the widget tree and start the blocking urwid main loop."""
        listBox = urwid.ListBox(self.listWalker)
        textEdit = ChatInput(self.user + ' > ')
        urwid.connect_signal(textEdit, 'done', self.onSubmit)
        frame = urwid.Frame(
            urwid.AttrWrap(listBox, 'body'),
            header=urwid.BoxAdapter(urwid.ListBox([
                urwid.Text('SO-bro'),
                urwid.Divider('-')
            ]), 2),
            footer=urwid.BoxAdapter(urwid.ListBox([
                urwid.Divider('-'),
                textEdit
            ]), 5)
        )
        self.loop = urwid.MainLoop(urwid.Padding(frame, left=2, right=2), self.palette)
        self.loop.run()

    def addUserMessage(self, user, msg):
        # Append a "user > msg" line and repaint immediately (draw_screen
        # is needed because messages can arrive outside the input loop).
        self.listWalker.append(urwid.Text(user + ' > ' + msg))
        self.loop.draw_screen()

    def addSysMessage(self, msg):
        # System messages are rendered with the 'sysmsg' palette entry.
        self.listWalker.append(urwid.Text(('sysmsg', 'sys > ' + msg)))
        self.loop.draw_screen()

    def addPlainMessage(self, msg):
        # Raw text without any prefix or styling (e.g. error payload dumps).
        self.listWalker.append(urwid.Text(msg))
        self.loop.draw_screen()

    def onSubmit(self, widget, text):
        # ChatInput 'done' handler: echo the submitted line locally.
        self.addUserMessage(self.user, text)
if __name__ == "__main__":
    # NOTE(review): 'global' at module level is a no-op and 'selector' is
    # never defined or used anywhere -- looks like dead code.
    global selector
    from rpyc.utils.server import ThreadedServer
    # Serve the RPyC watcher endpoint on a background thread so the urwid
    # UI can own the main thread.
    t = ThreadedServer(Service, port = 18861)
    th = threading.Thread(target=t.start)
    th.start()
    global screen
    username = getpass.getuser()
    screen = Screen(username)
    screen.run() | debugger.py | import threading
import time
import getpass
import rpyc
import urwid
# TODO: global-scope variables are evil
screen = None
class Service(rpyc.Service):
    """RPyC service that mirrors watcher connection events and reported
    errors onto the global Screen instance."""

    def on_connect(self):
        global screen
        # The screen may not exist yet during startup; drop the event then.
        if not screen:
            return
        screen.addSysMessage("Watcher connected")

    def on_disconnect(self):
        global screen
        if not screen:
            return
        screen.addSysMessage("Watcher disconnected")

    def add_err(self, obj):
        global screen
        # NOTE(review): unlike the connection hooks, this does not guard
        # against screen being None -- confirm errors cannot arrive early.
        screen.addSysMessage("Error Detected:")
        screen.addPlainMessage(obj)

    def exposed_add_err(self, err):
        # The 'exposed_' prefix makes this callable by remote RPyC clients.
        self.add_err(err)
class ChatInput(urwid.Edit):
    '''Custom edit for chat-like input field: 'enter' submits the current
    text via the 'done' signal, 'esc' clears the field.'''
    # BUG FIX: the attribute was misspelled '_metaclass_' (single
    # underscores), which Python ignores entirely.  Note this Python-2
    # '__metaclass__' hook is itself a no-op on Python 3; urwid widgets
    # already use the MetaSignals metaclass, which registers 'signals'.
    __metaclass__ = urwid.signals.MetaSignals
    signals = ['done']

    def keypress(self, size, key):
        """Handle a keypress; emits 'done' with the text on 'enter'."""
        if key == 'enter':
            # Hand the current text to listeners, then reset the field.
            urwid.emit_signal(self, 'done', self, self.get_edit_text())
            super(ChatInput, self).set_edit_text('')
        elif key == 'esc':
            super(ChatInput, self).set_edit_text('')
        else:
            urwid.Edit.keypress(self, size, key)
class Screen():
    """urwid-based chat screen: scrolling message list plus input footer."""

    # Display attributes; the 6-tuple form also carries high-color values.
    palette = [
        ('sysmsg', 'black', 'light gray', 'standout,underline', 'black,underline', '#88a')
    ]
    # NOTE(review): class-level mutable walker is shared by all Screen
    # instances -- fine for a single-screen app, confirm otherwise.
    listWalker = urwid.SimpleFocusListWalker([])
    loop = None

    def __init__(self, username):
        # Name shown as the prompt/prefix for locally entered messages.
        self.user = username

    def run(self):
        """Build the widget tree and start the blocking urwid main loop."""
        listBox = urwid.ListBox(self.listWalker)
        textEdit = ChatInput(self.user + ' > ')
        urwid.connect_signal(textEdit, 'done', self.onSubmit)
        frame = urwid.Frame(
            urwid.AttrWrap(listBox, 'body'),
            header=urwid.BoxAdapter(urwid.ListBox([
                urwid.Text('SO-bro'),
                urwid.Divider('-')
            ]), 2),
            footer=urwid.BoxAdapter(urwid.ListBox([
                urwid.Divider('-'),
                textEdit
            ]), 5)
        )
        self.loop = urwid.MainLoop(urwid.Padding(frame, left=2, right=2), self.palette)
        self.loop.run()

    def addUserMessage(self, user, msg):
        # Append a "user > msg" line and repaint immediately (draw_screen
        # is needed because messages can arrive outside the input loop).
        self.listWalker.append(urwid.Text(user + ' > ' + msg))
        self.loop.draw_screen()

    def addSysMessage(self, msg):
        # System messages are rendered with the 'sysmsg' palette entry.
        self.listWalker.append(urwid.Text(('sysmsg', 'sys > ' + msg)))
        self.loop.draw_screen()

    def addPlainMessage(self, msg):
        # Raw text without any prefix or styling (e.g. error payload dumps).
        self.listWalker.append(urwid.Text(msg))
        self.loop.draw_screen()

    def onSubmit(self, widget, text):
        # ChatInput 'done' handler: echo the submitted line locally.
        self.addUserMessage(self.user, text)
if __name__ == "__main__":
global selector
from rpyc.utils.server import ThreadedServer
t = ThreadedServer(Service, port = 18861)
th = threading.Thread(target=t.start)
th.start()
global screen
username = getpass.getuser()
screen = Screen(username)
screen.run() | 0.118985 | 0.065425 |
class DataTable(object):
def __init__(self, colNames, rows):
if not colNames:
raise Exception("Error: Must pass column names to constructor.")
if not rows:
raise Exception("Error: Must rows to constructor.")
self._colNames = colNames
self._rows = rows
def getColumnNames(self):
return self._colNames
def getRowsAsList(self):
return self._rows
def getRowsAsJSON(self):
results = []
for rowIndx in xrange(len(self._rows)):
doc = {}
for colIndx, colName in enumerate(self._colNames):
formattedName = colName.strip().replace(" ", "_")
doc[formattedName] = self._rows[rowIndx][colIndx]
results.append(doc)
return results
class DataTableFactory:
EXCEL_EXTENSIONS = ('XLSX', 'XLS')
CSV_EXTENSIONS = ('TXT', 'CSV')
VALID_EXTENSIONS = EXCEL_EXTENSIONS + CSV_EXTENSIONS
@staticmethod
def getDataTable(fileName=None, fileStream=None, opts={}):
'''
Parses CSV or Excel data and returns a DataTable object.
Note that a file name must be passed. If no fileStream is passed,
it will open a file using the fileName parameter.
'''
if not fileName:
raise Exception("Error: Must pass a file name.")
if fileName and not fileStream:
try:
fileStream = open(fileName, "rb")
except Exception, e:
raise e
ext = fileName.split('.')[-1].upper()
if ext not in DataTableFactory.VALID_EXTENSIONS:
raise Exception("Error: File must be one of the following types: %s" % ', '.join(DataTableFactory.VALID_EXTENSIONS))
colNames = []
rows = []
if ext in DataTableFactory.EXCEL_EXTENSIONS:
from xlrd import open_workbook
output = fileStream.read()
workbook = open_workbook(file_contents=output)
sheet = workbook.sheet_by_index(0)
colNames = [ sheet.cell_value(0, col).lower() for col in xrange(sheet.ncols) ]
for rowIndx in xrange(1, sheet.nrows):
rows.append([ sheet.cell_value(rowIndx, colIndx) for colIndx in xrange(len(colNames)) ])
else:
import csv
delimiter = opts.get('delimiter', ',')
quotechar = opts.get('quotechar', '"')
reader = csv.reader(fileStream, delimiter=delimiter, quotechar=quotechar)
data = [ row for row in reader ]
colNames = [col.lower().strip() for col in data[0]]
rows = [ row for row in data[1:] ]
return DataTable(colNames, rows) | HDMA/HDMA-SocialMediaAPI-dev/underConstruction/IntegrationAPI/pythonAPI/DataFactory/DataTable.py | class DataTable(object):
def __init__(self, colNames, rows):
if not colNames:
raise Exception("Error: Must pass column names to constructor.")
if not rows:
raise Exception("Error: Must rows to constructor.")
self._colNames = colNames
self._rows = rows
def getColumnNames(self):
return self._colNames
def getRowsAsList(self):
return self._rows
def getRowsAsJSON(self):
results = []
for rowIndx in xrange(len(self._rows)):
doc = {}
for colIndx, colName in enumerate(self._colNames):
formattedName = colName.strip().replace(" ", "_")
doc[formattedName] = self._rows[rowIndx][colIndx]
results.append(doc)
return results
class DataTableFactory:
EXCEL_EXTENSIONS = ('XLSX', 'XLS')
CSV_EXTENSIONS = ('TXT', 'CSV')
VALID_EXTENSIONS = EXCEL_EXTENSIONS + CSV_EXTENSIONS
@staticmethod
def getDataTable(fileName=None, fileStream=None, opts={}):
'''
Parses CSV or Excel data and returns a DataTable object.
Note that a file name must be passed. If no fileStream is passed,
it will open a file using the fileName parameter.
'''
if not fileName:
raise Exception("Error: Must pass a file name.")
if fileName and not fileStream:
try:
fileStream = open(fileName, "rb")
except Exception, e:
raise e
ext = fileName.split('.')[-1].upper()
if ext not in DataTableFactory.VALID_EXTENSIONS:
raise Exception("Error: File must be one of the following types: %s" % ', '.join(DataTableFactory.VALID_EXTENSIONS))
colNames = []
rows = []
if ext in DataTableFactory.EXCEL_EXTENSIONS:
from xlrd import open_workbook
output = fileStream.read()
workbook = open_workbook(file_contents=output)
sheet = workbook.sheet_by_index(0)
colNames = [ sheet.cell_value(0, col).lower() for col in xrange(sheet.ncols) ]
for rowIndx in xrange(1, sheet.nrows):
rows.append([ sheet.cell_value(rowIndx, colIndx) for colIndx in xrange(len(colNames)) ])
else:
import csv
delimiter = opts.get('delimiter', ',')
quotechar = opts.get('quotechar', '"')
reader = csv.reader(fileStream, delimiter=delimiter, quotechar=quotechar)
data = [ row for row in reader ]
colNames = [col.lower().strip() for col in data[0]]
rows = [ row for row in data[1:] ]
return DataTable(colNames, rows) | 0.265214 | 0.255576 |
file = open('advent-day-19.txt',newline='')
inputdata = file.read().splitlines()
sample = ['0: 4 1 5','1: 2 3 | 3 2','2: 4 4 | 5 5','3: 4 5 | 5 4','4: "a"','5: "b"','',
'ababbb','bababa','abbbab','aaabbb','aaaabbb']
def format_input(inputdata):
rules = {}
for line in inputdata[:inputdata.index('')]:
key = line.split(':')[0]
if line.split(':')[1].replace('"','').strip() in ['a','b']:
value = line.split(':')[1].replace('"','').strip()
else:
value = line.split(':')[1].split('|')
value = [item.strip().split() for item in value]
rules[key] = value
messages = inputdata[inputdata.index('')+1:]
return rules, messages
def recurse(rule, value, accum, place):
for option in rules[rule]:
for item in option:
print(rule,option,item,':::',accum,place)
if item in ['a','b']:
temp = accum + item
if temp[:place] == value[:place]:
return place + 1, temp
else:
return place, accum
else:
place, accum = recurse(item,value,accum,place)
return place, accum
# adapted from u/MichalMarsalek
def check_rule(text, r, rules):
if len(text) == 0:
return []
if isinstance(rules[r], str):
if text[0] == rules[r]:
return [1]
else:
return []
length0 = []
for disj in rules[r]:
length = [0]
for conj in disj:
length2 = []
for l in length:
for c in check_rule(text[l:], conj, rules):
length2.append(l+c)
length = length2
length0.extend(length)
return length0
rules, messages = format_input(inputdata)
print(sum(len(q) in check_rule(q,'0',rules) for q in messages))
rules2, messages = format_input(inputdata)
rules2['8'] = [['42'],['42','8']]
rules2['11'] = [['42','31'],['42','11','31']]
print(sum(len(q) in check_rule(q,'0',rules2) for q in messages)) | advent-day-19.py | file = open('advent-day-19.txt',newline='')
inputdata = file.read().splitlines()
sample = ['0: 4 1 5','1: 2 3 | 3 2','2: 4 4 | 5 5','3: 4 5 | 5 4','4: "a"','5: "b"','',
'ababbb','bababa','abbbab','aaabbb','aaaabbb']
def format_input(inputdata):
rules = {}
for line in inputdata[:inputdata.index('')]:
key = line.split(':')[0]
if line.split(':')[1].replace('"','').strip() in ['a','b']:
value = line.split(':')[1].replace('"','').strip()
else:
value = line.split(':')[1].split('|')
value = [item.strip().split() for item in value]
rules[key] = value
messages = inputdata[inputdata.index('')+1:]
return rules, messages
def recurse(rule, value, accum, place):
for option in rules[rule]:
for item in option:
print(rule,option,item,':::',accum,place)
if item in ['a','b']:
temp = accum + item
if temp[:place] == value[:place]:
return place + 1, temp
else:
return place, accum
else:
place, accum = recurse(item,value,accum,place)
return place, accum
# adapted from u/MichalMarsalek
def check_rule(text, r, rules):
if len(text) == 0:
return []
if isinstance(rules[r], str):
if text[0] == rules[r]:
return [1]
else:
return []
length0 = []
for disj in rules[r]:
length = [0]
for conj in disj:
length2 = []
for l in length:
for c in check_rule(text[l:], conj, rules):
length2.append(l+c)
length = length2
length0.extend(length)
return length0
rules, messages = format_input(inputdata)
print(sum(len(q) in check_rule(q,'0',rules) for q in messages))
rules2, messages = format_input(inputdata)
rules2['8'] = [['42'],['42','8']]
rules2['11'] = [['42','31'],['42','11','31']]
print(sum(len(q) in check_rule(q,'0',rules2) for q in messages)) | 0.226784 | 0.359617 |
import dropbox
import json
import logging
import requests
from django.conf import settings
from django.core.management.base import BaseCommand
from django.core.mail import EmailMessage
from allauth.socialaccount.models import SocialToken
from books.utils import DropboxParser
from libraries.models import LibraryImport
logger = logging.getLogger('scripts')
class Command(BaseCommand):
help = "get import job and run it"
def handle(self, *args, **options):
logger.debug('Starting book import cronjob')
library_import_jobs = LibraryImport.objects.filter(
status=LibraryImport.PENDING)[:4]
for job in library_import_jobs:
logger.debug('Starting import job %s' % job.id)
job.status = LibraryImport.PROCESSING
job.save()
token = None
try:
token = SocialToken.objects.get(
account__user=job.librarian.user,
app__provider='dropbox_oauth2',
).token
except:
logger.exception(
'Error getting dropbox token for import job %s' % job.id
)
job.status = LibraryImport.ERROR
job.save()
if token:
client = dropbox.client.DropboxClient(token)
parser = DropboxParser(
client=client,
library=job.librarian.library,
user=job.librarian.user,
)
try:
parser.parse(path=job.path)
job.status = LibraryImport.DONE
job.save()
message = EmailMessage(
subject='[Booksonas] Import complete!',
body="We've finished importing {}, go login to booksonas.com to see your books!".format(job.path),
from_email="<EMAIL>",
to=[job.librarian.user.email],
)
message.send()
except:
logger.exception("Error parsing path")
job.status = LibraryImport.ERROR
job.save()
try:
if not settings.DEBUG:
payload = {
'text': 'Error in import job: {}'.format(job.id)
}
r = requests.post(
settings.SLACK_WEBHOOK_URL,
data=json.dumps(payload),
)
except:
logger.exception("Error sending error to slack")
logger.debug('Finished import job %s' % job.id)
logger.debug('Finished book import cronjob') | libraries/management/commands/run_import_job.py | import dropbox
import json
import logging
import requests
from django.conf import settings
from django.core.management.base import BaseCommand
from django.core.mail import EmailMessage
from allauth.socialaccount.models import SocialToken
from books.utils import DropboxParser
from libraries.models import LibraryImport
logger = logging.getLogger('scripts')
class Command(BaseCommand):
help = "get import job and run it"
def handle(self, *args, **options):
logger.debug('Starting book import cronjob')
library_import_jobs = LibraryImport.objects.filter(
status=LibraryImport.PENDING)[:4]
for job in library_import_jobs:
logger.debug('Starting import job %s' % job.id)
job.status = LibraryImport.PROCESSING
job.save()
token = None
try:
token = SocialToken.objects.get(
account__user=job.librarian.user,
app__provider='dropbox_oauth2',
).token
except:
logger.exception(
'Error getting dropbox token for import job %s' % job.id
)
job.status = LibraryImport.ERROR
job.save()
if token:
client = dropbox.client.DropboxClient(token)
parser = DropboxParser(
client=client,
library=job.librarian.library,
user=job.librarian.user,
)
try:
parser.parse(path=job.path)
job.status = LibraryImport.DONE
job.save()
message = EmailMessage(
subject='[Booksonas] Import complete!',
body="We've finished importing {}, go login to booksonas.com to see your books!".format(job.path),
from_email="<EMAIL>",
to=[job.librarian.user.email],
)
message.send()
except:
logger.exception("Error parsing path")
job.status = LibraryImport.ERROR
job.save()
try:
if not settings.DEBUG:
payload = {
'text': 'Error in import job: {}'.format(job.id)
}
r = requests.post(
settings.SLACK_WEBHOOK_URL,
data=json.dumps(payload),
)
except:
logger.exception("Error sending error to slack")
logger.debug('Finished import job %s' % job.id)
logger.debug('Finished book import cronjob') | 0.225929 | 0.039881 |
__author__ = '<NAME>'
import deploy
import unittest
class ArgParserTests(unittest.TestCase):
def setUp(self):
pass
def test_username_nopass(self):
error, args, config = deploy.parse_args(["-u", "bob"])
self.assertEqual('You need to specify either -l/--list or both -u/--username and -p/--password', error)
def test_pass_nousername(self):
error, args, config = deploy.parse_args(["-p", "mypass"])
self.assertEqual('You need to specify either -l/--list or both -u/--username and -p/--password', error)
def test_noargs(self):
error, args, config = deploy.parse_args([])
self.assertEqual('You need to specify either -l/--list or both -u/--username and -p/--password', error)
def test_invalid_configfile(self):
error, args, config = deploy.parse_args(["-c", "missing_file", "-l"])
self.assertEqual("Unable to read content from config file 'missing_file'", error)
def test_list_configfile(self):
error, args, config = deploy.parse_args(["-c", "test_data/test_config.ini", "-l"])
self.assertEqual("Targets found for 'test_data/test_config.ini': ['component1', 'component2', 'component3', 'component4', 'component5', 'component6', 'component7', 'component8', 'group1', 'group2', 'group3', 'group4', 'group5', 'group6', 'group7']", error)
def test_no_target(self):
error, args, config = deploy.parse_args(["-c", "test_data/test_config.ini", "-u", "bob", "-p", "mypass"])
self.assertEqual("No deployment target specified. Doing nothing.", error)
def test_user_pass_target(self):
error, args, config = deploy.parse_args(["-c", "test_data/test_config.ini", "-u", "bob", "-p", "mypass", "all"])
self.assertIsNone(error)
self.assertEqual("test_data/test_config.ini", args.configfile)
self.assertEqual("all", args.target)
self.assertEqual("mypass", args.password)
self.assertEqual("bob", args.username)
if __name__ == '__main__':
unittest.main() | src/test_argparse.py | __author__ = '<NAME>'
import deploy
import unittest
class ArgParserTests(unittest.TestCase):
def setUp(self):
pass
def test_username_nopass(self):
error, args, config = deploy.parse_args(["-u", "bob"])
self.assertEqual('You need to specify either -l/--list or both -u/--username and -p/--password', error)
def test_pass_nousername(self):
error, args, config = deploy.parse_args(["-p", "mypass"])
self.assertEqual('You need to specify either -l/--list or both -u/--username and -p/--password', error)
def test_noargs(self):
error, args, config = deploy.parse_args([])
self.assertEqual('You need to specify either -l/--list or both -u/--username and -p/--password', error)
def test_invalid_configfile(self):
error, args, config = deploy.parse_args(["-c", "missing_file", "-l"])
self.assertEqual("Unable to read content from config file 'missing_file'", error)
def test_list_configfile(self):
error, args, config = deploy.parse_args(["-c", "test_data/test_config.ini", "-l"])
self.assertEqual("Targets found for 'test_data/test_config.ini': ['component1', 'component2', 'component3', 'component4', 'component5', 'component6', 'component7', 'component8', 'group1', 'group2', 'group3', 'group4', 'group5', 'group6', 'group7']", error)
def test_no_target(self):
error, args, config = deploy.parse_args(["-c", "test_data/test_config.ini", "-u", "bob", "-p", "mypass"])
self.assertEqual("No deployment target specified. Doing nothing.", error)
def test_user_pass_target(self):
error, args, config = deploy.parse_args(["-c", "test_data/test_config.ini", "-u", "bob", "-p", "mypass", "all"])
self.assertIsNone(error)
self.assertEqual("test_data/test_config.ini", args.configfile)
self.assertEqual("all", args.target)
self.assertEqual("mypass", args.password)
self.assertEqual("bob", args.username)
if __name__ == '__main__':
unittest.main() | 0.229276 | 0.243721 |
import pytest
from app.services import reddit_service
from app.util.raffler import Raffler
from app.jobs.raffle_job import raffle
from app.db.models.raffle import Raffle
from tests.helpers import raffler_params
from tests.factories import UserFactory
@pytest.fixture(autouse=True)
def patch_raffler_class(monkeypatch):
monkeypatch.setattr(Raffler, "__init__", _stub_raffler_init)
monkeypatch.setattr(Raffler, "fetch_comments", lambda x: True)
monkeypatch.setattr(Raffler, "select_winners", lambda x: True)
monkeypatch.setattr(Raffler, "get_serialized_winners", _stub_winners)
monkeypatch.setattr(reddit_service, "get_submission_by_url", _stub_submission)
yield
class TestRaffle:
class TestSuccessfulRaffle:
def test_raffle_guest_db_saving(self, db_session, client):
raffle.queue(raffler_params(), None)
saved_raffle = Raffle.query.filter_by(submission_id="abc123").first()
assert saved_raffle
assert not saved_raffle.creator
assert len(saved_raffle.winners) == 1
assert saved_raffle.winners[0].username == "test-user"
def test_raffle_verified_db_saving(self, db_session, client):
user = UserFactory(username="verified_redditor")
raffle.queue(raffler_params(), user)
saved_raffle = Raffle.query.filter_by(submission_id="abc123").first()
assert saved_raffle
assert saved_raffle.creator.username == "verified_redditor"
assert len(saved_raffle.winners) == 1
assert saved_raffle.winners[0].username == "test-user"
class TestFailure:
@pytest.fixture
def job(self, mocker):
job = mocker.Mock()
job.meta = {}
job.save_meta = mocker.Mock()
yield job
@pytest.fixture
def get_current_job(self, mocker, job):
get_current_job = mocker.patch("app.jobs.raffle_job.get_current_job")
get_current_job.return_value = job
yield get_current_job
@pytest.fixture
def reddit(self, mocker):
reddit = mocker.patch("app.services.reddit_service")
reddit.get_submission_by_url = mocker.Mock(
return_value=_stub_submission("")
)
yield reddit
@pytest.fixture
def raffler(self, mocker):
raffler = mocker.patch("app.jobs.raffle_job.Raffler")
raffler.return_value = mocker.Mock()
raffler.return_value.fetch_comments = mocker.Mock(
side_effect=ValueError("Some Random Error")
)
yield raffler
def test_set_error_message_to_job(
self, mocker, reddit, raffler, job, get_current_job
):
raffle.queue(raffler_params(), None)
assert job.meta.get("status") == "Error: Some Random Error"
assert job.meta.get("error") is True
def _stub_raffler_init(
self,
submission_url,
winner_count,
min_account_age,
min_comment_karma,
min_link_karma,
min_combined_karma,
ignored_users,
):
return None
def _stub_winners(self):
return [
{
"user": {
"username": "test-user",
"age": 100,
"comment_karma": 100,
"link_karma": 100,
},
"comment_url": "https://redd.it/comments/abc123",
}
]
def _stub_submission(sub_url):
return {
"id": "abc123",
"author": "test_user",
"title": "test_title",
"url": "https://redd.it/abc123",
"subreddit": "test",
"created_at_utc": 1520193497,
}
def _raffle_params():
return {
"submission_url": "https://redd.it/57xvjb",
"winner_count": 1,
"min_account_age": 0,
"min_comment_karma": 0,
"min_link_karma": 0,
}
def _submission():
return {
"id": "57xvjb",
"author": "xozzo",
"title": "pyfootball - A Python API wrapper for football-data.org, \
an open source football (soccer) data REST API",
"url": "https://www.reddit.com/r/coolgithubprojects/comments/57xv \
jb/pyfootball_a_python_api_wrapper_for/",
"subreddit": "coolgithubprojects",
"created_at_utc": 1476717718.0,
} | tests/jobs/test_raffle_job.py | import pytest
from app.services import reddit_service
from app.util.raffler import Raffler
from app.jobs.raffle_job import raffle
from app.db.models.raffle import Raffle
from tests.helpers import raffler_params
from tests.factories import UserFactory
@pytest.fixture(autouse=True)
def patch_raffler_class(monkeypatch):
monkeypatch.setattr(Raffler, "__init__", _stub_raffler_init)
monkeypatch.setattr(Raffler, "fetch_comments", lambda x: True)
monkeypatch.setattr(Raffler, "select_winners", lambda x: True)
monkeypatch.setattr(Raffler, "get_serialized_winners", _stub_winners)
monkeypatch.setattr(reddit_service, "get_submission_by_url", _stub_submission)
yield
class TestRaffle:
class TestSuccessfulRaffle:
def test_raffle_guest_db_saving(self, db_session, client):
raffle.queue(raffler_params(), None)
saved_raffle = Raffle.query.filter_by(submission_id="abc123").first()
assert saved_raffle
assert not saved_raffle.creator
assert len(saved_raffle.winners) == 1
assert saved_raffle.winners[0].username == "test-user"
def test_raffle_verified_db_saving(self, db_session, client):
user = UserFactory(username="verified_redditor")
raffle.queue(raffler_params(), user)
saved_raffle = Raffle.query.filter_by(submission_id="abc123").first()
assert saved_raffle
assert saved_raffle.creator.username == "verified_redditor"
assert len(saved_raffle.winners) == 1
assert saved_raffle.winners[0].username == "test-user"
class TestFailure:
@pytest.fixture
def job(self, mocker):
job = mocker.Mock()
job.meta = {}
job.save_meta = mocker.Mock()
yield job
@pytest.fixture
def get_current_job(self, mocker, job):
get_current_job = mocker.patch("app.jobs.raffle_job.get_current_job")
get_current_job.return_value = job
yield get_current_job
@pytest.fixture
def reddit(self, mocker):
reddit = mocker.patch("app.services.reddit_service")
reddit.get_submission_by_url = mocker.Mock(
return_value=_stub_submission("")
)
yield reddit
@pytest.fixture
def raffler(self, mocker):
raffler = mocker.patch("app.jobs.raffle_job.Raffler")
raffler.return_value = mocker.Mock()
raffler.return_value.fetch_comments = mocker.Mock(
side_effect=ValueError("Some Random Error")
)
yield raffler
def test_set_error_message_to_job(
self, mocker, reddit, raffler, job, get_current_job
):
raffle.queue(raffler_params(), None)
assert job.meta.get("status") == "Error: Some Random Error"
assert job.meta.get("error") is True
def _stub_raffler_init(
self,
submission_url,
winner_count,
min_account_age,
min_comment_karma,
min_link_karma,
min_combined_karma,
ignored_users,
):
return None
def _stub_winners(self):
return [
{
"user": {
"username": "test-user",
"age": 100,
"comment_karma": 100,
"link_karma": 100,
},
"comment_url": "https://redd.it/comments/abc123",
}
]
def _stub_submission(sub_url):
return {
"id": "abc123",
"author": "test_user",
"title": "test_title",
"url": "https://redd.it/abc123",
"subreddit": "test",
"created_at_utc": 1520193497,
}
def _raffle_params():
return {
"submission_url": "https://redd.it/57xvjb",
"winner_count": 1,
"min_account_age": 0,
"min_comment_karma": 0,
"min_link_karma": 0,
}
def _submission():
return {
"id": "57xvjb",
"author": "xozzo",
"title": "pyfootball - A Python API wrapper for football-data.org, \
an open source football (soccer) data REST API",
"url": "https://www.reddit.com/r/coolgithubprojects/comments/57xv \
jb/pyfootball_a_python_api_wrapper_for/",
"subreddit": "coolgithubprojects",
"created_at_utc": 1476717718.0,
} | 0.494629 | 0.255048 |
import os
import sys
import time
import unittest
import k3proc
import k3ut
# Short alias for the debug-print helper used throughout these tests.
dd = k3ut.dd
# Directory containing this test file; helper scripts (subproc.py,
# read_fd.py, write.py, write.sh, print_env.py) are resolved relative to it.
this_base = os.path.dirname(__file__)
class TestProc(unittest.TestCase):
    """Tests for the k3proc subprocess helpers (command, command_ex,
    shell_script, start_process) and the CalledProcessError type."""

    # Scratch path written by the helper scripts (write.py / write.sh);
    # removed in setUp/tearDown so every test starts and ends clean.
    foo_fn = '/tmp/foo'
def _read_file(self, fn):
try:
with open(fn, 'r') as f:
cont = f.read()
return cont
except EnvironmentError:
return None
def _clean(self):
# remove written file
try:
os.unlink(self.foo_fn)
except EnvironmentError:
pass
    def setUp(self):
        # Start each test with the scratch file removed.
        self._clean()

    def tearDown(self):
        # Leave no scratch file behind after each test.
        self._clean()
def test_procerror(self):
inp = (1, 'out', 'err', ['ls', 'a', 'b'], {"close_fds": True})
ex_args = (1, 'out', 'err', ['out'], ['err'], ['ls', 'a', 'b'], {"close_fds": True})
ex = k3proc.CalledProcessError(*inp)
self.assertEqual(ex_args, (ex.returncode,
ex.stdout,
ex.stderr,
ex.out,
ex.err,
ex.cmd,
ex.options))
self.assertEqual(inp, ex.args)
def test_error_str_with_capture_false(self):
try:
k3proc.command(
'python', '-c', 'import sys; sys.exit(1)',
capture=False,
check=True,
)
except k3proc.CalledProcessError as e:
self.assertEqual('', e.stdout)
self.assertEqual([], e.out)
self.assertEqual('', e.stderr)
self.assertEqual([], e.err)
def test_error_str(self):
try:
k3proc.command(
'python', '-c', 'import sys, os; os.write(1, b"foo"); os.write(2, b"bar"); sys.exit(1)',
check=True,
env={"foo": "bar"},
cwd="/tmp",
input="123")
except k3proc.CalledProcessError as e:
s = '\n'.join([
"CalledProcessError",
'python -c import sys, os; os.write(1, b"foo"); os.write(2, b"bar"); sys.exit(1)',
"options: {'cwd': '/tmp', 'env': {'foo': 'bar'}, 'input': '123'}",
"exit code: 1",
"foo",
"bar",
])
self.assertEqual(s, str(e))
self.assertEqual(s, repr(e))
# text=False
try:
k3proc.command(
'python', '-c', 'import sys, os; os.write(1, b"\x01"); os.write(2, b"\x02"); sys.exit(1)',
check=True,
env={"foo": "bar"},
cwd="/tmp",
text=False,
input=b"123")
except k3proc.CalledProcessError as e:
s = '\n'.join([
"CalledProcessError",
'python -c import sys, os; os.write(1, b"\x01"); os.write(2, b"\x02"); sys.exit(1)',
"options: {'cwd': '/tmp', 'env': {'foo': 'bar'}, 'input': b'123'}",
"exit code: 1",
"b'\\x01'",
"b'\\x02'",
])
self.assertEqual(s, str(e))
self.assertEqual(s, repr(e))
    def test_code_out_err(self):
        """command() returns (code, out, err); command_ex() raises on non-zero."""
        subproc = os.path.join(this_base, 'subproc.py')

        # command(): a non-zero exit code is returned, not raised.
        returncode, out, err = k3proc.command('python', subproc, '222')
        self.assertEqual(222, returncode)
        self.assertEqual('out-1\nout-2\n', out)
        self.assertEqual('err-1\nerr-2\n', err)

        # command_ex(): a non-zero exit raises CalledProcessError carrying
        # the captured output, the command and the options.
        try:
            returncode, out, err = k3proc.command_ex('python', subproc, '222')
        except k3proc.CalledProcessError as e:
            self.assertEqual(222, e.returncode)
            self.assertEqual('out-1\nout-2\n', e.stdout)
            self.assertEqual('out-1\nout-2\n'.splitlines(), e.out)
            self.assertEqual('err-1\nerr-2\n', e.stderr)
            self.assertEqual('err-1\nerr-2\n'.splitlines(), e.err)
            self.assertEqual('python', e.cmd[0])
            self.assertTrue(e.cmd[1].endswith('subproc.py'))
            self.assertEqual('222', e.cmd[2])
            self.assertEqual({}, e.options)
        else:
            self.fail('expect k3proc.CalledProcessError to be raised')

        # Zero exit: both variants succeed and return the captured output.
        returncode, out, err = k3proc.command_ex('python', subproc, '0')
        self.assertEqual(0, returncode)
        self.assertEqual('out-1\nout-2\n', out)
        self.assertEqual('err-1\nerr-2\n', err)

        returncode, out, err = k3proc.command('python', subproc, '0')
        self.assertEqual(0, returncode)
        self.assertEqual('out-1\nout-2\n', out)
        self.assertEqual('err-1\nerr-2\n', err)
    def test_text_true(self):
        """Default text mode decodes output; invalid UTF-8 requires text=False."""
        # The child writes the single byte 0x89, which is not valid UTF-8.
        cmd = ['python', '-c', 'import os; os.write(1, b"\\x89")', ]
        # Decoding the captured output in text mode fails.
        self.assertRaises(
            UnicodeDecodeError,
            k3proc.command,
            *cmd
        )
        # text=False returns the raw bytes untouched.
        returncode, out, err = k3proc.command(*cmd, text=False)
        dd('returncode:', returncode)
        dd('out:', out)
        dd('err:', err)
        self.assertEqual(0, returncode)
        self.assertEqual(b'\x89', out)
    def test_close_fds(self):
        """close_fds controls whether the child inherits open descriptors."""
        # NOTE(review): indentation was lost in this copy of the file; the
        # two command() calls are placed inside the `with` so the descriptor
        # stays open while the children run -- confirm against the repo.
        read_fd = os.path.join(this_base, 'read_fd.py')
        with open(read_fd) as f:
            fd = f.fileno()
            # Mark the fd inheritable so close_fds is the only variable.
            os.set_inheritable(fd, True)

            # close_fds=False: the child can read the file through the fd.
            returncode, out, err = k3proc.command(
                'python', read_fd, str(fd), close_fds=False)
            dd(returncode, out, err)
            self.assertEqual(0, returncode)
            self.assertEqual('###\n', out)
            self.assertEqual('', err)

            # close_fds=True: the fd is closed in the child -> EBADF (errno 9).
            returncode, out, err = k3proc.command(
                'python', read_fd, str(fd), close_fds=True)
            self.assertEqual(1, returncode)
            self.assertEqual('errno=9\n', out)
            self.assertEqual('', err)
    def test_cwd(self):
        """cwd= controls where the child resolves relative paths."""
        returncode, out, err = k3proc.command(
            'python', 'subproc.py', '111', cwd=this_base)
        self.assertEqual(111, returncode)

        # Without cwd, the relative script path cannot be found.
        returncode, out, err = k3proc.command('python', 'subproc.py', '111')
        if 'PyPy' in sys.version:
            # PyPy does not report the exit code correctly here; it is 1.
            self.assertNotEqual(0, returncode)
        else:
            # CPython exits with 2 when it cannot find subproc.py.
            self.assertEqual(2, returncode)
def test_env(self):
returncode, out, err = k3proc.command('python', 'print_env.py', 'abc',
env={"abc": "xyz"},
cwd=this_base)
dd('returncode:', returncode)
dd('out:', out)
dd('err:', err)
self.assertEqual(0, returncode)
self.assertEqual('xyz\n', out)
def test_inherit_env(self):
returncode, out, err = k3proc.command(
'python', '-c', 'import os; print(os.environ.get("PATH"))',
env={"abc": "xyz"},
inherit_env=False,
)
dd('returncode:', returncode)
dd('out:', out)
dd('err:', err)
self.assertEqual(0, returncode)
self.assertEqual('None\n', out, "no PATH inherited")
def test_input(self):
returncode, out, err = k3proc.command('python', 'read_fd.py', '0',
input='abc',
cwd=this_base)
dd('returncode:', returncode)
dd('out:', out)
dd('err:', err)
self.assertEqual(0, returncode)
self.assertEqual('abc\n', out)
def test_timeout(self):
with k3ut.Timer() as t:
self.assertRaises(k3proc.TimeoutExpired,
k3proc.command, 'python', '-c',
'import time; time.sleep(1)',
timeout=0.1
)
self.assertLess(t.spent(), 1)
def test_timeout_tty(self):
with k3ut.Timer() as t:
self.assertRaises(k3proc.TimeoutExpired,
k3proc.command, 'python', '-c',
'import time; time.sleep(1)',
timeout=0.1,
tty=True,
)
self.assertLess(t.spent(), 1)
def test_check(self):
self.assertRaises(k3proc.CalledProcessError,
k3proc.command,
'python', '-c',
'import sys; sys.exit(5)',
check=True,
)
    def test_capture(self):
        # Template run in a child interpreter; {} is filled with the value
        # for capture= via str.format below.
        # NOTE(review): the original indentation inside this template was
        # lost in this copy of the file; it is reproduced left-aligned here
        # (valid for `python -c`) -- confirm against the repo.
        read_stdin_in_subproc = '''
import k3proc;
k3proc.command(
'python', '-c', 'import sys; print(sys.stdin.read())',
capture={}
)
'''
        # capture=False: the inner command's output reaches our stdout,
        # so the outer command captures the echoed stdin.
        returncode, out, err = k3proc.command(
            'python', '-c',
            read_stdin_in_subproc.format('False'),
            input="123",
        )
        dd('returncode:', returncode)
        dd('out:', out)
        dd('err:', err)
        self.assertEqual(0, returncode)
        self.assertEqual("123\n", out)
        # capture=True: the inner command captures its child's output, so
        # nothing reaches our stdout.
        returncode, out, err = k3proc.command(
            'python', '-c',
            read_stdin_in_subproc.format('True'),
            input="123",
        )
        dd('returncode:', returncode)
        dd('out:', out)
        dd('err:', err)
        self.assertEqual(0, returncode)
        self.assertEqual("", out)
        # capture=None (the default) behaves like capture=True.
        returncode, out, err = k3proc.command(
            'python', '-c',
            read_stdin_in_subproc.format('None'),
            input="123",
        )
        dd('returncode:', returncode)
        dd('out:', out)
        dd('err:', err)
        self.assertEqual(0, returncode)
        self.assertEqual("", out)
def test_tty(self):
returncode, out, err = k3proc.command(
'python', '-c', 'import sys; print(sys.stdout.isatty())',
tty=True,
)
dd('returncode:', returncode)
dd('out:', out)
dd('err:', err)
self.assertEqual(0, returncode)
self.assertEqual('True\n', out)
self.assertEqual("", err)
# without pseudo tty, no color outupt:
_, out, _ = k3proc.command(
'python', '-c', 'import sys; print(sys.stdout.isatty())',
tty=False,
)
self.assertEqual('False\n', out)
# by default no tty:
_, out, _ = k3proc.command(
'python', '-c', 'import sys; print(sys.stdout.isatty())',
)
self.assertEqual('False\n', out)
def test_shell_script(self):
returncode, out, err = k3proc.shell_script(
'ls ' + this_base + ' | grep init | grep -v pyc')
dd('returncode:', returncode)
dd('out:', out)
dd('err:', err)
self.assertEqual(0, returncode)
self.assertEqual('__init__.py\n', out)
    def test_start_process(self):
        # Each case: interpreter, target script, args, expected content of
        # the scratch file /tmp/foo written by the script.
        cases = (
            ('python', this_base + '/write.py', ['foo'], 'foo'),
            ('python', this_base + '/write.py', ['foo', 'bar'], 'foobar'),
            ('sh', this_base + '/write.sh', ['123'], '123'),
            ('sh', this_base + '/write.sh', ['123', '456'], '123456'),
        )
        for cmd, target, args, expected in cases:
            k3proc.start_process(cmd, target, os.environ, *args)
            # start_process is fire-and-forget; give the detached child a
            # moment to write the file.
            # NOTE(review): a fixed 0.1s sleep may flake on slow machines.
            time.sleep(0.1)
            self.assertEqual(expected, self._read_file(self.foo_fn))
import sys
import time
import unittest
import k3proc
import k3ut
dd = k3ut.dd
this_base = os.path.dirname(__file__)
class TestProc(unittest.TestCase):
foo_fn = '/tmp/foo'
def _read_file(self, fn):
try:
with open(fn, 'r') as f:
cont = f.read()
return cont
except EnvironmentError:
return None
def _clean(self):
# remove written file
try:
os.unlink(self.foo_fn)
except EnvironmentError:
pass
def setUp(self):
self._clean()
def tearDown(self):
self._clean()
def test_procerror(self):
inp = (1, 'out', 'err', ['ls', 'a', 'b'], {"close_fds": True})
ex_args = (1, 'out', 'err', ['out'], ['err'], ['ls', 'a', 'b'], {"close_fds": True})
ex = k3proc.CalledProcessError(*inp)
self.assertEqual(ex_args, (ex.returncode,
ex.stdout,
ex.stderr,
ex.out,
ex.err,
ex.cmd,
ex.options))
self.assertEqual(inp, ex.args)
def test_error_str_with_capture_false(self):
try:
k3proc.command(
'python', '-c', 'import sys; sys.exit(1)',
capture=False,
check=True,
)
except k3proc.CalledProcessError as e:
self.assertEqual('', e.stdout)
self.assertEqual([], e.out)
self.assertEqual('', e.stderr)
self.assertEqual([], e.err)
def test_error_str(self):
try:
k3proc.command(
'python', '-c', 'import sys, os; os.write(1, b"foo"); os.write(2, b"bar"); sys.exit(1)',
check=True,
env={"foo": "bar"},
cwd="/tmp",
input="123")
except k3proc.CalledProcessError as e:
s = '\n'.join([
"CalledProcessError",
'python -c import sys, os; os.write(1, b"foo"); os.write(2, b"bar"); sys.exit(1)',
"options: {'cwd': '/tmp', 'env': {'foo': 'bar'}, 'input': '123'}",
"exit code: 1",
"foo",
"bar",
])
self.assertEqual(s, str(e))
self.assertEqual(s, repr(e))
# text=False
try:
k3proc.command(
'python', '-c', 'import sys, os; os.write(1, b"\x01"); os.write(2, b"\x02"); sys.exit(1)',
check=True,
env={"foo": "bar"},
cwd="/tmp",
text=False,
input=b"123")
except k3proc.CalledProcessError as e:
s = '\n'.join([
"CalledProcessError",
'python -c import sys, os; os.write(1, b"\x01"); os.write(2, b"\x02"); sys.exit(1)',
"options: {'cwd': '/tmp', 'env': {'foo': 'bar'}, 'input': b'123'}",
"exit code: 1",
"b'\\x01'",
"b'\\x02'",
])
self.assertEqual(s, str(e))
self.assertEqual(s, repr(e))
def test_code_out_err(self):
subproc = os.path.join(this_base, 'subproc.py')
returncode, out, err = k3proc.command('python', subproc, '222')
self.assertEqual(222, returncode)
self.assertEqual('out-1\nout-2\n', out)
self.assertEqual('err-1\nerr-2\n', err)
try:
returncode, out, err = k3proc.command_ex('python', subproc, '222')
except k3proc.CalledProcessError as e:
self.assertEqual(222, e.returncode)
self.assertEqual('out-1\nout-2\n', e.stdout)
self.assertEqual('out-1\nout-2\n'.splitlines(), e.out)
self.assertEqual('err-1\nerr-2\n', e.stderr)
self.assertEqual('err-1\nerr-2\n'.splitlines(), e.err)
self.assertEqual('python', e.cmd[0])
self.assertTrue(e.cmd[1].endswith('subproc.py'))
self.assertEqual('222', e.cmd[2])
self.assertEqual({}, e.options)
else:
self.fail('expect k3proc.CalledProcessError to be raised')
returncode, out, err = k3proc.command_ex('python', subproc, '0')
self.assertEqual(0, returncode)
self.assertEqual('out-1\nout-2\n', out)
self.assertEqual('err-1\nerr-2\n', err)
returncode, out, err = k3proc.command('python', subproc, '0')
self.assertEqual(0, returncode)
self.assertEqual('out-1\nout-2\n', out)
self.assertEqual('err-1\nerr-2\n', err)
def test_text_true(self):
cmd = ['python', '-c', 'import os; os.write(1, b"\\x89")', ]
self.assertRaises(
UnicodeDecodeError,
k3proc.command,
*cmd
)
returncode, out, err = k3proc.command(*cmd, text=False)
dd('returncode:', returncode)
dd('out:', out)
dd('err:', err)
self.assertEqual(0, returncode)
self.assertEqual(b'\x89', out)
def test_close_fds(self):
read_fd = os.path.join(this_base, 'read_fd.py')
with open(read_fd) as f:
fd = f.fileno()
os.set_inheritable(fd, True)
returncode, out, err = k3proc.command(
'python', read_fd, str(fd), close_fds=False)
dd(returncode, out, err)
self.assertEqual(0, returncode)
self.assertEqual('###\n', out)
self.assertEqual('', err)
returncode, out, err = k3proc.command(
'python', read_fd, str(fd), close_fds=True)
self.assertEqual(1, returncode)
self.assertEqual('errno=9\n', out)
self.assertEqual('', err)
def test_cwd(self):
returncode, out, err = k3proc.command(
'python', 'subproc.py', '111', cwd=this_base)
self.assertEqual(111, returncode)
returncode, out, err = k3proc.command('python', 'subproc.py', '111')
if 'PyPy' in sys.version:
# PyPy does not return code correctly. it is 1
self.assertNotEqual(0, returncode)
else:
# 2 for can not find subproc.py
self.assertEqual(2, returncode)
def test_env(self):
returncode, out, err = k3proc.command('python', 'print_env.py', 'abc',
env={"abc": "xyz"},
cwd=this_base)
dd('returncode:', returncode)
dd('out:', out)
dd('err:', err)
self.assertEqual(0, returncode)
self.assertEqual('xyz\n', out)
def test_inherit_env(self):
returncode, out, err = k3proc.command(
'python', '-c', 'import os; print(os.environ.get("PATH"))',
env={"abc": "xyz"},
inherit_env=False,
)
dd('returncode:', returncode)
dd('out:', out)
dd('err:', err)
self.assertEqual(0, returncode)
self.assertEqual('None\n', out, "no PATH inherited")
def test_input(self):
returncode, out, err = k3proc.command('python', 'read_fd.py', '0',
input='abc',
cwd=this_base)
dd('returncode:', returncode)
dd('out:', out)
dd('err:', err)
self.assertEqual(0, returncode)
self.assertEqual('abc\n', out)
def test_timeout(self):
with k3ut.Timer() as t:
self.assertRaises(k3proc.TimeoutExpired,
k3proc.command, 'python', '-c',
'import time; time.sleep(1)',
timeout=0.1
)
self.assertLess(t.spent(), 1)
def test_timeout_tty(self):
with k3ut.Timer() as t:
self.assertRaises(k3proc.TimeoutExpired,
k3proc.command, 'python', '-c',
'import time; time.sleep(1)',
timeout=0.1,
tty=True,
)
self.assertLess(t.spent(), 1)
def test_check(self):
self.assertRaises(k3proc.CalledProcessError,
k3proc.command,
'python', '-c',
'import sys; sys.exit(5)',
check=True,
)
def test_capture(self):
# no capture
read_stdin_in_subproc = '''
import k3proc;
k3proc.command(
'python', '-c', 'import sys; print(sys.stdin.read())',
capture={}
)
'''
returncode, out, err = k3proc.command(
'python', '-c',
read_stdin_in_subproc.format('False'),
input="123",
)
dd('returncode:', returncode)
dd('out:', out)
dd('err:', err)
self.assertEqual(0, returncode)
self.assertEqual("123\n", out)
# capture
returncode, out, err = k3proc.command(
'python', '-c',
read_stdin_in_subproc.format('True'),
input="123",
)
dd('returncode:', returncode)
dd('out:', out)
dd('err:', err)
self.assertEqual(0, returncode)
self.assertEqual("", out)
# default capture
returncode, out, err = k3proc.command(
'python', '-c',
read_stdin_in_subproc.format('None'),
input="123",
)
dd('returncode:', returncode)
dd('out:', out)
dd('err:', err)
self.assertEqual(0, returncode)
self.assertEqual("", out)
def test_tty(self):
returncode, out, err = k3proc.command(
'python', '-c', 'import sys; print(sys.stdout.isatty())',
tty=True,
)
dd('returncode:', returncode)
dd('out:', out)
dd('err:', err)
self.assertEqual(0, returncode)
self.assertEqual('True\n', out)
self.assertEqual("", err)
# without pseudo tty, no color outupt:
_, out, _ = k3proc.command(
'python', '-c', 'import sys; print(sys.stdout.isatty())',
tty=False,
)
self.assertEqual('False\n', out)
# by default no tty:
_, out, _ = k3proc.command(
'python', '-c', 'import sys; print(sys.stdout.isatty())',
)
self.assertEqual('False\n', out)
def test_shell_script(self):
returncode, out, err = k3proc.shell_script(
'ls ' + this_base + ' | grep init | grep -v pyc')
dd('returncode:', returncode)
dd('out:', out)
dd('err:', err)
self.assertEqual(0, returncode)
self.assertEqual('__init__.py\n', out)
def test_start_process(self):
cases = (
('python', this_base + '/write.py', ['foo'], 'foo'),
('python', this_base + '/write.py', ['foo', 'bar'], 'foobar'),
('sh', this_base + '/write.sh', ['123'], '123'),
('sh', this_base + '/write.sh', ['123', '456'], '123456'),
)
for cmd, target, args, expected in cases:
k3proc.start_process(cmd, target, os.environ, *args)
time.sleep(0.1)
self.assertEqual(expected, self._read_file(self.foo_fn)) | 0.306735 | 0.211987 |
import os
import platform
import re
import shutil
import subprocess
import sys
from datetime import datetime
from typing import NoReturn
import packaging.version
import psutil
from executors.controls import restart
from executors.logger import logger
from modules.audio import listener, speaker
from modules.conditions import keywords
from modules.models import models
from modules.temperature import temperature
from modules.utils import shared, support
env = models.env
def system_info() -> NoReturn:
"""Tells the system configuration."""
total, used, free = shutil.disk_usage("/")
total = support.size_converter(byte_size=total)
used = support.size_converter(byte_size=used)
free = support.size_converter(byte_size=free)
ram = support.size_converter(byte_size=psutil.virtual_memory().total).replace('.0', '')
ram_used = support.size_converter(byte_size=psutil.virtual_memory().percent).replace(' B', ' %')
physical = psutil.cpu_count(logical=False)
logical = psutil.cpu_count(logical=True)
speaker.speak(text=f"You're running {platform.platform(terse=True)}, with {physical} physical cores and "
f"{logical} logical cores. Your physical drive capacity is {total}. You have used up {used} of "
f"space. Your free space is {free}. Your RAM capacity is {ram}. You are currently utilizing "
f"{ram_used} of your memory.")
def system_vitals() -> None:
"""Reads system vitals on macOS.
See Also:
- Jarvis will suggest a reboot if the system uptime is more than 2 days.
- If confirmed, invokes `restart <https://thevickypedia.github.io/Jarvis/#jarvis.restart>`__ function.
"""
output = ""
if env.macos:
if not env.root_password:
speaker.speak(text=f"You haven't provided a root password for me to read system vitals {env.title}! "
"Add the root password as an environment variable for me to read.")
return
logger.info('Fetching system vitals')
cpu_temp, gpu_temp, fan_speed, output = None, None, None, ""
# Tested on 10.13, 10.14, 11.6 and 12.3 versions
if not shared.hosted_device or not shared.hosted_device.get('os_version'):
logger.warning("hosted_device information was not loaded during startup. Reloading now.")
shared.hosted_device = hosted_device_info()
if packaging.version.parse(shared.hosted_device.get('os_version')) > packaging.version.parse('10.14'):
critical_info = [each.strip() for each in (os.popen(
f'echo {env.root_password} | sudo -S powermetrics --samplers smc -i1 -n1'
)).read().split('\n') if each != '']
support.flush_screen()
for info in critical_info:
if 'CPU die temperature' in info:
cpu_temp = info.strip('CPU die temperature: ').replace(' C', '').strip()
if 'GPU die temperature' in info:
gpu_temp = info.strip('GPU die temperature: ').replace(' C', '').strip()
if 'Fan' in info:
fan_speed = info.strip('Fan: ').replace(' rpm', '').strip()
else:
fan_speed = subprocess.check_output(
f'echo {env.root_password} | sudo -S spindump 1 1 -file /tmp/spindump.txt > /dev/null 2>&1;grep '
f'"Fan speed" /tmp/spindump.txt;sudo rm /tmp/spindump.txt', shell=True
).decode('utf-8')
if cpu_temp:
cpu = f'Your current average CPU temperature is ' \
f'{support.format_nos(input_=temperature.c2f(arg=support.extract_nos(input_=cpu_temp)))}' \
f'\N{DEGREE SIGN}F. '
output += cpu
speaker.speak(text=cpu)
if gpu_temp:
gpu = f'GPU temperature is {support.format_nos(temperature.c2f(support.extract_nos(gpu_temp)))}' \
f'\N{DEGREE SIGN}F. '
output += gpu
speaker.speak(text=gpu)
if fan_speed:
fan = f'Current fan speed is {support.format_nos(support.extract_nos(fan_speed))} RPM. '
output += fan
speaker.speak(text=fan)
restart_time = datetime.fromtimestamp(psutil.boot_time())
second = (datetime.now() - restart_time).total_seconds()
restart_time = datetime.strftime(restart_time, "%A, %B %d, at %I:%M %p")
restart_duration = support.time_converter(seconds=second)
output += f'Restarted on: {restart_time} - {restart_duration} ago from now.'
if shared.called_by_offline:
speaker.speak(text=output)
return
sys.stdout.write(f'\r{output}')
speaker.speak(text=f"Your {shared.hosted_device.get('device')} was last booted on {restart_time}. "
f"Current boot time is: {restart_duration}.")
if second >= 259_200: # 3 days
if boot_extreme := re.search('(.*) days', restart_duration):
warn = int(boot_extreme.group().replace(' days', '').strip())
speaker.speak(text=f"{env.title}! your {shared.hosted_device.get('device')} has been running for more "
f"than {warn} days. You must consider a reboot for better performance. Would you like "
f"me to restart it for you {env.title}?",
run=True)
response = listener.listen(timeout=3, phrase_limit=3)
if any(word in response.lower() for word in keywords.ok):
logger.info(f'JARVIS::Restarting {shared.hosted_device.get("device")}')
restart(target='PC_Proceed')
def hosted_device_info() -> dict:
"""Gets basic information of the hosted device.
Returns:
dict:
A dictionary of key-value pairs with device type, operating system, os version.
"""
if env.macos:
system_kernel = subprocess.check_output("sysctl hw.model", shell=True).decode('utf-8').splitlines()
device = support.extract_str(system_kernel[0].split(':')[1])
else:
device = subprocess.getoutput("WMIC CSPRODUCT GET VENDOR").replace('Vendor', '').strip()
platform_info = platform.platform(terse=True).split('-')
return {'device': device, 'os_name': platform_info[0], 'os_version': platform_info[1]} | executors/system.py | import os
import platform
import re
import shutil
import subprocess
import sys
from datetime import datetime
from typing import NoReturn
import packaging.version
import psutil
from executors.controls import restart
from executors.logger import logger
from modules.audio import listener, speaker
from modules.conditions import keywords
from modules.models import models
from modules.temperature import temperature
from modules.utils import shared, support
env = models.env
def system_info() -> NoReturn:
"""Tells the system configuration."""
total, used, free = shutil.disk_usage("/")
total = support.size_converter(byte_size=total)
used = support.size_converter(byte_size=used)
free = support.size_converter(byte_size=free)
ram = support.size_converter(byte_size=psutil.virtual_memory().total).replace('.0', '')
ram_used = support.size_converter(byte_size=psutil.virtual_memory().percent).replace(' B', ' %')
physical = psutil.cpu_count(logical=False)
logical = psutil.cpu_count(logical=True)
speaker.speak(text=f"You're running {platform.platform(terse=True)}, with {physical} physical cores and "
f"{logical} logical cores. Your physical drive capacity is {total}. You have used up {used} of "
f"space. Your free space is {free}. Your RAM capacity is {ram}. You are currently utilizing "
f"{ram_used} of your memory.")
def system_vitals() -> None:
"""Reads system vitals on macOS.
See Also:
- Jarvis will suggest a reboot if the system uptime is more than 2 days.
- If confirmed, invokes `restart <https://thevickypedia.github.io/Jarvis/#jarvis.restart>`__ function.
"""
output = ""
if env.macos:
if not env.root_password:
speaker.speak(text=f"You haven't provided a root password for me to read system vitals {env.title}! "
"Add the root password as an environment variable for me to read.")
return
logger.info('Fetching system vitals')
cpu_temp, gpu_temp, fan_speed, output = None, None, None, ""
# Tested on 10.13, 10.14, 11.6 and 12.3 versions
if not shared.hosted_device or not shared.hosted_device.get('os_version'):
logger.warning("hosted_device information was not loaded during startup. Reloading now.")
shared.hosted_device = hosted_device_info()
if packaging.version.parse(shared.hosted_device.get('os_version')) > packaging.version.parse('10.14'):
critical_info = [each.strip() for each in (os.popen(
f'echo {env.root_password} | sudo -S powermetrics --samplers smc -i1 -n1'
)).read().split('\n') if each != '']
support.flush_screen()
for info in critical_info:
if 'CPU die temperature' in info:
cpu_temp = info.strip('CPU die temperature: ').replace(' C', '').strip()
if 'GPU die temperature' in info:
gpu_temp = info.strip('GPU die temperature: ').replace(' C', '').strip()
if 'Fan' in info:
fan_speed = info.strip('Fan: ').replace(' rpm', '').strip()
else:
fan_speed = subprocess.check_output(
f'echo {env.root_password} | sudo -S spindump 1 1 -file /tmp/spindump.txt > /dev/null 2>&1;grep '
f'"Fan speed" /tmp/spindump.txt;sudo rm /tmp/spindump.txt', shell=True
).decode('utf-8')
if cpu_temp:
cpu = f'Your current average CPU temperature is ' \
f'{support.format_nos(input_=temperature.c2f(arg=support.extract_nos(input_=cpu_temp)))}' \
f'\N{DEGREE SIGN}F. '
output += cpu
speaker.speak(text=cpu)
if gpu_temp:
gpu = f'GPU temperature is {support.format_nos(temperature.c2f(support.extract_nos(gpu_temp)))}' \
f'\N{DEGREE SIGN}F. '
output += gpu
speaker.speak(text=gpu)
if fan_speed:
fan = f'Current fan speed is {support.format_nos(support.extract_nos(fan_speed))} RPM. '
output += fan
speaker.speak(text=fan)
restart_time = datetime.fromtimestamp(psutil.boot_time())
second = (datetime.now() - restart_time).total_seconds()
restart_time = datetime.strftime(restart_time, "%A, %B %d, at %I:%M %p")
restart_duration = support.time_converter(seconds=second)
output += f'Restarted on: {restart_time} - {restart_duration} ago from now.'
if shared.called_by_offline:
speaker.speak(text=output)
return
sys.stdout.write(f'\r{output}')
speaker.speak(text=f"Your {shared.hosted_device.get('device')} was last booted on {restart_time}. "
f"Current boot time is: {restart_duration}.")
if second >= 259_200: # 3 days
if boot_extreme := re.search('(.*) days', restart_duration):
warn = int(boot_extreme.group().replace(' days', '').strip())
speaker.speak(text=f"{env.title}! your {shared.hosted_device.get('device')} has been running for more "
f"than {warn} days. You must consider a reboot for better performance. Would you like "
f"me to restart it for you {env.title}?",
run=True)
response = listener.listen(timeout=3, phrase_limit=3)
if any(word in response.lower() for word in keywords.ok):
logger.info(f'JARVIS::Restarting {shared.hosted_device.get("device")}')
restart(target='PC_Proceed')
def hosted_device_info() -> dict:
"""Gets basic information of the hosted device.
Returns:
dict:
A dictionary of key-value pairs with device type, operating system, os version.
"""
if env.macos:
system_kernel = subprocess.check_output("sysctl hw.model", shell=True).decode('utf-8').splitlines()
device = support.extract_str(system_kernel[0].split(':')[1])
else:
device = subprocess.getoutput("WMIC CSPRODUCT GET VENDOR").replace('Vendor', '').strip()
platform_info = platform.platform(terse=True).split('-')
return {'device': device, 'os_name': platform_info[0], 'os_version': platform_info[1]} | 0.569374 | 0.204382 |
import unittest
from idblib import FileSection, binary_search, makeStringIO
class TestFileSection(unittest.TestCase):
""" unittest for FileSection object """
def test_file(self):
s = makeStringIO(b"0123456789abcdef")
fh = FileSection(s, 3, 11)
self.assertEqual(fh.read(3), b"345")
self.assertEqual(fh.read(8), b"6789a")
self.assertEqual(fh.read(8), b"")
fh.seek(-1, 2)
self.assertEqual(fh.read(8), b"a")
fh.seek(3)
self.assertEqual(fh.read(2), b"67")
fh.seek(-2, 1)
self.assertEqual(fh.read(2), b"67")
fh.seek(2, 1)
self.assertEqual(fh.read(2), b"a")
fh.seek(8)
self.assertEqual(fh.read(1), b"")
with self.assertRaises(Exception):
fh.seek(9)
class TestBinarySearch(unittest.TestCase):
""" unittests for binary_search """
class Object:
def __init__(self, num):
self.key = num
def __repr__(self):
return "o(%d)" % self.num
def test_bs(self):
obj = self.Object
lst = [obj(_) for _ in (2, 3, 5, 6)]
self.assertEqual(binary_search(lst, 1), -1)
self.assertEqual(binary_search(lst, 2), 0)
self.assertEqual(binary_search(lst, 3), 1)
self.assertEqual(binary_search(lst, 4), 1)
self.assertEqual(binary_search(lst, 5), 2)
self.assertEqual(binary_search(lst, 6), 3)
self.assertEqual(binary_search(lst, 7), 3)
def test_emptylist(self):
obj = self.Object
lst = []
self.assertEqual(binary_search(lst, 1), -1)
def test_oneelem(self):
obj = self.Object
lst = [obj(1)]
self.assertEqual(binary_search(lst, 0), -1)
self.assertEqual(binary_search(lst, 1), 0)
self.assertEqual(binary_search(lst, 2), 0)
def test_twoelem(self):
obj = self.Object
lst = [obj(1), obj(3)]
self.assertEqual(binary_search(lst, 0), -1)
self.assertEqual(binary_search(lst, 1), 0)
self.assertEqual(binary_search(lst, 2), 0)
self.assertEqual(binary_search(lst, 3), 1)
self.assertEqual(binary_search(lst, 4), 1)
def test_listsize(self):
obj = self.Object
for l in range(3, 32):
lst = [obj(_ + 1) for _ in range(l)]
lst = lst[:1] + lst[2:]
self.assertEqual(binary_search(lst, 0), -1)
self.assertEqual(binary_search(lst, 1), 0)
self.assertEqual(binary_search(lst, 2), 0)
self.assertEqual(binary_search(lst, 3), 1)
self.assertEqual(binary_search(lst, l - 1), l - 3)
self.assertEqual(binary_search(lst, l), l - 2)
self.assertEqual(binary_search(lst, l + 1), l - 2)
self.assertEqual(binary_search(lst, l + 2), l - 2) | test_idblib.py | import unittest
from idblib import FileSection, binary_search, makeStringIO
class TestFileSection(unittest.TestCase):
""" unittest for FileSection object """
def test_file(self):
s = makeStringIO(b"0123456789abcdef")
fh = FileSection(s, 3, 11)
self.assertEqual(fh.read(3), b"345")
self.assertEqual(fh.read(8), b"6789a")
self.assertEqual(fh.read(8), b"")
fh.seek(-1, 2)
self.assertEqual(fh.read(8), b"a")
fh.seek(3)
self.assertEqual(fh.read(2), b"67")
fh.seek(-2, 1)
self.assertEqual(fh.read(2), b"67")
fh.seek(2, 1)
self.assertEqual(fh.read(2), b"a")
fh.seek(8)
self.assertEqual(fh.read(1), b"")
with self.assertRaises(Exception):
fh.seek(9)
class TestBinarySearch(unittest.TestCase):
""" unittests for binary_search """
class Object:
def __init__(self, num):
self.key = num
def __repr__(self):
return "o(%d)" % self.num
def test_bs(self):
obj = self.Object
lst = [obj(_) for _ in (2, 3, 5, 6)]
self.assertEqual(binary_search(lst, 1), -1)
self.assertEqual(binary_search(lst, 2), 0)
self.assertEqual(binary_search(lst, 3), 1)
self.assertEqual(binary_search(lst, 4), 1)
self.assertEqual(binary_search(lst, 5), 2)
self.assertEqual(binary_search(lst, 6), 3)
self.assertEqual(binary_search(lst, 7), 3)
def test_emptylist(self):
obj = self.Object
lst = []
self.assertEqual(binary_search(lst, 1), -1)
def test_oneelem(self):
obj = self.Object
lst = [obj(1)]
self.assertEqual(binary_search(lst, 0), -1)
self.assertEqual(binary_search(lst, 1), 0)
self.assertEqual(binary_search(lst, 2), 0)
def test_twoelem(self):
obj = self.Object
lst = [obj(1), obj(3)]
self.assertEqual(binary_search(lst, 0), -1)
self.assertEqual(binary_search(lst, 1), 0)
self.assertEqual(binary_search(lst, 2), 0)
self.assertEqual(binary_search(lst, 3), 1)
self.assertEqual(binary_search(lst, 4), 1)
def test_listsize(self):
obj = self.Object
for l in range(3, 32):
lst = [obj(_ + 1) for _ in range(l)]
lst = lst[:1] + lst[2:]
self.assertEqual(binary_search(lst, 0), -1)
self.assertEqual(binary_search(lst, 1), 0)
self.assertEqual(binary_search(lst, 2), 0)
self.assertEqual(binary_search(lst, 3), 1)
self.assertEqual(binary_search(lst, l - 1), l - 3)
self.assertEqual(binary_search(lst, l), l - 2)
self.assertEqual(binary_search(lst, l + 1), l - 2)
self.assertEqual(binary_search(lst, l + 2), l - 2) | 0.594551 | 0.47658 |
import json
import pandas as pd
import numpy as np
import os
from os.path import isfile, join
import datetime
import matplotlib.pyplot as plt
project_dir = os.path.dirname(os.path.dirname(os.getcwd()))
out_path = os.path.join(project_dir, "data", "pecan")
fig_path = os.path.join(project_dir, "data", "pecan_info")
def plot_date_range(dates):
for i, (id, dr) in enumerate(dates.items()):
dr = [datetime.datetime.strptime(x, "%Y-%m-%d %H:%M:%S") for x in dr]
plt.plot(dr, [i, i], 'b')
plt.show()
def compute_plot_date_ranges(
compute_date_ranges=False,
resolution=15,
col=['use']
):
col_names = "_".join(sorted(col)) if col else "all_col"
data_dir = "{}min_".format(resolution) + col_names
data_path = os.path.join(out_path, data_dir)
house_files = [f for f in os.listdir(data_path) if isfile(join(data_path, f))]
house_ids = [x.split(".")[0] for x in house_files]
if compute_date_ranges:
date_ranges = {}
print(len(house_ids))
for i, dataid in enumerate(house_ids):
print(i)
df = pd.read_csv(os.path.join(data_path, "{}.csv".format(dataid)), index_col=False)
date_ranges[str(dataid)] = (df["localtime"][len(df)-1], df["localtime"][0])
with open(os.path.join(out_path, "date-ranges_{}.json".format(data_dir)), 'w') as fo:
json.dump(date_ranges, fo)
else:
with open(os.path.join(out_path, "date-ranges_{}.json".format(data_dir)), 'r') as f:
date_ranges = json.load(f)
print(date_ranges)
plot_date_range(date_ranges)
def plot_days_and_nans(
resolution=60,
col=['use'],
plottype=None
):
col_names = "_".join(sorted(col)) if col else "all_col"
f_name = os.path.join(out_path, "nans_{}min_{}.json".format(resolution, col_names))
with open(f_name, 'r') as f:
nan_in_day = json.load(f)
to_date = np.vectorize(lambda x: datetime.datetime.strptime(x, "%Y-%m-%d"))
if plottype == 'heatmap':
idx = pd.date_range(start='1/1/2012', end='12/31/2019', freq='D')
df_all = []
print(len(nan_in_day))
for i, (id, date_nan_dict) in enumerate(nan_in_day.items()):
# print(i)
if plottype == 'heatmap':
nans = 1 - 0.4*np.array(date_nan_dict["nans"])
df = pd.DataFrame(
data=nans,
index=to_date(date_nan_dict["date"]),
columns=['nans']
)
# fill with nan, append
df = df.reindex(idx, fill_value=0)
# make double, such that pixels are a bit wider
df_all += [df]*4
else:
nans = np.array(date_nan_dict["nans"])
dates = to_date(date_nan_dict["date"])
not_nan_dates = dates[np.equal(nans, 0)]
nan_dates = dates[np.equal(nans, 1)]
plt.plot(not_nan_dates, i*np.ones(len(not_nan_dates)), 'b,', markersize=2)
plt.plot(nan_dates, i*np.ones(len(nan_dates)), 'r,', markersize=2)
if plottype == 'heatmap':
df_all = pd.concat(df_all, axis=1)
a = np.transpose(df_all.values)
plt.imshow(a, cmap='gray', vmin=0, vmax=1)
plt.imsave(os.path.join(fig_path, "data_validity_over_time.png"), a,
dpi=600, cmap='gray', vmin=0, vmax=1)
plt.show()
def plot_complete_days_hist(
resolution=60,
col=['use'],
):
col_names = "_".join(sorted(col)) if col else "all_col"
f_name = os.path.join(out_path, "nans_{}min_{}.json".format(resolution, col_names))
with open(f_name, 'r') as f:
nan_in_day = json.load(f)
completes = []
print(len(nan_in_day))
for i, (id, date_nan_dict) in enumerate(nan_in_day.items()):
# print(i)
complete = len(date_nan_dict["nans"]) - sum(date_nan_dict["nans"])
completes.append(complete)
plt.hist(completes, bins=50)
plt.savefig(os.path.join(fig_path, "complete_days_per_household.png"), dpi=600)
plt.show()
def plot_daily_energy_hist():
df = pd.read_csv(os.path.join(out_path, "combined_60min_use"))
x = df.values[:, 1:25]
daily = np.sum(x, axis=1).astype(np.int)
print("plotting")
plt.hist(daily, bins=100, range=(0, 200))
plt.savefig(os.path.join(fig_path, "daily_energy_hist_200max.png"), dpi=600)
plt.show()
def main():
# compute_plot_date_ranges(compute_date_ranges=False)
# plot_days_and_nans(plottype='heatmap')
# plot_complete_days_hist()
plot_daily_energy_hist()
if __name__ == "__main__":
main() | load_data/anlyse_ps_data.py | import json
import pandas as pd
import numpy as np
import os
from os.path import isfile, join
import datetime
import matplotlib.pyplot as plt
project_dir = os.path.dirname(os.path.dirname(os.getcwd()))
out_path = os.path.join(project_dir, "data", "pecan")
fig_path = os.path.join(project_dir, "data", "pecan_info")
def plot_date_range(dates):
for i, (id, dr) in enumerate(dates.items()):
dr = [datetime.datetime.strptime(x, "%Y-%m-%d %H:%M:%S") for x in dr]
plt.plot(dr, [i, i], 'b')
plt.show()
def compute_plot_date_ranges(
compute_date_ranges=False,
resolution=15,
col=['use']
):
col_names = "_".join(sorted(col)) if col else "all_col"
data_dir = "{}min_".format(resolution) + col_names
data_path = os.path.join(out_path, data_dir)
house_files = [f for f in os.listdir(data_path) if isfile(join(data_path, f))]
house_ids = [x.split(".")[0] for x in house_files]
if compute_date_ranges:
date_ranges = {}
print(len(house_ids))
for i, dataid in enumerate(house_ids):
print(i)
df = pd.read_csv(os.path.join(data_path, "{}.csv".format(dataid)), index_col=False)
date_ranges[str(dataid)] = (df["localtime"][len(df)-1], df["localtime"][0])
with open(os.path.join(out_path, "date-ranges_{}.json".format(data_dir)), 'w') as fo:
json.dump(date_ranges, fo)
else:
with open(os.path.join(out_path, "date-ranges_{}.json".format(data_dir)), 'r') as f:
date_ranges = json.load(f)
print(date_ranges)
plot_date_range(date_ranges)
def plot_days_and_nans(
resolution=60,
col=['use'],
plottype=None
):
col_names = "_".join(sorted(col)) if col else "all_col"
f_name = os.path.join(out_path, "nans_{}min_{}.json".format(resolution, col_names))
with open(f_name, 'r') as f:
nan_in_day = json.load(f)
to_date = np.vectorize(lambda x: datetime.datetime.strptime(x, "%Y-%m-%d"))
if plottype == 'heatmap':
idx = pd.date_range(start='1/1/2012', end='12/31/2019', freq='D')
df_all = []
print(len(nan_in_day))
for i, (id, date_nan_dict) in enumerate(nan_in_day.items()):
# print(i)
if plottype == 'heatmap':
nans = 1 - 0.4*np.array(date_nan_dict["nans"])
df = pd.DataFrame(
data=nans,
index=to_date(date_nan_dict["date"]),
columns=['nans']
)
# fill with nan, append
df = df.reindex(idx, fill_value=0)
# make double, such that pixels are a bit wider
df_all += [df]*4
else:
nans = np.array(date_nan_dict["nans"])
dates = to_date(date_nan_dict["date"])
not_nan_dates = dates[np.equal(nans, 0)]
nan_dates = dates[np.equal(nans, 1)]
plt.plot(not_nan_dates, i*np.ones(len(not_nan_dates)), 'b,', markersize=2)
plt.plot(nan_dates, i*np.ones(len(nan_dates)), 'r,', markersize=2)
if plottype == 'heatmap':
df_all = pd.concat(df_all, axis=1)
a = np.transpose(df_all.values)
plt.imshow(a, cmap='gray', vmin=0, vmax=1)
plt.imsave(os.path.join(fig_path, "data_validity_over_time.png"), a,
dpi=600, cmap='gray', vmin=0, vmax=1)
plt.show()
def plot_complete_days_hist(
resolution=60,
col=['use'],
):
col_names = "_".join(sorted(col)) if col else "all_col"
f_name = os.path.join(out_path, "nans_{}min_{}.json".format(resolution, col_names))
with open(f_name, 'r') as f:
nan_in_day = json.load(f)
completes = []
print(len(nan_in_day))
for i, (id, date_nan_dict) in enumerate(nan_in_day.items()):
# print(i)
complete = len(date_nan_dict["nans"]) - sum(date_nan_dict["nans"])
completes.append(complete)
plt.hist(completes, bins=50)
plt.savefig(os.path.join(fig_path, "complete_days_per_household.png"), dpi=600)
plt.show()
def plot_daily_energy_hist():
df = pd.read_csv(os.path.join(out_path, "combined_60min_use"))
x = df.values[:, 1:25]
daily = np.sum(x, axis=1).astype(np.int)
print("plotting")
plt.hist(daily, bins=100, range=(0, 200))
plt.savefig(os.path.join(fig_path, "daily_energy_hist_200max.png"), dpi=600)
plt.show()
def main():
# compute_plot_date_ranges(compute_date_ranges=False)
# plot_days_and_nans(plottype='heatmap')
# plot_complete_days_hist()
plot_daily_energy_hist()
if __name__ == "__main__":
main() | 0.252661 | 0.245893 |
import os
import pickle
import warnings
from typing import Any, Dict, Optional, Union
import gym
import numpy as np
from stable_baselines3.common.callbacks import EventCallback, BaseCallback
from stable_baselines3.common.evaluation import evaluate_policy
from stable_baselines3.common.vec_env import DummyVecEnv, VecEnv, sync_envs_normalization
from stable_baselines_ex.common.vec_normalize_ex import VecNormalizeEx
from stable_baselines_ex.rg.evaluation_rg import evaluate_policy_rg
class EvalCallbackEx(EventCallback):
"""
Callback for evaluating an agent.
:param eval_env: The environment used for initialization
:param callback_on_new_best: Callback to trigger
when there is a new best model according to the ``mean_reward``
:param n_eval_episodes: The number of episodes to test the agent
:param eval_freq: Evaluate the agent every eval_freq call of the callback.
:param log_path: Path to a folder where the evaluations (``evaluations.npz``)
will be saved. It will be updated at each evaluation.
:param best_model_save_path: Path to a folder where the best model
according to performance on the eval env will be saved.
:param deterministic: Whether the evaluation should
use a stochastic or deterministic actions.
:param render: Whether to render or not the environment during evaluation
:param verbose:
:param warn: Passed to ``evaluate_policy`` (warns if ``eval_env`` has not been
wrapped with a Monitor wrapper)
"""
def __init__(
self,
eval_env: Union[gym.Env, VecEnv],
callback_on_new_best: Optional[BaseCallback] = None,
n_eval_episodes: int = 5,
eval_freq: int = 10000,
log_path: str = None,
best_model_save_path: str = None,
deterministic: bool = True,
render: bool = False,
verbose: int = 1,
warn: bool = True,
eval_func=evaluate_policy_rg,
):
super().__init__(callback_on_new_best, verbose=verbose)
self.n_eval_episodes = n_eval_episodes
self.eval_freq = eval_freq
self.best_mean_reward = -np.inf
self.last_mean_reward = -np.inf
self.deterministic = deterministic
self.render = render
self.warn = warn
# Convert to VecEnv for consistency
if not isinstance(eval_env, VecEnv):
eval_env = DummyVecEnv([lambda: eval_env])
if isinstance(eval_env, VecEnv):
assert eval_env.num_envs == 1, "You must pass only one environment for evaluation"
self.eval_env = eval_env
self.best_model_save_path = best_model_save_path
# Logs will be written in ``evaluations.npz``
if log_path is not None:
log_path = os.path.join(log_path, "evaluations")
self.log_path = log_path
self.evaluations_results = []
self.evaluations_timesteps = []
self.evaluations_length = []
# For computing success rate
self._is_success_buffer = []
self.evaluations_successes = []
self.eval_func = eval_func
def _init_callback(self) -> None:
# Does not work in some corner cases, where the wrapper is not the same
if not isinstance(self.training_env, type(self.eval_env)):
warnings.warn("Training and eval env are not of the same type" f"{self.training_env} != {self.eval_env}")
# Create folders if needed
if self.best_model_save_path is not None:
os.makedirs(self.best_model_save_path, exist_ok=True)
if self.log_path is not None:
os.makedirs(os.path.dirname(self.log_path), exist_ok=True)
def _log_success_callback(self, locals_: Dict[str, Any], globals_: Dict[str, Any]) -> None:
"""
Callback passed to the ``evaluate_policy`` function
in order to log the success rate (when applicable),
for instance when using HER.
:param locals_:
:param globals_:
"""
info = locals_["info"]
# VecEnv: unpack
if not isinstance(info, dict):
info = info[0]
if locals_["done"]:
maybe_is_success = info.get("is_success")
if maybe_is_success is not None:
self._is_success_buffer.append(maybe_is_success)
def _on_step(self) -> bool:
if self.eval_freq > 0 and self.n_calls % self.eval_freq == 0:
# Sync training and eval env if there is VecNormalize
sync_envs_normalization(self.training_env, self.eval_env)
# Reset success rate buffer
self._is_success_buffer = []
episode_rewards, episode_lengths = self.eval_func(
self.model,
self.eval_env,
n_eval_episodes=self.n_eval_episodes,
render=self.render,
deterministic=self.deterministic,
return_episode_rewards=True,
warn=self.warn,
callback=self._log_success_callback,
)
if self.log_path is not None:
self.evaluations_timesteps.append(self.num_timesteps)
self.evaluations_results.append(episode_rewards)
self.evaluations_length.append(episode_lengths)
kwargs = {}
# Save success log if present
if len(self._is_success_buffer) > 0:
self.evaluations_successes.append(self._is_success_buffer)
kwargs = dict(successes=self.evaluations_successes)
np.savez(
self.log_path,
timesteps=self.evaluations_timesteps,
results=self.evaluations_results,
ep_lengths=self.evaluations_length,
**kwargs,
)
mean_reward, std_reward = np.mean(episode_rewards), np.std(episode_rewards)
mean_ep_length, std_ep_length = np.mean(episode_lengths), np.std(episode_lengths)
self.last_mean_reward = mean_reward
if self.verbose > 0:
print(
f"Eval num_timesteps={self.num_timesteps}, " f"episode_reward={mean_reward:.2f} +/- {std_reward:.2f}")
print(f"Episode length: {mean_ep_length:.2f} +/- {std_ep_length:.2f}")
# Add to current Logger
self.logger.record("eval/mean_reward", float(mean_reward))
self.logger.record("eval/mean_ep_length", mean_ep_length)
if len(self._is_success_buffer) > 0:
success_rate = np.mean(self._is_success_buffer)
if self.verbose > 0:
print(f"Success rate: {100 * success_rate:.2f}%")
self.logger.record("eval/success_rate", success_rate)
if mean_reward > self.best_mean_reward:
if self.verbose > 0:
print("New best mean reward!")
if self.best_model_save_path is not None:
self.model.save(os.path.join(self.best_model_save_path, "best_model"))
self.best_mean_reward = mean_reward
# Trigger callback if needed
if self.callback is not None:
return self._on_event()
return True
def update_child_locals(self, locals_: Dict[str, Any]) -> None:
"""
Update the references to the local variables.
:param locals_: the local variables during rollout collection
"""
if self.callback:
self.callback.update_locals(locals_)
class CheckpointCallbackEx(BaseCallback):
"""
Callback for saving a model every ``save_freq`` steps
:param save_freq:
:param save_path: Path to the folder where the model will be saved.
:param name_prefix: Common prefix to the saved models
:param verbose:
"""
def __init__(self, save_freq: int, save_path: str, name_prefix: str = "rl_model", verbose: int = 0):
super().__init__(verbose)
self.save_freq = save_freq
self.save_path = save_path
self.name_prefix = name_prefix
def _init_callback(self) -> None:
# Create folder if needed
if self.save_path is not None:
os.makedirs(self.save_path, exist_ok=True)
def _on_step(self) -> bool:
if self.n_calls % self.save_freq == 0:
path = os.path.join(self.save_path, f"{self.name_prefix}_{self.num_timesteps}_steps")
self.model.save(path)
# Additionally save norm_data
env = self.model.env
norm_data = dict()
if isinstance(env, VecNormalizeEx):
norm_data['vn_obs_rms'] = env.obs_rms
norm_data['vn_ret_rms'] = env.ret_rms
venv = env.venv
else:
venv = env
rg_obs_rmss = []
for rg_env in venv.envs:
rg_obs_rmss.append(rg_env.obs_rms)
norm_data['rg_obs_rmss'] = rg_obs_rmss
with open(f'{path}_norm_data.pkl', 'wb') as f:
pickle.dump(norm_data, f)
print(f"Saving model checkpoint to {path}")
return True | sb3/stable_baselines_ex/common/callbacks_ex.py | import os
import pickle
import warnings
from typing import Any, Dict, Optional, Union
import gym
import numpy as np
from stable_baselines3.common.callbacks import EventCallback, BaseCallback
from stable_baselines3.common.evaluation import evaluate_policy
from stable_baselines3.common.vec_env import DummyVecEnv, VecEnv, sync_envs_normalization
from stable_baselines_ex.common.vec_normalize_ex import VecNormalizeEx
from stable_baselines_ex.rg.evaluation_rg import evaluate_policy_rg
class EvalCallbackEx(EventCallback):
    """
    Callback for evaluating an agent.

    :param eval_env: The environment used for initialization
    :param callback_on_new_best: Callback to trigger
        when there is a new best model according to the ``mean_reward``
    :param n_eval_episodes: The number of episodes to test the agent
    :param eval_freq: Evaluate the agent every eval_freq call of the callback.
    :param log_path: Path to a folder where the evaluations (``evaluations.npz``)
        will be saved. It will be updated at each evaluation.
    :param best_model_save_path: Path to a folder where the best model
        according to performance on the eval env will be saved.
    :param deterministic: Whether the evaluation should
        use a stochastic or deterministic actions.
    :param render: Whether to render or not the environment during evaluation
    :param verbose:
    :param warn: Passed to ``evaluate_policy`` (warns if ``eval_env`` has not been
        wrapped with a Monitor wrapper)
    :param eval_func: Evaluation function with the same call signature as
        ``evaluate_policy``; defaults to ``evaluate_policy_rg``.
    """

    def __init__(
        self,
        eval_env: Union[gym.Env, VecEnv],
        callback_on_new_best: Optional[BaseCallback] = None,
        n_eval_episodes: int = 5,
        eval_freq: int = 10000,
        log_path: Optional[str] = None,
        best_model_save_path: Optional[str] = None,
        deterministic: bool = True,
        render: bool = False,
        verbose: int = 1,
        warn: bool = True,
        eval_func=evaluate_policy_rg,
    ):
        super().__init__(callback_on_new_best, verbose=verbose)
        self.n_eval_episodes = n_eval_episodes
        self.eval_freq = eval_freq
        # Start at -inf so the very first evaluation always becomes the "best".
        self.best_mean_reward = -np.inf
        self.last_mean_reward = -np.inf
        self.deterministic = deterministic
        self.render = render
        self.warn = warn
        # Convert to VecEnv for consistency
        if not isinstance(eval_env, VecEnv):
            eval_env = DummyVecEnv([lambda: eval_env])
        if isinstance(eval_env, VecEnv):
            assert eval_env.num_envs == 1, "You must pass only one environment for evaluation"
        self.eval_env = eval_env
        self.best_model_save_path = best_model_save_path
        # Logs will be written in ``evaluations.npz``
        if log_path is not None:
            log_path = os.path.join(log_path, "evaluations")
        self.log_path = log_path
        # Per-evaluation history, appended to on every evaluation round.
        self.evaluations_results = []
        self.evaluations_timesteps = []
        self.evaluations_length = []
        # For computing success rate
        self._is_success_buffer = []
        self.evaluations_successes = []
        self.eval_func = eval_func

    def _init_callback(self) -> None:
        """Warn on mismatched env wrapper types and create output folders."""
        # Does not work in some corner cases, where the wrapper is not the same
        if not isinstance(self.training_env, type(self.eval_env)):
            warnings.warn("Training and eval env are not of the same type" f"{self.training_env} != {self.eval_env}")
        # Create folders if needed
        if self.best_model_save_path is not None:
            os.makedirs(self.best_model_save_path, exist_ok=True)
        if self.log_path is not None:
            os.makedirs(os.path.dirname(self.log_path), exist_ok=True)

    def _log_success_callback(self, locals_: Dict[str, Any], globals_: Dict[str, Any]) -> None:
        """
        Callback passed to the ``evaluate_policy`` function
        in order to log the success rate (when applicable),
        for instance when using HER.

        :param locals_:
        :param globals_:
        """
        info = locals_["info"]
        # VecEnv: unpack
        if not isinstance(info, dict):
            info = info[0]
        if locals_["done"]:
            # Only count episodes that actually expose an ``is_success`` flag.
            maybe_is_success = info.get("is_success")
            if maybe_is_success is not None:
                self._is_success_buffer.append(maybe_is_success)

    def _on_step(self) -> bool:
        """Every ``eval_freq`` calls: evaluate, log, and save the best model."""
        if self.eval_freq > 0 and self.n_calls % self.eval_freq == 0:
            # Sync training and eval env if there is VecNormalize
            sync_envs_normalization(self.training_env, self.eval_env)
            # Reset success rate buffer
            self._is_success_buffer = []
            episode_rewards, episode_lengths = self.eval_func(
                self.model,
                self.eval_env,
                n_eval_episodes=self.n_eval_episodes,
                render=self.render,
                deterministic=self.deterministic,
                return_episode_rewards=True,
                warn=self.warn,
                callback=self._log_success_callback,
            )
            if self.log_path is not None:
                self.evaluations_timesteps.append(self.num_timesteps)
                self.evaluations_results.append(episode_rewards)
                self.evaluations_length.append(episode_lengths)
                kwargs = {}
                # Save success log if present
                if len(self._is_success_buffer) > 0:
                    self.evaluations_successes.append(self._is_success_buffer)
                    kwargs = dict(successes=self.evaluations_successes)
                np.savez(
                    self.log_path,
                    timesteps=self.evaluations_timesteps,
                    results=self.evaluations_results,
                    ep_lengths=self.evaluations_length,
                    **kwargs,
                )
            mean_reward, std_reward = np.mean(episode_rewards), np.std(episode_rewards)
            mean_ep_length, std_ep_length = np.mean(episode_lengths), np.std(episode_lengths)
            self.last_mean_reward = mean_reward
            if self.verbose > 0:
                print(
                    f"Eval num_timesteps={self.num_timesteps}, " f"episode_reward={mean_reward:.2f} +/- {std_reward:.2f}")
                print(f"Episode length: {mean_ep_length:.2f} +/- {std_ep_length:.2f}")
            # Add to current Logger
            self.logger.record("eval/mean_reward", float(mean_reward))
            self.logger.record("eval/mean_ep_length", mean_ep_length)
            if len(self._is_success_buffer) > 0:
                success_rate = np.mean(self._is_success_buffer)
                if self.verbose > 0:
                    print(f"Success rate: {100 * success_rate:.2f}%")
                self.logger.record("eval/success_rate", success_rate)
            if mean_reward > self.best_mean_reward:
                if self.verbose > 0:
                    print("New best mean reward!")
                if self.best_model_save_path is not None:
                    self.model.save(os.path.join(self.best_model_save_path, "best_model"))
                self.best_mean_reward = mean_reward
                # Trigger callback if needed
                if self.callback is not None:
                    return self._on_event()
        return True

    def update_child_locals(self, locals_: Dict[str, Any]) -> None:
        """
        Update the references to the local variables.

        :param locals_: the local variables during rollout collection
        """
        if self.callback:
            self.callback.update_locals(locals_)
class CheckpointCallbackEx(BaseCallback):
    """
    Callback for saving a model every ``save_freq`` steps, together with the
    normalization statistics needed to restore it.

    :param save_freq: Save the model every ``save_freq`` calls of the callback.
        A value <= 0 disables checkpointing.
    :param save_path: Path to the folder where the model will be saved.
    :param name_prefix: Common prefix to the saved models
    :param verbose:
    """

    def __init__(self, save_freq: int, save_path: str, name_prefix: str = "rl_model", verbose: int = 0):
        super().__init__(verbose)
        self.save_freq = save_freq
        self.save_path = save_path
        self.name_prefix = name_prefix

    def _init_callback(self) -> None:
        # Create folder if needed
        if self.save_path is not None:
            os.makedirs(self.save_path, exist_ok=True)

    def _on_step(self) -> bool:
        # Bug fix: guard ``save_freq > 0`` -- ``n_calls % 0`` raised
        # ZeroDivisionError on every step when checkpointing was disabled.
        if self.save_freq > 0 and self.n_calls % self.save_freq == 0:
            path = os.path.join(self.save_path, f"{self.name_prefix}_{self.num_timesteps}_steps")
            self.model.save(path)
            # Additionally save normalization data so the checkpoint can be
            # reloaded with the same observation/return scaling.
            env = self.model.env
            norm_data = dict()
            if isinstance(env, VecNormalizeEx):
                norm_data['vn_obs_rms'] = env.obs_rms
                norm_data['vn_ret_rms'] = env.ret_rms
                venv = env.venv
            else:
                venv = env
            # NOTE(review): assumes every wrapped env exposes ``obs_rms`` -- confirm.
            rg_obs_rmss = []
            for rg_env in venv.envs:
                rg_obs_rmss.append(rg_env.obs_rms)
            norm_data['rg_obs_rmss'] = rg_obs_rmss
            with open(f'{path}_norm_data.pkl', 'wb') as f:
                pickle.dump(norm_data, f)
            print(f"Saving model checkpoint to {path}")
        return True
import os
from enum import Enum
from typing import List, Optional
import PIL.Image
import matplotlib
import numpy
import tensorflow as tf
from matplotlib import pyplot as plt
TFHUB_MODEL_LOAD_FORMAT = 'TFHUB_MODEL_LOAD_FORMAT'
COMPRESSED = 'COMPRESSED'
FIGSIZE = 'figure.figsize'
GRID = 'axes.grid'
INPUT_PATH = 'Nemupan_1.jpg'
INPUT_URL = 'https://pm1.narvii.com/6514/5f77eb6ef6f5197a67129e2237c9cd0f3dbe1ea5_00.jpg'
STYLE_PATH = 'rigel_1.png'
STYLE_URL = 'https://64.media.tumblr.com/7238a34a8a2e3ed1e7d3115b0c443713' \
'/tumblr_phqolyDhB81v0eujyo2_r2_1280.png'
class Keys(str, Enum):
STYLE = "STYLE"
CONTENT = "CONTENT"
def mpl_setup():
"""
Configures matplotlib to show images
"""
os.environ[TFHUB_MODEL_LOAD_FORMAT] = COMPRESSED
matplotlib.rcParams[FIGSIZE] = (12, 12)
matplotlib.rcParams[GRID] = False
def tensor_to_image(tensor: tf.Tensor):
"""
Creates an image from a tensor.
:return: the created image
"""
tensor *= 255
tensor = numpy.array(tensor.dtype, numpy.uint8)
if numpy.ndim(tensor) > 3:
assert tensor.shape[0] == 1
tensor = tensor[0]
return PIL.Image.fromarray(tensor)
def gram_matrix(input_tensor: tf.Tensor):
"""
Computes the Gram matrix of a tensor.
"""
result = tf.linalg.einsum("bijc,bijd->bcd", input_tensor, input_tensor)
input_shape = tf.shape(input_tensor)
num_locations = tf.cast(input_shape[1] * input_shape[2], tf.float32)
return result / num_locations
def vgg_layers(layer_names: List[str]):
""" Creates a vgg model that returns a list of intermediate output values."""
# Load our model. Load pretrained VGG, trained on imagenet data
vgg = tf.keras.applications.VGG19(include_top=False, weights='imagenet')
vgg.trainable = False
outputs = [vgg.get_layer(name).output for name in layer_names]
model = tf.keras.Model([vgg.input], outputs)
return model
class Picture:
"""
Helper class to handle images.
"""
__filename: str
__caption: str
__content: Optional[tf.Tensor]
def __init__(self, filename: str, origin: str, caption: str = ""):
"""
Configures the metadata of an image.
:param filename:
the name of the file to store the image
:param origin:
the url to download the image
:param caption:
an optional caption to show in visualization
"""
self.__filename = filename
self.__caption = caption
self.__origin = origin
self.__content = None
def download(self) -> None:
"""
Downloads an image and adds it to this group
"""
self.__filename = tf.keras.utils.get_file(self.__filename, self.__origin)
def load(self) -> None:
"""
Reads the contents of the image as a tensor.
"""
max_dim = 512
img = tf.io.read_file(self.__filename)
img = tf.image.decode_image(img, channels=3)
img = tf.image.convert_image_dtype(img, tf.float32)
shape = tf.cast(tf.shape(img)[:-1], tf.float32)
long_dim = max(shape)
scale = max_dim / long_dim
new_shape = tf.cast(shape * scale, tf.int32)
img = tf.image.resize(img, new_shape)
img = img[tf.newaxis, :]
self.__content = img
def setup_plot(self) -> None:
"""
Configures the image to be displayed.
"""
img = self.__content
if len(img.shape) > 3:
img = tf.squeeze(img, axis=0)
plt.imshow(img)
if self.__caption:
plt.title(self.__caption)
def visualize(self) -> None:
"""
Downloads and configures the image to be displayed.
"""
self.download()
self.load()
self.setup_plot()
@property
def content(self) -> tf.Tensor:
return self.__content
CONTENT_LAYERS = ['block5_conv2']
STYLE_LAYERS = ['block1_conv1',
'block2_conv1',
'block3_conv1',
'block4_conv1',
'block5_conv1']
def clip_01(image):
return tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=1.0) | src/main/python/utils.py | import os
from enum import Enum
from typing import List, Optional
import PIL.Image
import matplotlib
import numpy
import tensorflow as tf
from matplotlib import pyplot as plt
# Environment variable controlling how TF-Hub models are loaded, and its value.
TFHUB_MODEL_LOAD_FORMAT = 'TFHUB_MODEL_LOAD_FORMAT'
COMPRESSED = 'COMPRESSED'
# matplotlib ``rcParams`` keys used by ``mpl_setup``.
FIGSIZE = 'figure.figsize'
GRID = 'axes.grid'
# Default content image: local cache filename and download URL.
INPUT_PATH = 'Nemupan_1.jpg'
INPUT_URL = 'https://pm1.narvii.com/6514/5f77eb6ef6f5197a67129e2237c9cd0f3dbe1ea5_00.jpg'
# Default style image: local cache filename and download URL.
STYLE_PATH = 'rigel_1.png'
STYLE_URL = 'https://64.media.tumblr.com/7238a34a8a2e3ed1e7d3115b0c443713' \
            '/tumblr_phqolyDhB81v0eujyo2_r2_1280.png'
class Keys(str, Enum):
    """Identifiers for the two image roles used in style transfer."""
    STYLE = "STYLE"
    CONTENT = "CONTENT"
def mpl_setup():
    """
    Configures matplotlib to show images.
    """
    os.environ[TFHUB_MODEL_LOAD_FORMAT] = COMPRESSED
    # Large default figure size, no grid lines over images.
    for option, value in ((FIGSIZE, (12, 12)), (GRID, False)):
        matplotlib.rcParams[option] = value
def tensor_to_image(tensor: tf.Tensor):
    """
    Creates an image from a [0, 1]-valued tensor.

    :param tensor: image tensor with values in [0, 1]; a leading batch
        dimension of size 1, if present, is squeezed away.
    :return: the created ``PIL.Image``
    """
    tensor = tensor * 255
    # Bug fix: convert the tensor *values* to uint8. The previous code called
    # ``numpy.array(tensor.dtype, numpy.uint8)``, passing the dtype object as
    # the array data instead of the tensor itself.
    tensor = numpy.array(tensor, dtype=numpy.uint8)
    if numpy.ndim(tensor) > 3:
        assert tensor.shape[0] == 1
        tensor = tensor[0]
    return PIL.Image.fromarray(tensor)
def gram_matrix(input_tensor: tf.Tensor):
    """
    Computes the Gram matrix of a tensor, averaged over spatial locations.
    """
    # Feature correlations across channels: (batch, c, d).
    gram = tf.linalg.einsum("bijc,bijd->bcd", input_tensor, input_tensor)
    shape = tf.shape(input_tensor)
    location_count = tf.cast(shape[1] * shape[2], tf.float32)
    return gram / location_count
def vgg_layers(layer_names: List[str]):
    """Creates a vgg model that returns a list of intermediate output values."""
    # Pretrained VGG19 feature extractor (ImageNet weights), frozen.
    backbone = tf.keras.applications.VGG19(include_top=False, weights='imagenet')
    backbone.trainable = False
    selected = [backbone.get_layer(name).output for name in layer_names]
    return tf.keras.Model([backbone.input], selected)
class Picture:
    """
    Helper class to handle images.
    """
    # Local filename backing the image (replaced by the cache path on download).
    __filename: str
    # Optional caption shown by ``setup_plot``.
    __caption: str
    # Lazily-loaded image tensor, shape (1, H, W, 3), float32 in [0, 1].
    __content: Optional[tf.Tensor]

    def __init__(self, filename: str, origin: str, caption: str = ""):
        """
        Configures the metadata of an image.

        :param filename:
            the name of the file to store the image
        :param origin:
            the url to download the image
        :param caption:
            an optional caption to show in visualization
        """
        self.__filename = filename
        self.__caption = caption
        self.__origin = origin
        self.__content = None

    def download(self) -> None:
        """
        Downloads an image and adds it to this group
        """
        # ``get_file`` returns the absolute path of the (possibly cached) file.
        self.__filename = tf.keras.utils.get_file(self.__filename, self.__origin)

    def load(self) -> None:
        """
        Reads the contents of the image as a tensor.
        """
        # Rescale so the longest side becomes ``max_dim`` px, keeping aspect ratio.
        max_dim = 512
        img = tf.io.read_file(self.__filename)
        img = tf.image.decode_image(img, channels=3)
        img = tf.image.convert_image_dtype(img, tf.float32)
        shape = tf.cast(tf.shape(img)[:-1], tf.float32)
        # NOTE(review): built-in ``max`` iterates the tensor -- relies on eager mode; confirm.
        long_dim = max(shape)
        scale = max_dim / long_dim
        new_shape = tf.cast(shape * scale, tf.int32)
        img = tf.image.resize(img, new_shape)
        # Add a leading batch dimension.
        img = img[tf.newaxis, :]
        self.__content = img

    def setup_plot(self) -> None:
        """
        Configures the image to be displayed.
        """
        img = self.__content
        # Drop the batch dimension before handing the image to matplotlib.
        if len(img.shape) > 3:
            img = tf.squeeze(img, axis=0)
        plt.imshow(img)
        if self.__caption:
            plt.title(self.__caption)

    def visualize(self) -> None:
        """
        Downloads and configures the image to be displayed.
        """
        self.download()
        self.load()
        self.setup_plot()

    @property
    def content(self) -> tf.Tensor:
        # Loaded image tensor; ``None`` until ``load`` has been called.
        return self.__content
# VGG19 layers whose activations define the content and style targets.
CONTENT_LAYERS = ['block5_conv2']
STYLE_LAYERS = ['block1_conv1',
                'block2_conv1',
                'block3_conv1',
                'block4_conv1',
                'block5_conv1']
def clip_01(image):
    """Clamps *image* values into the valid [0, 1] range."""
    return tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=1.0)
from .context import Context
class Industry(Context):
class Patent(Context):
def list(self, year, mode, pref_code, city_code, patent_holder_id, sort1, sort2, offset, add_tec=[]):
param = {
'year': year,
'mode': mode,
'prefCode': pref_code,
'cityCode': city_code,
'patentHolderId': patent_holder_id,
'sort1': sort1,
'sort2': sort2,
'offset': offset
}
if add_tec is not None:
param['addTec'] = ','.join(map(lambda tec: '_'.join(map(str, tec)), add_tec))
return self.fetch('list', param)
class Export(Context):
def from_to(self, year, data_type, unit_type, disp_type, region_code, country_code, item_code1, item_code2, item_code3, customs_code1, customs_code2):
param = {
'year': year,
'dataType': data_type,
'unitType': unit_type,
'dispType': disp_type,
'regionCode': region_code,
'countryCode': country_code,
'itemCode1': item_code1,
'itemCode2': item_code2,
'itemCode3': item_code3,
'customsCode1': customs_code1,
'customsCode2': customs_code2
}
return self.fetch('fromTo', param)
class Globalmarket(Context):
def per_pref(self, year, disp_type, region_code, country_code, sic_code, simc_code):
param = {
'year': year,
'dispType': disp_type,
'regionCode': region_code,
'countryCode': country_code,
'sicCode': sic_code,
'simcCode': simc_code
}
return self.fetch('perPref', param)
class Power(Context):
def for_industry(self, year, pref_code, city_code, sic_code):
param = {
'year': year,
'prefCode': pref_code,
'cityCode': city_code,
'sicCode': sic_code
}
return self.fetch('forIndustry', param)
def for_area(self, year, pref_code, area_type, disp_type, sic_code, simc_code, add_industry=[]):
param = {
'year': year,
'prefCode': pref_code,
'areaType': area_type,
'dispType': disp_type,
'sicCode': sic_code,
'simcCode': simc_code
}
if add_industry is not None:
param['addIndustry'] = ','.join(map(lambda industry: '_'.join(map(str, industry)), add_industry))
return self.fetch('forArea', param)
def for_manufacturer_establishments(self, pref_code, sic_code, simc_code, add_area=[]):
param = {
'prefCode': pref_code,
'sicCode': sic_code,
'simcCode': simc_code
}
if add_area is not None:
param['addArea'] = ','.join(map(lambda area: '_'.join(map(str, area)), add_area))
return self.fetch('forManufacturerEstablishments', param)
def __init__(self, accessor, parent_category=''):
super(Industry, self).__init__(accessor, parent_category)
self.__patent = Industry.Patent(accessor, self.category)
self.__export = Industry.Export(accessor, self.category)
self.__global_market = Industry.Globalmarket(accessor, self.category)
self.__power = Industry.Power(accessor, self.category)
@property
def patent(self):
return self.__patent
@property
def export(self):
return self.__export
@property
def global_market(self):
return self.__global_market
@property
def power(self):
return self.__power | resaspy/industry.py | from .context import Context
class Industry(Context):
    """RESAS "industry" API endpoints, grouped into sub-contexts.

    Each nested context builds a query-parameter dict and delegates the
    request to ``Context.fetch``.
    """

    class Patent(Context):
        def list(self, year, mode, pref_code, city_code, patent_holder_id, sort1, sort2, offset, add_tec=None):
            """Fetch the patent list.

            :param add_tec: optional iterable of technology-code tuples; each
                tuple is joined with '_' and the tuples with ','. ``None``
                (the fixed mutable-``[]`` default) omits the parameter.
            """
            param = {
                'year': year,
                'mode': mode,
                'prefCode': pref_code,
                'cityCode': city_code,
                'patentHolderId': patent_holder_id,
                'sort1': sort1,
                'sort2': sort2,
                'offset': offset
            }
            if add_tec is not None:
                param['addTec'] = ','.join('_'.join(map(str, tec)) for tec in add_tec)
            return self.fetch('list', param)

    class Export(Context):
        def from_to(self, year, data_type, unit_type, disp_type, region_code, country_code, item_code1, item_code2, item_code3, customs_code1, customs_code2):
            """Fetch export flows between regions/countries."""
            param = {
                'year': year,
                'dataType': data_type,
                'unitType': unit_type,
                'dispType': disp_type,
                'regionCode': region_code,
                'countryCode': country_code,
                'itemCode1': item_code1,
                'itemCode2': item_code2,
                'itemCode3': item_code3,
                'customsCode1': customs_code1,
                'customsCode2': customs_code2
            }
            return self.fetch('fromTo', param)

    class Globalmarket(Context):
        def per_pref(self, year, disp_type, region_code, country_code, sic_code, simc_code):
            """Fetch global-market data per prefecture."""
            param = {
                'year': year,
                'dispType': disp_type,
                'regionCode': region_code,
                'countryCode': country_code,
                'sicCode': sic_code,
                'simcCode': simc_code
            }
            return self.fetch('perPref', param)

    class Power(Context):
        def for_industry(self, year, pref_code, city_code, sic_code):
            """Fetch industry-power data for one industry."""
            param = {
                'year': year,
                'prefCode': pref_code,
                'cityCode': city_code,
                'sicCode': sic_code
            }
            return self.fetch('forIndustry', param)

        def for_area(self, year, pref_code, area_type, disp_type, sic_code, simc_code, add_industry=None):
            """Fetch industry-power data for an area.

            :param add_industry: optional iterable of industry-code tuples,
                encoded like ``add_tec`` above; ``None`` omits the parameter.
            """
            param = {
                'year': year,
                'prefCode': pref_code,
                'areaType': area_type,
                'dispType': disp_type,
                'sicCode': sic_code,
                'simcCode': simc_code
            }
            if add_industry is not None:
                param['addIndustry'] = ','.join('_'.join(map(str, industry)) for industry in add_industry)
            return self.fetch('forArea', param)

        def for_manufacturer_establishments(self, pref_code, sic_code, simc_code, add_area=None):
            """Fetch manufacturer-establishment data.

            :param add_area: optional iterable of area-code tuples, encoded
                like ``add_tec`` above; ``None`` omits the parameter.
            """
            param = {
                'prefCode': pref_code,
                'sicCode': sic_code,
                'simcCode': simc_code
            }
            if add_area is not None:
                param['addArea'] = ','.join('_'.join(map(str, area)) for area in add_area)
            return self.fetch('forManufacturerEstablishments', param)

    def __init__(self, accessor, parent_category=''):
        super(Industry, self).__init__(accessor, parent_category)
        # One sub-context per endpoint group, all sharing this category prefix.
        self.__patent = Industry.Patent(accessor, self.category)
        self.__export = Industry.Export(accessor, self.category)
        self.__global_market = Industry.Globalmarket(accessor, self.category)
        self.__power = Industry.Power(accessor, self.category)

    @property
    def patent(self):
        return self.__patent

    @property
    def export(self):
        return self.__export

    @property
    def global_market(self):
        return self.__global_market

    @property
    def power(self):
        return self.__power
from copy import deepcopy
from dataclasses import dataclass
from typing import (
Any, Dict, Generic, Hashable, Iterator, List,
Mapping, MutableMapping, Tuple, TypeVar, Union
)
Key = TypeVar("Key")
Value = TypeVar("Value")
@dataclass
class KeyValue(Generic[Key, Value]):
"""
Element of Map for unhashable keys.
"""
key: Key
value: Value
class Map(MutableMapping[Key, Value]):
"""
Dict-like collection with no `Hashable` restriction on elements.
"""
from_collection: Dict[Key, Value]
def __init__(
self,
from_collection: Union[
None,
Mapping[Key, Value],
List[Tuple[Key, Value]],
] = None,
copy_keys: bool = True,
) -> None:
self.from_collection = dict(from_collection or [])
self._unhashable_items: List[KeyValue[Key, Value]] = list()
self._copy_keys = copy_keys
def copy(self) -> "Map[Key, Value]":
"""
Returns shallow copy of Map.
"""
clone = Map(self.from_collection)
for item in self._unhashable_items:
clone[item.key] = item.value
return clone
def __eq__(self, other: Any) -> bool:
return isinstance(other, Map) \
and self.from_collection == other.from_collection \
and self._unhashable_items == other._unhashable_items
def __len__(self) -> int:
return len(self.from_collection) + len(self._unhashable_items)
def __getitem__(self, key: Key) -> Value:
if self.__unhashable(key):
return self.__getitem_unhashable(key)
return self.from_collection[key]
def __contains__(self, key: Any) -> bool:
if self.__unhashable(key):
return self.__contains_unhashable(key)
return key in self.from_collection
def __iter__(self) -> Iterator[Key]:
for key in self.from_collection:
yield key
for item in self._unhashable_items:
yield item.key
def __setitem__(self, key: Key, value: Value) -> None:
if self.__unhashable(key):
self.__setitem_unhashable(key, value)
return
self.from_collection[key] = value
def __delitem__(self, key: Key) -> None:
if self.__unhashable(key):
self.__delitem_unhashable(key)
return
del self.from_collection[key]
def __getitem_unhashable(self, key: Key) -> Value:
for item in self._unhashable_items:
if item.key == key:
return item.value
raise KeyError(key)
def __contains_unhashable(self, key: Key) -> bool:
for item in self._unhashable_items:
if item.key == key:
return True
return False
def __setitem_unhashable(self, key: Key, value: Value) -> None:
for item in self._unhashable_items:
if item.key == key:
item.value = value
return
if self._copy_keys:
key = deepcopy(key)
item = KeyValue(key, value)
self._unhashable_items.append(item)
def __delitem_unhashable(self, key: Key) -> None:
for item in self._unhashable_items:
if item.key == key:
return self._unhashable_items.remove(item)
raise KeyError(key)
@classmethod
def __unhashable(cls, value: Any) -> bool:
if not isinstance(value, Hashable):
return True
try:
hash(value)
return False
except TypeError:
return True | pycaches/nohashmap.py | from copy import deepcopy
from dataclasses import dataclass
from typing import (
Any, Dict, Generic, Hashable, Iterator, List,
Mapping, MutableMapping, Tuple, TypeVar, Union
)
Key = TypeVar("Key")
Value = TypeVar("Value")


@dataclass
class KeyValue(Generic[Key, Value]):
    """
    Element of Map for unhashable keys.
    """
    key: Key
    value: Value


class Map(MutableMapping[Key, Value]):
    """
    Dict-like collection with no `Hashable` restriction on elements.

    Hashable keys live in a plain dict; unhashable keys fall back to a
    linear-scan list of ``KeyValue`` pairs (O(n) access for those keys).
    """
    # Storage for hashable keys.
    from_collection: Dict[Key, Value]

    def __init__(
        self,
        from_collection: Union[
            None,
            Mapping[Key, Value],
            List[Tuple[Key, Value]],
        ] = None,
        copy_keys: bool = True,
    ) -> None:
        """
        :param from_collection: initial items (hashable keys only)
        :param copy_keys: when True, unhashable keys are deep-copied on
            insertion so later mutation of the caller's object cannot
            corrupt the mapping.
        """
        self.from_collection = dict(from_collection or [])
        self._unhashable_items: List[KeyValue[Key, Value]] = list()
        self._copy_keys = copy_keys

    def copy(self) -> "Map[Key, Value]":
        """
        Returns shallow copy of Map.
        """
        # Bug fix: propagate ``copy_keys`` -- previously a ``copy_keys=False``
        # Map silently became a deep-copying one after ``copy()``.
        clone = Map(self.from_collection, copy_keys=self._copy_keys)
        for item in self._unhashable_items:
            clone[item.key] = item.value
        return clone

    def __eq__(self, other: Any) -> bool:
        return isinstance(other, Map) \
            and self.from_collection == other.from_collection \
            and self._unhashable_items == other._unhashable_items

    def __len__(self) -> int:
        return len(self.from_collection) + len(self._unhashable_items)

    def __getitem__(self, key: Key) -> Value:
        if self.__unhashable(key):
            return self.__getitem_unhashable(key)
        return self.from_collection[key]

    def __contains__(self, key: Any) -> bool:
        if self.__unhashable(key):
            return self.__contains_unhashable(key)
        return key in self.from_collection

    def __iter__(self) -> Iterator[Key]:
        # Hashable keys first, then insertion-ordered unhashable keys.
        for key in self.from_collection:
            yield key
        for item in self._unhashable_items:
            yield item.key

    def __setitem__(self, key: Key, value: Value) -> None:
        if self.__unhashable(key):
            self.__setitem_unhashable(key, value)
            return
        self.from_collection[key] = value

    def __delitem__(self, key: Key) -> None:
        if self.__unhashable(key):
            self.__delitem_unhashable(key)
            return
        del self.from_collection[key]

    def __getitem_unhashable(self, key: Key) -> Value:
        # Linear scan by equality, mirroring dict's KeyError contract.
        for item in self._unhashable_items:
            if item.key == key:
                return item.value
        raise KeyError(key)

    def __contains_unhashable(self, key: Key) -> bool:
        for item in self._unhashable_items:
            if item.key == key:
                return True
        return False

    def __setitem_unhashable(self, key: Key, value: Value) -> None:
        # Update in place if an equal key already exists.
        for item in self._unhashable_items:
            if item.key == key:
                item.value = value
                return
        if self._copy_keys:
            key = deepcopy(key)
        item = KeyValue(key, value)
        self._unhashable_items.append(item)

    def __delitem_unhashable(self, key: Key) -> None:
        for item in self._unhashable_items:
            if item.key == key:
                return self._unhashable_items.remove(item)
        raise KeyError(key)

    @classmethod
    def __unhashable(cls, value: Any) -> bool:
        """True when *value* cannot be used as a dict key."""
        if not isinstance(value, Hashable):
            return True
        try:
            # Some types pass the isinstance check yet still fail to hash
            # (e.g. classes overriding __hash__ to raise).
            hash(value)
            return False
        except TypeError:
            return True
from string import ascii_lowercase, ascii_uppercase
from .errors import UnitExecutionError, UnitOutputError
# Caesar Cipher
def encode_caesar_cipher(message: str, key: int):
    """Shift every ASCII letter in *message* forward by *key* positions.

    Case is preserved; digits, punctuation and non-ASCII letters pass
    through unchanged. Negative keys shift backwards.

    :raises UnitExecutionError: if message is not a str or key is not an int
    """
    if not isinstance(message, str):
        raise UnitExecutionError("message must be str")
    if not isinstance(key, int):
        raise UnitExecutionError("key must be int")
    shift = key % 26
    rotated_lower = ascii_lowercase[shift:] + ascii_lowercase[:shift]
    rotated_upper = ascii_uppercase[shift:] + ascii_uppercase[:shift]
    table = str.maketrans(ascii_lowercase + ascii_uppercase,
                          rotated_lower + rotated_upper)
    return message.translate(table)
def decode_caesar_cipher(message: str, key: int):
    """Reverse ``encode_caesar_cipher``: shift ASCII letters back by *key*.

    :raises UnitExecutionError: if message is not a str or key is not an int
    """
    if not isinstance(message, str):
        raise UnitExecutionError("message must be str")
    if not isinstance(key, int):
        raise UnitExecutionError("key must be int")
    # Decoding is encoding with the opposite shift (helper inlined).
    shift = (-key) % 26
    rotated_lower = ascii_lowercase[shift:] + ascii_lowercase[:shift]
    rotated_upper = ascii_uppercase[shift:] + ascii_uppercase[:shift]
    table = str.maketrans(ascii_lowercase + ascii_uppercase,
                          rotated_lower + rotated_upper)
    return message.translate(table)
# Morse Code
character_to_morse = {
'A': ".-", 'B': "-...", 'C': "-.-.", 'D': "-..", 'E': '.', 'F': "..-.", 'G': "--.", 'H': "....",
'I': "..", 'J': ".---", 'K': "-.-", 'L': ".-..", 'M': "--", 'N': "-.", 'O': "---", 'P': ".--.",
'Q': "--.-", 'R': ".-.", 'S': "...", 'T': '-', 'U': "..-", 'V': "...-", 'W': ".--", 'X': "-..-",
'Y': "-.--", 'Z': "--..", '0': "----", '1': ".----", '2': "..---", '3': "...--", '4': "....-",
'5': ".....", '6': "-....", '7': "--...", '8': "---..", '9': "----.", '.': ".-.-.-", ',': "--..--",
':': "---...", '?': "..--..", "'": ".---.", '-': "-....-", '/': "-..-.", '!': "-.-.--",
'(': "-.--.", ')': "-.--.-", '&': ".-...", ';': "-.-.-.", '=': "-...-", '+': ".-.-.",
'_': "..--.-", '"': ".-..-.", '$': "...-..-", '@': ".--.-.", ' ': '/'
}
# TODO: Add non-English extensions
morse_to_character = {value: key for key, value in character_to_morse.items()}
def encode_morse_code(message: str):
    """Translate *message* into Morse code, letters separated by spaces.

    Raises UnitExecutionError for non-str input and UnitOutputError for a
    character that has no Morse representation.
    """
    if not isinstance(message, str):
        raise UnitExecutionError("message must be str")
    codes = []
    for character in message.upper():
        try:
            codes.append(character_to_morse[character])
        except KeyError as e:
            raise UnitOutputError(f"Unable to encode {e}")
    return ' '.join(codes)
def decode_morse_code(message: str):
    """Translate Morse code back into plain text.

    Letters are separated by single spaces and words by " / ".
    Raises UnitExecutionError for non-str input and UnitOutputError for an
    unrecognized Morse sequence.
    """
    if not isinstance(message, str):
        raise UnitExecutionError("message must be str")
    decoded_words = []
    for word in message.split(" / "):
        try:
            decoded_words.append("".join(morse_to_character[code] for code in word.split()))
        except KeyError as e:
            raise UnitOutputError(f"Unable to decode {e}")
    return ' '.join(decoded_words)
from .errors import UnitExecutionError, UnitOutputError
# Caesar Cipher
def encode_caesar_cipher(message: str, key: int):
if not isinstance(message, str):
raise UnitExecutionError("message must be str")
if not isinstance(key, int):
raise UnitExecutionError("key must be int")
encoded_message = ""
for character in message:
if not character.isalpha() or not character.isascii():
encoded_message += character
elif character.islower():
encoded_message += ascii_lowercase[(ascii_lowercase.index(character) + key) % 26]
else:
encoded_message += ascii_uppercase[(ascii_uppercase.index(character) + key) % 26]
return encoded_message
def decode_caesar_cipher(message: str, key: int):
if not isinstance(message, str):
raise UnitExecutionError("message must be str")
if not isinstance(key, int):
raise UnitExecutionError("key must be int")
return encode_caesar_cipher(message, -key)
# Morse Code
# International (ITU) Morse code table. Keys are uppercase characters;
# ' ' maps to '/', which serves as the word separator in encoded output.
character_to_morse = {
    'A': ".-", 'B': "-...", 'C': "-.-.", 'D': "-..", 'E': '.', 'F': "..-.", 'G': "--.", 'H': "....",
    'I': "..", 'J': ".---", 'K': "-.-", 'L': ".-..", 'M': "--", 'N': "-.", 'O': "---", 'P': ".--.",
    'Q': "--.-", 'R': ".-.", 'S': "...", 'T': '-', 'U': "..-", 'V': "...-", 'W': ".--", 'X': "-..-",
    'Y': "-.--", 'Z': "--..",
    # Fixed per ITU-R M.1677-1: '0' is five dashes (was "----"), and the
    # apostrophe is ".----." (was ".---.").
    '0': "-----", '1': ".----", '2': "..---", '3': "...--", '4': "....-",
    '5': ".....", '6': "-....", '7': "--...", '8': "---..", '9': "----.", '.': ".-.-.-", ',': "--..--",
    ':': "---...", '?': "..--..", "'": ".----.", '-': "-....-", '/': "-..-.", '!': "-.-.--",
    '(': "-.--.", ')': "-.--.-", '&': ".-...", ';': "-.-.-.", '=': "-...-", '+': ".-.-.",
    '_': "..--.-", '"': ".-..-.", '$': "...-..-", '@': ".--.-.", ' ': '/'
}
# TODO: Add non-English extensions
# Inverse lookup used for decoding; assumes the table is one-to-one.
morse_to_character = {value: key for key, value in character_to_morse.items()}
def encode_morse_code(message: str):
if not isinstance(message, str):
raise UnitExecutionError("message must be str")
try:
return ' '.join(character_to_morse[character] for character in message.upper())
except KeyError as e:
raise UnitOutputError(f"Unable to encode {e}")
def decode_morse_code(message: str):
if not isinstance(message, str):
raise UnitExecutionError("message must be str")
try:
return ' '.join("".join(morse_to_character[character] for character in word.split())
for word in message.split(" / "))
except KeyError as e:
raise UnitOutputError(f"Unable to decode {e}") | 0.327991 | 0.108921 |
from __future__ import absolute_import, division, unicode_literals
from datetime import timedelta
import json
import re
import pywikibot
from pywikibot.bot import WikidataBot
from pywikibot.exceptions import (LockedPageError, NoCreateError, NoPageError, PageSaveRelatedError)
import logger
logger = logger.get_logger("bot")
class Pagedata:
    """Derived metadata for one wiki page.

    Parses the page title against the encyclopedia's title pattern
    (``titleDO`` for old-orthography '/ДО' pages, else ``titleVT``) and
    extracts the article title plus an optional disambiguation note in
    trailing parentheses. Sets ``is_bad`` when the page should be skipped.

    Fixes over the previous version:
    - the regexp-metacharacter check no longer *overwrites* ``is_bad`` set
      by the category consistency checks; it accumulates instead;
    - when the title pattern does not match, ``article_title`` is now set
      to None instead of being left undefined (which previously raised
      AttributeError in the disambiguation step).
    """

    def __init__(self, page, enc_metas, prefixes):
        self.pagename = page.title()
        self.pagename_spaceaboutslashes = self.pagename.replace('/', ' / ')
        self.rootpagename, _, self.subpagename = self.pagename.partition('/')
        prefix_settings = prefixes[self.rootpagename]
        # self.active = prefix_settings["active"]
        self.category_of_articles = prefix_settings["category_of_articles"]
        self.item_label = prefix_settings["item_label"]
        self.enc_meta = enc_metas[self.rootpagename]
        # Old-orthography subpages are marked with '/ДО' and use their own
        # title pattern.
        self.is_oldorph = '/ДО' in self.pagename
        self.pagename_pattern = self.enc_meta['titleDO'] if self.is_oldorph else self.enc_meta['titleVT']
        self.is_bad = False
        if ':ДО' in self.category_of_articles and '/ДО' not in self.pagename:
            logger.warning('категория ДО, но нет /ДО в названии страницы, пропускаем')
            self.is_bad = True
        elif ':ДО' not in self.category_of_articles and '/ДО' in self.pagename:
            logger.warning('категория не ДО, но есть /ДО в названии страницы, пропускаем')
            self.is_bad = True
        # Skip patterns containing regexp metacharacters to avoid broken
        # searches. Accumulate with any earlier verdict (the old code
        # overwrote it here, losing the category checks above).
        self.is_bad = self.is_bad or any(s in self.pagename_pattern for s in r'.?*+\()[]')
        # Extract the article title from the page name.
        m = re.search(self.pagename_pattern.replace('$1', '(.+)'), self.pagename)
        if m:
            self.article_title = m.group(1)
        else:
            self.article_title = None
            self.is_bad = True
        # Disambiguation note in trailing parentheses, e.g. "Title (note)".
        self.disambig_note = self.article_title_no_disambig = None
        if self.article_title is not None:
            m = re.search(r'^(.+?)\s+\(([^()]+?)\)$', self.article_title)
            if m:
                self.article_title_no_disambig = m.group(1).strip()
                self.disambig_note = m.group(2).strip()
class NewItemBot(WikidataBot):
    """A bot that creates a new Wikidata item for each generated page.

    For every page yielded by the generator it builds the item header
    (labels/descriptions/aliases), creates the item with a sitelink, and
    then attaches a fixed set of claims.
    """
    # Process pages even when they have no associated Wikidata item yet.
    treat_missing_item = True

    def __init__(self, generator, settings: dict, prefixes: dict, **kwargs):
        """Only accepts options defined in availableOptions."""
        self.available_options.update({
            'always': True,
            'lastedit_days': settings.get('lastedit_days'),
            'touch': 'newly', # Can be False, newly (pages linked to newly
            # created items) or True (touch all pages)
        })
        super().__init__(**kwargs)
        self.generator = generator
        # Pages edited within the last `lastedit_days` days are skipped.
        self.lastEdit = self.opt['lastedit_days']
        self.lastEditBefore = self.site.server_time() - timedelta(days=self.lastEdit)
        pywikibot.output(
            'Last edit is set to {0} days so only pages last edited'
            '\nbefore {1} will be considered.'.format(self.lastEdit, self.lastEditBefore.isoformat()))
        self.enc_metas = get_enc_metas(self.site, self.repo)
        # self.prefixes = settings['prefixes']
        self.prefixes = prefixes
        # self.pattern_of_disambig_in_item_description = settings['pattern_of_disambig_in_item_description']

    def treat_page_and_item(self, page, item):
        """Treat one page/item pair: create the item and add its claims."""
        if self.filter_off(page, item):
            return
        page.p = Pagedata(page, self.enc_metas, self.prefixes)
        if page.p.is_bad:
            return
        pywikibot.stdout('page.p done')
        data = self.make_item_header(page)
        claims = self.make_claims(page)
        item = self.create_item_for_page(page, data=data, callback=lambda _, exc: self._callback(page, exc))
        self.add_claims(item, claims)

    def add_claims(self, item, claims):
        """Add each prepared claim to *item*."""
        for claim in claims:
            # The generator might yield pages from multiple sites
            # site = page.site if page is not None else None
            self.user_add_claim(item, claim) # self.exists_arg

    def create_item_for_page(self, page, data=None, summary=None, **kwargs):
        """Create a new item with a sitelink to *page* and return it.

        In pywikibot.bot.create_item_for_page() the label is always renamed
        to the pagename anyway, so it is replaced by this custom function.
        Returns the new ItemPage on success, otherwise None.
        """
        if not summary:
            # FIXME: i18n
            summary = ('Bot: New item with sitelink from %s' % page.title(as_link=True, insite=self.repo))
        if data is None:
            data = {}
        data.setdefault('sitelinks', {}).update({
            page.site.dbName(): {
                'site': page.site.dbName(),
                'title': page.title()
            }
        })
        pywikibot.output('Creating item for %s...' % page)
        item = pywikibot.ItemPage(page.site.data_repository())
        kwargs.setdefault('show_diff', False)
        result = self.user_edit_entity(item, data, summary=summary, **kwargs)
        if result:
            return item
        else:
            return None

    def make_item_header(self, page) -> dict:
        """Build labels/descriptions/aliases for the new item."""
        p = page.p
        RU = 'ru'
        data = {
            # Label is the page name without the orthography suffix.
            'labels': {RU: {'language': RU, 'value': p.pagename.replace('/ДО', '').replace('/ВТ', '')}},
            'descriptions': {lng: p.item_label[lng] for lng in ['en', RU]},
            'aliases': {RU: [p.article_title]}}
        return data

    def make_claims(self, page) -> list:
        """Build the fixed claim set for an encyclopedic-article item."""
        p = page.p
        properties = [
            ['P31', 'Q13433827'], # 'instance of' : encyclopedic article
            ['P1476', ['ru', p.article_title_no_disambig or p.article_title]], # title
            ['P1433', p.enc_meta['id']], # 'published in'
            ['P407', 'Q7737'], # language of work or name : Russian
        ]
        claims = []
        for pid, value in properties:
            claim = pywikibot.Claim(self.repo, pid)
            # Dispatch on the property's datatype to build the right target.
            if claim.type == 'wikibase-item':
                target = pywikibot.ItemPage(self.repo, value)
            elif claim.type == 'string':
                target = value
            elif claim.type == 'monolingualtext':
                lang, string = value
                target = pywikibot.WbMonolingualText(string, lang)
            elif claim.type == 'globe-coordinate':
                coord_args = [float(c) for c in value.split(',')]
                if len(coord_args) >= 3:
                    precision = coord_args[2]
                else:
                    precision = 0.0001 # Default value (~10 m at equator)
                target = pywikibot.Coordinate(coord_args[0], coord_args[1], precision=precision)
            else:
                raise NotImplementedError('{} datatype is not yet supported by claimit.py'.format(claim.type))
            claim.setTarget(target)
            claims.append(claim)
        return claims

    def filter_off(self, page, item) -> bool:
        """Return True when *page* should be skipped.

        NOTE(review): falls through returning None (falsy) when no filter
        matched, despite the -> bool annotation.
        """
        if item and item.exists():
            pywikibot.output('{0} already has an item: {1}.'.format(page, item))
            return True
        if page.isRedirectPage():
            pywikibot.output('{0} is a redirect page. Skipping.'.format(page))
            return True
        if page.editTime() > self.lastEditBefore:
            pywikibot.output(
                'Last edit on {0} was on {1}.\nToo recent. Skipping.'.format(page, page.editTime().isoformat()))
            return True
        if page.isCategoryRedirect():
            pywikibot.output('{0} is a category redirect. Skipping.'.format(page))
            return True
        if page.langlinks():
            # FIXME: Implement this
            pywikibot.output(
                'Found language links (interwiki links).\n'
                "Haven't implemented that yet so skipping.")
            return True

    @staticmethod
    def _touch_page(page):
        """Do a null edit ("touch") on *page*, logging known failure modes."""
        try:
            pywikibot.output('Doing a null edit on the page.')
            page.touch()
        except (NoCreateError, NoPageError):
            pywikibot.error('Page {0} does not exist.'.format(page.title(as_link=True)))
        except LockedPageError:
            pywikibot.error('Page {0} is locked.'.format(page.title(as_link=True)))
        except PageSaveRelatedError:
            pywikibot.error('Page {0} not saved.'.format(page.title(as_link=True)))

    def _callback(self, page, exc):
        """Item-creation callback: touch the page when creation succeeded."""
        if exc is None and self.opt['touch']:
            self._touch_page(page)
def get_enc_metas(WS, WD):
    """Load per-encyclopedia settings from the on-wiki JSON page.

    WS: site hosting 'MediaWiki:Encyclopedias_settings.json'.
    WD: data repository used to resolve the 'id' of each entry.
    Returns a dict keyed by each encyclopedia's page-name prefix (its
    "argument" field); every entry gains a 'wditem' ItemPage.
    """
    settings_page = pywikibot.Page(WS, 'MediaWiki:Encyclopedias_settings.json')
    entries = json.loads(settings_page.text)
    enc_metas = {}
    for entry in entries:
        entry['wditem'] = pywikibot.ItemPage(WD, entry['id'])
        enc_metas[entry['argument']] = entry
    return enc_metas
from datetime import timedelta
import json
import re
import pywikibot
from pywikibot.bot import WikidataBot
from pywikibot.exceptions import (LockedPageError, NoCreateError, NoPageError, PageSaveRelatedError)
import logger
logger = logger.get_logger("bot")
class Pagedata:
def __init__(self, page, enc_metas, prefixes):
self.pagename = page.title()
self.pagename_spaceaboutslashes = self.pagename.replace('/', ' / ')
self.rootpagename, _, self.subpagename = self.pagename.partition('/')
prefix_settings = prefixes[self.rootpagename]
# self.active = prefix_settings["active"]
self.category_of_articles = prefix_settings["category_of_articles"]
self.item_label = prefix_settings["item_label"]
self.enc_meta = enc_metas[self.rootpagename]
self.is_oldorph = True if '/ДО' in self.pagename else False
self.pagename_pattern = self.enc_meta['titleDO'] if self.is_oldorph else self.enc_meta['titleVT']
self.is_bad = False
if ':ДО' in self.category_of_articles and not '/ДО' in self.pagename:
logger.warning('категория ДО, но нет /ДО в названии страницы, пропускаем')
self.is_bad = True
elif not ':ДО' in self.category_of_articles and '/ДО' in self.pagename:
logger.warning('категория не ДО, но есть /ДО в названии страницы, пропускаем')
self.is_bad = True
# пропуск шаблонов с символами regexp, во избежание ошибок
self.is_bad = any((s for s in r'.?*+\()[]' if s in self.pagename_pattern))
# Извлечение названия статьи из названия страницы
m = re.search(self.pagename_pattern.replace('$1', '(.+)'), self.pagename)
if m:
self.article_title = m.group(1)
else:
self.is_bad = True
# уточнение неоднозначностей в скобках
self.disambig_note = self.article_title_no_disambig = None
m = re.search(r'^(.+?)\s+\(([^()]+?)\)$', self.article_title)
if m:
self.article_title_no_disambig = m.group(1).strip()
self.disambig_note = m.group(2).strip()
class NewItemBot(WikidataBot):
"""A bot to create new items."""
treat_missing_item = True
def __init__(self, generator, settings: dict, prefixes: dict, **kwargs):
"""Only accepts options defined in availableOptions."""
self.available_options.update({
'always': True,
'lastedit_days': settings.get('lastedit_days'),
'touch': 'newly', # Can be False, newly (pages linked to newly
# created items) or True (touch all pages)
})
super().__init__(**kwargs)
self.generator = generator
self.lastEdit = self.opt['lastedit_days']
self.lastEditBefore = self.site.server_time() - timedelta(days=self.lastEdit)
pywikibot.output(
'Last edit is set to {0} days so only pages last edited'
'\nbefore {1} will be considered.'.format(self.lastEdit, self.lastEditBefore.isoformat()))
self.enc_metas = get_enc_metas(self.site, self.repo)
# self.prefixes = settings['prefixes']
self.prefixes = prefixes
# self.pattern_of_disambig_in_item_description = settings['pattern_of_disambig_in_item_description']
def treat_page_and_item(self, page, item):
"""Treat page/item."""
if self.filter_off(page, item):
return
page.p = Pagedata(page, self.enc_metas, self.prefixes)
if page.p.is_bad:
return
pywikibot.stdout('page.p done')
data = self.make_item_header(page)
claims = self.make_claims(page)
item = self.create_item_for_page(page, data=data, callback=lambda _, exc: self._callback(page, exc))
self.add_claims(item, claims)
def add_claims(self, item, claims):
"""Treat each page."""
for claim in claims:
# The generator might yield pages from multiple sites
# site = page.site if page is not None else None
self.user_add_claim(item, claim) # self.exists_arg
def create_item_for_page(self, page, data=None, summary=None, **kwargs):
"""
в pywikibot.bot.create_item_for_page() метка всеровно переименовывается как pagename
заменено своей функцией
"""
if not summary:
# FIXME: i18n
summary = ('Bot: New item with sitelink from %s' % page.title(as_link=True, insite=self.repo))
if data is None:
data = {}
data.setdefault('sitelinks', {}).update({
page.site.dbName(): {
'site': page.site.dbName(),
'title': page.title()
}
})
pywikibot.output('Creating item for %s...' % page)
item = pywikibot.ItemPage(page.site.data_repository())
kwargs.setdefault('show_diff', False)
result = self.user_edit_entity(item, data, summary=summary, **kwargs)
if result:
return item
else:
return None
def make_item_header(self, page) -> dict:
p = page.p
RU = 'ru'
data = {
'labels': {RU: {'language': RU, 'value': p.pagename.replace('/ДО', '').replace('/ВТ', '')}},
'descriptions': {lng: p.item_label[lng] for lng in ['en', RU]},
'aliases': {RU: [p.article_title]}}
return data
def make_claims(self, page) -> list:
p = page.p
properties = [
['P31', 'Q13433827'], # 'это частный случай понятия' : энциклопедическая статья
['P1476', ['ru', p.article_title_no_disambig or p.article_title]],
['P1433', p.enc_meta['id']], # 'опубликовано в'
['P407', 'Q7737'], # язык произведения или его названия : русский
]
claims = []
for pid, value in properties:
claim = pywikibot.Claim(self.repo, pid)
if claim.type == 'wikibase-item':
target = pywikibot.ItemPage(self.repo, value)
elif claim.type == 'string':
target = value
elif claim.type == 'monolingualtext':
lang, string = value
target = pywikibot.WbMonolingualText(string, lang)
elif claim.type == 'globe-coordinate':
coord_args = [float(c) for c in value.split(',')]
if len(coord_args) >= 3:
precision = coord_args[2]
else:
precision = 0.0001 # Default value (~10 m at equator)
target = pywikibot.Coordinate(coord_args[0], coord_args[1], precision=precision)
else:
raise NotImplementedError('{} datatype is not yet supported by claimit.py'.format(claim.type))
claim.setTarget(target)
claims.append(claim)
return claims
def filter_off(self, page, item) -> bool:
if item and item.exists():
pywikibot.output('{0} already has an item: {1}.'.format(page, item))
return True
if page.isRedirectPage():
pywikibot.output('{0} is a redirect page. Skipping.'.format(page))
return True
if page.editTime() > self.lastEditBefore:
pywikibot.output(
'Last edit on {0} was on {1}.\nToo recent. Skipping.'.format(page, page.editTime().isoformat()))
return True
if page.isCategoryRedirect():
pywikibot.output('{0} is a category redirect. Skipping.'.format(page))
return True
if page.langlinks():
# FIXME: Implement this
pywikibot.output(
'Found language links (interwiki links).\n'
"Haven't implemented that yet so skipping.")
return True
@staticmethod
def _touch_page(page):
try:
pywikibot.output('Doing a null edit on the page.')
page.touch()
except (NoCreateError, NoPageError):
pywikibot.error('Page {0} does not exist.'.format(page.title(as_link=True)))
except LockedPageError:
pywikibot.error('Page {0} is locked.'.format(page.title(as_link=True)))
except PageSaveRelatedError:
pywikibot.error('Page {0} not saved.'.format(page.title(as_link=True)))
def _callback(self, page, exc):
if exc is None and self.opt['touch']:
self._touch_page(page)
def get_enc_metas(WS, WD):
j = pywikibot.Page(WS, 'MediaWiki:Encyclopedias_settings.json')
other_sources = json.loads(j.text)
enc_metas = {}
for n in other_sources:
n['wditem'] = pywikibot.ItemPage(WD, n['id'])
pname = n['argument']
enc_metas[pname] = n
return enc_metas | 0.391406 | 0.122497 |
import random
import numpy as np
import mask
import sklearn
from joblib import Parallel, delayed
import tensorflow as tf
import losses
from Cube2Dataset import encode_mask
class ImageGenerator:
    """Builds synthetic multi-illuminant images from single-illuminant ones.

    Source images are combined with a random spatial mask so that each
    region appears lit by a different illuminant.
    """
    def __init__(self, image_names, gts, illuminants=2, size=(512, 1024), ang_dist=3, encode=True):
        # image_names: paths to single-illuminant PNG images.
        # gts: ground-truth illuminants, indexed by the numeric file name.
        # illuminants: number of illuminants per generated collage.
        # size: output image size passed to tf.image.resize -- assumed
        #       (height, width), TODO confirm.
        # ang_dist: minimum angular distance (degrees) between illuminants.
        # encode: whether to run the grayscale mask through encode_mask.
        self.image_names = image_names
        self.gts = gts
        self.size = size
        self.depth = 3
        self.illuminants = illuminants
        self.ang_dist = ang_dist
        self.encode = encode
    @tf.function
    def create_image_collage(self, images, mask):
        # NOTE(review): handles exactly two source images -- pixels where
        # mask == 1 come from images[0], the rest from images[1].
        image = tf.where(mask == 1, images[0], images[1])
        return image
    def generate_n_ill_mask(self, illuminants):
        """Draw a random region mask and color it with *illuminants*.

        Returns (color_mask, grayscale_mask): the first holds per-pixel
        illuminant RGB values, the second a per-region scalar in [0, 1]
        replicated over 3 channels.
        """
        size = (128, 128)
        num_of_ill = len(illuminants)
        # Retry random line partitions until we get exactly num_of_ill
        # regions, each covering a reasonable share of the image.
        while True:
            mask_ = np.uint8(np.zeros((size[1], size[0])))
            for i in range(num_of_ill - 1):
                mask_ = mask.draw_new_line(mask_)
            mask_, created_ill = mask.create_mask_from_lines(mask_)
            if created_ill != num_of_ill:
                continue
            val, cnt = np.unique(mask_, return_counts=True)
            # Every region must cover at least 1/(3*n) of the area.
            if np.min(cnt / (size[1] * size[0])) >= 1 / (3 * num_of_ill):
                break
        color_mask_ = np.zeros((size[0], size[1], 3), dtype='float32')
        graysacle_mask_ = np.zeros((size[0], size[1], 3), dtype='float32')
        for i, ill in enumerate(illuminants):
            # Region labels in mask_ are 1-based.
            color_mask_[mask_ == i + 1, :] = ill
            if len(illuminants) > 1:
                graysacle_mask_[mask_ == i + 1, :] = i / (len(illuminants) - 1)
        # color_mask_ = tf.image.convert_image_dtype(color_mask_, dtype=tf.float32)
        return tf.convert_to_tensor(color_mask_), tf.convert_to_tensor(graysacle_mask_)
    # @tf.function
    def load_img(self, bl=tf.constant(2048 / (2 ** 14 - 1), dtype=tf.float32)):
        """Build one synthetic collage sample.

        bl: black level to subtract, normalized by the 14-bit maximum.
        Returns (image, grayscale_or_encoded_mask, color_mask, illuminants).
        """
        with tf.device('/device:cpu:0'):
            def load(image_name):
                # Decode a 16-bit PNG, normalize to [0, 1] at 14-bit depth,
                # subtract the black level and zero out any pixel that
                # went negative in any channel.
                image = tf.io.read_file(image_name)
                image = tf.image.decode_png(image, channels=3, dtype=tf.uint16)
                image = tf.cast(image, dtype=tf.float32) / (2 ** 14 - 1)
                image = image - tf.ones_like(image) * bl
                image_lt0 = tf.reduce_any(image < 0, axis=-1, keepdims=True)
                image = tf.where(image_lt0, tf.zeros_like(image), image)
                image = tf.cast(tf.image.resize(image, self.size, method="area"), dtype=tf.float32)
                return image
            illuminants = []
            images = []
            for i in range(self.illuminants):
                # Pick a random source image; the ground-truth index is
                # parsed from the numeric file name (1-based).
                index = tf.random.uniform((), 0, len(self.image_names), dtype=tf.int32, seed=42)
                image_name = self.image_names[index]
                gt_index = int(image_name[image_name.rfind('/') + 1:-4]) - 1
                if len(illuminants) == 0:
                    ill = self.gts[gt_index]
                    ill = ill / tf.reduce_max(ill)
                    illuminants.append(ill)
                    images.append(load(image_name))
                else:
                    # Re-sample until the new illuminant is at least
                    # ang_dist degrees from the previous one.
                    ill = self.gts[gt_index]
                    while losses.cosine_similarity(ill, illuminants[-1]) * 180 / 3.14 < self.ang_dist:
                        index = tf.random.uniform((), 0, len(self.image_names), dtype=tf.int32, seed=42)
                        image_name = self.image_names[index]
                        gt_index = int(image_name[image_name.rfind('/') + 1:-4]) - 1
                        ill = self.gts[gt_index]
                    images.append(load(image_name))
                    ill = ill / tf.reduce_max(ill)
                    illuminants.append(ill)
            illuminants = [tf.cast(ill, float) for ill in illuminants]
            mask_, gcs_mask_ = self.generate_n_ill_mask(illuminants)
            mask_ = tf.image.resize(mask_, self.size, method="nearest")
            gcs_mask_ = tf.image.resize(gcs_mask_, self.size, method="nearest")
            final_image = self.create_image_collage(images, gcs_mask_)
            # Scale the two regions' brightness independently at random.
            rand_brigthness = tf.random.uniform((self.illuminants,), 0.6, 1.2)
            final_image = tf.where(gcs_mask_ < 1, final_image * rand_brigthness[0], final_image * rand_brigthness[1])
            # Zero pixels are treated as invalid: grayscale mask 0.5,
            # color mask 0.
            gcs_mask_ = tf.where(final_image == 0, tf.ones_like(gcs_mask_) * 0.5, gcs_mask_)
            mask_ = tf.where(final_image == 0, tf.zeros_like(mask_), mask_)
            if self.encode:
                gcs_mask_ = encode_mask(gcs_mask_, final_image)
            return final_image, gcs_mask_, mask_, illuminants
def image_generator(gen, images_per_epoch=100, batch_size=1000, out_mapping=lambda *x: (x[0], x[1])):
    """Yield training batches built from an ImageGenerator.

    gen: object whose load_img() produces one raw sample tuple.
    images_per_epoch: number of samples to generate and hold in RAM at once.
    batch_size: number of samples yielded per step.
    out_mapping: maps a raw load_img() tuple to (x, y) or (x, y, weight).
    Yields (X, Y) or (X, Y, W) tensor batches; the pool is regenerated and
    reshuffled each time it has been fully consumed.
    """
    cnt = 0
    while True:
        if cnt == 0:
            # Build a fresh sample pool in parallel worker threads.
            data = Parallel(n_jobs=10, prefer="threads")(delayed(gen.load_img)() for i in
                                                         range(images_per_epoch))
            data = list(map(lambda x: out_mapping(*x), data))
            # A 3-tuple mapping output means per-sample weights are used.
            weighted = False
            if len(data[0]) == 3:
                weighted = True
            X = tf.expand_dims(data[0][0], axis=0)
            Y = tf.expand_dims(data[0][1], axis=0)
            if weighted:
                W = tf.expand_dims(data[0][2], axis=0)
            # Stack the remaining samples onto the batch dimension.
            for d in data[1:]:
                x, y = d[0], d[1]
                if weighted:
                    w = d[2]
                    W = tf.concat((W, tf.expand_dims(w, axis=0)), axis=0)
                X = tf.concat((X, tf.expand_dims(x, axis=0)), axis=0)
                Y = tf.concat((Y, tf.expand_dims(y, axis=0)), axis=0)
            X = np.array(X)
            Y = np.array(Y)
            if weighted:
                W = np.array(W)
                X, Y, W = sklearn.utils.shuffle(X, Y, W)
            else:
                X, Y = sklearn.utils.shuffle(X, Y)
        if weighted:
            yield (tf.convert_to_tensor(X[batch_size * cnt:batch_size * (cnt + 1)]),
                   tf.convert_to_tensor(Y[batch_size * cnt:batch_size * (cnt + 1)]),
                   tf.convert_to_tensor(W[batch_size * cnt:batch_size * (cnt + 1)]))
        else:
            yield (tf.convert_to_tensor(X[batch_size * cnt:batch_size * (cnt + 1)]),
                   tf.convert_to_tensor(Y[batch_size * cnt:batch_size * (cnt + 1)]))
        cnt += 1
        # Rebuild the pool once every sample has been served.
        if cnt * batch_size >= Y.shape[0]:
            cnt = 0
if __name__ == '__main__':
    # Smoke test: build a generator over the Cube+ dataset and visualize
    # one image/mask pair. Paths are machine-specific.
    import visualizer
    import data_processing as dp
    def load_image_names(path, base_path):
        # Read relative image paths from a text file and prefix each one.
        names = np.loadtxt(path, dtype="str")
        names = np.array([base_path + n for n in names])
        return names
    path = "D:/fax/Cube+/paths.txt"
    paths = load_image_names(path, base_path="D:/fax/Cube+")
    gts = np.loadtxt("D:/fax/Cube+/cube+_gt.txt")
    ig = ImageGenerator(paths, gts)
    gen = image_generator(ig, 10, 2, out_mapping=dp.image_histogram_mapping_segmentation)
    image, mask = next(gen)
    visualizer.visualize(image[..., :3])
    visualizer.visualize(mask[..., :3])
import numpy as np
import mask
import sklearn
from joblib import Parallel, delayed
import tensorflow as tf
import losses
from Cube2Dataset import encode_mask
class ImageGenerator:
def __init__(self, image_names, gts, illuminants=2, size=(512, 1024), ang_dist=3, encode=True):
self.image_names = image_names
self.gts = gts
self.size = size
self.depth = 3
self.illuminants = illuminants
self.ang_dist = ang_dist
self.encode = encode
@tf.function
def create_image_collage(self, images, mask):
image = tf.where(mask == 1, images[0], images[1])
return image
def generate_n_ill_mask(self, illuminants):
size = (128, 128)
num_of_ill = len(illuminants)
while True:
mask_ = np.uint8(np.zeros((size[1], size[0])))
for i in range(num_of_ill - 1):
mask_ = mask.draw_new_line(mask_)
mask_, created_ill = mask.create_mask_from_lines(mask_)
if created_ill != num_of_ill:
continue
val, cnt = np.unique(mask_, return_counts=True)
if np.min(cnt / (size[1] * size[0])) >= 1 / (3 * num_of_ill):
break
color_mask_ = np.zeros((size[0], size[1], 3), dtype='float32')
graysacle_mask_ = np.zeros((size[0], size[1], 3), dtype='float32')
for i, ill in enumerate(illuminants):
color_mask_[mask_ == i + 1, :] = ill
if len(illuminants) > 1:
graysacle_mask_[mask_ == i + 1, :] = i / (len(illuminants) - 1)
# color_mask_ = tf.image.convert_image_dtype(color_mask_, dtype=tf.float32)
return tf.convert_to_tensor(color_mask_), tf.convert_to_tensor(graysacle_mask_)
# @tf.function
def load_img(self, bl=tf.constant(2048 / (2 ** 14 - 1), dtype=tf.float32)):
with tf.device('/device:cpu:0'):
def load(image_name):
image = tf.io.read_file(image_name)
image = tf.image.decode_png(image, channels=3, dtype=tf.uint16)
image = tf.cast(image, dtype=tf.float32) / (2 ** 14 - 1)
image = image - tf.ones_like(image) * bl
image_lt0 = tf.reduce_any(image < 0, axis=-1, keepdims=True)
image = tf.where(image_lt0, tf.zeros_like(image), image)
image = tf.cast(tf.image.resize(image, self.size, method="area"), dtype=tf.float32)
return image
illuminants = []
images = []
for i in range(self.illuminants):
index = tf.random.uniform((), 0, len(self.image_names), dtype=tf.int32, seed=42)
image_name = self.image_names[index]
gt_index = int(image_name[image_name.rfind('/') + 1:-4]) - 1
if len(illuminants) == 0:
ill = self.gts[gt_index]
ill = ill / tf.reduce_max(ill)
illuminants.append(ill)
images.append(load(image_name))
else:
ill = self.gts[gt_index]
while losses.cosine_similarity(ill, illuminants[-1]) * 180 / 3.14 < self.ang_dist:
index = tf.random.uniform((), 0, len(self.image_names), dtype=tf.int32, seed=42)
image_name = self.image_names[index]
gt_index = int(image_name[image_name.rfind('/') + 1:-4]) - 1
ill = self.gts[gt_index]
images.append(load(image_name))
ill = ill / tf.reduce_max(ill)
illuminants.append(ill)
illuminants = [tf.cast(ill, float) for ill in illuminants]
mask_, gcs_mask_ = self.generate_n_ill_mask(illuminants)
mask_ = tf.image.resize(mask_, self.size, method="nearest")
gcs_mask_ = tf.image.resize(gcs_mask_, self.size, method="nearest")
final_image = self.create_image_collage(images, gcs_mask_)
rand_brigthness = tf.random.uniform((self.illuminants,), 0.6, 1.2)
final_image = tf.where(gcs_mask_ < 1, final_image * rand_brigthness[0], final_image * rand_brigthness[1])
gcs_mask_ = tf.where(final_image == 0, tf.ones_like(gcs_mask_) * 0.5, gcs_mask_)
mask_ = tf.where(final_image == 0, tf.zeros_like(mask_), mask_)
if self.encode:
gcs_mask_ = encode_mask(gcs_mask_, final_image)
return final_image, gcs_mask_, mask_, illuminants
def image_generator(gen, images_per_epoch=100, batch_size=1000, out_mapping=lambda *x: (x[0], x[1])):
"""
image_names: list of images, only contains filename
folder: folder with images in image_name
patch_size: size of single patch (only for gtype "single")
uv: god knows leave at false
image_per_epoch: number of images to color and store in ram at once
batch_size: number of patches to use in single step of training
gtype: which generator to use ["single", "multi"]
illuminanst: number of illuminants in each image (only for gtype "multi")
image_size: size oif image used when gtype is "multi"
reduce_mean: perform standardization on image or not
"""
cnt = 0
while True:
if cnt == 0:
data = Parallel(n_jobs=10, prefer="threads")(delayed(gen.load_img)() for i in
range(images_per_epoch))
data = list(map(lambda x: out_mapping(*x), data))
weighted = False
if len(data[0]) == 3:
weighted = True
X = tf.expand_dims(data[0][0], axis=0)
Y = tf.expand_dims(data[0][1], axis=0)
if weighted:
W = tf.expand_dims(data[0][2], axis=0)
for d in data[1:]:
x, y = d[0], d[1]
if weighted:
w = d[2]
W = tf.concat((W, tf.expand_dims(w, axis=0)), axis=0)
X = tf.concat((X, tf.expand_dims(x, axis=0)), axis=0)
Y = tf.concat((Y, tf.expand_dims(y, axis=0)), axis=0)
X = np.array(X)
Y = np.array(Y)
if weighted:
W = np.array(W)
X, Y, W = sklearn.utils.shuffle(X, Y, W)
else:
X, Y = sklearn.utils.shuffle(X, Y)
if weighted:
yield (tf.convert_to_tensor(X[batch_size * cnt:batch_size * (cnt + 1)]),
tf.convert_to_tensor(Y[batch_size * cnt:batch_size * (cnt + 1)]),
tf.convert_to_tensor(W[batch_size * cnt:batch_size * (cnt + 1)]))
else:
yield (tf.convert_to_tensor(X[batch_size * cnt:batch_size * (cnt + 1)]),
tf.convert_to_tensor(Y[batch_size * cnt:batch_size * (cnt + 1)]))
cnt += 1
if cnt * batch_size >= Y.shape[0]:
cnt = 0
if __name__ == '__main__':
import visualizer
import data_processing as dp
def load_image_names(path, base_path):
names = np.loadtxt(path, dtype="str")
names = np.array([base_path + n for n in names])
return names
path = "D:/fax/Cube+/paths.txt"
paths = load_image_names(path, base_path="D:/fax/Cube+")
gts = np.loadtxt("D:/fax/Cube+/cube+_gt.txt")
ig = ImageGenerator(paths, gts)
gen = image_generator(ig, 10, 2, out_mapping=dp.image_histogram_mapping_segmentation)
image, mask = next(gen)
visualizer.visualize(image[..., :3])
visualizer.visualize(mask[..., :3]) | 0.541409 | 0.336672 |
from hashcash import check
import random
import string
import sys
import os
import SocketServer
import hashlib
import os
import subprocess
import socket
import sys
import threading
import codecs
import time
def readline(sock):
    """Read from *sock* one byte at a time until a newline or EOF.

    Returns everything read, including the trailing newline when present;
    returns a partial line (or '') when the peer closes early.
    """
    pieces = []
    while True:
        chunk = sock.recv(1)
        if len(chunk) < 1:
            break
        pieces.append(chunk)
        if chunk == "\n":
            break
    return ''.join(pieces)
def do_pow(bits, sock):
    """Run the hashcash proof-of-work handshake on *sock*.

    Sends a random resource string, reads back the client's stamp, and
    returns True only for a valid hashcash v1 stamp at the requested
    difficulty.
    """
    resource = "".join(random.choice(string.ascii_lowercase) for _ in range(8))
    prompt = "Please use the following command to solve the Proof of Work: hashcash -mb{} {}\n"
    sock.sendall(prompt.format(bits, resource))
    sys.stdout.flush()
    stamp = readline(sock).strip()
    if not stamp.startswith("1:"):
        sock.sendall("only hashcash v1 supported")
        return False
    if not check(stamp, resource=resource, bits=bits):
        sock.sendall("invalid")
        return False
    return True
class PowHandler(SocketServer.BaseRequestHandler):
    """TCP handler gating a challenge task behind a hashcash proof of work.

    After a valid PoW the task COMMAND is spawned with its stdio attached
    directly to the client socket, then killed once TIMEOUT expires.
    """
    def handle(self):
        try:
            self.request.settimeout(TIMEOUT)
            if do_pow(DIFFICULTY, self.request):
                self.request.settimeout(None) # Turns out this task doesn't like nonblocking fds
                delay = 1.0
                timeout = int(TIMEOUT / delay)
                task = subprocess.Popen(COMMAND, stdin=self.request, stdout=self.request, stderr=self.request)
                # Poll the child until it exits or the time budget runs out.
                while task.poll() is None and timeout > 0:
                    # Do other things too if necessary e.g. print, check resources, etc.
                    time.sleep(delay)
                    timeout -= delay
                if timeout <= 0:
                    task.kill()
                    self.request.sendall(b'Timed out...\n')
                task.wait()
        except (socket.timeout):
            self.request.sendall(b'Timed out...\n')
if __name__ == '__main__':
    # Usage: pow.py <difficulty-bits> <timeout-seconds> <command...>
    DIFFICULTY = int(sys.argv[1])
    TIMEOUT = int(sys.argv[2])
    COMMAND = sys.argv[3:]
    SocketServer.ThreadingTCPServer.allow_reuse_address = True
    server = SocketServer.ThreadingTCPServer(('0.0.0.0', 1337), PowHandler)
    server.serve_forever()
import random
import string
import sys
import os
import SocketServer
import hashlib
import os
import subprocess
import socket
import sys
import threading
import codecs
import time
def readline(sock):
data = ''
while not data.endswith("\n"):
x = sock.recv(1)
if len(x) < 1:
break
data += x
return data
def do_pow(bits, sock):
resource = "".join(random.choice(string.ascii_lowercase) for i in range(8))
sock.sendall("Please use the following command to solve the Proof of Work: hashcash -mb{} {}\n".format(bits, resource))
sys.stdout.flush()
stamp = readline(sock).strip()
if not stamp.startswith("1:"):
sock.sendall("only hashcash v1 supported")
return False
if not check(stamp, resource=resource, bits=bits):
sock.sendall("invalid")
return False
return True
class PowHandler(SocketServer.BaseRequestHandler):
    # Per-connection handler: gate the wrapped COMMAND behind a hashcash
    # proof of work, then wire the child's stdio straight to the client socket.
    def handle(self):
        """Serve one client: run the PoW check, then spawn COMMAND on the socket."""
        try:
            # Bound the PoW exchange so a stalled client cannot hold a slot forever.
            self.request.settimeout(TIMEOUT)
            if do_pow(DIFFICULTY, self.request):
                self.request.settimeout(None) # Turns out this task doesn't like nonblocking fds
                delay = 1.0
                # Number of poll intervals before the child is force-killed.
                timeout = int(TIMEOUT / delay)
                task = subprocess.Popen(COMMAND, stdin=self.request, stdout=self.request, stderr=self.request)
                while task.poll() is None and timeout > 0:
                    # Do other things too if necessary e.g. print, check resources, etc.
                    time.sleep(delay)
                    timeout -= delay
                if timeout <= 0:
                    task.kill()
                    self.request.sendall(b'Timed out...\n')
                task.wait()  # reap the child so it does not linger as a zombie
        except (socket.timeout):
            self.request.sendall(b'Timed out...\n')
if __name__ == '__main__':
    # Usage: pow.py <difficulty-bits> <timeout-seconds> <command...>
    DIFFICULTY = int(sys.argv[1])  # hashcash difficulty in bits
    TIMEOUT = int(sys.argv[2])     # seconds allowed for the PoW and the child process
    COMMAND = sys.argv[3:]         # command to run once the PoW is solved
    # Allow quick restarts without waiting for sockets in TIME_WAIT to clear.
    SocketServer.ThreadingTCPServer.allow_reuse_address = True
    server = SocketServer.ThreadingTCPServer(('0.0.0.0', 1337), PowHandler)
    server.serve_forever()
# 0.210848 | 0.050261
import logging
import time
from apscheduler.schedulers.background import BackgroundScheduler
from zvt import init_log
from zvt.domain import *
from zvt.informer.informer import EmailInformer
# Module-level logger and the shared scheduler that the cron jobs below register with.
logger = logging.getLogger(__name__)
sched = BackgroundScheduler()
@sched.scheduled_job('cron', hour=15, minute=30,day_of_week='mon-sat')
def record_kdata():
    """Record joinquant stock k-data, retrying until the whole batch succeeds.

    Failures are logged, reported by email, and the batch is retried
    after a two-minute pause.
    """
    # Create the informer once instead of once per retry (matches record_block).
    email_action = EmailInformer()
    while True:
        try:
            Stock.record_data(provider='joinquant', sleeping_time=1)
            Stock1dKdata.record_data(provider='joinquant', sleeping_time=1)
            StockTradeDay.record_data(provider='joinquant', sleeping_time=1)
            StockValuation.record_data(provider='joinquant', sleeping_time=1)
            email_action.send_message("<EMAIL>", 'data.runner joinquant.record_kdata finished', '')
            break
        except Exception as e:
            msg = f'joinquant runner error:{e}'
            logger.exception(msg)
            email_action.send_message("<EMAIL>", 'data.runner joinquant.record_kdata error', msg)
            time.sleep(60 * 2)
@sched.scheduled_job('cron', hour=18, minute=30,day_of_week='tue,thu')
def record_others():
    """Record joinquant ETF data, retrying every two minutes on failure."""
    # Create the informer once instead of once per retry (matches record_block).
    email_action = EmailInformer()
    while True:
        try:
            Etf.record_data(provider='joinquant', sleeping_time=1)
            EtfStock.record_data(provider='joinquant', sleeping_time=1)
            # email_action.send_message("<EMAIL>", 'joinquant runner finished', '')
            break
        except Exception as e:
            msg = f'joinquant runner error:{e}'
            logger.exception(msg)
            email_action.send_message("<EMAIL>", 'data.runner joinquant.record_others error', msg)
            time.sleep(60 * 2)
@sched.scheduled_job('cron', hour=15, minute=30, day_of_week='mon,wed,fri')
def record_block():
    """Record sina block (sector) data, retrying every two minutes on failure."""
    email_action = EmailInformer()
    succeeded = False
    while not succeeded:
        try:
            Block.record_data(provider='sina', sleeping_time=2)
            BlockStock.record_data(provider='sina', sleeping_time=2)
            succeeded = True
        except Exception as e:
            msg = f'sina block error:{e}'
            logger.exception(msg)
            email_action.send_message("<EMAIL>", 'data.runner sina.record_block error', msg)
            time.sleep(60 * 2)
# Adjust the scheduled run times to suit your own setup.
# These are low-frequency, scattered datasets; updating once per day is enough.
@sched.scheduled_job('cron', hour=2, minute=00, day_of_week='mon-fri')
def record_finance():
    """Record eastmoney financial statements, retrying every minute on failure."""
    # Create the informer once instead of once per retry (matches record_block).
    email_action = EmailInformer()
    while True:
        try:
            Stock.record_data(provider='eastmoney')
            FinanceFactor.record_data(provider='eastmoney')
            BalanceSheet.record_data(provider='eastmoney')
            IncomeStatement.record_data(provider='eastmoney')
            CashFlowStatement.record_data(provider='eastmoney')
            # email_action.send_message("<EMAIL>", 'eastmoney runner1 finished', '')
            break
        except Exception as e:
            msg = f'eastmoney runner1 error:{e}'
            logger.exception(msg)
            email_action.send_message("<EMAIL>", 'data.runner eastmoney.record_finance error', msg)
            time.sleep(60)
if __name__ == '__main__':
    init_log('data_run.log')
    # The cron schedule alone is enough; no need to also run the jobs once at startup.
    sched.start()
    sched._thread.join()
# script/data_runner.py
import logging
import time
from apscheduler.schedulers.background import BackgroundScheduler
from zvt import init_log
from zvt.domain import *
from zvt.informer.informer import EmailInformer
# Module-level logger and the shared scheduler that the cron jobs below register with.
logger = logging.getLogger(__name__)
sched = BackgroundScheduler()
@sched.scheduled_job('cron', hour=15, minute=30,day_of_week='mon-sat')
def record_kdata():
    """Record joinquant stock k-data, retrying until the whole batch succeeds.

    Failures are logged, reported by email, and the batch is retried
    after a two-minute pause.
    """
    # Create the informer once instead of once per retry (matches record_block).
    email_action = EmailInformer()
    while True:
        try:
            Stock.record_data(provider='joinquant', sleeping_time=1)
            Stock1dKdata.record_data(provider='joinquant', sleeping_time=1)
            StockTradeDay.record_data(provider='joinquant', sleeping_time=1)
            StockValuation.record_data(provider='joinquant', sleeping_time=1)
            email_action.send_message("<EMAIL>", 'data.runner joinquant.record_kdata finished', '')
            break
        except Exception as e:
            msg = f'joinquant runner error:{e}'
            logger.exception(msg)
            email_action.send_message("<EMAIL>", 'data.runner joinquant.record_kdata error', msg)
            time.sleep(60 * 2)
@sched.scheduled_job('cron', hour=18, minute=30,day_of_week='tue,thu')
def record_others():
    """Record joinquant ETF data, retrying every two minutes on failure."""
    # Create the informer once instead of once per retry (matches record_block).
    email_action = EmailInformer()
    while True:
        try:
            Etf.record_data(provider='joinquant', sleeping_time=1)
            EtfStock.record_data(provider='joinquant', sleeping_time=1)
            # email_action.send_message("<EMAIL>", 'joinquant runner finished', '')
            break
        except Exception as e:
            msg = f'joinquant runner error:{e}'
            logger.exception(msg)
            email_action.send_message("<EMAIL>", 'data.runner joinquant.record_others error', msg)
            time.sleep(60 * 2)
@sched.scheduled_job('cron', hour=15, minute=30, day_of_week='mon,wed,fri')
def record_block():
    """Record sina block (sector) data, retrying every two minutes on failure."""
    email_action = EmailInformer()
    succeeded = False
    while not succeeded:
        try:
            Block.record_data(provider='sina', sleeping_time=2)
            BlockStock.record_data(provider='sina', sleeping_time=2)
            succeeded = True
        except Exception as e:
            msg = f'sina block error:{e}'
            logger.exception(msg)
            email_action.send_message("<EMAIL>", 'data.runner sina.record_block error', msg)
            time.sleep(60 * 2)
# Adjust the scheduled run times to suit your own setup.
# These are low-frequency, scattered datasets; updating once per day is enough.
@sched.scheduled_job('cron', hour=2, minute=00, day_of_week='mon-fri')
def record_finance():
    """Record eastmoney financial statements, retrying every minute on failure."""
    # Create the informer once instead of once per retry (matches record_block).
    email_action = EmailInformer()
    while True:
        try:
            Stock.record_data(provider='eastmoney')
            FinanceFactor.record_data(provider='eastmoney')
            BalanceSheet.record_data(provider='eastmoney')
            IncomeStatement.record_data(provider='eastmoney')
            CashFlowStatement.record_data(provider='eastmoney')
            # email_action.send_message("<EMAIL>", 'eastmoney runner1 finished', '')
            break
        except Exception as e:
            msg = f'eastmoney runner1 error:{e}'
            logger.exception(msg)
            email_action.send_message("<EMAIL>", 'data.runner eastmoney.record_finance error', msg)
            time.sleep(60)
if __name__ == '__main__':
    init_log('data_run.log')
    # The cron schedule alone is enough; no need to also run the jobs once at startup.
    sched.start()
    sched._thread.join()
# 0.313945 | 0.179279
from argparse import (
ArgumentParser,
Namespace,
)
import torch
from torch import nn
from torch.nn import functional as F
from utils.misc import optional_string
from .gaussian_smoothing import GaussianSmoothing
class DegradeArguments:
    """Command-line options describing the simulated film degradation."""

    @staticmethod
    def add_arguments(parser: ArgumentParser):
        """Register the degradation options on *parser*."""
        parser.add_argument(
            '--spectral_sensitivity',
            choices=["g", "b", "gb"],
            default="g",
            help="Type of spectral sensitivity. g: grayscale (panchromatic), b: blue-sensitive, gb: green+blue (orthochromatic)",
        )
        parser.add_argument(
            '--gaussian',
            type=float,
            default=0,
            help="estimated blur radius in pixels of the input photo if it is scaled to 1024x1024",
        )

    @staticmethod
    def to_string(args: Namespace) -> str:
        """Encode the options as a short tag (e.g. for output file names)."""
        blur_tag = optional_string(args.gaussian > 0, f"-G{args.gaussian}")
        return f"{args.spectral_sensitivity}" + blur_tag
class CameraResponse(nn.Module):
    """Learnable gain/offset/gamma tone curve applied in [-1, 1] space."""

    def __init__(self):
        super().__init__()
        # All three parameters start at the identity response.
        self.register_parameter("gamma", nn.Parameter(torch.ones(1)))
        self.register_parameter("offset", nn.Parameter(torch.zeros(1)))
        self.register_parameter("gain", nn.Parameter(torch.ones(1)))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Clamp into (-1, 1], map to (0, 1], apply the power-law curve, map back.
        clamped = torch.clamp(x, max=1, min=-1+1e-2)
        unit = (1 + clamped) * 0.5
        curved = self.offset + self.gain * torch.pow(unit, self.gamma)
        return (curved - 0.5) * 2
class SpectralResponse(nn.Module):
    """Collapse an RGB image to the single channel the film stock was sensitive to."""

    # TODO: use enum instead for color mode
    def __init__(self, spectral_sensitivity: str = 'b'):
        assert spectral_sensitivity in ("g", "b", "gb"), f"spectral_sensitivity {spectral_sensitivity} is not implemented."
        super().__init__()
        self.spectral_sensitivity = spectral_sensitivity
        if self.spectral_sensitivity == "g":
            # Standard luma weights, shaped to broadcast over NCHW batches.
            self.register_buffer("to_gray", torch.tensor([0.299, 0.587, 0.114]).reshape(1, -1, 1, 1))

    def forward(self, rgb: torch.Tensor) -> torch.Tensor:
        mode = self.spectral_sensitivity
        if mode == "b":
            return rgb[:, -1:]
        if mode == "gb":
            return (rgb[:, 1:2] + rgb[:, -1:]) * 0.5
        assert mode == "g"
        return (rgb * self.to_gray).sum(dim=1, keepdim=True)
class Downsample(nn.Module):
    """Antialiasing downsampling"""
    def __init__(self, input_size: int, output_size: int, channels: int):
        super().__init__()
        if input_size % output_size == 0:
            # Integer ratio: blur, then decimate via a strided convolution.
            self.stride = input_size // output_size
            self.grid = None
        else:
            # Non-integer ratio: blur at stride 1, then resample with grid_sample.
            self.stride = 1
            step = input_size / output_size
            x = torch.arange(output_size) * step
            Y, X = torch.meshgrid(x, x)
            grid = torch.stack((X, Y), dim=-1)
            # Normalize pixel coordinates into grid_sample's [-1, 1] range.
            grid /= torch.Tensor((input_size - 1, input_size - 1)).view(1, 1, -1)
            grid = grid * 2 - 1
            self.register_buffer("grid", grid)
        # Blur radius scales with the downsampling factor to suppress aliasing.
        sigma = 0.5 * input_size / output_size
        #print(f"{input_size} -> {output_size}: sigma={sigma}")
        self.blur = GaussianSmoothing(channels, int(2 * (sigma * 2) + 1 + 0.5), sigma)
    def forward(self, im: torch.Tensor):
        # NOTE(review): GaussianSmoothing is assumed to accept a `stride` kwarg
        # and preserve spatial size at stride 1 -- confirm in gaussian_smoothing.py.
        out = self.blur(im, stride=self.stride)
        if self.grid is not None:
            out = F.grid_sample(out, self.grid[None].expand(im.shape[0], -1, -1, -1))
        return out
class Degrade(nn.Module):
    """
    Simulate the degradation of antique film
    """
    def __init__(self, args:Namespace):
        super().__init__()
        self.srf = SpectralResponse(args.spectral_sensitivity)
        self.crf = CameraResponse()
        self.gaussian = None
        if args.gaussian is not None and args.gaussian > 0:
            # Smallest odd kernel width covering roughly two sigma per side.
            self.gaussian = GaussianSmoothing(3, 2 * int(args.gaussian * 2 + 0.5) + 1, args.gaussian)
    def forward(self, img: torch.Tensor, downsample: nn.Module = None):
        """Apply optional blur and downsampling, then spectral and camera response."""
        if self.gaussian is not None:
            img = self.gaussian(img)
        if downsample is not None:
            img = downsample(img)
        img = self.srf(img)
        img = self.crf(img)
        # Note that I changed it back to 3 channels
        return img.repeat((1, 3, 1, 1)) if img.shape[1] == 1 else img
# models/degrade.py
from argparse import (
ArgumentParser,
Namespace,
)
import torch
from torch import nn
from torch.nn import functional as F
from utils.misc import optional_string
from .gaussian_smoothing import GaussianSmoothing
class DegradeArguments:
    """Command-line options describing the simulated film degradation."""

    @staticmethod
    def add_arguments(parser: ArgumentParser):
        """Register the degradation options on *parser*."""
        parser.add_argument(
            '--spectral_sensitivity',
            choices=["g", "b", "gb"],
            default="g",
            help="Type of spectral sensitivity. g: grayscale (panchromatic), b: blue-sensitive, gb: green+blue (orthochromatic)",
        )
        parser.add_argument(
            '--gaussian',
            type=float,
            default=0,
            help="estimated blur radius in pixels of the input photo if it is scaled to 1024x1024",
        )

    @staticmethod
    def to_string(args: Namespace) -> str:
        """Encode the options as a short tag (e.g. for output file names)."""
        blur_tag = optional_string(args.gaussian > 0, f"-G{args.gaussian}")
        return f"{args.spectral_sensitivity}" + blur_tag
class CameraResponse(nn.Module):
    """Learnable gain/offset/gamma tone curve applied in [-1, 1] space."""

    def __init__(self):
        super().__init__()
        # All three parameters start at the identity response.
        self.register_parameter("gamma", nn.Parameter(torch.ones(1)))
        self.register_parameter("offset", nn.Parameter(torch.zeros(1)))
        self.register_parameter("gain", nn.Parameter(torch.ones(1)))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Clamp into (-1, 1], map to (0, 1], apply the power-law curve, map back.
        clamped = torch.clamp(x, max=1, min=-1+1e-2)
        unit = (1 + clamped) * 0.5
        curved = self.offset + self.gain * torch.pow(unit, self.gamma)
        return (curved - 0.5) * 2
class SpectralResponse(nn.Module):
    """Collapse an RGB image to the single channel the film stock was sensitive to."""

    # TODO: use enum instead for color mode
    def __init__(self, spectral_sensitivity: str = 'b'):
        assert spectral_sensitivity in ("g", "b", "gb"), f"spectral_sensitivity {spectral_sensitivity} is not implemented."
        super().__init__()
        self.spectral_sensitivity = spectral_sensitivity
        if self.spectral_sensitivity == "g":
            # Standard luma weights, shaped to broadcast over NCHW batches.
            self.register_buffer("to_gray", torch.tensor([0.299, 0.587, 0.114]).reshape(1, -1, 1, 1))

    def forward(self, rgb: torch.Tensor) -> torch.Tensor:
        mode = self.spectral_sensitivity
        if mode == "b":
            return rgb[:, -1:]
        if mode == "gb":
            return (rgb[:, 1:2] + rgb[:, -1:]) * 0.5
        assert mode == "g"
        return (rgb * self.to_gray).sum(dim=1, keepdim=True)
class Downsample(nn.Module):
    """Antialiasing downsampling"""
    def __init__(self, input_size: int, output_size: int, channels: int):
        super().__init__()
        if input_size % output_size == 0:
            # Integer ratio: blur, then decimate via a strided convolution.
            self.stride = input_size // output_size
            self.grid = None
        else:
            # Non-integer ratio: blur at stride 1, then resample with grid_sample.
            self.stride = 1
            step = input_size / output_size
            x = torch.arange(output_size) * step
            Y, X = torch.meshgrid(x, x)
            grid = torch.stack((X, Y), dim=-1)
            # Normalize pixel coordinates into grid_sample's [-1, 1] range.
            grid /= torch.Tensor((input_size - 1, input_size - 1)).view(1, 1, -1)
            grid = grid * 2 - 1
            self.register_buffer("grid", grid)
        # Blur radius scales with the downsampling factor to suppress aliasing.
        sigma = 0.5 * input_size / output_size
        #print(f"{input_size} -> {output_size}: sigma={sigma}")
        self.blur = GaussianSmoothing(channels, int(2 * (sigma * 2) + 1 + 0.5), sigma)
    def forward(self, im: torch.Tensor):
        # NOTE(review): GaussianSmoothing is assumed to accept a `stride` kwarg
        # and preserve spatial size at stride 1 -- confirm in gaussian_smoothing.py.
        out = self.blur(im, stride=self.stride)
        if self.grid is not None:
            out = F.grid_sample(out, self.grid[None].expand(im.shape[0], -1, -1, -1))
        return out
class Degrade(nn.Module):
    """
    Simulate the degradation of antique film
    """
    def __init__(self, args:Namespace):
        super().__init__()
        self.srf = SpectralResponse(args.spectral_sensitivity)
        self.crf = CameraResponse()
        self.gaussian = None
        if args.gaussian is not None and args.gaussian > 0:
            # Smallest odd kernel width covering roughly two sigma per side.
            self.gaussian = GaussianSmoothing(3, 2 * int(args.gaussian * 2 + 0.5) + 1, args.gaussian)
    def forward(self, img: torch.Tensor, downsample: nn.Module = None):
        """Apply optional blur and downsampling, then spectral and camera response."""
        if self.gaussian is not None:
            img = self.gaussian(img)
        if downsample is not None:
            img = downsample(img)
        img = self.srf(img)
        img = self.crf(img)
        # Note that I changed it back to 3 channels
        return img.repeat((1, 3, 1, 1)) if img.shape[1] == 1 else img
# 0.875999 | 0.552238
from django.db import models
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
from tinymce import models as tinymce_models
from taggit_autosuggest.managers import TaggableManager
from modelpage.current_user import get_current_user
from modelpage.core.models import Category
class Video(models.Model):
    """A video entry embedded from an external provider."""
    created = models.DateTimeField(_(u'Data de Criação'))
    title = models.CharField(_(u'Título do Vídeo'), max_length=200)
    slug = models.SlugField(_(u'Link no Site'), max_length=200,
                            unique=True)
    # Raw provider embed markup; made fluid-width in save() below.
    embed = models.TextField(_(u'Código do Vídeo'), help_text='Embed')
    body = tinymce_models.HTMLField(_(u'Descrição do Vídeo'), blank=True,
                                    null=True)
    modified = models.DateTimeField(_(u'Data de Modificação'), auto_now=True)
    # Default author comes from get_current_user (thread-local current user helper).
    author = models.ForeignKey(User, verbose_name=_(u'Autor'),
                               editable=False, default=get_current_user)
    categories = models.ManyToManyField(Category,
                                        verbose_name=_(u'Categorias'),
                                        blank=True, null=True)
    tags = TaggableManager()
    def get_absolute_url(self):
        """Return the date-based detail URL for this video."""
        return reverse('channel:video_date_detail',
                       kwargs={'year': self.created.year,
                               'month': self.created.strftime('%m'),
                               'day': self.created.strftime('%d'),
                               'slug': self.slug})
    def admin_embed(self):
        """Render the raw embed HTML in the admin list display."""
        return self.embed
    admin_embed.allow_tags = True
    admin_embed.short_description = 'Vídeo'
    def __unicode__(self):
        return unicode(self.title)
    def save(self, *args, **kwargs):
        """Replace the provider's fixed 560x315 size with fluid 100% dimensions."""
        self.embed = self.embed.replace('width="560" height="315"',
                                        'width="100%" height="100%"')
        super(Video, self).save(*args, **kwargs)
    class Meta:
        verbose_name = _(u'Canal de Vídeo')
        verbose_name_plural = _(u'Canal de Vídeos')
        ordering = ['-created', 'title', 'author']
class Audio(models.Model):
    """An audio entry embedded from an external provider."""
    created = models.DateTimeField(_(u'Data de Criação'))
    title = models.CharField(_(u'Título do Áudio'), max_length=200)
    slug = models.SlugField(_(u'Link no Site'), max_length=200,
                            unique=True)
    # Raw provider embed markup; made fluid-height in save() below.
    embed = models.TextField(_(u'Código do Áudio'), help_text='Embed')
    body = tinymce_models.HTMLField(_(u'Descrição do Áudio'), blank=True,
                                    null=True)
    modified = models.DateTimeField(_(u'Data de Modificação'), auto_now=True)
    # Default author comes from get_current_user (thread-local current user helper).
    author = models.ForeignKey(User, verbose_name=_(u'Autor'),
                               editable=False, default=get_current_user)
    categories = models.ManyToManyField(Category,
                                        verbose_name=_(u'Categorias'),
                                        blank=True, null=True)
    tags = TaggableManager()
    def get_absolute_url(self):
        """Return the date-based detail URL for this audio entry."""
        return reverse('channel:audio_date_detail',
                       kwargs={'year': self.created.year,
                               'month': self.created.strftime('%m'),
                               'day': self.created.strftime('%d'),
                               'slug': self.slug})
    def admin_embed(self):
        """Render the raw embed HTML in the admin list display."""
        return self.embed
    admin_embed.allow_tags = True
    admin_embed.short_description = 'Áudio'
    def __unicode__(self):
        return unicode(self.title)
    def save(self, *args, **kwargs):
        """Replace the provider's fixed 450px height with a fluid 100% height."""
        self.embed = self.embed.replace('height="450"', 'height="100%"')
        super(Audio, self).save(*args, **kwargs)
    class Meta:
        verbose_name = _(u'Canal de Áudio')
        verbose_name_plural = _(u'Canal de Áudios')
        ordering = ['-created', 'title', 'author']
# modelpage/channel/models.py
from django.db import models
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
from tinymce import models as tinymce_models
from taggit_autosuggest.managers import TaggableManager
from modelpage.current_user import get_current_user
from modelpage.core.models import Category
class Video(models.Model):
    """A video entry embedded from an external provider."""
    created = models.DateTimeField(_(u'Data de Criação'))
    title = models.CharField(_(u'Título do Vídeo'), max_length=200)
    slug = models.SlugField(_(u'Link no Site'), max_length=200,
                            unique=True)
    # Raw provider embed markup; made fluid-width in save() below.
    embed = models.TextField(_(u'Código do Vídeo'), help_text='Embed')
    body = tinymce_models.HTMLField(_(u'Descrição do Vídeo'), blank=True,
                                    null=True)
    modified = models.DateTimeField(_(u'Data de Modificação'), auto_now=True)
    # Default author comes from get_current_user (thread-local current user helper).
    author = models.ForeignKey(User, verbose_name=_(u'Autor'),
                               editable=False, default=get_current_user)
    categories = models.ManyToManyField(Category,
                                        verbose_name=_(u'Categorias'),
                                        blank=True, null=True)
    tags = TaggableManager()
    def get_absolute_url(self):
        """Return the date-based detail URL for this video."""
        return reverse('channel:video_date_detail',
                       kwargs={'year': self.created.year,
                               'month': self.created.strftime('%m'),
                               'day': self.created.strftime('%d'),
                               'slug': self.slug})
    def admin_embed(self):
        """Render the raw embed HTML in the admin list display."""
        return self.embed
    admin_embed.allow_tags = True
    admin_embed.short_description = 'Vídeo'
    def __unicode__(self):
        return unicode(self.title)
    def save(self, *args, **kwargs):
        """Replace the provider's fixed 560x315 size with fluid 100% dimensions."""
        self.embed = self.embed.replace('width="560" height="315"',
                                        'width="100%" height="100%"')
        super(Video, self).save(*args, **kwargs)
    class Meta:
        verbose_name = _(u'Canal de Vídeo')
        verbose_name_plural = _(u'Canal de Vídeos')
        ordering = ['-created', 'title', 'author']
class Audio(models.Model):
    """An audio entry embedded from an external provider."""
    created = models.DateTimeField(_(u'Data de Criação'))
    title = models.CharField(_(u'Título do Áudio'), max_length=200)
    slug = models.SlugField(_(u'Link no Site'), max_length=200,
                            unique=True)
    # Raw provider embed markup; made fluid-height in save() below.
    embed = models.TextField(_(u'Código do Áudio'), help_text='Embed')
    body = tinymce_models.HTMLField(_(u'Descrição do Áudio'), blank=True,
                                    null=True)
    modified = models.DateTimeField(_(u'Data de Modificação'), auto_now=True)
    # Default author comes from get_current_user (thread-local current user helper).
    author = models.ForeignKey(User, verbose_name=_(u'Autor'),
                               editable=False, default=get_current_user)
    categories = models.ManyToManyField(Category,
                                        verbose_name=_(u'Categorias'),
                                        blank=True, null=True)
    tags = TaggableManager()
    def get_absolute_url(self):
        """Return the date-based detail URL for this audio entry."""
        return reverse('channel:audio_date_detail',
                       kwargs={'year': self.created.year,
                               'month': self.created.strftime('%m'),
                               'day': self.created.strftime('%d'),
                               'slug': self.slug})
    def admin_embed(self):
        """Render the raw embed HTML in the admin list display."""
        return self.embed
    admin_embed.allow_tags = True
    admin_embed.short_description = 'Áudio'
    def __unicode__(self):
        return unicode(self.title)
    def save(self, *args, **kwargs):
        """Replace the provider's fixed 450px height with a fluid 100% height."""
        self.embed = self.embed.replace('height="450"', 'height="100%"')
        super(Audio, self).save(*args, **kwargs)
    class Meta:
        verbose_name = _(u'Canal de Áudio')
        verbose_name_plural = _(u'Canal de Áudios')
        ordering = ['-created', 'title', 'author']
# 0.524882 | 0.106505
# Mapping from English country names (as produced by the upstream data source)
# to the ISO 3166-1 alpha-2 codes accepted by pgeocode.
pgeocode_country_codes = {
    'Andorra': 'AD',
    'Argentina': 'AR',
    'American Samoa': 'AS',
    'Austria': 'AT',
    'Australia': 'AU',
    'Åland Islands': 'AX',
    'Bangladesh': 'BD',
    'Belgium': 'BE',
    'Bulgaria': 'BG',
    'Bermuda': 'BM',
    'Brazil': 'BR',
    'Belarus': 'BY',
    'Canada': 'CA',
    'Switzerland': 'CH',
    'Colombia': 'CO',
    'Costa Rica': 'CR',
    'Czechia': 'CZ',
    'Germany': 'DE',
    'Denmark': 'DK',
    'Dominican Republic': 'DO',
    'Algeria': 'DZ',
    'Spain': 'ES',
    'Finland': 'FI',
    'Faroe Islands': 'FO',
    'France': 'FR',
    'United Kingdom of Great Britain and Northern Ireland': 'GB',
    'French Guiana': 'GF',
    'Guernsey': 'GG',
    'Greenland': 'GL',
    'Guadeloupe': 'GP',
    'Guatemala': 'GT',
    'Guam': 'GU',
    'Croatia': 'HR',
    'Hungary': 'HU',
    'Ireland': 'IE',
    'Isle of Man': 'IM',
    'India': 'IN',
    'Iceland': 'IS',
    'Italy': 'IT',
    'Jersey': 'JE',
    'Japan': 'JP',
    'Liechtenstein': 'LI',
    'Sri Lanka': 'LK',
    'Lithuania': 'LT',
    'Luxembourg': 'LU',
    'Latvia': 'LV',
    'Monaco': 'MC',
    'Republic of Moldova': 'MD',
    'Marshall Islands': 'MH',
    'The former Yugoslav Republic of Macedonia': 'MK',
    'Northern Mariana Islands': 'MP',
    'Martinique': 'MQ',
    'Malta': 'MT',
    'Mexico': 'MX',
    'Malaysia': 'MY',
    'New Caledonia': 'NC',
    'Netherlands': 'NL',
    'Norway': 'NO',
    'New Zealand': 'NZ',
    'Philippines': 'PH',
    'Pakistan': 'PK',
    'Poland': 'PL',
    'Saint Pierre and Miquelon': 'PM',
    'Puerto Rico': 'PR',
    'Portugal': 'PT',
    'Réunion': 'RE',
    'Romania': 'RO',
    'Russian Federation': 'RU',
    'Sweden': 'SE',
    'Slovenia': 'SI',
    'Svalbard and Jan Mayen Islands': 'SJ',
    'Slovakia': 'SK',
    'San Marino': 'SM',
    'Thailand': 'TH',
    'Turkey': 'TR',
    'Ukraine': 'UA',
    'United States of America': 'US',
    'Uruguay': 'UY',
    'Holy See': 'VA',
    'United States Virgin Islands': 'VI',
    'Wallis and Futuna Islands': 'WF',
    'Mayotte': 'YT',
    'South Africa': 'ZA'
}
# batch_geocoder/pgeocode_country_codes.py
pgeocode_country_codes = {
    # Mapping from English country names (as produced by the upstream data source)
    # to the ISO 3166-1 alpha-2 codes accepted by pgeocode.
    'Andorra': 'AD',
    'Argentina': 'AR',
    'American Samoa': 'AS',
    'Austria': 'AT',
    'Australia': 'AU',
    'Åland Islands': 'AX',
    'Bangladesh': 'BD',
    'Belgium': 'BE',
    'Bulgaria': 'BG',
    'Bermuda': 'BM',
    'Brazil': 'BR',
    'Belarus': 'BY',
    'Canada': 'CA',
    'Switzerland': 'CH',
    'Colombia': 'CO',
    'Costa Rica': 'CR',
    'Czechia': 'CZ',
    'Germany': 'DE',
    'Denmark': 'DK',
    'Dominican Republic': 'DO',
    'Algeria': 'DZ',
    'Spain': 'ES',
    'Finland': 'FI',
    'Faroe Islands': 'FO',
    'France': 'FR',
    'United Kingdom of Great Britain and Northern Ireland': 'GB',
    'French Guiana': 'GF',
    'Guernsey': 'GG',
    'Greenland': 'GL',
    'Guadeloupe': 'GP',
    'Guatemala': 'GT',
    'Guam': 'GU',
    'Croatia': 'HR',
    'Hungary': 'HU',
    'Ireland': 'IE',
    'Isle of Man': 'IM',
    'India': 'IN',
    'Iceland': 'IS',
    'Italy': 'IT',
    'Jersey': 'JE',
    'Japan': 'JP',
    'Liechtenstein': 'LI',
    'Sri Lanka': 'LK',
    'Lithuania': 'LT',
    'Luxembourg': 'LU',
    'Latvia': 'LV',
    'Monaco': 'MC',
    'Republic of Moldova': 'MD',
    'Marshall Islands': 'MH',
    'The former Yugoslav Republic of Macedonia': 'MK',
    'Northern Mariana Islands': 'MP',
    'Martinique': 'MQ',
    'Malta': 'MT',
    'Mexico': 'MX',
    'Malaysia': 'MY',
    'New Caledonia': 'NC',
    'Netherlands': 'NL',
    'Norway': 'NO',
    'New Zealand': 'NZ',
    'Philippines': 'PH',
    'Pakistan': 'PK',
    'Poland': 'PL',
    'Saint Pierre and Miquelon': 'PM',
    'Puerto Rico': 'PR',
    'Portugal': 'PT',
    'Réunion': 'RE',
    'Romania': 'RO',
    'Russian Federation': 'RU',
    'Sweden': 'SE',
    'Slovenia': 'SI',
    'Svalbard and Jan Mayen Islands': 'SJ',
    'Slovakia': 'SK',
    'San Marino': 'SM',
    'Thailand': 'TH',
    'Turkey': 'TR',
    'Ukraine': 'UA',
    'United States of America': 'US',
    'Uruguay': 'UY',
    'Holy See': 'VA',
    'United States Virgin Islands': 'VI',
    'Wallis and Futuna Islands': 'WF',
    'Mayotte': 'YT',
    'South Africa': 'ZA'
}
# 0.414069 | 0.407569
from datetime import timedelta
from typing import List, Optional, Tuple
import hypothesis.strategies as st
import numpy as np
import numpy.testing as npt
import pandas as pd
import pandas.testing as tm
import pyarrow as pa
import pytest
from hypothesis import example, given, settings
# fmt: off
import fletcher as fr
from fletcher._algorithms import (
_extract_data_buffer_as_np_array,
_merge_valid_bitmaps,
all_op,
any_op,
integer_array_to_numpy,
take_indices_on_pyarrow_list,
)
from fletcher.algorithms.utils.chunking import _calculate_chunk_offsets, _combined_in_chunk_offsets, _in_chunk_offsets
# fmt: on
def test_calculate_chunk_offsets():
    """Chunk offsets are the cumulative starting index of every chunk."""
    cases = [
        ([[1, 1, 1]], [0]),
        ([[1], [1, 1]], [0, 1]),
        ([[1, 1], [1]], [0, 2]),
    ]
    for chunks, expected in cases:
        chunked = pa.chunked_array(chunks)
        npt.assert_array_equal(_calculate_chunk_offsets(chunked), np.array(expected))
def check_valid_in_offsets(
    arr: pa.ChunkedArray, in_offsets: List[Tuple[int, int, int]]
) -> None:
    """Sanity-check a list of in-chunk offsets against its source array."""
    if arr.num_chunks == 0:
        assert in_offsets == []
        return
    first_chunk, first_offset, _ = in_offsets[0]
    # A selection always begins at the very start of the first chunk.
    assert first_chunk == 0
    assert first_offset == 0
    # The selected lengths must add up to the full array length.
    assert sum(length for _, _, length in in_offsets) == len(arr)
@given(data=st.lists(st.lists(st.integers(min_value=0, max_value=10))))
def test_in_chunk_offsets(data: List[List[int]]):
    """Feeding the true chunk offsets must always yield a valid selection."""
    arr = pa.chunked_array(data, type=pa.int64())
    offsets = list(_calculate_chunk_offsets(arr))
    check_valid_in_offsets(arr, _in_chunk_offsets(arr, offsets))
def test_combined_in_chunk_offsets():
    """_combined_in_chunk_offsets aligns two chunked arrays on common split points."""
    cases = [
        # (chunks_a, chunks_b, expected_a, expected_b)
        ([[]], [[]], [(0, 0, 0)], [(0, 0, 0)]),
        ([[1]], [[2]], [(0, 0, 1)], [(0, 0, 1)]),
        (
            [[1, 2], [3, 4, 5]],
            [[1], [2, 3], [4, 5]],
            [(0, 0, 1), (0, 1, 1), (1, 0, 1), (1, 1, 2)],
            [(0, 0, 1), (1, 0, 1), (1, 1, 1), (2, 0, 2)],
        ),
    ]
    for chunks_a, chunks_b, expected_a, expected_b in cases:
        in_a_offsets, in_b_offsets = _combined_in_chunk_offsets(
            pa.chunked_array(chunks_a), pa.chunked_array(chunks_b)
        )
        assert in_a_offsets == expected_a
        assert in_b_offsets == expected_b
@pytest.mark.parametrize("data", [[1, 2, 4, 5], [1.0, 0.5, 4.0, 5.0]])
def test_extract_data_buffer_as_np_array(data):
    """The data buffer round-trips to numpy, both for full arrays and slices."""
    arr = pa.array(data)
    # Full array.
    npt.assert_array_equal(_extract_data_buffer_as_np_array(arr), np.array(data))
    # A slice must honour the arrow offset.
    npt.assert_array_equal(
        _extract_data_buffer_as_np_array(arr[2:4]), np.array(data[2:4])
    )
def assert_content_equals_array(result, expected):
    """Assert that the result is an Arrow structure and the content matches an array."""
    assert isinstance(result, (pa.Array, pa.ChunkedArray))
    flattened = (
        pa.concat_arrays(result.iterchunks())
        if isinstance(result, pa.ChunkedArray)
        else result
    )
    assert flattened.equals(expected)
def test_merge_valid_bitmaps():
    """Merged validity bitmaps AND the inputs, honouring slices and offsets."""
    a = pa.array([1, 1, 1, 1, 1, 1, 1, 1, 1])
    b = pa.array([1, 1, 1, None, None, None, 1, 1, 1])

    def check(left, right, expected_bytes):
        expected = np.array(expected_bytes, dtype=np.uint8)
        npt.assert_array_equal(_merge_valid_bitmaps(left, right), expected)

    # Whole arrays.
    check(a, a, [0xFF, 0x1])
    check(a, b, [0xC7, 0x1])
    # Slices starting on a byte boundary.
    check(a.slice(8, 1), a.slice(8, 1), [0x1])
    check(a.slice(0, 4), a.slice(0, 4), [0xF])
    check(a.slice(0, 4), b.slice(0, 4), [0x7])
    # Slices starting inside a byte.
    check(a.slice(5, 4), a.slice(5, 4), [0xF])
    check(a.slice(5, 4), b.slice(5, 4), [0xE])
    check(a.slice(5, 2), a.slice(5, 2), [0x3])
    check(a.slice(5, 2), b.slice(5, 2), [0x2])
    # Inputs whose slices start at different offsets.
    check(a.slice(5, 2), a.slice(3, 2), [0x3])
    check(a.slice(5, 2), b.slice(3, 2), [0x0])
@settings(deadline=timedelta(milliseconds=1000))
@given(data=st.lists(st.one_of(st.text(), st.none())))
def test_text_cat(data):
    """fr_text.cat must agree with pandas' Series.str.cat on arbitrary data."""
    if any("\x00" in x for x in data if x):
        # pytest.skip("pandas cannot handle \\x00 characters in tests")
        # Skip is not working properly with hypothesis
        return
    ser_pd = pd.Series(data, dtype=str)
    arrow_data = pa.array(data, type=pa.string())
    fr_array = fr.FletcherArray(arrow_data)
    ser_fr = pd.Series(fr_array)
    fr_other_array = fr.FletcherArray(arrow_data)
    ser_fr_other = pd.Series(fr_other_array)
    result_pd = ser_pd.str.cat(ser_pd)
    result_fr = ser_fr.fr_text.cat(ser_fr_other)
    # Compare as plain objects so the fletcher dtype does not fail the equality check.
    result_fr = result_fr.astype(object)
    # Pandas returns np.nan for NA values in cat, keep this in line
    result_fr[result_fr.isna()] = np.nan
    tm.assert_series_equal(result_fr, result_pd)
def _optional_len(x: Optional[str]) -> int:
if x is not None:
return len(x)
else:
return 0
@settings(deadline=timedelta(milliseconds=1000))
@given(data=st.lists(st.one_of(st.text(), st.none())))
@pytest.mark.xfail(reason="Not implemented")
def test_text_zfill(data):
    """fr_text.zfill must agree with pandas' Series.str.zfill (not implemented yet)."""
    if any("\x00" in x for x in data if x):
        # pytest.skip("pandas cannot handle \\x00 characters in tests")
        # Skip is not working properly with hypothesis
        return
    ser_pd = pd.Series(data, dtype=str)
    # Pad to one more than the longest entry so every value is affected.
    max_str_len = ser_pd.map(_optional_len).max()
    if pd.isna(max_str_len):
        max_str_len = 0
    arrow_data = pa.array(data, type=pa.string())
    fr_array = fr.FletcherArray(arrow_data)
    ser_fr = pd.Series(fr_array)
    result_pd = ser_pd.str.zfill(max_str_len + 1)
    result_fr = ser_fr.fr_text.zfill(max_str_len + 1)
    result_fr = result_fr.astype(object)
    # Pandas returns np.nan for NA values in cat, keep this in line
    result_fr[result_fr.isna()] = np.nan
    tm.assert_series_equal(result_fr, result_pd)
@settings(deadline=None)
@given(data=st.lists(st.one_of(st.booleans(), st.none())), skipna=st.booleans())
@example([], False)
@example([], True)
# Test with numpy.array as input.
# This has the caveat that the missing buffer is None.
@example(np.ones(10).astype(bool), False)
@example(np.ones(10).astype(bool), True)
def test_any_op(data, skipna):
    """any_op must match pandas' Series.any for both skipna modes."""
    arrow = pa.array(data, type=pa.bool_())
    # https://github.com/pandas-dev/pandas/issues/27709 / https://github.com/pandas-dev/pandas/issues/12863
    pandas = pd.Series(data).astype(float)
    assert any_op(arrow, skipna) == pandas.any(skipna=skipna)
    # Split in the middle and check whether this still works
    if len(data) > 2:
        arrow = pa.chunked_array(
            [data[: len(data) // 2], data[len(data) // 2 :]], type=pa.bool_()
        )
        assert any_op(arrow, skipna) == pandas.any(skipna=skipna)
@given(data=st.lists(st.one_of(st.booleans(), st.none())), skipna=st.booleans())
# Test with numpy.array as input.
# This has the caveat that the missing buffer is None.
@example(np.ones(10).astype(bool), False)
@example(np.ones(10).astype(bool), True)
def test_all_op(data, skipna):
    """all_op must match pandas' Series.all for both skipna modes."""
    arrow = pa.array(data, type=pa.bool_())
    # https://github.com/pandas-dev/pandas/issues/27709 / https://github.com/pandas-dev/pandas/issues/12863
    pandas = pd.Series(data).astype(float)
    assert all_op(arrow, skipna) == pandas.all(skipna=skipna)
    # Split in the middle and check whether this still works
    if len(data) > 2:
        arrow = pa.chunked_array(
            [data[: len(data) // 2], data[len(data) // 2 :]], type=pa.bool_()
        )
        assert all_op(arrow, skipna) == pandas.all(skipna=skipna)
@pytest.mark.parametrize(
    ("array", "fill_null_value", "expected"),
    [
        (pa.array([2, 1], type=pa.int16()), -1, np.array([2, 1], dtype=np.int16)),
        (pa.array([2, None], type=pa.int32()), -1, np.array([2, -1], dtype=np.int32)),
        (pa.array([2, None], type=pa.int64()), -1.5, np.array([2, -1], dtype=np.int64)),
        (pa.array([1, None], type=pa.uint8()), 257, np.array([1, 1], dtype=np.uint8)),
        (pa.array([None, None], type=pa.int8()), 5, np.array([5, 5], dtype=np.int8)),
        (pa.array([], type=pa.int8()), 5, np.array([], dtype=np.int8)),
    ],
)
def test_integer_array_to_numpy(array, fill_null_value, expected):
    """Nulls are replaced by the fill value cast into the array's integer dtype."""
    actual = integer_array_to_numpy(array, fill_null_value)
    # dtype must follow the arrow type, including truncation/wrap-around of the fill value.
    assert actual.dtype == expected.dtype
    np.testing.assert_array_equal(actual, expected)
@pytest.mark.parametrize(
    ("array", "indices"),
    [
        # Single-element int64 lists, a few random indices.
        (
            pa.array([[k] for k in range(10 ** 4)]),
            np.random.randint(0, 10 ** 4, 10 ** 2),
        ),
        # Single-element float64 lists.
        (
            pa.array([[float(k)] for k in range(10 ** 4)]),
            np.random.randint(0, 10 ** 4, 10 ** 2),
        ),
        # Longer lists with many more indices than rows.
        (
            pa.array(np.random.randint(0, 100, 10) for _ in range(10 ** 4)),
            np.random.randint(0, 10 ** 4, 10 ** 5),
        ),
        # 64-bit offset (large_list) variants, int and float values.
        (
            pa.LargeListArray.from_arrays(
                list(range(10 ** 4 + 1)), list(range(10 ** 4))
            ),
            np.random.randint(0, 10 ** 4, 10 ** 2),
        ),
        (
            pa.LargeListArray.from_arrays(
                list(range(10 ** 4 + 1)), [float(v) for v in range(10 ** 4)]
            ),
            np.random.randint(0, 10 ** 4, 10 ** 2),
        ),
        # A single empty list.
        (pa.array([[]]), [0]),
    ],
)
def test_take_indices_on_pyarrow_list(array, indices):
    """take_indices_on_pyarrow_list must agree with pyarrow's builtin take."""
    expected = array.take(pa.array(indices)).to_pylist()
    actual = take_indices_on_pyarrow_list(array, indices).to_pylist()
    np.testing.assert_array_equal(actual, expected)
# (dataset row separator: tests/test_algorithms.py)
from datetime import timedelta
from typing import List, Optional, Tuple
import hypothesis.strategies as st
import numpy as np
import numpy.testing as npt
import pandas as pd
import pandas.testing as tm
import pyarrow as pa
import pytest
from hypothesis import example, given, settings
# fmt: off
import fletcher as fr
from fletcher._algorithms import (
_extract_data_buffer_as_np_array,
_merge_valid_bitmaps,
all_op,
any_op,
integer_array_to_numpy,
take_indices_on_pyarrow_list,
)
from fletcher.algorithms.utils.chunking import _calculate_chunk_offsets, _combined_in_chunk_offsets, _in_chunk_offsets
# fmt: on
def test_calculate_chunk_offsets():
    """_calculate_chunk_offsets returns the starting index of each chunk."""
    arr = pa.chunked_array([[1, 1, 1]])
    npt.assert_array_equal(_calculate_chunk_offsets(arr), np.array([0]))
    arr = pa.chunked_array([[1], [1, 1]])
    npt.assert_array_equal(_calculate_chunk_offsets(arr), np.array([0, 1]))
    arr = pa.chunked_array([[1, 1], [1]])
    npt.assert_array_equal(_calculate_chunk_offsets(arr), np.array([0, 2]))
def check_valid_in_offsets(
    arr: pa.ChunkedArray, in_offsets: List[Tuple[int, int, int]]
) -> None:
    """Assert structural invariants of (chunk, offset, length) selection triples.

    An empty chunked array must produce an empty selection; otherwise the
    selection must begin at chunk 0 / offset 0 and the triple lengths must
    add up to the full array length.
    """
    if arr.num_chunks == 0:
        assert in_offsets == []
        return
    first_chunk_index, first_chunk_offset, _ = in_offsets[0]
    # Selections always begin at the very start of the array.
    assert first_chunk_index == 0
    assert first_chunk_offset == 0
    # The per-chunk lengths must cover the array exactly.
    assert sum(length for _, _, length in in_offsets) == len(arr)
@given(data=st.lists(st.lists(st.integers(min_value=0, max_value=10))))
def test_in_chunk_offsets(data: List[List[int]]):
    """Feeding the chunk start offsets back in must yield a valid selection."""
    arr = pa.chunked_array(data, type=pa.int64())
    # Simple case: Passing in the actual chunk offsets should yield a valid selection
    offsets = list(_calculate_chunk_offsets(arr))
    in_offsets = _in_chunk_offsets(arr, offsets)
    check_valid_in_offsets(arr, in_offsets)
def test_combined_in_chunk_offsets():
    """_combined_in_chunk_offsets aligns two differently chunked arrays."""
    # Both arrays empty -> a single zero-length slice on each side.
    a = pa.chunked_array([[]])
    b = pa.chunked_array([[]])
    in_a_offsets, in_b_offsets = _combined_in_chunk_offsets(a, b)
    assert in_a_offsets == [(0, 0, 0)]
    assert in_b_offsets == [(0, 0, 0)]
    # Single equally sized chunks map one-to-one.
    a = pa.chunked_array([[1]])
    b = pa.chunked_array([[2]])
    in_a_offsets, in_b_offsets = _combined_in_chunk_offsets(a, b)
    assert in_a_offsets == [(0, 0, 1)]
    assert in_b_offsets == [(0, 0, 1)]
    # Mismatched layouts are split at every chunk boundary of either input.
    a = pa.chunked_array([[1, 2], [3, 4, 5]])
    b = pa.chunked_array([[1], [2, 3], [4, 5]])
    in_a_offsets, in_b_offsets = _combined_in_chunk_offsets(a, b)
    assert in_a_offsets == [(0, 0, 1), (0, 1, 1), (1, 0, 1), (1, 1, 2)]
    assert in_b_offsets == [(0, 0, 1), (1, 0, 1), (1, 1, 1), (2, 0, 2)]
@pytest.mark.parametrize("data", [[1, 2, 4, 5], [1.0, 0.5, 4.0, 5.0]])
def test_extract_data_buffer_as_np_array(data):
    """_extract_data_buffer_as_np_array round-trips values, including slices."""
    arr = pa.array(data)
    result = _extract_data_buffer_as_np_array(arr)
    expected = np.array(data)
    npt.assert_array_equal(result, expected)
    # Slices must respect the arrow offset rather than re-reading the whole buffer.
    result = _extract_data_buffer_as_np_array(arr[2:4])
    expected = np.array(data[2:4])
    npt.assert_array_equal(result, expected)
def assert_content_equals_array(result, expected):
    """Assert that the result is an Arrow structure and the content matches an array."""
    assert isinstance(result, (pa.Array, pa.ChunkedArray))
    # Flatten chunked results so both sides can be compared with Array.equals.
    flattened = (
        pa.concat_arrays(result.iterchunks())
        if isinstance(result, pa.ChunkedArray)
        else result
    )
    assert flattened.equals(expected)
def test_merge_valid_bitmaps():
    """_merge_valid_bitmaps ANDs two validity bitmaps, honouring slice offsets."""
    a = pa.array([1, 1, 1, 1, 1, 1, 1, 1, 1])
    b = pa.array([1, 1, 1, None, None, None, 1, 1, 1])
    # Full arrays: 9 valid bits -> 0xFF plus one bit in the second byte.
    expected = np.array([0xFF, 0x1], dtype=np.uint8)
    result = _merge_valid_bitmaps(a, a)
    npt.assert_array_equal(result, expected)
    # b is null at positions 3-5 -> bits 3-5 cleared in the first byte.
    expected = np.array([0xC7, 0x1], dtype=np.uint8)
    result = _merge_valid_bitmaps(a, b)
    npt.assert_array_equal(result, expected)
    # Slices that start on a byte boundary.
    expected = np.array([0x1], dtype=np.uint8)
    result = _merge_valid_bitmaps(a.slice(8, 1), a.slice(8, 1))
    npt.assert_array_equal(result, expected)
    expected = np.array([0xF], dtype=np.uint8)
    result = _merge_valid_bitmaps(a.slice(0, 4), a.slice(0, 4))
    npt.assert_array_equal(result, expected)
    expected = np.array([0x7], dtype=np.uint8)
    result = _merge_valid_bitmaps(a.slice(0, 4), b.slice(0, 4))
    npt.assert_array_equal(result, expected)
    # Slices that start inside a byte (offset 5).
    expected = np.array([0xF], dtype=np.uint8)
    result = _merge_valid_bitmaps(a.slice(5, 4), a.slice(5, 4))
    npt.assert_array_equal(result, expected)
    expected = np.array([0xE], dtype=np.uint8)
    result = _merge_valid_bitmaps(a.slice(5, 4), b.slice(5, 4))
    npt.assert_array_equal(result, expected)
    expected = np.array([0x3], dtype=np.uint8)
    result = _merge_valid_bitmaps(a.slice(5, 2), a.slice(5, 2))
    npt.assert_array_equal(result, expected)
    expected = np.array([0x2], dtype=np.uint8)
    result = _merge_valid_bitmaps(a.slice(5, 2), b.slice(5, 2))
    npt.assert_array_equal(result, expected)
    # Slices with different offsets on each side.
    expected = np.array([0x3], dtype=np.uint8)
    result = _merge_valid_bitmaps(a.slice(5, 2), a.slice(3, 2))
    npt.assert_array_equal(result, expected)
    expected = np.array([0x0], dtype=np.uint8)
    result = _merge_valid_bitmaps(a.slice(5, 2), b.slice(3, 2))
    npt.assert_array_equal(result, expected)
@settings(deadline=timedelta(milliseconds=1000))
@given(data=st.lists(st.one_of(st.text(), st.none())))
def test_text_cat(data):
    """fr_text.cat must behave like pandas' str.cat on strings with nulls."""
    if any("\x00" in x for x in data if x):
        # pytest.skip("pandas cannot handle \\x00 characters in tests")
        # Skip is not working properly with hypothesis
        return
    ser_pd = pd.Series(data, dtype=str)
    arrow_data = pa.array(data, type=pa.string())
    fr_array = fr.FletcherArray(arrow_data)
    ser_fr = pd.Series(fr_array)
    fr_other_array = fr.FletcherArray(arrow_data)
    ser_fr_other = pd.Series(fr_other_array)
    result_pd = ser_pd.str.cat(ser_pd)
    result_fr = ser_fr.fr_text.cat(ser_fr_other)
    result_fr = result_fr.astype(object)
    # Pandas returns np.nan for NA values in cat, keep this in line
    result_fr[result_fr.isna()] = np.nan
    tm.assert_series_equal(result_fr, result_pd)
def _optional_len(x: Optional[str]) -> int:
if x is not None:
return len(x)
else:
return 0
@settings(deadline=timedelta(milliseconds=1000))
@given(data=st.lists(st.one_of(st.text(), st.none())))
@pytest.mark.xfail(reason="Not implemented")
def test_text_zfill(data):
    """fr_text.zfill must behave like pandas' str.zfill (expected to fail for now)."""
    if any("\x00" in x for x in data if x):
        # pytest.skip("pandas cannot handle \\x00 characters in tests")
        # Skip is not working properly with hypothesis
        return
    ser_pd = pd.Series(data, dtype=str)
    # Pad one wider than the longest string so zfill always has an effect.
    max_str_len = ser_pd.map(_optional_len).max()
    if pd.isna(max_str_len):
        max_str_len = 0
    arrow_data = pa.array(data, type=pa.string())
    fr_array = fr.FletcherArray(arrow_data)
    ser_fr = pd.Series(fr_array)
    result_pd = ser_pd.str.zfill(max_str_len + 1)
    result_fr = ser_fr.fr_text.zfill(max_str_len + 1)
    result_fr = result_fr.astype(object)
    # Pandas returns np.nan for NA values in cat, keep this in line
    result_fr[result_fr.isna()] = np.nan
    tm.assert_series_equal(result_fr, result_pd)
@settings(deadline=None)
@given(data=st.lists(st.one_of(st.booleans(), st.none())), skipna=st.booleans())
@example([], False)
@example([], True)
# Test with numpy.array as input.
# This has the caveat that the missing buffer is None.
@example(np.ones(10).astype(bool), False)
@example(np.ones(10).astype(bool), True)
def test_any_op(data, skipna):
    """Check any_op against pandas' Series.any for nullable boolean data."""
    arrow = pa.array(data, type=pa.bool_())
    # Cast to float so pandas represents missing values as NaN:
    # https://github.com/pandas-dev/pandas/issues/27709 / https://github.com/pandas-dev/pandas/issues/12863
    pandas = pd.Series(data).astype(float)
    assert any_op(arrow, skipna) == pandas.any(skipna=skipna)
    # Split in the middle and check whether this still works
    if len(data) > 2:
        arrow = pa.chunked_array(
            [data[: len(data) // 2], data[len(data) // 2 :]], type=pa.bool_()
        )
        assert any_op(arrow, skipna) == pandas.any(skipna=skipna)
@given(data=st.lists(st.one_of(st.booleans(), st.none())), skipna=st.booleans())
# Test with numpy.array as input.
# This has the caveat that the missing buffer is None.
@example(np.ones(10).astype(bool), False)
@example(np.ones(10).astype(bool), True)
def test_all_op(data, skipna):
    """Check all_op against pandas' Series.all for nullable boolean data."""
    arrow = pa.array(data, type=pa.bool_())
    # Cast to float so pandas represents missing values as NaN:
    # https://github.com/pandas-dev/pandas/issues/27709 / https://github.com/pandas-dev/pandas/issues/12863
    pandas = pd.Series(data).astype(float)
    assert all_op(arrow, skipna) == pandas.all(skipna=skipna)
    # Split in the middle and check whether this still works
    if len(data) > 2:
        arrow = pa.chunked_array(
            [data[: len(data) // 2], data[len(data) // 2 :]], type=pa.bool_()
        )
        assert all_op(arrow, skipna) == pandas.all(skipna=skipna)
@pytest.mark.parametrize(
    ("array", "fill_null_value", "expected"),
    [
        (pa.array([2, 1], type=pa.int16()), -1, np.array([2, 1], dtype=np.int16)),
        (pa.array([2, None], type=pa.int32()), -1, np.array([2, -1], dtype=np.int32)),
        # Float fill values are truncated to the integer dtype (-1.5 -> -1).
        (pa.array([2, None], type=pa.int64()), -1.5, np.array([2, -1], dtype=np.int64)),
        # Out-of-range fill values wrap modulo the dtype (257 as uint8 == 1).
        (pa.array([1, None], type=pa.uint8()), 257, np.array([1, 1], dtype=np.uint8)),
        (pa.array([None, None], type=pa.int8()), 5, np.array([5, 5], dtype=np.int8)),
        (pa.array([], type=pa.int8()), 5, np.array([], dtype=np.int8)),
    ],
)
def test_integer_array_to_numpy(array, fill_null_value, expected):
    """integer_array_to_numpy keeps the arrow dtype and replaces nulls with the fill value."""
    actual = integer_array_to_numpy(array, fill_null_value)
    assert actual.dtype == expected.dtype
    np.testing.assert_array_equal(actual, expected)
@pytest.mark.parametrize(
    ("array", "indices"),
    [
        # Single-element int64 lists, a few random indices.
        (
            pa.array([[k] for k in range(10 ** 4)]),
            np.random.randint(0, 10 ** 4, 10 ** 2),
        ),
        # Single-element float64 lists.
        (
            pa.array([[float(k)] for k in range(10 ** 4)]),
            np.random.randint(0, 10 ** 4, 10 ** 2),
        ),
        # Longer lists with many more indices than rows.
        (
            pa.array(np.random.randint(0, 100, 10) for _ in range(10 ** 4)),
            np.random.randint(0, 10 ** 4, 10 ** 5),
        ),
        # 64-bit offset (large_list) variants, int and float values.
        (
            pa.LargeListArray.from_arrays(
                list(range(10 ** 4 + 1)), list(range(10 ** 4))
            ),
            np.random.randint(0, 10 ** 4, 10 ** 2),
        ),
        (
            pa.LargeListArray.from_arrays(
                list(range(10 ** 4 + 1)), [float(v) for v in range(10 ** 4)]
            ),
            np.random.randint(0, 10 ** 4, 10 ** 2),
        ),
        # A single empty list.
        (pa.array([[]]), [0]),
    ],
)
def test_take_indices_on_pyarrow_list(array, indices):
    """take_indices_on_pyarrow_list must agree with pyarrow's builtin take."""
    expected = array.take(pa.array(indices)).to_pylist()
    actual = take_indices_on_pyarrow_list(array, indices).to_pylist()
    np.testing.assert_array_equal(actual, expected)
# (dataset row metadata: 0.88539 | 0.607081)
from itertools import groupby
from pathlib import Path
from typing import List
from invoke import Result, task
from termcolor import cprint
from tasks.utils import PROJECT_INFO, ensure_reports_dir, paths_to_str, print_header, to_pathlib_path
_REPORTS_DIR = PROJECT_INFO.reports_directory / "typecheck/junit.xml"
def _handle_unexpected_pass(expected_to_fail: bool, result: Result, path: str):
    """Turn an unexpected type-check success into a failure with an actionable hint."""
    if not expected_to_fail or result.failed:
        return
    result.exited = 1  # force failure
    cprint(
        f"\nThis folder was expected to fail but no errors were found.\n\nPlease edit the "
        f"'{__file__}' file and move '{path}' from `broken_directories` to `fixed_directories`.",
        "red",
        attrs=["bold"],
    )
def _typecheck(ctx, paths: List[Path], force_typing=False):
    """Run mypy on *paths*; ``force_typing`` switches on strict mode."""
    print_header(("Forced" if force_typing else "Optional") + " typing", level=2)
    common_flags = [
        "--show-column-numbers",
        "--show-error-codes",
        "--color-output",
        "--warn-unused-config",
        "--warn-unused-ignores",
        "--follow-imports silent",
        # Emit a junit XML report for CI consumption.
        f"--junit-xml {_REPORTS_DIR}",
        *(["--strict", "--allow-untyped-decorators"] if force_typing else []),
        # Untyped decorators are allowed because they may be third party decorators
    ]
    # NOTE(review): pipefail only matters if this command grows a pipe; harmless here.
    ctx.run(f"set -o pipefail; mypy {' '.join(common_flags)} {paths_to_str(paths)}", pty=True)
@task(iterable=["path"])
def typecheck(ctx, path=None):
"""Run type checking on source code.
A non-zero return code from this task indicates invalid types were discovered.
Args:
ctx (invoke.Context): Invoke context.
path (Optional[List[str]]): Path override. Run tests only on given paths.
"""
print_header("RUNNING TYPE CHECKER")
ensure_reports_dir()
src = PROJECT_INFO.source_directory
paths = to_pathlib_path(path, [src, PROJECT_INFO.tests_directory, PROJECT_INFO.tasks_directory])
grouped_paths = groupby(paths, lambda current_path: src in current_path.parents or current_path == src)
for force_typing, group in grouped_paths:
_typecheck(ctx, list(group), force_typing) | tasks/typecheck.py | from itertools import groupby
from pathlib import Path
from typing import List
from invoke import Result, task
from termcolor import cprint
from tasks.utils import PROJECT_INFO, ensure_reports_dir, paths_to_str, print_header, to_pathlib_path
_REPORTS_DIR = PROJECT_INFO.reports_directory / "typecheck/junit.xml"
def _handle_unexpected_pass(expected_to_fail: bool, result: Result, path: str):
    """Force a failing exit code when a folder expected to fail type checking passes."""
    if expected_to_fail and not result.failed:
        result.exited = 1  # force failure
        cprint(
            f"\nThis folder was expected to fail but no errors were found.\n\nPlease edit the "
            f"'{__file__}' file and move '{path}' from `broken_directories` to `fixed_directories`.",
            "red",
            attrs=["bold"],
        )
def _typecheck(ctx, paths: List[Path], force_typing=False):
    """Run mypy on *paths*; ``force_typing`` switches on strict mode."""
    print_header(("Forced" if force_typing else "Optional") + " typing", level=2)
    common_flags = [
        "--show-column-numbers",
        "--show-error-codes",
        "--color-output",
        "--warn-unused-config",
        "--warn-unused-ignores",
        "--follow-imports silent",
        # Emit a junit XML report for CI consumption.
        f"--junit-xml {_REPORTS_DIR}",
        *(["--strict", "--allow-untyped-decorators"] if force_typing else []),
        # Untyped decorators are allowed because they may be third party decorators
    ]
    # NOTE(review): pipefail only matters if this command grows a pipe; harmless here.
    ctx.run(f"set -o pipefail; mypy {' '.join(common_flags)} {paths_to_str(paths)}", pty=True)
@task(iterable=["path"])
def typecheck(ctx, path=None):
"""Run type checking on source code.
A non-zero return code from this task indicates invalid types were discovered.
Args:
ctx (invoke.Context): Invoke context.
path (Optional[List[str]]): Path override. Run tests only on given paths.
"""
print_header("RUNNING TYPE CHECKER")
ensure_reports_dir()
src = PROJECT_INFO.source_directory
paths = to_pathlib_path(path, [src, PROJECT_INFO.tests_directory, PROJECT_INFO.tasks_directory])
grouped_paths = groupby(paths, lambda current_path: src in current_path.parents or current_path == src)
for force_typing, group in grouped_paths:
_typecheck(ctx, list(group), force_typing) | 0.648021 | 0.264985 |
import logging
from os import uname
from os.path import isfile
from time import time
from ast import literal_eval
import settings
from functions.filesystem.remove_file import remove_file
skyline_app = 'thunder'
skyline_app_logger = '%sLog' % skyline_app
logger = logging.getLogger(skyline_app_logger)
# The failover THUNDER keys directory which is failed over to and
# used in the event that Redis is down
THUNDER_KEYS_DIR = '%s/thunder/keys' % settings.SKYLINE_TMP_DIR
this_host = str(uname()[1])
# @added 20210520 - Branch #1444: thunder
def check_thunder_failover_key(self, check_key):
    """
    Determine if there is a failover alert key for an alert if Redis is down

    :param self: the self object
    :param check_key: the alert cache key name
    :type self: object
    :type check_key: str
    :return: expiry - the seconds remaining on the key (0 if no active key)
    :rtype: int
    """
    function_str = 'functions.thunder.checks.check_thunder_failover_key'
    expiry = 0
    thunder_key_file = '%s/%s' % (THUNDER_KEYS_DIR, check_key)
    key_dict = {}
    # Load the failover key data (a literal_eval-able dict written to disk)
    if isfile(thunder_key_file):
        try:
            with open(thunder_key_file, 'r') as f:
                key_dict_str = f.read()
            key_dict = literal_eval(key_dict_str)
        except Exception as e:
            logger.error('error :: %s :: failed to open thunder_key_file: %s - %s' % (
                function_str, thunder_key_file, e))
    timestamp = 0
    if key_dict:
        try:
            timestamp = int(key_dict['timestamp'])
            expiry = int(key_dict['expiry'])
        except Exception as e:
            logger.error('error :: %s :: failed to determine timestamp and expiry from key_dict created from thunder_key_file: %s - %s' % (
                function_str, thunder_key_file, e))
    if timestamp:
        now = int(time())
        key_expires_at = timestamp + expiry
        # BUGFIX: the original branches were inverted - a still valid key was
        # removed and reported as expired (returning 0), while an expired key
        # returned the time elapsed since expiry and was re-added to Redis.
        if key_expires_at <= now:
            # The key has expired - discard the failover file, no active key
            expiry = 0
            try:
                removed_file = remove_file(thunder_key_file)
                if removed_file:
                    logger.info('%s :: removed expired thunder_key_file: %s' % (
                        function_str, thunder_key_file))
            except Exception as e:
                logger.error('error :: %s :: failed to remove %s, continuing - %s' % (
                    function_str, thunder_key_file, e))
        else:
            # The key is still valid - report the remaining TTL
            expiry = key_expires_at - now
    # Try and set in Redis and remove failover key if successful
    if expiry and timestamp:
        try:
            set_alert_cache_key = self.redis_conn.setex(check_key, expiry, timestamp)
            if set_alert_cache_key:
                logger.info('%s :: set Redis key %s with %s TTL' % (
                    function_str, check_key, str(expiry)))
                try:
                    removed_file = remove_file(thunder_key_file)
                    if removed_file:
                        logger.info('%s :: added thunder alert key to Redis so removed thunder_key_file: %s' % (
                            function_str, thunder_key_file))
                except Exception as e:
                    logger.error('error :: %s :: failed to remove %s, continuing - %s' % (
                        function_str, thunder_key_file, e))
        except Exception as e:
            logger.warn('warning :: %s :: failed to set_alert_cache_key in Redis, probably still down - %s - %s' % (
                function_str, check_key, e))
    return expiry
# (dataset row separator: skyline/functions/thunder/check_thunder_failover_key.py)
import logging
from os import uname
from os.path import isfile
from time import time
from ast import literal_eval
import settings
from functions.filesystem.remove_file import remove_file
skyline_app = 'thunder'
skyline_app_logger = '%sLog' % skyline_app
logger = logging.getLogger(skyline_app_logger)
# The failover THUNDER keys directory which is failed over to and
# used in the event that Redis is down
THUNDER_KEYS_DIR = '%s/thunder/keys' % settings.SKYLINE_TMP_DIR
this_host = str(uname()[1])
# @added 20210520 - Branch #1444: thunder
def check_thunder_failover_key(self, check_key):
    """
    Determine if there is a failover alert key for an alert if Redis is down
    :param self: the self object
    :param check_key: the alert cache key name
    :type self: object
    :type check_key: str
    :return: expiry
    :rtype: int
    """
    function_str = 'functions.thunder.checks.check_thunder_failover_key'
    expiry = 0
    thunder_key_file = '%s/%s' % (THUNDER_KEYS_DIR, check_key)
    key_dict = {}
    # Load the failover key data (a literal_eval-able dict written to disk)
    if isfile(thunder_key_file):
        try:
            with open(thunder_key_file, 'r') as f:
                key_dict_str = f.read()
            key_dict = literal_eval(key_dict_str)
        except Exception as e:
            logger.error('error :: %s :: failed to open thunder_key_file: %s - %s' % (
                function_str, thunder_key_file, e))
    timestamp = 0
    if key_dict:
        try:
            timestamp = int(key_dict['timestamp'])
            expiry = int(key_dict['expiry'])
        except Exception as e:
            logger.error('error :: %s :: failed to determine timestamp and expiry from key_dict created from thunder_key_file: %s - %s' % (
                function_str, thunder_key_file, e))
    if timestamp:
        now = int(time())
        # NOTE(review): this branch fires while the key is still VALID yet it
        # zeroes the expiry, removes the file and logs it as expired; suspect
        # the condition was meant to be `<= now`. TODO confirm.
        if (timestamp + expiry) >= now:
            expiry = 0
            try:
                removed_file = remove_file(thunder_key_file)
                if removed_file:
                    logger.info('%s :: removed expired thunder_key_file: %s' % (
                        function_str, thunder_key_file))
            except Exception as e:
                logger.error('error :: %s :: failed to remove %s, continuing - %s' % (
                    function_str, thunder_key_file, e))
        # NOTE(review): for an expired key this yields the time SINCE expiry
        # (then used as a Redis TTL below), not a remaining TTL; suspect
        # `(timestamp + expiry) - now` on the still-valid path was intended.
        if (timestamp + expiry) <= now:
            expiry = now - (timestamp + expiry)
    # Try and set in Redis and remove failover key if successful
    if expiry and timestamp:
        try:
            set_alert_cache_key = self.redis_conn.setex(check_key, expiry, timestamp)
            if set_alert_cache_key:
                logger.info('%s :: set Redis key %s with %s TTL' % (
                    function_str, check_key, str(expiry)))
                try:
                    removed_file = remove_file(thunder_key_file)
                    if removed_file:
                        logger.info('%s :: added thunder alert key to Redis so removed thunder_key_file: %s' % (
                            function_str, thunder_key_file))
                except Exception as e:
                    logger.error('error :: %s :: failed to remove %s, continuing - %s' % (
                        function_str, thunder_key_file, e))
        except Exception as e:
            logger.warn('warning :: %s :: failed to set_alert_cache_key in Redis, probably still down - %s - %s' % (
                function_str, check_key, e))
    return expiry | 0.340156 | 0.090574
import random, math
# Rounds float values (standalone or inside a flat list) to 4 decimal places.
def trimval(thelist):
    """Return *thelist* with float values rounded to 4 decimal places.

    Lists are processed element-wise (ints pass through untouched, anything
    else is formatted as a float); bare floats are rounded; any other value
    is returned unchanged.
    """
    if type(thelist) == list:
        return [item if type(item) == int else float('%.4f' % item) for item in thelist]
    if type(thelist) == float:
        return float('%.4f' % thelist)
    return thelist
def trimlist(*args):
    """Round float values in each argument list to 4 decimal places.

    Handles one level of nested lists; every other value passes through
    unchanged. Returns one processed list per argument.
    """
    processed = []
    for arg in args:
        cleaned = []
        for item in arg:
            if type(item) == float:
                cleaned.append(float("%.4f" % item))
            elif type(item) == list:
                cleaned.append(
                    [float("%.4f" % v) if type(v) == float else v for v in item]
                )
            else:
                cleaned.append(item)
        processed.append(cleaned)
    return processed
class Calculate():
    """Discrete empirical distribution helper.

    Built from an outcome list and a matching cumulative-probability list
    (optionally a step count). Construction eagerly derives the individual
    probabilities, draws empirical sample(s) and caches expectation,
    estimated variance and estimated standard deviation on the instance.
    """
    def __init__(self, *args):
        ''' Initialising the instances '''
        #Checking for valid arguments and value assignment
        # args = (outcomes, cumulative_probabilities[, steps])
        if len(args) == 3:
            self._steps = args[2]
        elif len(args) == 2:
            self._steps = 1
        else:
            raise Exception("Invalid arguments: must be 2 or 3 --> Outcome , Cummulative probability, optional: steps")
        self._outcome, self._cum_prob, self._probability = args[0], args[1], []
        # Checks in case user hasn't inputted the right information
        #Error checks for invalid inputs
        # last_cum: the final cumulative value rendered as a string (must be 1)
        self.last_cum = (self._cum_prob[-1:])
        self.last_cum = (''.join(map(str, self.last_cum)))
        if len(self._outcome) != len(self._cum_prob):
            raise ValueError("'prob' arguments must be of same length")
        elif float(self.last_cum) != 1:
            raise ValueError("last value of 2nd argument must be 1")
        for i in args[1]:
            try:
                # NOTE(review): `0 > i < 1` is only true for negative i, so
                # values greater than 1 slip through; probably meant
                # `not (0 <= i <= 1)`. TODO confirm.
                if 0 > i < 1:
                    raise ValueError("cummulative probability must be between 0 and 1")
            except TypeError:
                raise Exception("All items in the second argument list must be an int")
        # The helpers below are closures over `args`, defined and invoked
        # eagerly inside __init__ with their results cached on self.
        # Calculates the probability of an outcome given its cummulative probability
        def prob_(self):
            ''' Returns a probability given its cummulative probability '''
            # Starting variables
            y = 1; self._probability.append(args[1][0])
            while y < len(self._cum_prob):
                # p(i) = F(i) - F(i-1)
                self._probability.append(self._cum_prob[y] - self._cum_prob[y-1])
                y+=1
            return self._probability
        prob_(self)
        # Generaes a discreteEmp for the given outcome
        def discreteemp_(self):
            '''returns a random number from the outcome list'''
            #--- generating a random number based on discreteemp
            emplist = []
            def twoargs():
                # Inverse-CDF sampling: draw u ~ U(0,1) and return the
                # outcome of the first cumulative bucket containing it.
                count = 0
                self._number = random.random()
                while count < len(self._cum_prob):
                    #self._number = random.random()
                    if self._cum_prob[count] < self._number <= self._cum_prob[count+1]:
                        return self._outcome[count+1]
                    elif 0 <= self._number <= self._cum_prob[0]:
                        return self._outcome[0]
                    count+=1
            if len(args) == 2:
                return twoargs()
            elif len(args) == 3:
                self.amount = args[2]
                increment = 0
                if self.amount == 1:
                    return twoargs()
                else:
                    try:
                        while increment < self.amount:
                            generated = twoargs()
                            emplist.append(generated)
                            increment +=1
                    except TypeError:
                        raise Exception("Third argument must be an int > 0")
            return emplist
        # Calculates the expectation value given its outcome and cummulative probability
        def expect_(self):
            ''' returns the expectation value of the outcomes'''
            expectation, increment = 0,0
            while increment < len(self._cum_prob):
                expectation += self._probability[increment] * self._outcome[increment]
                increment += 1
            if len(args) == 2:
                return expectation
            elif len(args) == 3:
                # scale the single-step expectation by the number of steps
                expectation *= self._steps
                return expectation
            else:
                # NOTE(review): unreachable (len(args) validated above), and
                # `valueerror` is a NameError typo for ValueError.
                raise valueerror("arguments must be two or three")
        # Calculates the estimated variance of the given lists
        # NOTE(review): despite the comment, this returns the standard deviation.
        def eststddev_(self):
            '''returns estimated variance of the outcome'''
            #arguments are: [outcomes], [cummulative probabilities], optional: float(steps)]
            mean = expect_(self) / self._steps
            increment = 0
            occurtimes = 0
            while increment < len(self._cum_prob):
                # accumulate p(i) * (x(i) - mean)^2
                occurtimes += self._probability[increment] * pow((self._outcome[increment] - mean), 2)
                increment +=1
            try:
                if len(args) == 2:
                    return math.sqrt(occurtimes)
                elif len(args) == 3:
                    return math.sqrt(occurtimes) * math.sqrt(self._steps)
            except ValueError:
                raise Exception("Second list argument must be cummulative i.e always increasing")
            # else:
            # raise valueerror("arguments must be two or three")
        # Calculates the estimated standard deviation of the given lists
        # NOTE(review): despite the comment, this returns the variance (stddev squared).
        def estvar_(self):
            ''' Returns the estimated standard deviation of the outcome'''
            #arguments are: [outcomes], [cummulative probabilities], optional: float(steps)]
            variance = math.pow(eststddev_(self), 2)
            return variance
        # Calling all methods
        self._discreteemp = discreteemp_(self)
        self._expectval = expect_(self)
        self._estvar = estvar_(self);
        self._eststddev = eststddev_(self);
    # Public accessors return cached values rounded to 4 decimal places.
    def prob(self):
        return trimval(self._probability)
    def discreteemp(self):
        return trimval(self._discreteemp)
    def expectval(self):
        return trimval(self._expectval)
    def estmean(self):
        return trimval(self._expectval)
    def estvar(self):
        return trimval(self._estvar)
    def eststddev(self):
        return trimval(self._eststddev) | eventsim/discrete.py | import random, math
#Removes leading zeros after decimal and/or approximate to 4dp
def trimval(thelist):
    ''' Takes in number list or float and removes leading zeros '''
    # NOTE: despite the name, this rounds float values to 4 decimal places.
    if type(thelist) == list:
        temp = []
        for i in thelist:
            if type(i) == int:
                # ints pass through untouched
                temp.append(i)
            else:
                # anything else is formatted as a float with 4 dp
                temp.append(float('%.4f' % i))
        thelist = temp
        return thelist
    elif type(thelist) == float:
        return float('%.4f' % thelist)
    # any other type is returned unchanged
    return thelist
def trimlist(*args):
    ''' Takes in number list or float and removes leading zeros '''
    # Rounds floats (one nesting level deep) to 4 dp; returns one list per arg.
    # NOTE(review): `store` is redundant - store[0] is always `values`.
    store = []
    values =[]
    for mylist in args:
        each = []
        for x in mylist:
            if type(x) == float:
                each.append (float("%.4f" % x))
            elif type(x) == list:
                # one level of nested lists is processed element-wise
                inner = []
                for y in x:
                    if type(y) == float:
                        inner.append(float("%.4f" % y))
                    else:
                        inner.append(y)
                each.append(inner)
            else:
                # non-float, non-list values pass through unchanged
                each.append(x)
        values.append(each)
        store.append(values)
    return store[0]
class Calculate():
    """Discrete empirical distribution helper.

    Built from an outcome list and a matching cumulative-probability list
    (optionally a step count). Construction eagerly derives the individual
    probabilities, draws empirical sample(s) and caches expectation,
    estimated variance and estimated standard deviation on the instance.
    """
    def __init__(self, *args):
        ''' Initialising the instances '''
        #Checking for valid arguments and value assignment
        # args = (outcomes, cumulative_probabilities[, steps])
        if len(args) == 3:
            self._steps = args[2]
        elif len(args) == 2:
            self._steps = 1
        else:
            raise Exception("Invalid arguments: must be 2 or 3 --> Outcome , Cummulative probability, optional: steps")
        self._outcome, self._cum_prob, self._probability = args[0], args[1], []
        # Checks in case user hasn't inputted the right information
        #Error checks for invalid inputs
        # last_cum: the final cumulative value rendered as a string (must be 1)
        self.last_cum = (self._cum_prob[-1:])
        self.last_cum = (''.join(map(str, self.last_cum)))
        if len(self._outcome) != len(self._cum_prob):
            raise ValueError("'prob' arguments must be of same length")
        elif float(self.last_cum) != 1:
            raise ValueError("last value of 2nd argument must be 1")
        for i in args[1]:
            try:
                # NOTE(review): `0 > i < 1` is only true for negative i, so
                # values greater than 1 slip through; probably meant
                # `not (0 <= i <= 1)`. TODO confirm.
                if 0 > i < 1:
                    raise ValueError("cummulative probability must be between 0 and 1")
            except TypeError:
                raise Exception("All items in the second argument list must be an int")
        # The helpers below are closures over `args`, defined and invoked
        # eagerly inside __init__ with their results cached on self.
        # Calculates the probability of an outcome given its cummulative probability
        def prob_(self):
            ''' Returns a probability given its cummulative probability '''
            # Starting variables
            y = 1; self._probability.append(args[1][0])
            while y < len(self._cum_prob):
                # p(i) = F(i) - F(i-1)
                self._probability.append(self._cum_prob[y] - self._cum_prob[y-1])
                y+=1
            return self._probability
        prob_(self)
        # Generaes a discreteEmp for the given outcome
        def discreteemp_(self):
            '''returns a random number from the outcome list'''
            #--- generating a random number based on discreteemp
            emplist = []
            def twoargs():
                # Inverse-CDF sampling: draw u ~ U(0,1) and return the
                # outcome of the first cumulative bucket containing it.
                count = 0
                self._number = random.random()
                while count < len(self._cum_prob):
                    #self._number = random.random()
                    if self._cum_prob[count] < self._number <= self._cum_prob[count+1]:
                        return self._outcome[count+1]
                    elif 0 <= self._number <= self._cum_prob[0]:
                        return self._outcome[0]
                    count+=1
            if len(args) == 2:
                return twoargs()
            elif len(args) == 3:
                self.amount = args[2]
                increment = 0
                if self.amount == 1:
                    return twoargs()
                else:
                    try:
                        while increment < self.amount:
                            generated = twoargs()
                            emplist.append(generated)
                            increment +=1
                    except TypeError:
                        raise Exception("Third argument must be an int > 0")
            return emplist
        # Calculates the expectation value given its outcome and cummulative probability
        def expect_(self):
            ''' returns the expectation value of the outcomes'''
            expectation, increment = 0,0
            while increment < len(self._cum_prob):
                expectation += self._probability[increment] * self._outcome[increment]
                increment += 1
            if len(args) == 2:
                return expectation
            elif len(args) == 3:
                # scale the single-step expectation by the number of steps
                expectation *= self._steps
                return expectation
            else:
                # NOTE(review): unreachable (len(args) validated above), and
                # `valueerror` is a NameError typo for ValueError.
                raise valueerror("arguments must be two or three")
        # Calculates the estimated variance of the given lists
        # NOTE(review): despite the comment, this returns the standard deviation.
        def eststddev_(self):
            '''returns estimated variance of the outcome'''
            #arguments are: [outcomes], [cummulative probabilities], optional: float(steps)]
            mean = expect_(self) / self._steps
            increment = 0
            occurtimes = 0
            while increment < len(self._cum_prob):
                # accumulate p(i) * (x(i) - mean)^2
                occurtimes += self._probability[increment] * pow((self._outcome[increment] - mean), 2)
                increment +=1
            try:
                if len(args) == 2:
                    return math.sqrt(occurtimes)
                elif len(args) == 3:
                    return math.sqrt(occurtimes) * math.sqrt(self._steps)
            except ValueError:
                raise Exception("Second list argument must be cummulative i.e always increasing")
            # else:
            # raise valueerror("arguments must be two or three")
        # Calculates the estimated standard deviation of the given lists
        # NOTE(review): despite the comment, this returns the variance (stddev squared).
        def estvar_(self):
            ''' Returns the estimated standard deviation of the outcome'''
            #arguments are: [outcomes], [cummulative probabilities], optional: float(steps)]
            variance = math.pow(eststddev_(self), 2)
            return variance
        # Calling all methods
        self._discreteemp = discreteemp_(self)
        self._expectval = expect_(self)
        self._estvar = estvar_(self);
        self._eststddev = eststddev_(self);
    # Public accessors return cached values rounded to 4 decimal places.
    def prob(self):
        return trimval(self._probability)
    def discreteemp(self):
        return trimval(self._discreteemp)
    def expectval(self):
        return trimval(self._expectval)
    def estmean(self):
        return trimval(self._expectval)
    def estvar(self):
        return trimval(self._estvar)
    def eststddev(self):
        return trimval(self._eststddev) | 0.403567 | 0.410461
import theano
from theano import tensor as T
from theano.tensor.nnet import conv
import numpy as np
class HexConvLayer:
    """Convolutional layer combining hexagonal filters of diameter 3 and 5 (theano).

    The weights are stored as flat vectors (7 taps for diameter 3, 19 for
    diameter 5) and scattered into square 3x3 / 5x5 kernels so the active
    taps form a hexagon.  input_shape is used as (batch, channels, height,
    width) - presumably NCHW; confirm against callers.
    """
    def __init__(self, rng, input, input_shape, num_D5_filters, num_D3_filters, params = None):
        # Glorot-style bounds; 7 and 19 are the hexagonal tap counts.
        W3_bound = np.sqrt(6. / (7*(input_shape[1] + num_D3_filters)))
        W5_bound = np.sqrt(6. / (19*(input_shape[1] + num_D5_filters)))
        if(params):
            # Reuse supplied parameters (params = [W5_values, W3_values, b]).
            self.W3_values = params[1]
        else:
            self.W3_values = theano.shared(
                np.asarray(
                    rng.uniform(
                        low=-W3_bound,
                        high=W3_bound,
                        size=(num_D3_filters,input_shape[1],7)
                    ),
                    dtype=theano.config.floatX
                ),
                borrow = True
            )
        #Place weights in hexagonal filter of diameter 3
        W3 = T.zeros((num_D3_filters,input_shape[1],3,3))
        W3 = T.set_subtensor(W3[:,:,1:,0], self.W3_values[:,:,:2])
        W3 = T.set_subtensor(W3[:,:,:,1], self.W3_values[:,:,2:5])
        W3 = T.set_subtensor(W3[:,:,:2,2], self.W3_values[:,:,5:])
        if(params):
            self.W5_values = params[0]
        else:
            self.W5_values = theano.shared(
                np.asarray(
                    rng.uniform(
                        low=-W5_bound,
                        high=W5_bound,
                        size=(num_D5_filters,input_shape[1],19)
                    ),
                    dtype=theano.config.floatX
                ),
                borrow = True
            )
        #Place weights in hexagonal filter of diameter 5
        W5 = T.zeros((num_D5_filters,input_shape[1],5,5))
        W5 = T.set_subtensor(W5[:,:,2:,0], self.W5_values[:,:,:3])
        W5 = T.set_subtensor(W5[:,:,1:,1], self.W5_values[:,:,3:7])
        W5 = T.set_subtensor(W5[:,:,:,2], self.W5_values[:,:,7:12])
        W5 = T.set_subtensor(W5[:,:,:4,3], self.W5_values[:,:,12:16])
        W5 = T.set_subtensor(W5[:,:,:3,4], self.W5_values[:,:,16:])
        if(params):
            self.b = params[2]
        else:
            # One bias per output map; D5 maps come first, matching the
            # concatenation order below.
            b_values = np.zeros((num_D5_filters+num_D3_filters), dtype=theano.config.floatX)
            self.b = theano.shared(value=b_values, borrow=True)
        # Crop the diameter-3 input by 1 on each side so both valid-mode
        # convolutions yield the same spatial output size (input - 4).
        conv_out3 = conv.conv2d(
            input = input[:,:,1:-1,1:-1],
            filters = W3,
            filter_shape = (num_D3_filters,input_shape[1],3,3),
            image_shape = [input_shape[0], input_shape[1], input_shape[2]-2, input_shape[3]-2]
        )
        conv_out5 = conv.conv2d(
            input = input,
            filters = W5,
            filter_shape = (num_D5_filters,input_shape[1],5,5),
            image_shape = input_shape
        )
        full_out = T.concatenate([conv_out5, conv_out3], axis=1)
        squished_out = T.nnet.relu(full_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        # Zero-pad by 2 on each spatial side to restore the input height/width.
        padded_out = T.zeros((squished_out.shape[0], num_D3_filters+num_D5_filters, input_shape[2], input_shape[3]))
        padded_out = T.set_subtensor(padded_out[:,:,2:-2,2:-2], squished_out)
        self.output = padded_out
        # Gradients flow through the flat weight vectors, not the scattered kernels.
        self.params = [self.W5_values, self.W3_values, self.b]
        # Approximate parameter memory in bytes (assumes 4-byte floats).
        self.mem_size = (T.prod(self.W5_values.shape)+T.prod(self.W3_values.shape)+T.prod(self.b.shape))*4
        self.input = input
class FullyConnectedLayer:
    '''Dense hidden layer computing ReLU(input . W + b).

    If *params* is given it must be [W, b] (shared variables to reuse);
    otherwise W is drawn uniformly from the Glorot interval
    +/- sqrt(6 / (n_in + n_out)) and b starts at zero.
    '''
    def __init__(self, rng, input, n_in, n_out, params = None):
        self.input = input
        if params:
            # Reuse externally supplied shared parameters.
            self.W, self.b = params[0], params[1]
        else:
            bound = np.sqrt(6. / (n_in + n_out))
            initial_W = np.asarray(
                rng.uniform(low=-bound, high=bound, size=(n_in, n_out)),
                dtype=theano.config.floatX
            )
            self.W = theano.shared(value=initial_W, name='W', borrow=True)
            self.b = theano.shared(
                value=np.zeros((n_out,), dtype=theano.config.floatX),
                name='b', borrow=True
            )
        self.output = T.nnet.relu(T.dot(input, self.W) + self.b)
        self.params = [self.W, self.b]
        # Approximate parameter memory in bytes (4 bytes per float32).
        self.mem_size = (T.prod(self.W.shape) + T.prod(self.b.shape)) * 4
class SigmoidLayer:
    '''Dense output layer computing sigmoid(input . W + b).'''
    def __init__(self, rng, input, n_in, n_out, params = None):
        '''*params*, when given, is [W, b] (shared variables to reuse);
        otherwise W is drawn uniformly from the Glorot interval and b
        starts at zero.'''
        self.input = input
        if(params):
            self.W = params[0]
        else:
            # Glorot/Xavier uniform initialisation.
            W_values = np.asarray(
                rng.uniform(
                    low=-np.sqrt(6. / (n_in + n_out)),
                    high=np.sqrt(6. / (n_in + n_out)),
                    size=(n_in, n_out)
                ),
                dtype=theano.config.floatX
            )
            self.W = theano.shared(value=W_values, name='W', borrow=True)
        if(params):
            self.b = params[1]
        else:
            b_values = np.zeros((n_out,), dtype=theano.config.floatX)
            self.b = theano.shared(value=b_values, name='b', borrow=True)
        self.output = T.nnet.sigmoid(T.dot(input, self.W) + self.b)
        self.params = [self.W, self.b]
self.mem_size = (T.prod(self.W.shape)+T.prod(self.b.shape))*4 | layers.py | import theano
from theano import tensor as T
from theano.tensor.nnet import conv
import numpy as np
class HexConvLayer:
    '''Convolutional layer with hexagonal receptive fields.

    Two filter banks are learned as flat weight vectors (7 weights for a
    diameter-3 hexagon, 19 for diameter-5) and scattered into square
    3x3 / 5x5 kernels via set_subtensor, leaving the off-hexagon corners
    at zero.  Both banks are convolved over the input, concatenated,
    biased, passed through ReLU and zero-padded back to the input's
    spatial size.

    *params*, when given, is [W5_values, W3_values, b] — the same order
    as self.params.
    '''
    def __init__(self, rng, input, input_shape, num_D5_filters, num_D3_filters, params = None):
        # Glorot-style bounds from the hexagon sizes (7 and 19 cells).
        W3_bound = np.sqrt(6. / (7*(input_shape[1] + num_D3_filters)))
        W5_bound = np.sqrt(6. / (19*(input_shape[1] + num_D5_filters)))
        if(params):
            self.W3_values = params[1]
        else:
            self.W3_values = theano.shared(
                np.asarray(
                    rng.uniform(
                        low=-W3_bound,
                        high=W3_bound,
                        size=(num_D3_filters,input_shape[1],7)
                    ),
                    dtype=theano.config.floatX
                ),
                borrow = True
            )
        # Place weights in hexagonal filter of diameter 3 (corners stay 0).
        W3 = T.zeros((num_D3_filters,input_shape[1],3,3))
        W3 = T.set_subtensor(W3[:,:,1:,0], self.W3_values[:,:,:2])
        W3 = T.set_subtensor(W3[:,:,:,1], self.W3_values[:,:,2:5])
        W3 = T.set_subtensor(W3[:,:,:2,2], self.W3_values[:,:,5:])
        if(params):
            self.W5_values = params[0]
        else:
            self.W5_values = theano.shared(
                np.asarray(
                    rng.uniform(
                        low=-W5_bound,
                        high=W5_bound,
                        size=(num_D5_filters,input_shape[1],19)
                    ),
                    dtype=theano.config.floatX
                ),
                borrow = True
            )
        # Place weights in hexagonal filter of diameter 5 (columns of
        # heights 3,4,5,4,3 -- 19 cells total).
        W5 = T.zeros((num_D5_filters,input_shape[1],5,5))
        W5 = T.set_subtensor(W5[:,:,2:,0], self.W5_values[:,:,:3])
        W5 = T.set_subtensor(W5[:,:,1:,1], self.W5_values[:,:,3:7])
        W5 = T.set_subtensor(W5[:,:,:,2], self.W5_values[:,:,7:12])
        W5 = T.set_subtensor(W5[:,:,:4,3], self.W5_values[:,:,12:16])
        W5 = T.set_subtensor(W5[:,:,:3,4], self.W5_values[:,:,16:])
        if(params):
            self.b = params[2]
        else:
            # One bias per output map: D5 maps first, then D3 (must match
            # the concatenation order below).
            b_values = np.zeros((num_D5_filters+num_D3_filters), dtype=theano.config.floatX)
            self.b = theano.shared(value=b_values, borrow=True)
        # 'valid' convolutions: the D3 bank runs on the input cropped by
        # one pixel per side so both outputs share the same spatial size
        # (H-4, W-4).
        conv_out3 = conv.conv2d(
            input = input[:,:,1:-1,1:-1],
            filters = W3,
            filter_shape = (num_D3_filters,input_shape[1],3,3),
            image_shape = [input_shape[0], input_shape[1], input_shape[2]-2, input_shape[3]-2]
        )
        conv_out5 = conv.conv2d(
            input = input,
            filters = W5,
            filter_shape = (num_D5_filters,input_shape[1],5,5),
            image_shape = input_shape
        )
        full_out = T.concatenate([conv_out5, conv_out3], axis=1)
        squished_out = T.nnet.relu(full_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        # Zero-pad the (H-4, W-4) activation maps back to the input's HxW.
        padded_out = T.zeros((squished_out.shape[0], num_D3_filters+num_D5_filters, input_shape[2], input_shape[3]))
        padded_out = T.set_subtensor(padded_out[:,:,2:-2,2:-2], squished_out)
        self.output = padded_out
        self.params = [self.W5_values, self.W3_values, self.b]
        # Approximate parameter memory in bytes (4 bytes per float32).
        self.mem_size = (T.prod(self.W5_values.shape)+T.prod(self.W3_values.shape)+T.prod(self.b.shape))*4
        self.input = input
class FullyConnectedLayer:
    '''Dense hidden layer computing ReLU(input . W + b).

    If *params* is given it must be [W, b] (shared variables to reuse);
    otherwise W is drawn uniformly from the Glorot interval
    +/- sqrt(6 / (n_in + n_out)) and b starts at zero.
    '''
    def __init__(self, rng, input, n_in, n_out, params = None):
        self.input = input
        if params:
            # Reuse externally supplied shared parameters.
            self.W, self.b = params[0], params[1]
        else:
            bound = np.sqrt(6. / (n_in + n_out))
            initial_W = np.asarray(
                rng.uniform(low=-bound, high=bound, size=(n_in, n_out)),
                dtype=theano.config.floatX
            )
            self.W = theano.shared(value=initial_W, name='W', borrow=True)
            self.b = theano.shared(
                value=np.zeros((n_out,), dtype=theano.config.floatX),
                name='b', borrow=True
            )
        self.output = T.nnet.relu(T.dot(input, self.W) + self.b)
        self.params = [self.W, self.b]
        # Approximate parameter memory in bytes (4 bytes per float32).
        self.mem_size = (T.prod(self.W.shape) + T.prod(self.b.shape)) * 4
class SigmoidLayer:
    '''Dense output layer computing sigmoid(input . W + b).

    If *params* is given it must be [W, b] (shared variables to reuse);
    otherwise W is drawn uniformly from the Glorot interval and b starts
    at zero.
    '''
    def __init__(self, rng, input, n_in, n_out, params = None):
        self.input = input
        if params:
            # Reuse externally supplied shared parameters.
            self.W, self.b = params[0], params[1]
        else:
            bound = np.sqrt(6. / (n_in + n_out))
            initial_W = np.asarray(
                rng.uniform(low=-bound, high=bound, size=(n_in, n_out)),
                dtype=theano.config.floatX
            )
            self.W = theano.shared(value=initial_W, name='W', borrow=True)
            self.b = theano.shared(
                value=np.zeros((n_out,), dtype=theano.config.floatX),
                name='b', borrow=True
            )
        self.output = T.nnet.sigmoid(T.dot(input, self.W) + self.b)
        self.params = [self.W, self.b]
self.mem_size = (T.prod(self.W.shape)+T.prod(self.b.shape))*4 | 0.389082 | 0.573649 |
'''Game manager module.'''
# pylint: disable=fixme, line-too-long, invalid-name, undefined-variable
# pylint: disable=too-many-branches, too-many-statements, too-many-arguments
from random import randint
import pygame
from pygame.locals import * # pylint: disable=wildcard-import, unused-wildcard-import
from pygame.time import delay
from sprites import Tree, Board, Element
from sounds import Sounds, play_sound
class TreeManager:
    '''Tree manager.

    Scene controller for the level-selection "tree" screen and its
    energy-purchase overlay.  State lives in class attributes; the chosen
    level / energy / money are written back onto the main Manager at the
    end of mouse_select().
    '''
    __screen_size = (900, 600)
    # NOTE(review): set_mode runs at class-definition time, so pygame must
    # already be initialised when this module is imported -- verify order.
    screen = pygame.display.set_mode(__screen_size, DOUBLEBUF, 32)
    fruit_list = []
    fruit_image = pygame.image.load(Tree.fruit).convert_alpha()
    fruit_width = fruit_image.get_width()    # used as the fruit hit-box width
    fruit_height = fruit_image.get_height()  # used as the fruit hit-box height
    type = 0 # Current scene: 0 Tree, 1 Energy (shadows builtin `type`)
    energy_full = False # One-shot "energy is full" warning flag
    money_empty = False # One-shot "not enough money" warning flag
    def display_text(self, text, position, txt_size=25, txt_color=(255, 255, 255)):
        '''Display text with given position, size and color.'''
        my_font = pygame.font.SysFont(None, txt_size)
        text_screen = my_font.render(text, True, txt_color)
        self.screen.blit(text_screen, position)
    def draw_tree(self, energy_num, money_num):
        '''Draws the game tree scene: tree, energy/money HUD, the ten level
        fruits, and (in the energy scene) the purchase overlay and any
        pending warning flash.'''
        Tree(Tree.tree, (0, 600)).draw(self.screen) # Draw tree
        Tree(Tree.energy_num, Tree.energy_num_position).draw(self.screen) # Draw energy num
        # Displayed energy is capped at 30/30.
        if energy_num > 30:
            self.display_text(str(30) + '/30', (22, 55), 21)
        else:
            self.display_text(str(energy_num)+'/30', (22, 55), 21)
        Tree(Tree.money, (15, 135)).draw(self.screen) # Draw money
        self.display_text(str(money_num), (32, 124), 21)
        for i in range(0, 10): # Draw fruits, numbered 1-10
            Tree(Tree.fruit, Tree.position[i]).draw(self.screen)
            self.display_text(str(i+1), (Tree.position[i][0]+15, Tree.position[i][1]-47))
        if self.type == 1:
            Tree(Tree.energy_buy, Tree.energy_buy_position).draw(self.screen)
        # Warning flashes: show for 500 ms, then clear the one-shot flag.
        if self.energy_full:
            self.display_text('energy is full!', (430, 310), 30, (255, 0, 0))
            pygame.display.flip()
            delay(500)
            self.energy_full = False
        if self.money_empty:
            self.display_text('money is not enough!', (410, 310), 30, (255, 0, 0))
            pygame.display.flip()
            delay(500)
            self.money_empty = False
    def mouse_select(self, mgr, mousex, mousey, level, energy_num, money_num):
        '''Handle a mouse click in the tree / energy scene and write the
        resulting level, energy and money back onto *mgr*.'''
        if self.type == 0: # Tree Scene
            for i in range(0, 10):
                if Tree.position[i][0] < mousex < Tree.position[i][0] + self.fruit_width \
                and Tree.position[i][1] - self.fruit_height < mousey < Tree.position[i][1]:
                    # Clicking a fruit with no energy opens the buy screen
                    # instead of starting the level.
                    if energy_num <= 0:
                        self.type = 1
                    else:
                        level = i + 1
            if Tree.energy_num_position[0] < mousex < Tree.energy_num_position[0] + 60 \
            and Tree.energy_num_position[1] - 60 < mousey < Tree.energy_num_position[1]: # energy icon area is 60x60
                play_sound(Sounds.CLICK)
                self.type = 1
        else: # Energy Scene
            if 408 < mousex < 600 and 263 < mousey < 313: # "Buy Energy" button clicked
                play_sound(Sounds.CLICK_BUTTON)
                # 50 money buys 5 energy; warn when broke or already full.
                if money_num < 50:
                    self.money_empty = True
                if energy_num >= 30:
                    self.energy_full = True
                elif energy_num < 30 and money_num >= 50:
                    energy_num += 5
                    money_num -= 50
            elif 619 < mousex < 638 and 158 < mousey < 177: # "X" clicked
                self.type = 0
        mgr.level, mgr.energy_num, mgr.money = level, energy_num, money_num
# pylint: disable=too-many-public-methods, too-many-instance-attributes, too-many-nested-blocks
class Manager:
    '''Game manager.

    Match-3 board controller: holds the grid state, handles selection and
    swapping, elimination cascades, the falling animation and the
    per-level win/lose windows.  Defaults live in class attributes;
    __init__ shadows the per-board ones on the instance.
    '''
    __screen_size = (900, 600)
    screen = pygame.display.set_mode(__screen_size, DOUBLEBUF, 32)
    __brick_size = 50 # Pixel size of one grid cell
    __bg = pygame.image.load('img/bg.png').convert()
    stop_width = 63 # Clickable square size of the pause/exit button
    selected = [-1, -1] # Current selected [row, col]
    swap_sign = -1 # Swap sign: 1 when a valid adjacent pair was clicked
    last_sel = [-1, -1] # Last selected [row, col]
    value_swapped = False # Did the last scan eliminate anything?
    death_sign = True # Death map sign (no valid move found)
    boom_sel = [-1, -1] # Eliminate 4: [row, col]
    level = 0 # Current level, 0 for tree
    money = 100 # Money
    energy_num = 30 # Energy num
    num_sign = True # NOTE(review): purpose unclear from this file; only reset in set_level_mode -- verify
    type = 2 # (0) Playing, (1) Passed, (-1) Failed, (2) Tree
    reset_mode = True # Reset layout?
    init_step = 15 # Initial steps for each level
    step = init_step # Steps left of the game
    score = 0 # Score
    # NOTE: `min` and `max` shadow the builtins inside this class body.
    min = 20 # Score threshold for the second star
    max = 50 # Score threshold for the third star
    animal_num = [0, 0, 0, 0, 0, 0] # Number of eliminated animals, per kind
    ice_num = 0 # Number of required ice tiles broken so far
    success_board = Board(Board.success, [200, 0]) # Success board
    fail_board = Board(Board.fail, [200, 0]) # Failure board
    height, width = 9, 9
    row, col = 5, 5
    ice_list = [[-1 for _ in range(21)] for _ in range(21)] # (-1) None, (1) Ice
    animal = [[-1 for _ in range(21)] for _ in range(21)] # (-2) Eliminated, (-1) None, (0-5) Animal kind
    list_x, list_y = (__screen_size[0] - 11 * __brick_size) / 2, (__screen_size[1] - 11 * __brick_size) / 2 # Pixel origin of the block grid
    def __init__(self, width, height):
        '''Build a fresh *width* x *height* board centred on screen.

        NOTE(review): xy_rc/rc_xy are static and read the CLASS-level
        list_x/list_y/row/col, so the instance origin is snapped onto the
        class grid before the per-instance values are stored -- confirm
        this rounding is intentional.
        '''
        self.height = height
        self.width = width
        self.list_x = (Manager.__screen_size[0] - self.width * Manager.__brick_size) / 2
        self.list_y = (Manager.__screen_size[1] - self.height * Manager.__brick_size) / 2
        # Round the pixel origin to a whole grid cell and back.
        self.row, self.col = Manager.xy_rc(self.list_x, self.list_y)
        self.list_x, self.list_y = Manager.rc_xy(self.row, self.col)
        self.ice_list = [[-1 for _ in range(21)] for _ in range(21)]
        self.animal = [[-1 for _ in range(21)] for _ in range(21)]
        self.reset_animals()
def reset_animals(self):
'''Reset board with random animals.'''
for row in range(self.row, self.row + self.height):
for col in range(self.col, self.col + self.width):
self.animal[row][col] = randint(0, 5)
@staticmethod
def rc_xy(row, col):
'''(row, col) -> (x, y)'''
return int(Manager.list_x + (col-Manager.col)*Manager.__brick_size), int\
(Manager.list_y+(row-Manager.row)*Manager.__brick_size)
@staticmethod
def xy_rc(x, y):
'''(x, y) -> (row, col)'''
return int((y-Manager.list_y)/Manager.__brick_size+Manager.row), int\
((x-Manager.list_x)/Manager.__brick_size+Manager.col)
@staticmethod
def draw_brick(x, y):
'''Draw a brick at (x, y).'''
brick = Element(Element.brick, (x, y))
Manager.screen.blit(brick.image, brick.rect)
def draw_task(self, task_animal_num, which_animal, \
board_position=(400, 90), animal_position=(430, 35), txt_position=(455, 60)):
'''Draw task board'''
txt_size = 24
txt_color = (0, 0, 0)
Board(Board.task_board, board_position).draw(self.screen)
if which_animal == 6:
task_animal = Element(Element.ice, animal_position)
else:
task_animal = Element(Element.animals[which_animal], animal_position)
task_animal.image = pygame.transform.smoothscale(task_animal.image, (40, 40))
task_animal.draw(self.screen)
if which_animal == 6:
if task_animal_num-self.ice_num <= 0:
Board(Board.ok, (txt_position[0], txt_position[1]+15)).draw(self.screen)
else:
self.load_text(str(task_animal_num-self.ice_num), txt_position, txt_size, txt_color)
else:
if task_animal_num - self.animal_num[which_animal] <= 0:
Board(Board.ok, (txt_position[0], txt_position[1]+15)).draw(self.screen)
else:
self.load_text(str(task_animal_num - self.animal_num[which_animal]), txt_position, txt_size, txt_color)
    def draw(self):
        '''Draw background, animals, and so on.

        Renders the full playing scene -- background, steps-left HUD,
        level/pause widgets, bricks, ice, animals, the per-level task
        boards, the selection frame and the score bar -- and returns the
        sprite group of animal sprites for later animation.
        '''
        # Draw background
        self.screen.blit(Manager.__bg, (0, 0))
        # Display steps left (one or two digits)
        Board(Board.step_board, (0, 142)).draw(self.screen)
        tens, single = divmod(self.step, 10)
        if tens == 0:
            Board(Board.num_format%single, (790, 110)).draw(self.screen)
        else:
            Board(Board.num_format%tens, (775, 110)).draw(self.screen)
            Board(Board.num_format%single, (805, 110)).draw(self.screen)
        # Display level & pause button
        Board(Board.level_format%self.level, (30, 105)).draw(self.screen)
        Element(Element.stop, Element.stop_position).draw(self.screen)
        # Draw bricks, ice and animals over the whole 21x21 grid
        brick_group = pygame.sprite.Group()
        animal_group = pygame.sprite.Group()
        ice_group = pygame.sprite.Group()
        for i in range(0, 21):
            for j in range(0, 21):
                x, y = Manager.rc_xy(i, j)
                if self.animal[i][j] != -1:
                    brick_group.add(Element(Element.brick, (x, y)))
                    animal_group.add(Element(Element.animals[self.animal[i][j]], (x, y)))
                if self.ice_list[i][j] != -1:
                    ice_group.add(Element(Element.ice, (x, y)))
        brick_group.draw(self.screen)
        ice_group.draw(self.screen)
        for animallist in animal_group:
            self.screen.blit(animallist.image, animallist.rect)
        # Per-level task boards: draw_task(required count, target kind[, positions]);
        # kind 6 is the ice task.
        if self.level == 1:
            self.draw_task(10, 4)
        elif self.level == 2:
            self.draw_task(21, 1)
        elif self.level == 3:
            self.draw_task(16, 4, (300, 90), (330, 35), (360, 60))
            self.draw_task(16, 5, (500, 90), (530, 35), (560, 60))
        elif self.level == 4:
            self.draw_task(18, 5, (300, 90), (330, 35), (360, 60))
            self.draw_task(18, 2, (500, 90), (530, 35), (560, 60))
        elif self.level == 5:
            self.draw_task(28, 2, (300, 90), (330, 35), (360, 60))
            self.draw_task(28, 0, (500, 90), (530, 35), (560, 60))
        elif self.level == 6:
            self.draw_task(70, 4)
        elif self.level == 7:
            self.draw_task(36, 1)
            self.draw_task(36, 2, (300, 90), (330, 35), (360, 60))
            self.draw_task(36, 0, (500, 90), (530, 35), (560, 60))
        elif self.level == 8:
            self.draw_task(15, 6)
        elif self.level == 9:
            self.draw_task(49, 6)
        else:
            self.draw_task(39, 6)
        # Display selection frame over the currently selected animal
        if self.selected != [-1, -1]:
            frame_sprite = Element(Element.frame, Manager.rc_xy(self.selected[0], self.selected[1]))
            self.screen.blit(frame_sprite.image, frame_sprite.rect)
        # Show score text and a 200px-wide progress bar (2px per point)
        self.load_text('Score:' + str(self.score), (300, 550), 30)
        pygame.draw.rect(self.screen, (50, 150, 50, 180), Rect(300, 570, self.score * 2, 25))
        pygame.draw.rect(self.screen, (100, 200, 100, 180), Rect(300, 570, 200, 25), 2)
        return animal_group
def mouse_image(self):
'''Replace the mouse image with img/mouse.png'''
mouse_cursor = pygame.image.load('img/mouse.png').convert_alpha()
mouse_x, mouse_y = pygame.mouse.get_pos()
# Find the topleft position of the mouse
mouse_x -= mouse_cursor.get_width() / 2
mouse_y -= mouse_cursor.get_height() / 2
self.screen.blit(mouse_cursor, (mouse_x, mouse_y))
    def mouse_select(self, mousex, mousey):
        '''Handle a mouse click.

        Behaviour depends on self.type: 1 = success-window buttons,
        -1 = failure-window buttons, 0 = in-game board / pause clicks.
        Buttons are hit-tested against hard-coded pixel rectangles.
        '''
        if self.type == 1: # Passed
            if Board.button_position[0][0] < mousex < Board.button_position[0][0]+100 \
            and Board.button_position[0][1] - 50 < mousey < Board.button_position[0][1]: # Clicked replay button
                # Replaying costs energy; with < 5 energy fall back to the tree.
                if self.energy_num < 5:
                    self.level = 0
                self.reset_mode = True
            elif Board.button_position[1][0] < mousex < Board.button_position[1][0]+100 \
            and Board.button_position[1][1]-50 < mousey < Board.button_position[1][1]: # Clicked next level button
                if self.level < 10:
                    if self.energy_num < 5:
                        self.level = 0
                    else:
                        self.level += 1
                self.reset_mode = True
            elif 610 < mousex < 610 + 55 and 205 - 55 < mousey < 205: # "X": back to the tree
                self.level = 0
                self.reset_mode = True
        elif self.type == -1: # Failed
            if Board.button_position[1][0] < mousex < Board.button_position[1][0]+100 \
            and Board.button_position[1][1]-50 < mousey < Board.button_position[1][1]: # Clicked replay button
                if self.energy_num < 5:
                    self.level = 0
                self.reset_mode = True
            elif Board.button_position[0][0] < mousex < Board.button_position[0][0]+100 \
            and Board.button_position[0][1]-50 < mousey < Board.button_position[0][1]: # Clicked 5 more steps button
                # 5 money buys 5 extra steps and resumes play.
                if self.money < 5:
                    self.level = 0
                else:
                    self.money -= 5
                    self.step += 5
                    self.type = 0 # Playing game
                    self.fail_board = Board(Board.fail, [200, 0])
            elif 610 < mousex < 610 + 55 and 205 - 55 < mousey < 205: # "X": back to the tree
                self.level = 0
                self.reset_mode = True
        elif self.type == 0:
            if self.list_x < mousex < self.list_x + Manager.__brick_size * self.width \
            and self.list_y < mousey < self.list_y + Manager.__brick_size * self.height:
                # Click inside the board: select the cell, and arm a swap
                # when it is orthogonally adjacent to the previous pick.
                mouse_selected = Manager.xy_rc(mousex, mousey)
                if self.animal[mouse_selected[0]][mouse_selected[1]] != -1:
                    play_sound(Sounds.CLICK)
                    self.selected = mouse_selected
                    if (self.last_sel[0] == self.selected[0] and abs(self.last_sel[1] - self.selected[1]) == 1) \
                    or (self.last_sel[1] == self.selected[1] and abs(self.last_sel[0] - self.selected[0]) == 1):
                        self.swap_sign = 1 # Valid move, swap
            elif Element.stop_position[0] < mousex < Element.stop_position[0]+self.stop_width\
            and Element.stop_position[1] < mousey < Element.stop_position[1]+self.stop_width: # Exit button clicked
                play_sound(Sounds.CLICK_BUTTON)
                self.level = 0
                self.reset_mode = True
            else:
                self.selected = [-1, -1]
    def swap(self, spritegroup):
        '''Swap two selected animals on the board.

        First click only records the cell (swap_sign == -1).  Once
        mouse_select has armed a swap (swap_sign == 1), animate the two
        sprites sliding past each other, swap the grid values, run the
        elimination pass, and -- if nothing was eliminated -- swap the
        values back.
        '''
        if self.swap_sign == -1: # Not swapped: just remember this selection
            self.last_sel = self.selected
        if self.swap_sign == 1:
            last_x, last_y = Manager.rc_xy(self.last_sel[0], self.last_sel[1])
            sel_x, sel_y = Manager.rc_xy(self.selected[0], self.selected[1])
            # NOTE: original comments had these two cases inverted --
            # equal rows means the cells sit side by side, so the sprites
            # slide HORIZONTALLY (speed is [d_col, 0]).
            if self.last_sel[0] == self.selected[0]: # Same row: slide horizontally
                for animallist in spritegroup:
                    if animallist.rect.topleft == (last_x, last_y):
                        last_sprite = animallist
                        last_sprite.speed = [self.selected[1]-self.last_sel[1], 0]
                    elif animallist.rect.topleft == (sel_x, sel_y):
                        selected_sprite = animallist
                        selected_sprite.speed = [self.last_sel[1]-self.selected[1], 0]
            else: # Same column: slide vertically
                for animallist in spritegroup:
                    if animallist.rect.topleft == (last_x, last_y):
                        last_sprite = animallist
                        last_sprite.speed = [0, self.selected[0]-self.last_sel[0]]
                    elif animallist.rect.topleft == (sel_x, sel_y):
                        selected_sprite = animallist
                        selected_sprite.speed = [0, self.last_sel[0]-self.selected[0]]
            # Animate until the sprites reach their destination cells.
            while last_sprite.speed != [0, 0]:
                delay(5)
                self.draw_brick(last_x, last_y)
                self.draw_brick(sel_x, sel_y)
                last_sprite.move(last_sprite.speed)
                selected_sprite.move(selected_sprite.speed)
                self.screen.blit(last_sprite.image, last_sprite.rect)
                self.screen.blit(selected_sprite.image, selected_sprite.rect)
                pygame.display.flip()
            self.swap_values()
            if self.eliminate_animals():
                self.step -= 1 # A productive swap costs one step
            else:
                self.swap_values() # Nothing eliminated: undo the swap
            self.value_swapped = False
            self.boom_sel = self.selected
        self.swap_sign = -1
        self.selected = [-1, -1]
def swap_values(self):
'''Swap values.'''
(xl, yl), (xc, yc) = self.last_sel, self.selected
self.animal[xl][yl], self.animal[xc][yc] = self.animal[xc][yc], self.animal[xl][yl]
def load_text(self, text, position, txt_size, txt_color=(255, 255, 255)):
'''Display text with given position, size and color.'''
my_font = pygame.font.SysFont(None, txt_size)
text_screen = my_font.render(text, True, txt_color)
self.screen.blit(text_screen, position)
    def death_map(self):
        '''Checks if there is not a valid move.

        Scans every cell for a local pattern that could produce a 3-run
        after a single swap.  If none exists, flashes the "no moves"
        element and reshuffles the board via set_level_mode while
        preserving step/score/task progress.
        NOTE(review): the `break`s only leave the inner column loop, so
        the scan keeps going over later rows even after a move is found;
        death_sign stays False either way, so the result is unaffected.
        '''
        for i in range(self.row, self.row + self.height):
            for j in range(self.col, self.col + self.width):
                if self.animal[i][j] != -1:
                    if self.animal[i][j] == self.animal[i][j+1]:
                        # Horizontal pair: a matching third above/below
                        # either end makes a move possible.
                        if (self.animal[i][j] in [self.animal[i-1][j-1], self.animal[i+1][j-1]] \
                        and self.animal[i][j-1] != -1) or \
                        (self.animal[i][j] in [self.animal[i-1][j+2], self.animal[i+1][j+2]] \
                        and self.animal[i][j+2] != -1):
                            # a b
                            # a a
                            # c d
                            self.death_sign = False
                            break
                    if self.animal[i][j] == self.animal[i+1][j]:
                        # Vertical pair: a matching third beside either end.
                        if (self.animal[i][j] in [self.animal[i-1][j-1], self.animal[i-1][j+1]] \
                        and self.animal[i-1][j] != -1) or \
                        (self.animal[i][j] in [self.animal[i+2][j - 1], self.animal[i+2][j + 1]] \
                        and self.animal[i+2][j] != -1):
                            # a b
                            # a
                            # a
                            # c d
                            self.death_sign = False
                            break
                    else:
                        # No vertical pair: look for split (gapped) patterns.
                        if self.animal[i-1][j-1] == self.animal[i][j]:
                            if (self.animal[i][j] == self.animal[i-1][j+1] and self.animal[i-1][j] != -1)\
                            or (self.animal[i][j] == self.animal[i+1][j-1] and self.animal[i][j-1] != -1):
                                # a a a b
                                # a a
                                # c a
                                self.death_sign = False
                                break
                        if self.animal[i][j] == self.animal[i+1][j+1]:
                            if (self.animal[i][j] == self.animal[i-1][j+1] and self.animal[i][j+1] != -1)\
                            or (self.animal[i][j] == self.animal[i+1][j-1] and self.animal[i+1][j] != -1):
                                # a b
                                # a a
                                # b a a a
                                self.death_sign = False
                                break
        if self.death_sign:
            # Dead board: flash the notice, then rebuild the level while
            # restoring the player's progress counters.
            delay(500)
            Element(Element.none_animal, (230, 150)).draw(self.screen)
            pygame.display.flip()
            delay(500)
            temp = [self.step, self.score, self.animal_num, self.ice_num, self.energy_num]
            self.reset_mode = True
            self.set_level_mode(self.level)
            self.step = temp[0]
            self.score = temp[1]
            self.animal_num = temp[2]
            self.ice_num = temp[3]
            self.energy_num = temp[4]
        else:
            self.death_sign = True
# TODO: Merge 4 functions below
def exists_left(self, i, j, num):
'''Checks there are at least {num} continous same animals on the left side of (i, j).'''
for t in range(0, num):
if self.animal[i][j] != self.animal[i][j - t] or self.animal[i][j] < 0:
return False
return True
def exists_right(self, i, j, num):
'''Checks there are at least {num} continous same animals on the right side of (i, j).'''
for t in range(0, num):
if self.animal[i][j] != self.animal[i][j + t] or self.animal[i][j] < 0:
return False
return True
def exists_up(self, i, j, num):
'''Checks there are at least {num} continous same animals above (i, j).'''
for t in range(0, num):
if self.animal[i][j] != self.animal[i - t][j] or self.animal[i][j] < 0:
return False
return True
def exists_down(self, i, j, num):
'''Checks there are at least {num} continous same animals below (i, j).'''
for t in range(0, num):
if self.animal[i][j] != self.animal[i + t][j] or self.animal[i][j] < 0:
return False
return True
# TODO: Merge 4 functions below
def change_left(self, i, j, num):
'''Change the left side of the animal.'''
self.value_swapped = True
self.score += num
for k in range(0, int(num)):
self.animal[i][j - k] = -2
def change_right(self, i, j, num):
'''Change the right side of the animal.'''
self.value_swapped = True
self.score += num
for k in range(0, num):
self.animal[i][j + k] = -2
def change_up(self, i, j, num):
'''Change above the animal.'''
self.value_swapped = True
self.score += num
for k in range(0, num):
self.animal[i-k][j] = -2
def change_down(self, i, j, num):
'''Change below the animal.'''
self.value_swapped = True
self.score += num
for k in range(0, num):
self.animal[i+k][j] = -2
    def eliminate_animals(self):
        '''Scan the board and eliminate every run of 3+ equal animals.

        Handles horizontal runs of length 5/4/3 with attached vertical
        runs (L/T shapes), then vertical runs of 5/4/3 with attached
        horizontal runs.  Each hit updates the per-kind animal_num
        counter, plays the matching elimination sound and marks cells -2
        for fall_animal().  Afterwards the score gained this pass selects
        a praise banner.  Returns True if anything was eliminated.
        '''
        score_level = self.score # remember score to measure this pass's gain
        self.value_swapped = False
        for i in range(self.row, self.row + self.height):
            for j in range(self.col, self.col + self.width):
                # TODO: Simplify the if statement below
                # Horizontal run of 5, possibly with a vertical arm.
                if self.exists_right(i, j, 5):
                    self.value_swapped = True
                    if self.exists_down(i, j+2, 3):
                        self.animal_num[self.animal[i][j]] += 7
                        Sounds.eliminate(5) # Elimination sound 5
                        self.change_right(i, j, 5)
                        self.change_down(i, j+2, 3)
                    else:
                        self.animal_num[self.animal[i][j]] += 5
                        Sounds.eliminate(3) # Elimination sound 3
                        self.change_right(i, j, 5)
                # Horizontal run of 4, possibly with a vertical arm.
                elif self.exists_right(i, j, 4):
                    self.value_swapped = True
                    if self.exists_down(i, j+1, 3):
                        self.animal_num[self.animal[i][j]] += 6
                        Sounds.eliminate(4) # Elimination sound 4
                        self.change_right(i, j, 4)
                        self.change_down(i, j+1, 3)
                    elif self.exists_down(i, j+2, 3):
                        self.animal_num[self.animal[i][j]] += 6
                        Sounds.eliminate(4) # Elimination sound 4
                        self.change_right(i, j, 4)
                        self.change_down(i, j+2, 3)
                    else:
                        self.animal_num[self.animal[i][j]] += 4
                        Sounds.eliminate(2) # Elimination sound 2
                        self.change_right(i, j, 4)
                # Horizontal run of 3, possibly with a vertical arm.
                elif self.exists_right(i, j, 3):
                    self.value_swapped = True
                    if self.exists_down(i, j, 3):
                        self.animal_num[self.animal[i][j]] += 5
                        Sounds.eliminate(3) # Elimination sound 3
                        self.change_right(i, j, 3)
                        self.change_down(i, j, 3)
                    elif self.exists_down(i, j+1, 3):
                        self.animal_num[self.animal[i][j]] += 5
                        Sounds.eliminate(3) # Elimination sound 3
                        self.change_right(i, j, 3)
                        self.change_down(i, j+1, 3)
                    elif self.exists_down(i, j+2, 3):
                        self.animal_num[self.animal[i][j]] += 5
                        Sounds.eliminate(3) # Elimination sound 3
                        self.change_right(i, j, 3)
                        self.change_down(i, j + 2, 3)
                    else:
                        self.animal_num[self.animal[i][j]] += 3
                        Sounds.eliminate(1) # Elimination sound 1
                        self.change_right(i, j, 3)
                # Vertical run of 5, possibly with a horizontal arm.
                elif self.exists_down(i, j, 5):
                    self.value_swapped = True
                    if self.exists_right(i+2, j, 3):
                        self.animal_num[self.animal[i][j]] += 7
                        Sounds.eliminate(5) # Elimination sound 5
                        self.change_down(i, j, 5)
                        self.change_right(i+2, j, 3)
                    elif self.exists_left(i+2, j, 3):
                        self.animal_num[self.animal[i][j]] += 7
                        Sounds.eliminate(5) # Elimination sound 5
                        self.change_down(i, j, 5)
                        self.change_left(i+2, j, 3)
                    else:
                        self.animal_num[self.animal[i][j]] += 5
                        Sounds.eliminate(3) # Elimination sound 3
                        self.change_down(i, j, 5)
                # Vertical run of 4, possibly with a horizontal arm.
                elif self.exists_down(i, j, 4):
                    self.value_swapped = True
                    if self.exists_left(i+1, j, 3):
                        self.animal_num[self.animal[i][j]] += 6
                        Sounds.eliminate(4) # Elimination sound 4
                        self.change_down(i, j, 4)
                        self.change_left(i+1, j, 3)
                    elif self.exists_right(i+1, j, 3):
                        self.animal_num[self.animal[i][j]] += 6
                        Sounds.eliminate(4) # Elimination sound 4
                        self.change_down(i, j, 4)
                        self.change_right(i+1, j, 3)
                    elif self.exists_left(i+2, j, 3):
                        self.animal_num[self.animal[i][j]] += 6
                        Sounds.eliminate(4) # Elimination sound 4
                        self.change_down(i, j, 4)
                        self.change_left(i+2, j, 3)
                    elif self.exists_right(i+2, j, 3):
                        self.animal_num[self.animal[i][j]] += 6
                        Sounds.eliminate(4) # Elimination sound 4
                        self.change_down(i, j, 4)
                        self.change_right(i+2, j, 3)
                    else:
                        self.animal_num[self.animal[i][j]] += 4
                        Sounds.eliminate(2) # Elimination sound 2
                        self.change_down(i, j, 4)
                # Vertical run of 3, possibly with one or two horizontal arms.
                elif self.exists_down(i, j, 3):
                    self.value_swapped = True
                    if self.exists_left(i+1, j, 3):
                        self.animal_num[self.animal[i][j]] += 5
                        Sounds.eliminate(3) # Elimination sound 3
                        self.change_down(i, j, 3)
                        self.change_left(i+1, j, 3)
                    elif self.exists_right(i+1, j, 3):
                        self.animal_num[self.animal[i][j]] += 5
                        Sounds.eliminate(3) # Elimination sound 3
                        self.change_down(i, j, 3)
                        self.change_right(i+1, j, 3)
                    elif self.exists_left(i+2, j, 3):
                        self.animal_num[self.animal[i][j]] += 5
                        Sounds.eliminate(3) # Elimination sound 3
                        self.change_down(i, j, 3)
                        self.change_left(i+2, j, 3)
                    elif self.exists_right(i+2, j, 3):
                        self.animal_num[self.animal[i][j]] += 5
                        Sounds.eliminate(3) # Elimination sound 3
                        self.change_down(i, j, 3)
                        self.change_right(i+2, j, 3)
                    elif self.exists_left(i+2, j, 2) and self.exists_right(i+2, j, 2):
                        self.animal_num[self.animal[i][j]] += 5
                        Sounds.eliminate(3) # Elimination sound 3
                        self.change_down(i, j, 3)
                        self.change_left(i+2, j, 2)
                        self.change_right(i+2, j, 2)
                    elif self.exists_left(i+2, j, 2) and self.exists_right(i+2, j, 3):
                        self.animal_num[self.animal[i][j]] += 6
                        Sounds.eliminate(4) # Elimination sound 4
                        self.change_down(i, j, 3)
                        self.change_left(i+2, j, 2)
                        self.change_right(i+2, j, 3)
                    elif self.exists_left(i+2, j, 3) and self.exists_right(i+2, j, 2):
                        self.animal_num[self.animal[i][j]] += 6
                        Sounds.eliminate(4) # Elimination sound 4
                        self.change_down(i, j, 3)
                        self.change_left(i+2, j, 3)
                        self.change_right(i+2, j, 2)
                    elif self.exists_left(i+2, j, 3) and self.exists_right(i+2, j, 3):
                        self.animal_num[self.animal[i][j]] += 7
                        Sounds.eliminate(5) # Elimination sound 5
                        self.change_down(i, j, 3)
                        self.change_left(i+2, j, 3)
                        self.change_right(i+2, j, 3)
                    else:
                        self.animal_num[self.animal[i][j]] += 3
                        Sounds.eliminate(1) # Elimination sound 1
                        self.change_down(i, j, 3)
        self.fall_animal()
        score_level = self.score - score_level # Score gained this pass
        # Display & speak: good, great, amazing, excellent, unbelievable
        if score_level < 5:
            return self.value_swapped
        if score_level < 8: # 5 good
            Sounds.score_level(0)
            Element(Element.score_level[0], (350, 250)).draw(self.screen)
            pygame.display.flip()
            delay(500)
        elif score_level < 10: # 8 great
            Sounds.score_level(1)
            Element(Element.score_level[1], (350, 250)).draw(self.screen)
            pygame.display.flip()
            delay(500)
        elif score_level < 15: # 10 amazing
            Sounds.score_level(2)
            Element(Element.score_level[2], (350, 250)).draw(self.screen)
            pygame.display.flip()
            delay(500)
        elif score_level < 20: # 15 excellent
            Sounds.score_level(3)
            Element(Element.score_level[3], (350, 250)).draw(self.screen)
            pygame.display.flip()
            delay(500)
        elif score_level >= 20: # 20 unbelievable
            Sounds.score_level(4)
            Element(Element.score_level[4], (350, 250)).draw(self.screen)
            pygame.display.flip()
            delay(500)
        return self.value_swapped # Return the swap value sign
    def fall_animal(self): # pylint: disable=too-many-locals
        '''Animation of falling animals.

        Phase 1: play the 9-frame break ("bling", plus ice-crack where
        iced) animation over every eliminated (-2) cell.  Phase 2: row by
        row, break ice under holes, shift the animals above each hole
        down one cell (the topmost hole refills with a random animal),
        and animate the one-cell slide.
        '''
        clock = pygame.time.Clock()
        position = []
        ice_position = []
        # Collect the pixel positions of all eliminated cells (and which
        # of them carry ice).
        for i in range(self.row, self.row + self.height):
            for j in range(self.col, self.col + self.width):
                if self.animal[i][j] == -2:
                    x, y = self.rc_xy(i, j)
                    position.append((x, y))
                    if self.ice_list[i][j] == 1:
                        ice_position.append((x, y))
        if position:
            for index in range(0, 9):
                clock.tick(20) # cap the break animation at 20 FPS
                for pos in position:
                    self.draw_brick(pos[0], pos[1])
                    if pos in ice_position:
                        Element(Element.ice_format%index, (pos[0], pos[1])).draw(self.screen)
                    Element(Element.bling_format%index, (pos[0], pos[1])).draw(self.screen)
                pygame.display.flip()
        for i in range(self.row, self.row + self.height):
            brick_position = []
            fall_animal_list = []
            speed = [0, 1] # downward; sprites stop themselves at the next cell
            for j in range(self.col, self.col + self.width):
                if self.animal[i][j] == -2:
                    x, y = self.rc_xy(i, j)
                    if self.ice_list[i][j] == 1:
                        play_sound(Sounds.ICE_BREAKING)
                        self.ice_num += 1 # one more required ice broken
                        self.ice_list[i][j] = -1
                    brick_position.append((x, y))
                    # Shift the whole column above this hole down one cell.
                    for m in range(i, self.row - 1, -1):
                        if self.animal[m - 1][j] != -1:
                            x, y = self.rc_xy(m - 1, j)
                            brick_position.append((x, y))
                            animal = Element(Element.animals[self.animal[m - 1][j]], (x, y))
                            fall_animal_list.append(animal)
                            self.animal[m][j] = self.animal[m - 1][j]
                        else:
                            # Above the board: refill the top cell randomly.
                            self.animal[m][j] = randint(0, 5)
                            break
            # NOTE(review): `speed` ends up tracking the LAST sprite in the
            # list each frame, so the loop stops when that sprite stops --
            # presumably all sprites move in lockstep; verify in Element.move.
            while speed != [0, 0] and fall_animal_list:
                for position in brick_position:
                    self.draw_brick(position[0], position[1])
                for animal_sprite in fall_animal_list:
                    animal_sprite.move(speed)
                    animal_sprite.draw(self.screen)
                    speed = animal_sprite.speed
                pygame.display.flip()
def judge_next(self, tp, score):
'''Check whether the next level is reached or not'''
if tp == 1: # Passed
self.load_fns_window(score)
elif tp == -1: # Failed
self.load_fail_window()
def load_fail_window(self):
'''Display the failure board and buttons'''
sound_sign = 0
step_add = Board(Board.step_add, Board.button_position[0]) # L: 5 more steps
retry = Board(Board.replay, Board.button_position[1]) # R: Replay
self.screen.blit(self.fail_board.image, self.fail_board.rect) # Failure board
self.screen.blit(step_add.image, step_add.rect)
self.screen.blit(retry.image, retry.rect)
while self.fail_board.speed != [0, 0]:
self.draw()
self.screen.blit(self.fail_board.image, self.fail_board.rect)
self.fail_board.move()
pygame.display.flip()
if sound_sign == 0:
play_sound(Sounds.BOARD_SOUND)
sound_sign = 1
def load_fns_window(self, score):
'''Display the success board, score and buttons'''
sound_sign = 0
replay = Board(Board.replay, Board.button_position[0]) # L: Replay
self.screen.blit(self.success_board.image, self.success_board.rect) # Successful board
if self.level < 10: # If not the last level
next_level = Board(Board.next, Board.button_position[1]) # R: Next level
self.screen.blit(next_level.image, next_level.rect)
self.screen.blit(replay.image, replay.rect)
while self.success_board.speed != [0, 0]:
self.draw()
self.screen.blit(self.success_board.image, self.success_board.rect)
self.success_board.move()
pygame.display.flip()
if sound_sign == 0:
play_sound(Sounds.BOARD_SOUND)
sound_sign = 1
self.displayStars(score) # Display the stars
# Money
self.load_text(str(self.score*2), (Board.starts_position[0][0]+75, Board.starts_position[0][0]+46), 20, (0, 0, 0))
def displayStars(self, score):
'''Display the stars according to the score.'''
star1 = Board(Board.stars, Board.starts_position[0])
star2 = Board(Board.stars, Board.starts_position[1])
star3 = Board(Board.stars, Board.starts_position[2])
if 0 <= score < self.min:
self.load_text('1', (Board.starts_position[1][0]+48, Board.starts_position[1][1]+35), 20, (0, 0, 0))
self.screen.blit(star1.image, star1.rect)
elif self.min <= score <= self.max:
self.load_text('2', (Board.starts_position[1][0] + 48, Board.starts_position[1][1] + 35), 20, (0, 0, 0))
self.screen.blit(star1.image, star1.rect)
self.screen.blit(star2.image, star2.rect)
elif score > self.max:
self.load_text('5', (Board.starts_position[1][0] + 48, Board.starts_position[1][1] + 35), 20, (0, 0, 0))
self.screen.blit(star1.image, star1.rect)
self.screen.blit(star2.image, star2.rect)
self.screen.blit(star3.image, star3.rect)
pygame.display.flip()
    def set_level_mode(self, level):
        '''Set the level mode and its steps.

        When reset_mode is set, re-initialises the board for *level* (1-10):
        rebuilds the grid via __init__, carves out holes (-1 cells), lays ice,
        sets the step budget, charges 5 energy and clears per-level counters.
        '''
        self.level = level
        if self.reset_mode:  # If it is required to reset the mode
            self.num_sign = True
            if level == 1:
                self.__init__(7, 7)
                # Holes for level 1 are given in absolute 21x21 coordinates.
                self.animal[7][9] = self.animal[7][10] = self.animal[7][11] = self.animal[8][10] = self.animal[11][7] = \
                    self.animal[11][13] = self.animal[12][7] = self.animal[12][8] = self.animal[12][12] = self.animal[12][13] = \
                    self.animal[13][7] = self.animal[13][8] = self.animal[13][9] = self.animal[13][11] = self.animal[13][12] = \
                    self.animal[13][13] = -1
                self.init_step = 17  # 17 initial steps
            elif level == 2:
                self.__init__(4, 8)
                self.init_step = 16  # 16 initial steps
            elif level == 3:
                self.__init__(7, 7)
                self.init_step = 18  # 18 initial steps
            elif level == 4:
                self.__init__(9, 7)
                row, col = self.row, self.col
                # Corner holes relative to the board's top-left cell.
                self.animal[row][col] = self.animal[row][col+7] = self.animal[row][col+8] = self.animal[row+1][col+8] = \
                    self.animal[row+5][col] = self.animal[row+6][col] = self.animal[row+6][col+1] = self.animal[row+6][col+8] = -1
                self.init_step = 20
            elif level == 5:
                self.__init__(8, 9)
                row, col = self.row, self.col
                self.animal[row][col+7] = self.animal[row+2][col] = self.animal[row+5][col] = self.animal[row+3][col+7] = \
                    self.animal[row+6][col+7] = self.animal[row+8][col] = -1
                self.init_step = 20
            elif level == 6:
                self.__init__(9, 9)
                row, col = self.row, self.col
                self.animal[row][col] = self.animal[row][col+8] = self.animal[row+2][col+4] = self.animal[row+3][col+2] = \
                    self.animal[row+3][col+6] = self.animal[row+8][col] = self.animal[row+8][col+8] = -1
                # Central 2x3 hole.
                for i in range(row+4, row+6):
                    for j in range(col+3, col+6):
                        self.animal[i][j] = -1
                self.init_step = 28
            elif level == 7:
                self.__init__(9, 9)
                row, col = self.row, self.col
                # Full-height vertical bar plus two horizontal half-bars.
                for i in range(row, row + 9):
                    self.animal[i][col+4] = -1
                for j in range(col, col+4):
                    self.animal[row+3][j] = -1
                for j in range(col+5, col+9):
                    self.animal[row+5][j] = -1
                self.init_step = 25
            elif level == 8:
                self.__init__(7, 8)
                row, col = self.row, self.col
                # 3x5 patch of ice in the middle.
                for i in range(row+2, row+5):
                    for j in range(col+1, col+6):
                        self.ice_list[i][j] = 1
                self.init_step = 21
            elif level == 9:
                self.__init__(9, 9)
                row, col = self.row, self.col
                self.animal[row][col+4] = self.animal[row+4][col] = self.animal[row+4][col+8] = self.animal[row+8][col+4] = -1
                # Ice everywhere except the outer ring.
                for i in range(row+1, row+8):
                    for j in range(col+1, col+8):
                        self.ice_list[i][j] = 1
                self.init_step = 35
            else:
                # Level 10: top rows carved out, a few random singles, heavy ice.
                self.__init__(9, 9)
                row, col = self.row, self.col
                for i in range(row, row+2):
                    for j in range(col, col+9):
                        self.animal[i][j] = -1
                self.animal[row][col+4] = randint(0, 5)
                self.animal[row+1][col+2] = randint(0, 5)
                self.animal[row+1][col+4] = randint(0, 5)
                self.animal[row+1][col+6] = randint(0, 5)
                self.animal[row+2][col+1] = self.animal[row+3][col+1] = self.animal[row+2][col+3] = self.animal[row+3][col+3] =\
                    self.animal[row+2][col+5] = self.animal[row+3][col+5] = self.animal[row+2][col+7] = \
                    self.animal[row+3][col+7] = self.animal[row+8][col] = self.animal[row+8][col+8] = -1
                for i in range(row+4, row+8):
                    for j in range(col, col+9):
                        self.ice_list[i][j] = 1
                self.ice_list[row+2][col+4] = self.ice_list[row+3][col+2] = self.ice_list[row+3][col+4] = \
                    self.ice_list[row+3][col+6] = 1
                self.init_step = 40
            # Common per-attempt reset.
            self.type = 0
            self.energy_num -= 5  # Each attempt costs 5 energy
            self.success_board = Board(Board.success, [200, 0])  # Success board
            self.fail_board = Board(Board.fail, [200, 0])  # Failure board
            self.step = self.init_step
            self.score = 0
            self.animal_num = [0, 0, 0, 0, 0, 0]
            self.ice_num = 0
            self.reset_mode = False
def num_add(self):
'''Add to score'''
if self.num_sign:
self.money += self.score * 2
if self.score < self.min:
self.energy_num += 1
elif self.score < self.max:
self.energy_num += 2
else:
self.energy_num += 5
self.num_sign = False
def judge_level(self):
'''Check whether the level was passed'''
if self.step <= 0:
self.type = -1 # Game over
if self.level == 1:
if self.animal_num[4] >= 10: # L1: 10 frogs
self.type = 1 # Level 1 passed
self.num_add()
elif self.level == 2:
if self.animal_num[1] >= 21: # L2: 21 bears
self.type = 1 # Level 2 passed
self.num_add()
elif self.level == 3:
if self.animal_num[4] >= 16 and self.animal_num[5] >= 16: # L3: 16 frogs and 16 cows
self.type = 1 # Level 3 passed
self.num_add()
elif self.level == 4:
if self.animal_num[5] >= 18 and self.animal_num[2] >= 18: # L4: 18 cows and 18 chicks
self.type = 1 # Level 4 passed
self.num_add()
elif self.level == 5:
if self.animal_num[2] >= 28 and self.animal_num[0] >= 28: # L5: 28 chicks and 28 foxes
self.type = 1 # Level 5 passed
self.num_add()
elif self.level == 6:
if self.animal_num[4] >= 70: # L6: 70 frogs
self.type = 1 # Level 6 passed
self.num_add()
elif self.level == 7:
if self.animal_num[2] >= 36 and self.animal_num[1] >= 36 and self.animal_num[0] >= 36: # L7: 36 chickens, 36 bears and 36 foxes
self.type = 1 # Level 7 passed
self.num_add()
elif self.level == 8:
if self.ice_num >= 15: # L8: 15 ice
self.type = 1 # Level 8 passed
self.num_add()
elif self.level == 9:
if self.ice_num >= 49: # L9: 49 ice
self.type = 1 # Level 9 passed
self.num_add()
else:
if self.ice_num >= 39: # L10: 39 ice
self.type = 1 # Level 10 passed
self.num_add()
self.judge_next(self.type, self.score) | manager.py | '''Game manager module.'''
# pylint: disable=fixme, line-too-long, invalid-name, undefined-variable
# pylint: disable=too-many-branches, too-many-statements, too-many-arguments
from random import randint
import pygame
from pygame.locals import * # pylint: disable=wildcard-import, unused-wildcard-import
from pygame.time import delay
from sprites import Tree, Board, Element
from sounds import Sounds, play_sound
class TreeManager:
    '''Tree (level-select) scene manager.

    Draws the level tree with the player's energy and money, shows the
    energy-purchase popup, and handles mouse clicks in that scene.
    '''
    __screen_size = (900, 600)
    screen = pygame.display.set_mode(__screen_size, DOUBLEBUF, 32)
    fruit_list = []
    fruit_image = pygame.image.load(Tree.fruit).convert_alpha()
    fruit_width = fruit_image.get_width()
    fruit_height = fruit_image.get_height()
    type = 0  # 0 Tree, 1 Energy
    energy_full = False  # Energy full mark
    money_empty = False  # Not any money left?
    def display_text(self, text, position, txt_size=25, txt_color=(255, 255, 255)):
        '''Display text with given position, size and color.'''
        my_font = pygame.font.SysFont(None, txt_size)
        text_screen = my_font.render(text, True, txt_color)
        self.screen.blit(text_screen, position)
    def draw_tree(self, energy_num, money_num):
        '''Draws the game tree.'''
        Tree(Tree.tree, (0, 600)).draw(self.screen)  # Draw tree
        Tree(Tree.energy_num, Tree.energy_num_position).draw(self.screen)  # Draw energy num
        if energy_num > 30:
            # Energy is capped at 30 in the display.
            self.display_text(str(30) + '/30', (22, 55), 21)
        else:
            self.display_text(str(energy_num)+'/30', (22, 55), 21)
        Tree(Tree.money, (15, 135)).draw(self.screen)  # Draw money
        self.display_text(str(money_num), (32, 124), 21)
        for i in range(0, 10):  # Draw fruits (one per level, numbered 1-10)
            Tree(Tree.fruit, Tree.position[i]).draw(self.screen)
            self.display_text(str(i+1), (Tree.position[i][0]+15, Tree.position[i][1]-47))
        if self.type == 1:  # Energy-purchase popup
            Tree(Tree.energy_buy, Tree.energy_buy_position).draw(self.screen)
            if self.energy_full:
                self.display_text('energy is full!', (430, 310), 30, (255, 0, 0))
                pygame.display.flip()
                delay(500)
                self.energy_full = False
            if self.money_empty:
                self.display_text('money is not enough!', (410, 310), 30, (255, 0, 0))
                pygame.display.flip()
                delay(500)
                self.money_empty = False
    def mouse_select(self, mgr, mousex, mousey, level, energy_num, money_num):
        '''Handle mouse event.

        Writes the (possibly updated) level, energy and money back onto *mgr*.
        '''
        if self.type == 0:  # Tree Scene
            for i in range(0, 10):  # Did the click land on a level fruit?
                if Tree.position[i][0] < mousex < Tree.position[i][0] + self.fruit_width \
                        and Tree.position[i][1] - self.fruit_height < mousey < Tree.position[i][1]:
                    if energy_num <= 0:
                        self.type = 1  # No energy: open the purchase popup
                    else:
                        level = i + 1
            if Tree.energy_num_position[0] < mousex < Tree.energy_num_position[0] + 60 \
                    and Tree.energy_num_position[1] - 60 < mousey < Tree.energy_num_position[1]:  # Energy icon is 60*60 px
                play_sound(Sounds.CLICK)
                self.type = 1
        else:  # Energy Scene
            if 408 < mousex < 600 and 263 < mousey < 313:  # "Buy Energy" button clicked
                play_sound(Sounds.CLICK_BUTTON)
                if money_num < 50:
                    self.money_empty = True
                if energy_num >= 30:
                    self.energy_full = True
                elif energy_num < 30 and money_num >= 50:
                    energy_num += 5
                    money_num -= 50
            elif 619 < mousex < 638 and 158 < mousey < 177:  # "X" clicked
                self.type = 0
        mgr.level, mgr.energy_num, mgr.money = level, energy_num, money_num
# pylint: disable=too-many-public-methods, too-many-instance-attributes, too-many-nested-blocks
class Manager:
    '''Game manager.

    Owns the match-3 board state (animals, ice) inside fixed 21x21 arrays,
    the per-level progress counters, and drawing / input handling for the
    playing scene.
    '''
    __screen_size = (900, 600)
    screen = pygame.display.set_mode(__screen_size, DOUBLEBUF, 32)
    __brick_size = 50  # Pixel size of one board cell
    __bg = pygame.image.load('img/bg.png').convert()
    stop_width = 63  # Width/height of the square pause button
    selected = [-1, -1]  # Current selected [row, col]
    swap_sign = -1  # Swap sign
    last_sel = [-1, -1]  # Last selected [row, col]
    value_swapped = False  # Swapped?
    death_sign = True  # Death map sign
    boom_sel = [-1, -1]  # Eliminate 4: [row, col]
    level = 0  # Current level, 0 for tree
    money = 100  # Money
    energy_num = 30  # Energy num
    num_sign = True  # Rewards not yet granted for this level pass
    type = 2  # (0) Playing, (1) Passed, (-1) Failed, (2) Tree
    reset_mode = True  # Reset layout?
    init_step = 15  # Initial steps for each level
    step = init_step  # Steps left of the game
    score = 0  # Score
    min = 20  # Medium score 1 (one-star / two-star threshold)
    max = 50  # Medium score 2 (two-star / three-star threshold)
    animal_num = [0, 0, 0, 0, 0, 0]  # Number of eliminated animals
    ice_num = 0  # Number left of required ice
    success_board = Board(Board.success, [200, 0])  # Success board
    fail_board = Board(Board.fail, [200, 0])  # Failure board
    height, width = 9, 9
    row, col = 5, 5  # Top-left board cell inside the 21x21 arrays
    ice_list = [[-1 for _ in range(21)] for _ in range(21)]  # (-1) None, (1) Ice
    animal = [[-1 for _ in range(21)] for _ in range(21)]  # (-2) Eliminated, (-1) None, (0-5) Animal
    list_x, list_y = (__screen_size[0] - 11 * __brick_size) / 2, (__screen_size[1] - 11 * __brick_size) / 2  # Position of the blocks
    def __init__(self, width, height):
        '''Build a width x height board centred on screen.

        Also called by set_level_mode to re-initialise the existing instance
        for a new level.  Resets the ice and animal grids and fills the
        playable area with random animals.
        '''
        self.height = height
        self.width = width
        self.list_x = (Manager.__screen_size[0] - self.width * Manager.__brick_size) / 2
        self.list_y = (Manager.__screen_size[1] - self.height * Manager.__brick_size) / 2
        # Snap the pixel origin onto the (row, col) grid and back so that
        # list_x/list_y land exactly on a brick boundary.
        self.row, self.col = Manager.xy_rc(self.list_x, self.list_y)
        self.list_x, self.list_y = Manager.rc_xy(self.row, self.col)
        self.ice_list = [[-1 for _ in range(21)] for _ in range(21)]
        self.animal = [[-1 for _ in range(21)] for _ in range(21)]
        self.reset_animals()
def reset_animals(self):
'''Reset board with random animals.'''
for row in range(self.row, self.row + self.height):
for col in range(self.col, self.col + self.width):
self.animal[row][col] = randint(0, 5)
@staticmethod
def rc_xy(row, col):
'''(row, col) -> (x, y)'''
return int(Manager.list_x + (col-Manager.col)*Manager.__brick_size), int\
(Manager.list_y+(row-Manager.row)*Manager.__brick_size)
@staticmethod
def xy_rc(x, y):
'''(x, y) -> (row, col)'''
return int((y-Manager.list_y)/Manager.__brick_size+Manager.row), int\
((x-Manager.list_x)/Manager.__brick_size+Manager.col)
@staticmethod
def draw_brick(x, y):
'''Draw a brick at (x, y).'''
brick = Element(Element.brick, (x, y))
Manager.screen.blit(brick.image, brick.rect)
def draw_task(self, task_animal_num, which_animal, \
board_position=(400, 90), animal_position=(430, 35), txt_position=(455, 60)):
'''Draw task board'''
txt_size = 24
txt_color = (0, 0, 0)
Board(Board.task_board, board_position).draw(self.screen)
if which_animal == 6:
task_animal = Element(Element.ice, animal_position)
else:
task_animal = Element(Element.animals[which_animal], animal_position)
task_animal.image = pygame.transform.smoothscale(task_animal.image, (40, 40))
task_animal.draw(self.screen)
if which_animal == 6:
if task_animal_num-self.ice_num <= 0:
Board(Board.ok, (txt_position[0], txt_position[1]+15)).draw(self.screen)
else:
self.load_text(str(task_animal_num-self.ice_num), txt_position, txt_size, txt_color)
else:
if task_animal_num - self.animal_num[which_animal] <= 0:
Board(Board.ok, (txt_position[0], txt_position[1]+15)).draw(self.screen)
else:
self.load_text(str(task_animal_num - self.animal_num[which_animal]), txt_position, txt_size, txt_color)
    def draw(self):
        '''Draw background, animals, and so on.

        Renders the whole playing scene (background, step counter, level
        badge, pause button, bricks, ice, animals, task boards, selection
        frame and score bar) and returns the animal sprite group so the
        caller can animate it.
        '''
        # Draw background
        self.screen.blit(Manager.__bg, (0, 0))
        # Display steps left (one or two digit sprites)
        Board(Board.step_board, (0, 142)).draw(self.screen)
        tens, single = divmod(self.step, 10)
        if tens == 0:
            Board(Board.num_format%single, (790, 110)).draw(self.screen)
        else:
            Board(Board.num_format%tens, (775, 110)).draw(self.screen)
            Board(Board.num_format%single, (805, 110)).draw(self.screen)
        # Display level & pause button
        Board(Board.level_format%self.level, (30, 105)).draw(self.screen)
        Element(Element.stop, Element.stop_position).draw(self.screen)
        # Draw bricks, ice and animals
        brick_group = pygame.sprite.Group()
        animal_group = pygame.sprite.Group()
        ice_group = pygame.sprite.Group()
        for i in range(0, 21):
            for j in range(0, 21):
                x, y = Manager.rc_xy(i, j)
                # NOTE(review): a cell still marked -2 (eliminated) would index
                # Element.animals[-2] here -- presumably fall_animal always
                # refills before draw; confirm.
                if self.animal[i][j] != -1:
                    brick_group.add(Element(Element.brick, (x, y)))
                    animal_group.add(Element(Element.animals[self.animal[i][j]], (x, y)))
                if self.ice_list[i][j] != -1:
                    ice_group.add(Element(Element.ice, (x, y)))
        brick_group.draw(self.screen)
        ice_group.draw(self.screen)
        for animallist in animal_group:
            self.screen.blit(animallist.image, animallist.rect)
        # Per-level task boards (targets match judge_level's goals).
        if self.level == 1:
            self.draw_task(10, 4)
        elif self.level == 2:
            self.draw_task(21, 1)
        elif self.level == 3:
            self.draw_task(16, 4, (300, 90), (330, 35), (360, 60))
            self.draw_task(16, 5, (500, 90), (530, 35), (560, 60))
        elif self.level == 4:
            self.draw_task(18, 5, (300, 90), (330, 35), (360, 60))
            self.draw_task(18, 2, (500, 90), (530, 35), (560, 60))
        elif self.level == 5:
            self.draw_task(28, 2, (300, 90), (330, 35), (360, 60))
            self.draw_task(28, 0, (500, 90), (530, 35), (560, 60))
        elif self.level == 6:
            self.draw_task(70, 4)
        elif self.level == 7:
            self.draw_task(36, 1)
            self.draw_task(36, 2, (300, 90), (330, 35), (360, 60))
            self.draw_task(36, 0, (500, 90), (530, 35), (560, 60))
        elif self.level == 8:
            self.draw_task(15, 6)
        elif self.level == 9:
            self.draw_task(49, 6)
        else:
            self.draw_task(39, 6)
        # Display selected animal
        if self.selected != [-1, -1]:
            frame_sprite = Element(Element.frame, Manager.rc_xy(self.selected[0], self.selected[1]))
            self.screen.blit(frame_sprite.image, frame_sprite.rect)
        # Show score (bar fills at score * 2 px, outlined to 200 px = max)
        self.load_text('Score:' + str(self.score), (300, 550), 30)
        pygame.draw.rect(self.screen, (50, 150, 50, 180), Rect(300, 570, self.score * 2, 25))
        pygame.draw.rect(self.screen, (100, 200, 100, 180), Rect(300, 570, 200, 25), 2)
        return animal_group
def mouse_image(self):
'''Replace the mouse image with img/mouse.png'''
mouse_cursor = pygame.image.load('img/mouse.png').convert_alpha()
mouse_x, mouse_y = pygame.mouse.get_pos()
# Find the topleft position of the mouse
mouse_x -= mouse_cursor.get_width() / 2
mouse_y -= mouse_cursor.get_height() / 2
self.screen.blit(mouse_cursor, (mouse_x, mouse_y))
    def mouse_select(self, mousex, mousey):
        '''Handle mouse click event.

        Routes the click depending on the game state (self.type): the
        success window (1), the failure window (-1) or the playing board (0).
        '''
        if self.type == 1:  # Passed
            if Board.button_position[0][0] < mousex < Board.button_position[0][0]+100 \
                    and Board.button_position[0][1] - 50 < mousey < Board.button_position[0][1]:  # Clicked replay button
                if self.energy_num < 5:
                    self.level = 0  # Not enough energy: back to the tree
                self.reset_mode = True
            elif Board.button_position[1][0] < mousex < Board.button_position[1][0]+100 \
                    and Board.button_position[1][1]-50 < mousey < Board.button_position[1][1]:  # Clicked next level button
                if self.level < 10:
                    if self.energy_num < 5:
                        self.level = 0  # Not enough energy: back to the tree
                    else:
                        self.level += 1
                    self.reset_mode = True
            elif 610 < mousex < 610 + 55 and 205 - 55 < mousey < 205:  # x
                self.level = 0
                self.reset_mode = True
        elif self.type == -1:  # Failed
            if Board.button_position[1][0] < mousex < Board.button_position[1][0]+100 \
                    and Board.button_position[1][1]-50 < mousey < Board.button_position[1][1]:  # Clicked replay button
                if self.energy_num < 5:
                    self.level = 0  # Not enough energy: back to the tree
                self.reset_mode = True
            elif Board.button_position[0][0] < mousex < Board.button_position[0][0]+100 \
                    and Board.button_position[0][1]-50 < mousey < Board.button_position[0][1]:  # Clicked 5 more steps button
                if self.money < 5:
                    self.level = 0  # Cannot afford: back to the tree
                else:
                    # Buy 5 extra steps and resume the game.
                    self.money -= 5
                    self.step += 5
                    self.type = 0  # Playing game
                    self.fail_board = Board(Board.fail, [200, 0])
            elif 610 < mousex < 610 + 55 and 205 - 55 < mousey < 205:
                self.level = 0
                self.reset_mode = True
        elif self.type == 0:  # Playing
            if self.list_x < mousex < self.list_x + Manager.__brick_size * self.width \
                    and self.list_y < mousey < self.list_y + Manager.__brick_size * self.height:
                mouse_selected = Manager.xy_rc(mousex, mousey)
                if self.animal[mouse_selected[0]][mouse_selected[1]] != -1:
                    play_sound(Sounds.CLICK)
                    self.selected = mouse_selected
                    # Two orthogonally adjacent selections trigger a swap.
                    if (self.last_sel[0] == self.selected[0] and abs(self.last_sel[1] - self.selected[1]) == 1) \
                            or (self.last_sel[1] == self.selected[1] and abs(self.last_sel[0] - self.selected[0]) == 1):
                        self.swap_sign = 1  # Valid move, swap
            elif Element.stop_position[0] < mousex < Element.stop_position[0]+self.stop_width\
                    and Element.stop_position[1] < mousey < Element.stop_position[1]+self.stop_width:  # Exit button clicked
                play_sound(Sounds.CLICK_BUTTON)
                self.level = 0
                self.reset_mode = True
            else:
                self.selected = [-1, -1]
    def swap(self, spritegroup):
        '''Swap two adjacent selected animals, with slide animation.

        spritegroup -- the animal sprite group returned by draw().
        If the swap produces no elimination, it is swapped back and no step
        is consumed.
        '''
        if self.swap_sign == -1:  # Not swapped: remember the first selection
            self.last_sel = self.selected
        if self.swap_sign == 1:
            last_x, last_y = Manager.rc_xy(self.last_sel[0], self.last_sel[1])
            sel_x, sel_y = Manager.rc_xy(self.selected[0], self.selected[1])
            # NOTE(review): if no sprite matches either cell, last_sprite /
            # selected_sprite are unbound below -- presumed unreachable since
            # both cells hold animals; confirm.
            if self.last_sel[0] == self.selected[0]:  # Swap vertically
                for animallist in spritegroup:
                    if animallist.rect.topleft == (last_x, last_y):
                        last_sprite = animallist
                        last_sprite.speed = [self.selected[1]-self.last_sel[1], 0]
                    elif animallist.rect.topleft == (sel_x, sel_y):
                        selected_sprite = animallist
                        selected_sprite.speed = [self.last_sel[1]-self.selected[1], 0]
            else:  # Swap horizontally
                for animallist in spritegroup:
                    if animallist.rect.topleft == (last_x, last_y):
                        last_sprite = animallist
                        last_sprite.speed = [0, self.selected[0]-self.last_sel[0]]
                    elif animallist.rect.topleft == (sel_x, sel_y):
                        selected_sprite = animallist
                        selected_sprite.speed = [0, self.last_sel[0]-self.selected[0]]
            # Animate both sprites until the move completes.
            while last_sprite.speed != [0, 0]:
                delay(5)
                self.draw_brick(last_x, last_y)
                self.draw_brick(sel_x, sel_y)
                last_sprite.move(last_sprite.speed)
                selected_sprite.move(selected_sprite.speed)
                self.screen.blit(last_sprite.image, last_sprite.rect)
                self.screen.blit(selected_sprite.image, selected_sprite.rect)
                pygame.display.flip()
            self.swap_values()
            if self.eliminate_animals():
                self.step -= 1  # Only a productive swap costs a step
            else:
                self.swap_values()  # Nothing eliminated: swap back
                self.value_swapped = False
            self.boom_sel = self.selected
            self.swap_sign = -1
            self.selected = [-1, -1]
def swap_values(self):
'''Swap values.'''
(xl, yl), (xc, yc) = self.last_sel, self.selected
self.animal[xl][yl], self.animal[xc][yc] = self.animal[xc][yc], self.animal[xl][yl]
def load_text(self, text, position, txt_size, txt_color=(255, 255, 255)):
'''Display text with given position, size and color.'''
my_font = pygame.font.SysFont(None, txt_size)
text_screen = my_font.render(text, True, txt_color)
self.screen.blit(text_screen, position)
    def death_map(self):
        '''Checks if there is not a valid move.

        Scans the board for any configuration that allows a match after one
        swap; if none exists, shows the "no moves" notice and reshuffles the
        layout while preserving step/score/counters.
        '''
        for i in range(self.row, self.row + self.height):
            for j in range(self.col, self.col + self.width):
                if self.animal[i][j] != -1:
                    if self.animal[i][j] == self.animal[i][j+1]:
                        # Horizontal pair: a third match can be swapped in from
                        # either flank.
                        if (self.animal[i][j] in [self.animal[i-1][j-1], self.animal[i+1][j-1]] \
                                and self.animal[i][j-1] != -1) or \
                                (self.animal[i][j] in [self.animal[i-1][j+2], self.animal[i+1][j+2]] \
                                and self.animal[i][j+2] != -1):
                            # a  b
                            # a  a
                            # c  d
                            self.death_sign = False
                            # NOTE(review): break only leaves the inner column
                            # loop; harmless since death_sign is already False.
                            break
                    if self.animal[i][j] == self.animal[i+1][j]:
                        # Vertical pair: a third match can be swapped in from
                        # above or below.
                        if (self.animal[i][j] in [self.animal[i-1][j-1], self.animal[i-1][j+1]] \
                                and self.animal[i-1][j] != -1) or \
                                (self.animal[i][j] in [self.animal[i+2][j - 1], self.animal[i+2][j + 1]] \
                                and self.animal[i+2][j] != -1):
                            # a  b
                            #   a
                            #   a
                            # c  d
                            self.death_sign = False
                            break
                    else:
                        # No vertical pair: look for diagonal "gap" patterns.
                        if self.animal[i-1][j-1] == self.animal[i][j]:
                            if (self.animal[i][j] == self.animal[i-1][j+1] and self.animal[i-1][j] != -1)\
                                    or (self.animal[i][j] == self.animal[i+1][j-1] and self.animal[i][j-1] != -1):
                                # a a  a b
                                #       a a
                                # c a
                                self.death_sign = False
                                break
                        if self.animal[i][j] == self.animal[i+1][j+1]:
                            if (self.animal[i][j] == self.animal[i-1][j+1] and self.animal[i][j+1] != -1)\
                                    or (self.animal[i][j] == self.animal[i+1][j-1] and self.animal[i+1][j] != -1):
                                # a b
                                # a  a
                                # b a  a a
                                self.death_sign = False
                                break
        if self.death_sign:
            # No move exists: show the notice and reshuffle, keeping progress.
            delay(500)
            Element(Element.none_animal, (230, 150)).draw(self.screen)
            pygame.display.flip()
            delay(500)
            temp = [self.step, self.score, self.animal_num, self.ice_num, self.energy_num]
            self.reset_mode = True
            self.set_level_mode(self.level)
            self.step = temp[0]
            self.score = temp[1]
            self.animal_num = temp[2]
            self.ice_num = temp[3]
            self.energy_num = temp[4]
        else:
            self.death_sign = True
# TODO: Merge 4 functions below
def exists_left(self, i, j, num):
'''Checks there are at least {num} continous same animals on the left side of (i, j).'''
for t in range(0, num):
if self.animal[i][j] != self.animal[i][j - t] or self.animal[i][j] < 0:
return False
return True
def exists_right(self, i, j, num):
'''Checks there are at least {num} continous same animals on the right side of (i, j).'''
for t in range(0, num):
if self.animal[i][j] != self.animal[i][j + t] or self.animal[i][j] < 0:
return False
return True
def exists_up(self, i, j, num):
'''Checks there are at least {num} continous same animals above (i, j).'''
for t in range(0, num):
if self.animal[i][j] != self.animal[i - t][j] or self.animal[i][j] < 0:
return False
return True
def exists_down(self, i, j, num):
'''Checks there are at least {num} continous same animals below (i, j).'''
for t in range(0, num):
if self.animal[i][j] != self.animal[i + t][j] or self.animal[i][j] < 0:
return False
return True
# TODO: Merge 4 functions below
def change_left(self, i, j, num):
'''Change the left side of the animal.'''
self.value_swapped = True
self.score += num
for k in range(0, int(num)):
self.animal[i][j - k] = -2
def change_right(self, i, j, num):
'''Change the right side of the animal.'''
self.value_swapped = True
self.score += num
for k in range(0, num):
self.animal[i][j + k] = -2
def change_up(self, i, j, num):
'''Change above the animal.'''
self.value_swapped = True
self.score += num
for k in range(0, num):
self.animal[i-k][j] = -2
def change_down(self, i, j, num):
'''Change below the animal.'''
self.value_swapped = True
self.score += num
for k in range(0, num):
self.animal[i+k][j] = -2
    def eliminate_animals(self):
        '''Eliminate the animals.

        Scans the board for runs of 3-5 identical animals (horizontal and
        vertical, including L/T/cross shapes), marks them eliminated (-2),
        updates the per-animal counters and score, triggers the fall
        animation, and finally shows/speaks a rating for the points gained
        in this pass.  Returns True if anything was eliminated.
        '''
        score_level = self.score  # Remember the score to rate this pass later
        self.value_swapped = False
        for i in range(self.row, self.row + self.height):
            for j in range(self.col, self.col + self.width):
                # Longest horizontal runs are checked first (5, 4, 3), then
                # vertical ones, each with its possible perpendicular arm.
                # TODO: Simplify the if statement below
                if self.exists_right(i, j, 5):
                    self.value_swapped = True
                    if self.exists_down(i, j+2, 3):
                        self.animal_num[self.animal[i][j]] += 7
                        Sounds.eliminate(5)  # Elimination sound 5
                        self.change_right(i, j, 5)
                        self.change_down(i, j+2, 3)
                    else:
                        self.animal_num[self.animal[i][j]] += 5
                        Sounds.eliminate(3)  # Elimination sound 3
                        self.change_right(i, j, 5)
                elif self.exists_right(i, j, 4):
                    self.value_swapped = True
                    if self.exists_down(i, j+1, 3):
                        self.animal_num[self.animal[i][j]] += 6
                        Sounds.eliminate(4)  # Elimination sound 4
                        self.change_right(i, j, 4)
                        self.change_down(i, j+1, 3)
                    elif self.exists_down(i, j+2, 3):
                        self.animal_num[self.animal[i][j]] += 6
                        Sounds.eliminate(4)  # Elimination sound 4
                        self.change_right(i, j, 4)
                        self.change_down(i, j+2, 3)
                    else:
                        self.animal_num[self.animal[i][j]] += 4
                        Sounds.eliminate(2)  # Elimination sound 2
                        self.change_right(i, j, 4)
                elif self.exists_right(i, j, 3):
                    self.value_swapped = True
                    if self.exists_down(i, j, 3):
                        self.animal_num[self.animal[i][j]] += 5
                        Sounds.eliminate(3)  # Elimination sound 3
                        self.change_right(i, j, 3)
                        self.change_down(i, j, 3)
                    elif self.exists_down(i, j+1, 3):
                        self.animal_num[self.animal[i][j]] += 5
                        Sounds.eliminate(3)  # Elimination sound 3
                        self.change_right(i, j, 3)
                        self.change_down(i, j+1, 3)
                    elif self.exists_down(i, j+2, 3):
                        self.animal_num[self.animal[i][j]] += 5
                        Sounds.eliminate(3)  # Elimination sound 3
                        self.change_right(i, j, 3)
                        self.change_down(i, j + 2, 3)
                    else:
                        self.animal_num[self.animal[i][j]] += 3
                        Sounds.eliminate(1)  # Elimination sound 1
                        self.change_right(i, j, 3)
                elif self.exists_down(i, j, 5):
                    self.value_swapped = True
                    if self.exists_right(i+2, j, 3):
                        self.animal_num[self.animal[i][j]] += 7
                        Sounds.eliminate(5)  # Elimination sound 5
                        self.change_down(i, j, 5)
                        self.change_right(i+2, j, 3)
                    elif self.exists_left(i+2, j, 3):
                        self.animal_num[self.animal[i][j]] += 7
                        Sounds.eliminate(5)  # Elimination sound 5
                        self.change_down(i, j, 5)
                        self.change_left(i+2, j, 3)
                    else:
                        self.animal_num[self.animal[i][j]] += 5
                        Sounds.eliminate(3)  # Elimination sound 3
                        self.change_down(i, j, 5)
                elif self.exists_down(i, j, 4):
                    self.value_swapped = True
                    if self.exists_left(i+1, j, 3):
                        self.animal_num[self.animal[i][j]] += 6
                        Sounds.eliminate(4)  # Elimination sound 4
                        self.change_down(i, j, 4)
                        self.change_left(i+1, j, 3)
                    elif self.exists_right(i+1, j, 3):
                        self.animal_num[self.animal[i][j]] += 6
                        Sounds.eliminate(4)  # Elimination sound 4
                        self.change_down(i, j, 4)
                        self.change_right(i+1, j, 3)
                    elif self.exists_left(i+2, j, 3):
                        self.animal_num[self.animal[i][j]] += 6
                        Sounds.eliminate(4)  # Elimination sound 4
                        self.change_down(i, j, 4)
                        self.change_left(i+2, j, 3)
                    elif self.exists_right(i+2, j, 3):
                        self.animal_num[self.animal[i][j]] += 6
                        Sounds.eliminate(4)  # Elimination sound 4
                        self.change_down(i, j, 4)
                        self.change_right(i+2, j, 3)
                    else:
                        self.animal_num[self.animal[i][j]] += 4
                        Sounds.eliminate(2)  # Elimination sound 2
                        self.change_down(i, j, 4)
                elif self.exists_down(i, j, 3):
                    self.value_swapped = True
                    if self.exists_left(i+1, j, 3):
                        self.animal_num[self.animal[i][j]] += 5
                        Sounds.eliminate(3)  # Elimination sound 3
                        self.change_down(i, j, 3)
                        self.change_left(i+1, j, 3)
                    elif self.exists_right(i+1, j, 3):
                        self.animal_num[self.animal[i][j]] += 5
                        Sounds.eliminate(3)  # Elimination sound 3
                        self.change_down(i, j, 3)
                        self.change_right(i+1, j, 3)
                    elif self.exists_left(i+2, j, 3):
                        self.animal_num[self.animal[i][j]] += 5
                        Sounds.eliminate(3)  # Elimination sound 3
                        self.change_down(i, j, 3)
                        self.change_left(i+2, j, 3)
                    elif self.exists_right(i+2, j, 3):
                        self.animal_num[self.animal[i][j]] += 5
                        Sounds.eliminate(3)  # Elimination sound 3
                        self.change_down(i, j, 3)
                        self.change_right(i+2, j, 3)
                    elif self.exists_left(i+2, j, 2) and self.exists_right(i+2, j, 2):
                        # T-shape with short arms at the bottom.
                        self.animal_num[self.animal[i][j]] += 5
                        Sounds.eliminate(3)  # Elimination sound 3
                        self.change_down(i, j, 3)
                        self.change_left(i+2, j, 2)
                        self.change_right(i+2, j, 2)
                    elif self.exists_left(i+2, j, 2) and self.exists_right(i+2, j, 3):
                        self.animal_num[self.animal[i][j]] += 6
                        Sounds.eliminate(4)  # Elimination sound 4
                        self.change_down(i, j, 3)
                        self.change_left(i+2, j, 2)
                        self.change_right(i+2, j, 3)
                    elif self.exists_left(i+2, j, 3) and self.exists_right(i+2, j, 2):
                        self.animal_num[self.animal[i][j]] += 6
                        Sounds.eliminate(4)  # Elimination sound 4
                        self.change_down(i, j, 3)
                        self.change_left(i+2, j, 3)
                        self.change_right(i+2, j, 2)
                    elif self.exists_left(i+2, j, 3) and self.exists_right(i+2, j, 3):
                        self.animal_num[self.animal[i][j]] += 7
                        Sounds.eliminate(5)  # Elimination sound 5
                        self.change_down(i, j, 3)
                        self.change_left(i+2, j, 3)
                        self.change_right(i+2, j, 3)
                    else:
                        self.animal_num[self.animal[i][j]] += 3
                        Sounds.eliminate(1)  # Elimination sound 1
                        self.change_down(i, j, 3)
        self.fall_animal()
        score_level = self.score - score_level  # Score gained in this pass
        # Display & speak: good, great, amazing, excellent, unbelievable
        if score_level < 5:
            return self.value_swapped
        if score_level < 8:  # 5 good
            Sounds.score_level(0)
            Element(Element.score_level[0], (350, 250)).draw(self.screen)
            pygame.display.flip()
            delay(500)
        elif score_level < 10:  # 8 great
            Sounds.score_level(1)
            Element(Element.score_level[1], (350, 250)).draw(self.screen)
            pygame.display.flip()
            delay(500)
        elif score_level < 15:  # 10 amazing
            Sounds.score_level(2)
            Element(Element.score_level[2], (350, 250)).draw(self.screen)
            pygame.display.flip()
            delay(500)
        elif score_level < 20:  # 15 excellent
            Sounds.score_level(3)
            Element(Element.score_level[3], (350, 250)).draw(self.screen)
            pygame.display.flip()
            delay(500)
        elif score_level >= 20:  # 20 unbelievable
            Sounds.score_level(4)
            Element(Element.score_level[4], (350, 250)).draw(self.screen)
            pygame.display.flip()
            delay(500)
        return self.value_swapped  # Return the swap value sign
    def fall_animal(self):  # pylint: disable=too-many-locals
        '''Animation of falling animals.

        First plays the elimination "bling" (and ice-break) frames on every
        cell marked -2, then, row by row, lets the animals above fall down to
        fill the gaps, refilling from the top with random animals.
        '''
        clock = pygame.time.Clock()
        position = []
        ice_position = []
        # Collect the screen positions of all eliminated cells (and which of
        # them carry ice).
        for i in range(self.row, self.row + self.height):
            for j in range(self.col, self.col + self.width):
                if self.animal[i][j] == -2:
                    x, y = self.rc_xy(i, j)
                    position.append((x, y))
                    if self.ice_list[i][j] == 1:
                        ice_position.append((x, y))
        if position:
            for index in range(0, 9):  # 9 animation frames at 20 fps
                clock.tick(20)
                for pos in position:
                    self.draw_brick(pos[0], pos[1])
                    if pos in ice_position:
                        Element(Element.ice_format%index, (pos[0], pos[1])).draw(self.screen)
                    Element(Element.bling_format%index, (pos[0], pos[1])).draw(self.screen)
                pygame.display.flip()
        for i in range(self.row, self.row + self.height):
            brick_position = []
            fall_animal_list = []
            speed = [0, 1]
            for j in range(self.col, self.col + self.width):
                if self.animal[i][j] == -2:
                    x, y = self.rc_xy(i, j)
                    if self.ice_list[i][j] == 1:
                        # Breaking ice counts toward the ice-level goals.
                        play_sound(Sounds.ICE_BREAKING)
                        self.ice_num += 1
                        self.ice_list[i][j] = -1
                    brick_position.append((x, y))
                    # Shift every animal above this gap down one row; the top
                    # of the column is refilled with a random animal.
                    for m in range(i, self.row - 1, -1):
                        if self.animal[m - 1][j] != -1:
                            x, y = self.rc_xy(m - 1, j)
                            brick_position.append((x, y))
                            animal = Element(Element.animals[self.animal[m - 1][j]], (x, y))
                            fall_animal_list.append(animal)
                            self.animal[m][j] = self.animal[m - 1][j]
                        else:
                            self.animal[m][j] = randint(0, 5)
                            break
            # NOTE(review): `speed` is re-read from the LAST sprite each frame,
            # and the loop variable shadows the outer `position` list -- both
            # look intentional-but-fragile; confirm before touching.
            while speed != [0, 0] and fall_animal_list:
                for position in brick_position:
                    self.draw_brick(position[0], position[1])
                for animal_sprite in fall_animal_list:
                    animal_sprite.move(speed)
                    animal_sprite.draw(self.screen)
                    speed = animal_sprite.speed
                pygame.display.flip()
def judge_next(self, tp, score):
'''Check whether the next level is reached or not'''
if tp == 1: # Passed
self.load_fns_window(score)
elif tp == -1: # Failed
self.load_fail_window()
def load_fail_window(self):
'''Display the failure board and buttons'''
sound_sign = 0
step_add = Board(Board.step_add, Board.button_position[0]) # L: 5 more steps
retry = Board(Board.replay, Board.button_position[1]) # R: Replay
self.screen.blit(self.fail_board.image, self.fail_board.rect) # Failure board
self.screen.blit(step_add.image, step_add.rect)
self.screen.blit(retry.image, retry.rect)
while self.fail_board.speed != [0, 0]:
self.draw()
self.screen.blit(self.fail_board.image, self.fail_board.rect)
self.fail_board.move()
pygame.display.flip()
if sound_sign == 0:
play_sound(Sounds.BOARD_SOUND)
sound_sign = 1
def load_fns_window(self, score):
'''Display the success board, score and buttons'''
sound_sign = 0
replay = Board(Board.replay, Board.button_position[0]) # L: Replay
self.screen.blit(self.success_board.image, self.success_board.rect) # Successful board
if self.level < 10: # If not the last level
next_level = Board(Board.next, Board.button_position[1]) # R: Next level
self.screen.blit(next_level.image, next_level.rect)
self.screen.blit(replay.image, replay.rect)
while self.success_board.speed != [0, 0]:
self.draw()
self.screen.blit(self.success_board.image, self.success_board.rect)
self.success_board.move()
pygame.display.flip()
if sound_sign == 0:
play_sound(Sounds.BOARD_SOUND)
sound_sign = 1
self.displayStars(score) # Display the stars
# Money
self.load_text(str(self.score*2), (Board.starts_position[0][0]+75, Board.starts_position[0][0]+46), 20, (0, 0, 0))
    def displayStars(self, score):
        '''Display the star rating earned for *score*.

        score < self.min              -> 1 star
        self.min <= score <= self.max -> 2 stars
        score > self.max              -> 3 stars
        '''
        star1 = Board(Board.stars, Board.starts_position[0])
        star2 = Board(Board.stars, Board.starts_position[1])
        star3 = Board(Board.stars, Board.starts_position[2])
        if 0 <= score < self.min:
            self.load_text('1', (Board.starts_position[1][0]+48, Board.starts_position[1][1]+35), 20, (0, 0, 0))
            self.screen.blit(star1.image, star1.rect)
        elif self.min <= score <= self.max:
            self.load_text('2', (Board.starts_position[1][0] + 48, Board.starts_position[1][1] + 35), 20, (0, 0, 0))
            self.screen.blit(star1.image, star1.rect)
            self.screen.blit(star2.image, star2.rect)
        elif score > self.max:
            # NOTE(review): the label says '5' although three stars are drawn;
            # '3' may have been intended -- confirm against the game design.
            self.load_text('5', (Board.starts_position[1][0] + 48, Board.starts_position[1][1] + 35), 20, (0, 0, 0))
            self.screen.blit(star1.image, star1.rect)
            self.screen.blit(star2.image, star2.rect)
            self.screen.blit(star3.image, star3.rect)
        pygame.display.flip()
def set_level_mode(self, level):
'''Set the level mode and its steps.'''
self.level = level
if self.reset_mode: # If it is required to reset the mode
self.num_sign = True
if level == 1:
self.__init__(7, 7)
self.animal[7][9] = self.animal[7][10] = self.animal[7][11] = self.animal[8][10] = self.animal[11][7] = \
self.animal[11][13] = self.animal[12][7] = self.animal[12][8] = self.animal[12][12] = self.animal[12][13] = \
self.animal[13][7] = self.animal[13][8] = self.animal[13][9] = self.animal[13][11] = self.animal[13][12] = \
self.animal[13][13] = -1
self.init_step = 17 # 17 initial steps
elif level == 2:
self.__init__(4, 8)
self.init_step = 16 # 16 initial steps
elif level == 3:
self.__init__(7, 7)
self.init_step = 18 # 18 initial steps
elif level == 4:
self.__init__(9, 7)
row, col = self.row, self.col
self.animal[row][col] = self.animal[row][col+7] = self.animal[row][col+8] = self.animal[row+1][col+8] = \
self.animal[row+5][col] = self.animal[row+6][col] = self.animal[row+6][col+1] = self.animal[row+6][col+8] = -1
self.init_step = 20
elif level == 5:
self.__init__(8, 9)
row, col = self.row, self.col
self.animal[row][col+7] = self.animal[row+2][col] = self.animal[row+5][col] = self.animal[row+3][col+7] = \
self.animal[row+6][col+7] = self.animal[row+8][col] = -1
self.init_step = 20
elif level == 6:
self.__init__(9, 9)
row, col = self.row, self.col
self.animal[row][col] = self.animal[row][col+8] = self.animal[row+2][col+4] = self.animal[row+3][col+2] = \
self.animal[row+3][col+6] = self.animal[row+8][col] = self.animal[row+8][col+8] = -1
for i in range(row+4, row+6):
for j in range(col+3, col+6):
self.animal[i][j] = -1
self.init_step = 28
elif level == 7:
self.__init__(9, 9)
row, col = self.row, self.col
for i in range(row, row + 9):
self.animal[i][col+4] = -1
for j in range(col, col+4):
self.animal[row+3][j] = -1
for j in range(col+5, col+9):
self.animal[row+5][j] = -1
self.init_step = 25
elif level == 8:
self.__init__(7, 8)
row, col = self.row, self.col
for i in range(row+2, row+5):
for j in range(col+1, col+6):
self.ice_list[i][j] = 1
self.init_step = 21
elif level == 9:
self.__init__(9, 9)
row, col = self.row, self.col
self.animal[row][col+4] = self.animal[row+4][col] = self.animal[row+4][col+8] = self.animal[row+8][col+4] = -1
for i in range(row+1, row+8):
for j in range(col+1, col+8):
self.ice_list[i][j] = 1
self.init_step = 35
else:
self.__init__(9, 9)
row, col = self.row, self.col
for i in range(row, row+2):
for j in range(col, col+9):
self.animal[i][j] = -1
self.animal[row][col+4] = randint(0, 5)
self.animal[row+1][col+2] = randint(0, 5)
self.animal[row+1][col+4] = randint(0, 5)
self.animal[row+1][col+6] = randint(0, 5)
self.animal[row+2][col+1] = self.animal[row+3][col+1] = self.animal[row+2][col+3] = self.animal[row+3][col+3] =\
self.animal[row+2][col+5] = self.animal[row+3][col+5] = self.animal[row+2][col+7] = \
self.animal[row+3][col+7] = self.animal[row+8][col] = self.animal[row+8][col+8] = -1
for i in range(row+4, row+8):
for j in range(col, col+9):
self.ice_list[i][j] = 1
self.ice_list[row+2][col+4] = self.ice_list[row+3][col+2] = self.ice_list[row+3][col+4] = \
self.ice_list[row+3][col+6] = 1
self.init_step = 40
self.type = 0
self.energy_num -= 5
self.success_board = Board(Board.success, [200, 0]) # Success board
self.fail_board = Board(Board.fail, [200, 0]) # Failure board
self.step = self.init_step
self.score = 0
self.animal_num = [0, 0, 0, 0, 0, 0]
self.ice_num = 0
self.reset_mode = False
def num_add(self):
'''Add to score'''
if self.num_sign:
self.money += self.score * 2
if self.score < self.min:
self.energy_num += 1
elif self.score < self.max:
self.energy_num += 2
else:
self.energy_num += 5
self.num_sign = False
def judge_level(self):
'''Check whether the level was passed'''
if self.step <= 0:
self.type = -1 # Game over
if self.level == 1:
if self.animal_num[4] >= 10: # L1: 10 frogs
self.type = 1 # Level 1 passed
self.num_add()
elif self.level == 2:
if self.animal_num[1] >= 21: # L2: 21 bears
self.type = 1 # Level 2 passed
self.num_add()
elif self.level == 3:
if self.animal_num[4] >= 16 and self.animal_num[5] >= 16: # L3: 16 frogs and 16 cows
self.type = 1 # Level 3 passed
self.num_add()
elif self.level == 4:
if self.animal_num[5] >= 18 and self.animal_num[2] >= 18: # L4: 18 cows and 18 chicks
self.type = 1 # Level 4 passed
self.num_add()
elif self.level == 5:
if self.animal_num[2] >= 28 and self.animal_num[0] >= 28: # L5: 28 chicks and 28 foxes
self.type = 1 # Level 5 passed
self.num_add()
elif self.level == 6:
if self.animal_num[4] >= 70: # L6: 70 frogs
self.type = 1 # Level 6 passed
self.num_add()
elif self.level == 7:
if self.animal_num[2] >= 36 and self.animal_num[1] >= 36 and self.animal_num[0] >= 36: # L7: 36 chickens, 36 bears and 36 foxes
self.type = 1 # Level 7 passed
self.num_add()
elif self.level == 8:
if self.ice_num >= 15: # L8: 15 ice
self.type = 1 # Level 8 passed
self.num_add()
elif self.level == 9:
if self.ice_num >= 49: # L9: 49 ice
self.type = 1 # Level 9 passed
self.num_add()
else:
if self.ice_num >= 39: # L10: 39 ice
self.type = 1 # Level 10 passed
self.num_add()
self.judge_next(self.type, self.score) | 0.406391 | 0.169784 |
import types
from typing import Dict, Sequence
from pysaurus.core.functions import is_valid_attribute_name
from pysaurus.core.override import Override
__fn_types__ = (
types.FunctionType,
types.MethodType,
types.BuiltinMethodType,
types.BuiltinFunctionType,
types.ClassMethodDescriptorType,
classmethod,
)
def is_attribute(key, value):
return is_valid_attribute_name(key) and not isinstance(value, __fn_types__)
class _Checker:
__slots__ = ("default",)
__init__ = Override("_Checker.__init__")
@__init__.override
def __init__(self):
self.default = ()
@__init__.override
def __init__(self, value: object):
self.default = None if value is None else (value,)
__call__ = Override("_Checker.__call__")
@__call__.override
def __call__(self):
return None if self.default is None else self.validate(*self.default)
@__call__.override
def __call__(self, value: object):
return None if value is self.default is None else self.validate(value)
def __str__(self):
return f"${type(self).__name__}" f"({', '.join(str(d) for d in self.default)})"
__repr__ = __str__
validate = Override("_Checker.validate")
@validate.override
def validate(self):
raise NotImplementedError()
@validate.override
def validate(self, value: object):
raise NotImplementedError()
def to_dict(self, value):
return value
class _ClassChecker(_Checker):
__slots__ = ("cls",)
def __init__(self, cls, *args):
assert isinstance(cls, type)
super().__init__(*args)
self.cls = cls
@_Checker.validate.override
def validate(self):
return self.cls()
@_Checker.validate.override
def validate(self, value: object):
return value if isinstance(value, self.cls) else self.cls(value)
class _JsonableChecker(_Checker):
__slots__ = ("cls",)
def __init__(self, cls, *args):
assert issubclass(cls, Jsonable)
if args:
(default,) = args
if isinstance(default, cls):
default = default.to_dict()
else:
assert isinstance(default, dict) or default is None
else:
default = {}
super().__init__(default)
self.cls = cls
@_Checker.validate.override
def validate(self, value: object):
return value if isinstance(value, self.cls) else self.cls.from_dict(value)
def to_dict(self, value):
return value.to_dict()
def _get_checker(cls, *args):
if issubclass(cls, Jsonable):
return _JsonableChecker(cls, *args)
else:
return _ClassChecker(cls, *args)
class ShortFunctor:
__slots__ = ("__to_short", "__to_long")
def __init__(self, fields: Sequence[str], long_to_short: Dict[str, str]):
assert len(fields) == len(long_to_short)
assert all(field in long_to_short for field in fields)
self.__to_short = long_to_short
self.__to_long = {short: long for long, short in long_to_short.items()}
def to_short(self, dct_long_keys: dict):
return {self.__to_short[key]: value for key, value in dct_long_keys.items()}
def from_short(self, dct_short_keys: dict):
return {self.__to_long[short]: value for short, value in dct_short_keys.items()}
class NoShortFunctor:
__slots__ = ()
@classmethod
def to_short(cls, dct):
return dct
@classmethod
def from_short(cls, dct):
return dct
def get_bases(bases: tuple):
if not bases:
return ()
assert len(bases) == 1
all_bases = bases[0].__mro__
assert all_bases[-1] is object
assert all_bases[-2] is Jsonable
return all_bases[:-2]
def gen_get(namespace: dict, key: str):
name_getter = f"get_{key}"
if name_getter in namespace:
return namespace.pop(name_getter)
def getter(self):
return self.__json__[key]
getter.__name__ = name_getter
return getter
def gen_set(namespace: dict, key: str):
name_setter = f"set_{key}"
if name_setter in namespace:
return namespace.pop(name_setter)
def setter(self, value):
self.__json__[key] = value
setter.__name__ = name_setter
return setter
class _MetaJSON(type):
__slots__ = ()
def __new__(cls, name, bases, namespace):
assert "__definitions__" not in namespace, "Reserved attribute: __definitions__"
annotations = namespace.get("__annotations__", {})
attributes = {
key: value for key, value in namespace.items() if is_attribute(key, value)
}
original_attributes = list(attributes)
definitions = {}
for base in get_bases(bases):
definitions.update(base.__definitions__)
for key, value in attributes.items():
if isinstance(value, _Checker):
assert key not in annotations
definitions[key] = value
elif key in annotations:
annotation = annotations[key]
assert isinstance(annotation, type)
definitions[key] = _get_checker(annotation, value)
else:
definitions[key] = _get_checker(type(value), value)
for key, annotation in annotations.items():
if key not in definitions:
original_attributes.append(key)
assert isinstance(annotation, type)
definitions[key] = _get_checker(annotation)
short = namespace.get("__short__", {})
shortener = (
ShortFunctor(tuple(definitions), short) if short else NoShortFunctor()
)
namespace["__definitions__"] = {
key: definitions[key] for key in sorted(definitions)
}
namespace["__shortener__"] = shortener
for key in original_attributes:
namespace[key] = property(gen_get(namespace, key), gen_set(namespace, key))
return type.__new__(cls, name, bases, namespace)
class Jsonable(metaclass=_MetaJSON):
__slots__ = ("__json__",)
def __init__(self, **kwargs):
self.__json__ = {}
for key, checker in self.__definitions__.items():
if key in kwargs:
value = checker(kwargs.pop(key))
else:
value = checker()
self.__json__[key] = value
assert not kwargs, f"{type(self).__name__}: unknown keys: {tuple(kwargs)}"
def __bool__(self):
return True
def __len__(self):
return len(self.__json__)
def __iter__(self):
return iter(self.__json__.items())
def __hash__(self):
return hash(tuple(self))
def __eq__(self, other):
return type(self) is type(other) and all(a == b for a, b in zip(self, other))
def __str__(self):
fields = ", ".join(
f"{key}={repr(value) if isinstance(value, str) else value}"
for key, value in self
)
return f"{type(self).__name__}({fields})"
__repr__ = __str__
def update(self, dct: dict):
assert isinstance(dct, dict)
for key, checker in self.__definitions__.items():
if key in dct:
self.__json__[key] = checker(dct[key])
def to_json(self):
return self.__json__
@classmethod
def from_json(cls, dct):
assert isinstance(dct, dict)
return cls(**dct)
def to_dict(self):
return self.__shortener__.to_short(
{key: self.__definitions__[key].to_dict(value) for key, value in self}
)
@classmethod
def from_dict(cls, dct):
assert isinstance(dct, dict)
return cls(**cls.__shortener__.from_short(dct)) | pysaurus/core/jsonable.py | import types
from typing import Dict, Sequence
from pysaurus.core.functions import is_valid_attribute_name
from pysaurus.core.override import Override
__fn_types__ = (
types.FunctionType,
types.MethodType,
types.BuiltinMethodType,
types.BuiltinFunctionType,
types.ClassMethodDescriptorType,
classmethod,
)
def is_attribute(key, value):
return is_valid_attribute_name(key) and not isinstance(value, __fn_types__)
class _Checker:
__slots__ = ("default",)
__init__ = Override("_Checker.__init__")
@__init__.override
def __init__(self):
self.default = ()
@__init__.override
def __init__(self, value: object):
self.default = None if value is None else (value,)
__call__ = Override("_Checker.__call__")
@__call__.override
def __call__(self):
return None if self.default is None else self.validate(*self.default)
@__call__.override
def __call__(self, value: object):
return None if value is self.default is None else self.validate(value)
def __str__(self):
return f"${type(self).__name__}" f"({', '.join(str(d) for d in self.default)})"
__repr__ = __str__
validate = Override("_Checker.validate")
@validate.override
def validate(self):
raise NotImplementedError()
@validate.override
def validate(self, value: object):
raise NotImplementedError()
def to_dict(self, value):
return value
class _ClassChecker(_Checker):
__slots__ = ("cls",)
def __init__(self, cls, *args):
assert isinstance(cls, type)
super().__init__(*args)
self.cls = cls
@_Checker.validate.override
def validate(self):
return self.cls()
@_Checker.validate.override
def validate(self, value: object):
return value if isinstance(value, self.cls) else self.cls(value)
class _JsonableChecker(_Checker):
__slots__ = ("cls",)
def __init__(self, cls, *args):
assert issubclass(cls, Jsonable)
if args:
(default,) = args
if isinstance(default, cls):
default = default.to_dict()
else:
assert isinstance(default, dict) or default is None
else:
default = {}
super().__init__(default)
self.cls = cls
@_Checker.validate.override
def validate(self, value: object):
return value if isinstance(value, self.cls) else self.cls.from_dict(value)
def to_dict(self, value):
return value.to_dict()
def _get_checker(cls, *args):
if issubclass(cls, Jsonable):
return _JsonableChecker(cls, *args)
else:
return _ClassChecker(cls, *args)
class ShortFunctor:
__slots__ = ("__to_short", "__to_long")
def __init__(self, fields: Sequence[str], long_to_short: Dict[str, str]):
assert len(fields) == len(long_to_short)
assert all(field in long_to_short for field in fields)
self.__to_short = long_to_short
self.__to_long = {short: long for long, short in long_to_short.items()}
def to_short(self, dct_long_keys: dict):
return {self.__to_short[key]: value for key, value in dct_long_keys.items()}
def from_short(self, dct_short_keys: dict):
return {self.__to_long[short]: value for short, value in dct_short_keys.items()}
class NoShortFunctor:
__slots__ = ()
@classmethod
def to_short(cls, dct):
return dct
@classmethod
def from_short(cls, dct):
return dct
def get_bases(bases: tuple):
if not bases:
return ()
assert len(bases) == 1
all_bases = bases[0].__mro__
assert all_bases[-1] is object
assert all_bases[-2] is Jsonable
return all_bases[:-2]
def gen_get(namespace: dict, key: str):
name_getter = f"get_{key}"
if name_getter in namespace:
return namespace.pop(name_getter)
def getter(self):
return self.__json__[key]
getter.__name__ = name_getter
return getter
def gen_set(namespace: dict, key: str):
name_setter = f"set_{key}"
if name_setter in namespace:
return namespace.pop(name_setter)
def setter(self, value):
self.__json__[key] = value
setter.__name__ = name_setter
return setter
class _MetaJSON(type):
__slots__ = ()
def __new__(cls, name, bases, namespace):
assert "__definitions__" not in namespace, "Reserved attribute: __definitions__"
annotations = namespace.get("__annotations__", {})
attributes = {
key: value for key, value in namespace.items() if is_attribute(key, value)
}
original_attributes = list(attributes)
definitions = {}
for base in get_bases(bases):
definitions.update(base.__definitions__)
for key, value in attributes.items():
if isinstance(value, _Checker):
assert key not in annotations
definitions[key] = value
elif key in annotations:
annotation = annotations[key]
assert isinstance(annotation, type)
definitions[key] = _get_checker(annotation, value)
else:
definitions[key] = _get_checker(type(value), value)
for key, annotation in annotations.items():
if key not in definitions:
original_attributes.append(key)
assert isinstance(annotation, type)
definitions[key] = _get_checker(annotation)
short = namespace.get("__short__", {})
shortener = (
ShortFunctor(tuple(definitions), short) if short else NoShortFunctor()
)
namespace["__definitions__"] = {
key: definitions[key] for key in sorted(definitions)
}
namespace["__shortener__"] = shortener
for key in original_attributes:
namespace[key] = property(gen_get(namespace, key), gen_set(namespace, key))
return type.__new__(cls, name, bases, namespace)
class Jsonable(metaclass=_MetaJSON):
__slots__ = ("__json__",)
def __init__(self, **kwargs):
self.__json__ = {}
for key, checker in self.__definitions__.items():
if key in kwargs:
value = checker(kwargs.pop(key))
else:
value = checker()
self.__json__[key] = value
assert not kwargs, f"{type(self).__name__}: unknown keys: {tuple(kwargs)}"
def __bool__(self):
return True
def __len__(self):
return len(self.__json__)
def __iter__(self):
return iter(self.__json__.items())
def __hash__(self):
return hash(tuple(self))
def __eq__(self, other):
return type(self) is type(other) and all(a == b for a, b in zip(self, other))
def __str__(self):
fields = ", ".join(
f"{key}={repr(value) if isinstance(value, str) else value}"
for key, value in self
)
return f"{type(self).__name__}({fields})"
__repr__ = __str__
def update(self, dct: dict):
assert isinstance(dct, dict)
for key, checker in self.__definitions__.items():
if key in dct:
self.__json__[key] = checker(dct[key])
def to_json(self):
return self.__json__
@classmethod
def from_json(cls, dct):
assert isinstance(dct, dict)
return cls(**dct)
def to_dict(self):
return self.__shortener__.to_short(
{key: self.__definitions__[key].to_dict(value) for key, value in self}
)
@classmethod
def from_dict(cls, dct):
assert isinstance(dct, dict)
return cls(**cls.__shortener__.from_short(dct)) | 0.84241 | 0.24655 |
import xml.etree.ElementTree as ET
from collections import defaultdict
def _parse_node_names_single_file(filename: str) -> dict:
node_names = {}
tree = ET.parse(filename)
root = tree.getroot()
for node in root.findall("./graph/node"):
data = node.find("./data[@key='d0']")
node_id = node.attrib["id"]
node_name = data.text
node_names[node_id] = node_name
return node_names
def parse_node_names(graphml_filenames: list) -> dict:
node_names = {}
for filename in graphml_filenames:
node_names_single = _parse_node_names_single_file(filename)
node_names = {**node_names, **node_names_single}
return node_names
def _find_thread_count(root: ET.Element) -> int:
for attribute in root.findall("./description/attribute"):
if attribute.attrib["name"] == "Thread":
return len(list(attribute))
raise ValueError("No Thread attribute found")
def parse_traceml(filename: str, node_names: dict) -> dict:
task_durations = defaultdict(list)
tree = ET.parse(filename)
root = tree.getroot()
thread_count = _find_thread_count(root)
thread_current_task = [[]] * thread_count
for node in root:
if not "tid" in node.attrib:
continue
thread_id = int(node.attrib["tid"]) - 1
if node.tag == "task_begin":
task_id = node.attrib["id"]
start_timestamp = int(node.attrib["ts"])
thread_current_task[thread_id].append((task_id, start_timestamp))
elif node.tag == "task_end":
assert len(thread_current_task[thread_id]) > 0
end_timestamp = int(node.attrib["ts"])
task_id, start_timestamp = thread_current_task[thread_id].pop()
duration = end_timestamp - start_timestamp
assert duration >= 0
task_durations[task_id].append(duration)
node_durations = defaultdict(list)
for task_id, durations in task_durations.items():
node_name = node_names[task_id] if task_id in node_names else task_id
node_durations[node_name] += durations
return dict(node_durations) | traceml_parser.py | import xml.etree.ElementTree as ET
from collections import defaultdict
def _parse_node_names_single_file(filename: str) -> dict:
node_names = {}
tree = ET.parse(filename)
root = tree.getroot()
for node in root.findall("./graph/node"):
data = node.find("./data[@key='d0']")
node_id = node.attrib["id"]
node_name = data.text
node_names[node_id] = node_name
return node_names
def parse_node_names(graphml_filenames: list) -> dict:
node_names = {}
for filename in graphml_filenames:
node_names_single = _parse_node_names_single_file(filename)
node_names = {**node_names, **node_names_single}
return node_names
def _find_thread_count(root: ET.Element) -> int:
for attribute in root.findall("./description/attribute"):
if attribute.attrib["name"] == "Thread":
return len(list(attribute))
raise ValueError("No Thread attribute found")
def parse_traceml(filename: str, node_names: dict) -> dict:
task_durations = defaultdict(list)
tree = ET.parse(filename)
root = tree.getroot()
thread_count = _find_thread_count(root)
thread_current_task = [[]] * thread_count
for node in root:
if not "tid" in node.attrib:
continue
thread_id = int(node.attrib["tid"]) - 1
if node.tag == "task_begin":
task_id = node.attrib["id"]
start_timestamp = int(node.attrib["ts"])
thread_current_task[thread_id].append((task_id, start_timestamp))
elif node.tag == "task_end":
assert len(thread_current_task[thread_id]) > 0
end_timestamp = int(node.attrib["ts"])
task_id, start_timestamp = thread_current_task[thread_id].pop()
duration = end_timestamp - start_timestamp
assert duration >= 0
task_durations[task_id].append(duration)
node_durations = defaultdict(list)
for task_id, durations in task_durations.items():
node_name = node_names[task_id] if task_id in node_names else task_id
node_durations[node_name] += durations
return dict(node_durations) | 0.463687 | 0.247248 |
import logging
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.utils import timezone
from django.utils.decorators import method_decorator
from django.utils.translation import gettext_lazy as _
from django.views.generic import CreateView, DetailView, ListView, RedirectView, UpdateView
from django.views.generic.edit import FormView
from .forms import *
from .models import *
logger = logging.getLogger(__name__)
class OtaAccessMixin(object):
def get_context_data(self, **kwargs):
context = super(OtaAccessMixin, self).get_context_data(**kwargs)
context['is_staff'] = self.request.user.is_staff
context['org'] = self.org
context.update(self.org.permissions(self.request.user))
return context
def get_object(self, queryset=None):
deployment_request = get_object_or_404(DeploymentRequest, pk=self.kwargs['pk'])
if deployment_request.org_id == self.org.id:
if self.org.has_permission(self.request.user, 'can_manage_ota'):
return deployment_request
raise PermissionDenied("User has no access to this deployment request")
def get_queryset(self):
return DeploymentRequest.objects.filter(org=self.org)
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
self.org = Org.objects.get_from_request(self.request)
if self.org and not self.org.has_permission(self.request.user, 'can_manage_ota'):
messages.error(self.request, 'User has no permissions to manage devices')
return HttpResponseRedirect(self.org.get_absolute_url())
return super(OtaAccessMixin, self).dispatch(request, *args, **kwargs)
class OtaIndexView(OtaAccessMixin, ListView):
model = DeploymentRequest
template_name = 'ota/index.html'
def get_context_data(self, **kwargs):
context = super(OtaIndexView, self).get_context_data(**kwargs)
context['fleets'] = Fleet.objects.filter(org=context['org'])
context['scripts'] = DeviceScript.objects.filter(org=context['org'])
context['requests'] = DeploymentRequest.objects.filter(org=context['org']).select_related('script', 'fleet')
return context
class DeploymentRequestListView(OtaAccessMixin, ListView):
model = DeploymentRequest
template_name = 'ota/request_list.html'
def get_queryset(self):
qs = super(DeploymentRequestListView, self).get_queryset()
return qs.select_related('script', 'fleet', 'org')
class DeploymentRequestDetailView(OtaAccessMixin, DetailView):
model = DeploymentRequest
template_name = 'ota/request_detail.html'
class DeploymentRequestCreateView(OtaAccessMixin, CreateView):
model = DeviceScript
form_class = DeploymentRequestForm
template_name = 'ota/form.html'
def form_valid(self, form):
self.object = form.save(commit=False)
self.object.created_by = self.request.user
self.object.org = self.org
self.object.selection_criteria = form.cleaned_data['selection_criteria_text']
self.object.save()
return HttpResponseRedirect(self.get_success_url())
def get_context_data(self, **kwargs):
context = super(DeploymentRequestCreateView, self).get_context_data(**kwargs)
context['title'] = _('New Deployment Request')
context['referer'] = self.request.META.get('HTTP_REFERER')
return context
def get_form_kwargs( self ):
kwargs = super( DeploymentRequestCreateView, self ).get_form_kwargs()
kwargs['org'] = self.org
return kwargs
class DeploymentRequestUpdateView(OtaAccessMixin, UpdateView):
model = DeviceScript
form_class = DeploymentRequestForm
template_name = 'ota/form.html'
def form_valid(self, form):
self.object = form.save(commit=False)
self.object.selection_criteria = form.cleaned_data['selection_criteria_text']
self.object.save()
return HttpResponseRedirect(self.get_success_url())
def get_context_data(self, **kwargs):
context = super(DeploymentRequestUpdateView, self).get_context_data(**kwargs)
context['title'] = _('Edit Deployment Request')
context['referer'] = self.request.META.get('HTTP_REFERER')
return context
def get_form_kwargs( self ):
kwargs = super( DeploymentRequestUpdateView, self ).get_form_kwargs()
kwargs['org'] = self.org
return kwargs
class DeploymentRequestReleaseView(OtaAccessMixin, UpdateView):
model = DeploymentRequest
form_class = DeploymentRequestReleaseForm
template_name = 'ota/form.html'
def get_context_data(self, **kwargs):
context = super(DeploymentRequestReleaseView, self).get_context_data(**kwargs)
context['title'] = _('Deployment Request Publishing Form')
context['referer'] = self.request.META.get('HTTP_REFERER')
return context
class DeploymentRequestCompleteView(OtaAccessMixin, UpdateView):
model = DeploymentRequest
form_class = DeploymentRequestCompleteForm
template_name = 'ota/form.html'
def form_valid(self, form):
self.object = form.save(commit=False)
self.object.completed_on = timezone.now()
self.object.save()
return HttpResponseRedirect(self.get_success_url())
def get_context_data(self, **kwargs):
context = super(DeploymentRequestCompleteView, self).get_context_data(**kwargs)
context['title'] = _('Deployment Request Complete Form')
context['referer'] = self.request.META.get('HTTP_REFERER')
return context | server/apps/ota/views.py | import logging
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.utils import timezone
from django.utils.decorators import method_decorator
from django.utils.translation import gettext_lazy as _
from django.views.generic import CreateView, DetailView, ListView, RedirectView, UpdateView
from django.views.generic.edit import FormView
from .forms import *
from .models import *
logger = logging.getLogger(__name__)
class OtaAccessMixin(object):
def get_context_data(self, **kwargs):
context = super(OtaAccessMixin, self).get_context_data(**kwargs)
context['is_staff'] = self.request.user.is_staff
context['org'] = self.org
context.update(self.org.permissions(self.request.user))
return context
def get_object(self, queryset=None):
deployment_request = get_object_or_404(DeploymentRequest, pk=self.kwargs['pk'])
if deployment_request.org_id == self.org.id:
if self.org.has_permission(self.request.user, 'can_manage_ota'):
return deployment_request
raise PermissionDenied("User has no access to this deployment request")
def get_queryset(self):
return DeploymentRequest.objects.filter(org=self.org)
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
self.org = Org.objects.get_from_request(self.request)
if self.org and not self.org.has_permission(self.request.user, 'can_manage_ota'):
messages.error(self.request, 'User has no permissions to manage devices')
return HttpResponseRedirect(self.org.get_absolute_url())
return super(OtaAccessMixin, self).dispatch(request, *args, **kwargs)
class OtaIndexView(OtaAccessMixin, ListView):
model = DeploymentRequest
template_name = 'ota/index.html'
def get_context_data(self, **kwargs):
context = super(OtaIndexView, self).get_context_data(**kwargs)
context['fleets'] = Fleet.objects.filter(org=context['org'])
context['scripts'] = DeviceScript.objects.filter(org=context['org'])
context['requests'] = DeploymentRequest.objects.filter(org=context['org']).select_related('script', 'fleet')
return context
class DeploymentRequestListView(OtaAccessMixin, ListView):
model = DeploymentRequest
template_name = 'ota/request_list.html'
def get_queryset(self):
qs = super(DeploymentRequestListView, self).get_queryset()
return qs.select_related('script', 'fleet', 'org')
class DeploymentRequestDetailView(OtaAccessMixin, DetailView):
model = DeploymentRequest
template_name = 'ota/request_detail.html'
class DeploymentRequestCreateView(OtaAccessMixin, CreateView):
model = DeviceScript
form_class = DeploymentRequestForm
template_name = 'ota/form.html'
def form_valid(self, form):
self.object = form.save(commit=False)
self.object.created_by = self.request.user
self.object.org = self.org
self.object.selection_criteria = form.cleaned_data['selection_criteria_text']
self.object.save()
return HttpResponseRedirect(self.get_success_url())
def get_context_data(self, **kwargs):
context = super(DeploymentRequestCreateView, self).get_context_data(**kwargs)
context['title'] = _('New Deployment Request')
context['referer'] = self.request.META.get('HTTP_REFERER')
return context
def get_form_kwargs( self ):
kwargs = super( DeploymentRequestCreateView, self ).get_form_kwargs()
kwargs['org'] = self.org
return kwargs
class DeploymentRequestUpdateView(OtaAccessMixin, UpdateView):
model = DeviceScript
form_class = DeploymentRequestForm
template_name = 'ota/form.html'
def form_valid(self, form):
self.object = form.save(commit=False)
self.object.selection_criteria = form.cleaned_data['selection_criteria_text']
self.object.save()
return HttpResponseRedirect(self.get_success_url())
def get_context_data(self, **kwargs):
context = super(DeploymentRequestUpdateView, self).get_context_data(**kwargs)
context['title'] = _('Edit Deployment Request')
context['referer'] = self.request.META.get('HTTP_REFERER')
return context
def get_form_kwargs( self ):
kwargs = super( DeploymentRequestUpdateView, self ).get_form_kwargs()
kwargs['org'] = self.org
return kwargs
class DeploymentRequestReleaseView(OtaAccessMixin, UpdateView):
model = DeploymentRequest
form_class = DeploymentRequestReleaseForm
template_name = 'ota/form.html'
def get_context_data(self, **kwargs):
context = super(DeploymentRequestReleaseView, self).get_context_data(**kwargs)
context['title'] = _('Deployment Request Publishing Form')
context['referer'] = self.request.META.get('HTTP_REFERER')
return context
class DeploymentRequestCompleteView(OtaAccessMixin, UpdateView):
model = DeploymentRequest
form_class = DeploymentRequestCompleteForm
template_name = 'ota/form.html'
def form_valid(self, form):
self.object = form.save(commit=False)
self.object.completed_on = timezone.now()
self.object.save()
return HttpResponseRedirect(self.get_success_url())
def get_context_data(self, **kwargs):
context = super(DeploymentRequestCompleteView, self).get_context_data(**kwargs)
context['title'] = _('Deployment Request Complete Form')
context['referer'] = self.request.META.get('HTTP_REFERER')
return context | 0.441191 | 0.063019 |
import tensorflow as tf
import math
import numpy as np
class Classifier(object):
    """TF1 graph-mode image classifier wrapper.

    Builds its computation graph and a private ``tf.Session`` at
    construction time.  The flat observation vector is interpreted as a
    square image of side ``int(sqrt(observation_dim / image_ch_dim))``.
    NOTE(review): the default ``observation_dim=814`` is not a perfect
    square (28*28 = 784) — confirm the intended value.
    """
    def __init__(self, batch_size, network,
                 observation_dim=814,
                 learning_rate=3e-4,
                 optimizer=tf.train.AdamOptimizer,
                 image_ch_dim=1,
                 num_labels = 10,
                 decay_step=430,
                 decay_rate=0.9):
        # `network` must be a callable:
        #   network(x, phase_train, observation_dim, image_ch_dim, num_labels) -> logits
        self._batch_size = batch_size
        self._network = network
        self._observation_dim = observation_dim
        self._learning_rate = learning_rate
        self._optimizer = optimizer        # optimizer class supplied by the caller
        self._image_ch_dim = image_ch_dim
        self._decay_step = decay_step      # steps per LR-decay period
        self._decay_rate = decay_rate      # multiplicative LR-decay factor
        self._num_labels = num_labels
        self._step = 0                     # number of update() calls (summary x-axis)
        self._write_summary = False        # set True to log TensorBoard summaries
        self._build_graph()
def _build_graph(self):
    """Construct the TF1 graph, session and summary writer.

    Fixes vs. the original:
    * the constructor's ``optimizer`` class is now actually used — it was
      silently replaced by a hard-coded ``tf.train.AdamOptimizer``;
    * ``global_step`` is passed to ``minimize``, so the exponential-decay
      schedule advances.  Before, the step variable was never incremented
      and the learning rate never decayed.
    NOTE(review): scope nesting was reconstructed from whitespace-mangled
    source — confirm against checkpoints before reloading old weights.
    """
    tf.reset_default_graph()
    # Side length of the square input image implied by the flat dimensions.
    dim = int(math.sqrt(self._observation_dim / self._image_ch_dim))
    with tf.variable_scope('cla'):
        self.x = tf.placeholder(tf.float32, shape=[None, dim, dim, self._image_ch_dim])
        self.y = tf.placeholder(tf.int64, (None))
        self.phase_train = tf.placeholder(tf.bool)  # train/eval switch for the network
        with tf.variable_scope('nn', reuse=tf.AUTO_REUSE):
            logits = self._network(self.x, self.phase_train, self._observation_dim, self._image_ch_dim, self._num_labels)
        with tf.variable_scope('loss'):
            cross_entropy = self.soft_max_cross_entropy(logits, self.y)
            self._loss = cross_entropy + self.l2_regularization(0.1)
        with tf.variable_scope('evaluate'):
            predict = tf.argmax(logits, 1)
            actual = tf.argmax(tf.one_hot(self.y, self._num_labels), 1)
            self._correctness = tf.equal(predict, actual)
            self._accuracy = tf.reduce_mean(tf.cast(self._correctness, tf.float32))
            # NOTE(review): these confusion counts treat class ids as 0/1
            # indicators and are only meaningful for binary labels, while
            # num_labels defaults to 10 — confirm intent.
            self._tp = tf.cast(tf.count_nonzero(predict * actual), tf.float32)
            self._tn = tf.cast(tf.count_nonzero((predict - 1) * (actual - 1)), tf.float32)
            self._fp = tf.cast(tf.count_nonzero(predict * (actual - 1)), tf.float32)
            self._fn = tf.cast(tf.count_nonzero((predict - 1) * actual), tf.float32)
        with tf.variable_scope('lr_scheduler'):
            global_step = tf.Variable(0, trainable=False)
            self._decay_learning_rate = tf.train.exponential_decay(self._learning_rate, global_step,
                                                                   self._decay_step, self._decay_rate)
        with tf.variable_scope('optimizer'):
            # Use the optimizer class supplied to the constructor.
            optimizer = self._optimizer(learning_rate=self._decay_learning_rate)
        with tf.variable_scope('training-step'):
            # Run UPDATE_OPS (e.g. batch-norm moving averages) with every step.
            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            with tf.control_dependencies(update_ops):
                # Passing global_step makes the decay schedule advance.
                self._train = optimizer.minimize(self._loss, global_step=global_step)
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5, allow_growth=True)
    self._sesh = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
    self._train_writer = tf.summary.FileWriter("./summaries/train", self._sesh.graph)
    init = tf.global_variables_initializer()
    tf.local_variables_initializer().run(session=self._sesh)
    self._sesh.run(init)
    tf.summary.scalar('loss', self._loss)
    self._merge = tf.summary.merge_all()
def soft_max_cross_entropy(self, logits, labels):
    """Mean softmax cross-entropy; `labels` are integer class ids, one-hot encoded here."""
    return tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=tf.one_hot(labels, self._num_labels)))
def l2_regularization(self, weight=0.2):
    """Return `weight` times the summed L2 norms of every trainable variable.

    Note: this includes biases as well as weights.
    """
    trainable = tf.trainable_variables()
    return weight * tf.add_n([tf.nn.l2_loss(v) for v in trainable])
def update(self, x, y):
    """Run one optimization step on batch (x, y) and return the batch loss.

    When ``_write_summary`` is set, the merged summaries are also written
    to the train FileWriter at the current step.
    """
    if self._write_summary:
        _, loss, summ = self._sesh.run([self._train, self._loss, self._merge], feed_dict={ self.x: x, self.y: y, self.phase_train: True})
        self._train_writer.add_summary(summ, self._step)
    else:
        _, loss = self._sesh.run([self._train, self._loss], feed_dict={
            self.x: x, self.y: y, self.phase_train: True})
    self._step += 1
    return loss
def evaluate(self, X_data, y_data):
    """Accumulate confusion counts (tp, tn, fp, fn) over X_data in batches.

    The final partial batch is included.  NOTE(review): the underlying
    graph ops only make sense for binary labels (see _build_graph) —
    confirm before using with num_labels > 2.
    """
    num_examples = len(X_data)
    tp,tn,fp,fn = 0,0,0,0
    for offset in range(0, num_examples, self._batch_size):
        batch_x, batch_y = X_data[offset:offset+self._batch_size], y_data[offset:offset+self._batch_size]
        tpb,tnb,fpb,fnb = self._sesh.run([self._tp, self._tn, self._fp, self._fn], feed_dict={self.x: batch_x, self.y: batch_y, self.phase_train: False})
        tp += tpb
        tn += tnb
        fp += fpb
        fn += fnb
    return tp, tn, fp, fn
def reset_session(self):
    # Clears the default graph only; the existing Session keeps its graph.
    tf.reset_default_graph()
def save_weights(self, path):
    """Checkpoint all graph variables to `path`."""
    print("Save weights to ", path)
    saver = tf.train.Saver()
    saver.save(self._sesh, path)
def load_weights(self, path):
    """Restore variables previously saved by save_weights into the live session."""
    print("Load weights from ", path)
    saver = tf.train.Saver()
    saver.restore(self._sesh, path)
def done(self):
self._train_writer.close() | models/classifier.py | import tensorflow as tf
import math
import numpy as np
class Classifier(object):
def __init__(self, batch_size, network,
observation_dim=814,
learning_rate=3e-4,
optimizer=tf.train.AdamOptimizer,
image_ch_dim=1,
num_labels = 10,
decay_step=430,
decay_rate=0.9):
self._batch_size = batch_size
self._network = network
self._observation_dim = observation_dim
self._learning_rate = learning_rate
self._optimizer = optimizer
self._image_ch_dim = image_ch_dim
self._decay_step = decay_step
self._decay_rate = decay_rate
self._num_labels = num_labels
self._step = 0
self._write_summary = False
self._build_graph()
def _build_graph(self):
tf.reset_default_graph()
dim = int(math.sqrt(self._observation_dim / self._image_ch_dim))
with tf.variable_scope('cla'):
self.x = tf.placeholder(tf.float32, shape=[None, dim, dim, self._image_ch_dim])
self.y = tf.placeholder(tf.int64, (None))
self.phase_train = tf.placeholder(tf.bool)
with tf.variable_scope('nn', reuse=tf.AUTO_REUSE):
logits = self._network(self.x, self.phase_train, self._observation_dim, self._image_ch_dim, self._num_labels)
with tf.variable_scope('loss'):
cross_entropy = self.soft_max_cross_entropy(logits, self.y)
self._loss = cross_entropy + self.l2_regularization(0.1)
with tf.variable_scope('evaluate'):
predict = tf.argmax(logits, 1)
actual = tf.argmax(tf.one_hot(self.y, self._num_labels), 1)
self._correctness = tf.equal(predict, actual)
self._accuracy = tf.reduce_mean(tf.cast(self._correctness, tf.float32))
self._tp = tf.cast(tf.count_nonzero(predict * actual), tf.float32)
self._tn = tf.cast(tf.count_nonzero((predict - 1) * (actual - 1)), tf.float32)
self._fp = tf.cast(tf.count_nonzero(predict * (actual - 1)), tf.float32)
self._fn = tf.cast(tf.count_nonzero((predict - 1) * actual), tf.float32)
with tf.variable_scope('lr_scheduler'):
global_step = tf.Variable(0, trainable=False)
self._decay_learning_rate = tf.train.exponential_decay(self._learning_rate, global_step,
self._decay_step, self._decay_rate)
with tf.variable_scope('optimizer'):
optimizer = tf.train.AdamOptimizer(
learning_rate=self._decay_learning_rate)
with tf.variable_scope('training-step'):
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
self._train = optimizer.minimize(self._loss)
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5, allow_growth=True)
self._sesh = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
self._train_writer = tf.summary.FileWriter("./summaries/train", self._sesh.graph)
init = tf.global_variables_initializer()
tf.local_variables_initializer().run(session=self._sesh)
self._sesh.run(init)
tf.summary.scalar('loss', self._loss)
self._merge =tf.summary.merge_all()
def soft_max_cross_entropy(self, logits, labels):
return tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=tf.one_hot(labels, self._num_labels)))
def l2_regularization(self, weight=0.2):
vars = tf.trainable_variables()
lossL2 = tf.add_n([tf.nn.l2_loss(v) for v in vars]) * weight
return lossL2
def update(self, x, y):
if self._write_summary:
_, loss, summ = self._sesh.run([self._train, self._loss, self._merge], feed_dict={ self.x: x, self.y: y, self.phase_train: True})
self._train_writer.add_summary(summ, self._step)
else:
_, loss = self._sesh.run([self._train, self._loss], feed_dict={
self.x: x, self.y: y, self.phase_train: True})
self._step += 1
return loss
def evaluate(self, X_data, y_data):
num_examples = len(X_data)
tp,tn,fp,fn = 0,0,0,0
for offset in range(0, num_examples, self._batch_size):
batch_x, batch_y = X_data[offset:offset+self._batch_size], y_data[offset:offset+self._batch_size]
tpb,tnb,fpb,fnb = self._sesh.run([self._tp, self._tn, self._fp, self._fn], feed_dict={self.x: batch_x, self.y: batch_y, self.phase_train: False})
tp += tpb
tn += tnb
fp += fpb
fn += fnb
return tp, tn, fp, fn
def reset_session(self):
tf.reset_default_graph()
def save_weights(self, path):
print("Save weights to ", path)
saver = tf.train.Saver()
saver.save(self._sesh, path)
def load_weights(self, path):
print("Load weights from ", path)
saver = tf.train.Saver()
saver.restore(self._sesh, path)
def done(self):
self._train_writer.close() | 0.841305 | 0.243316 |
from django.urls import path,re_path,include

from . import views

app_name='reviews.planning'

# Every pattern here was a literal regex of the form r'^name/$'; those map
# one-to-one onto the simpler (and already-imported) `path()` syntax, so the
# matched URLs are unchanged.  All endpoints are AJAX-style actions on the
# review-planning page.
urlpatterns = [
    # Sources
    path('save_source/', views.save_source, name='save_source'),
    path('remove_source/', views.remove_source_from_review, name='remove_source_from_review'),
    path('suggested_sources/', views.suggested_sources, name='suggested_sources'),
    path('add_suggested_sources/', views.add_suggested_sources, name='add_suggested_sources'),
    # Research questions / objective / PICOC
    path('save_question/', views.save_question, name='save_question'),
    path('save_question_order/', views.save_question_order, name='save_question_order'),
    path('save_picoc/', views.save_picoc, name='save_picoc'),
    path('add_or_edit_question/', views.add_or_edit_question, name='add_or_edit_question'),
    path('remove_question/', views.remove_question, name='remove_question'),
    path('save_objective/', views.save_objective, name='save_objective'),
    # Selection criteria
    path('add_criteria/', views.add_criteria, name='add_criteria'),
    path('remove_criteria/', views.remove_criteria, name='remove_criteria'),
    # Keywords
    path('import_pico_keywords/', views.import_pico_keywords, name='import_pico_keywords'),
    path('add_keyword/', views.add_keyword, name='add_keyword'),
    path('edit_keyword/', views.edit_keyword, name='edit_keyword'),
    path('remove_keyword/', views.remove_keyword, name='remove_keyword'),
    # Quality assessment
    path('add_quality_assessment_question/', views.add_quality_assessment_question, name='add_quality_assessment_question'),
    path('edit_quality_assessment_question/', views.edit_quality_assessment_question, name='edit_quality_assessment_question'),
    path('save_quality_assessment_question/', views.save_quality_assessment_question, name='save_quality_assessment_question'),
    path('save_quality_assessment_question_order/', views.save_quality_assessment_question_order, name='save_quality_assessment_question_order'),
    path('remove_quality_assessment_question/', views.remove_quality_assessment_question, name='remove_quality_assessment_question'),
    path('add_quality_assessment_answer/', views.add_quality_assessment_answer, name='add_quality_assessment_answer'),
    path('edit_quality_assessment_answer/', views.edit_quality_assessment_answer, name='edit_quality_assessment_answer'),
    path('save_quality_assessment_answer/', views.save_quality_assessment_answer, name='save_quality_assessment_answer'),
    path('remove_quality_assessment_answer/', views.remove_quality_assessment_answer, name='remove_quality_assessment_answer'),
    path('add_suggested_answer/', views.add_suggested_answer, name='add_suggested_answer'),
    # Data extraction
    path('add_new_data_extraction_field/', views.add_new_data_extraction_field, name='add_new_data_extraction_field'),
    path('edit_data_extraction_field/', views.edit_data_extraction_field, name='edit_data_extraction_field'),
    path('save_data_extraction_field/', views.save_data_extraction_field, name='save_data_extraction_field'),
    path('save_data_extraction_field_order/', views.save_data_extraction_field_order, name='save_data_extraction_field_order'),
    path('remove_data_extraction_field/', views.remove_data_extraction_field, name='remove_data_extraction_field'),
    # Scoring and search string
    path('calculate_max_score/', views.calculate_max_score, name='calculate_max_score'),
    path('save_cutoff_score/', views.save_cutoff_score, name='save_cutoff_score'),
    path('generate_search_string/', views.generate_search_string, name='generate_search_string'),
    path('save_generic_search_string/', views.save_generic_search_string, name='save_generic_search_string'),
]
from django.urls import path,re_path,include
from . import views
app_name='reviews.planning'
urlpatterns = [
re_path(r'^save_source/$', views.save_source, name='save_source'),
re_path(r'^remove_source/$', views.remove_source_from_review, name='remove_source_from_review'),
re_path(r'^suggested_sources/$', views.suggested_sources, name='suggested_sources'),
re_path(r'^add_suggested_sources/$', views.add_suggested_sources, name='add_suggested_sources'),
re_path(r'^save_question/$', views.save_question, name='save_question'),
re_path(r'^save_question_order/$', views.save_question_order, name='save_question_order'),
re_path(r'^save_picoc/$', views.save_picoc, name='save_picoc'),
re_path(r'^add_or_edit_question/$', views.add_or_edit_question, name='add_or_edit_question'),
re_path(r'^remove_question/$', views.remove_question, name='remove_question'),
re_path(r'^save_objective/$', views.save_objective, name='save_objective'),
re_path(r'^add_criteria/$', views.add_criteria, name='add_criteria'),
re_path(r'^remove_criteria/$', views.remove_criteria, name='remove_criteria'),
re_path(r'^import_pico_keywords/$', views.import_pico_keywords, name='import_pico_keywords'),
re_path(r'^add_keyword/$', views.add_keyword, name='add_keyword'),
re_path(r'^edit_keyword/$', views.edit_keyword, name='edit_keyword'),
re_path(r'^remove_keyword/$', views.remove_keyword, name='remove_keyword'),
re_path(r'^add_quality_assessment_question/$', views.add_quality_assessment_question, name='add_quality_assessment_question'),
re_path(r'^edit_quality_assessment_question/$', views.edit_quality_assessment_question, name='edit_quality_assessment_question'),
re_path(r'^save_quality_assessment_question/$', views.save_quality_assessment_question, name='save_quality_assessment_question'),
re_path(r'^save_quality_assessment_question_order/$', views.save_quality_assessment_question_order, name='save_quality_assessment_question_order'),
re_path(r'^remove_quality_assessment_question/$', views.remove_quality_assessment_question, name='remove_quality_assessment_question'),
re_path(r'^add_quality_assessment_answer/$', views.add_quality_assessment_answer, name='add_quality_assessment_answer'),
re_path(r'^edit_quality_assessment_answer/$', views.edit_quality_assessment_answer, name='edit_quality_assessment_answer'),
re_path(r'^save_quality_assessment_answer/$', views.save_quality_assessment_answer, name='save_quality_assessment_answer'),
re_path(r'^remove_quality_assessment_answer/$', views.remove_quality_assessment_answer, name='remove_quality_assessment_answer'),
re_path(r'^add_suggested_answer/$', views.add_suggested_answer, name='add_suggested_answer'),
re_path(r'^add_new_data_extraction_field/$', views.add_new_data_extraction_field, name='add_new_data_extraction_field'),
re_path(r'^edit_data_extraction_field/$', views.edit_data_extraction_field, name='edit_data_extraction_field'),
re_path(r'^save_data_extraction_field/$', views.save_data_extraction_field, name='save_data_extraction_field'),
re_path(r'^save_data_extraction_field_order/$', views.save_data_extraction_field_order, name='save_data_extraction_field_order'),
re_path(r'^remove_data_extraction_field/$', views.remove_data_extraction_field, name='remove_data_extraction_field'),
re_path(r'^calculate_max_score/$', views.calculate_max_score, name='calculate_max_score'),
re_path(r'^save_cutoff_score/$', views.save_cutoff_score, name='save_cutoff_score'),
re_path(r'^generate_search_string/$', views.generate_search_string, name='generate_search_string'),
re_path(r'^save_generic_search_string/$', views.save_generic_search_string, name='save_generic_search_string'),
] | 0.269902 | 0.073663 |
import mysql.connector
import os
class Uploader:
    """Persists scraped Steam-market case data into the `extracted` MySQL DB."""
    def __init__(self, extracted_data):
        # List of case dicts produced by the extractor (see insert() for the
        # keys each record is expected to carry).
        self.extracted_data = extracted_data
    def upload(self):
        """Connect, upsert every record, and close the connection."""
        self.connect()
        self.insert()
        self.close()
def connect(self):
    """Open a connection to the `extracted` database and create a cursor.

    FIX: this method contained ``<PASSWORD>`` redaction placeholders, which
    are syntax errors and stood in for a hard-coded secret.  Credentials now
    come exclusively from the environment: MARIADB_ROOT_PASSWORD is set when
    the process runs under docker-compose (DB host is the `db` service);
    otherwise we fall back to a local server and a separate env variable.
    """
    root_pass = os.environ.get('MARIADB_ROOT_PASSWORD')
    # root_pass is defined, if process run from docker-compose
    if root_pass:
        host = 'db'
    else:
        host = 'localhost'
        # Never hard-code the password; empty string if unset so the
        # failure is an auth error, not a syntax error.
        root_pass = os.environ.get('LOCAL_DB_ROOT_PASSWORD', '')
    self.cnx = mysql.connector.connect(user='root',
                                       password=root_pass,
                                       host=host,
                                       database='extracted')
    self.cursor = self.cnx.cursor()
    print('Connected to database.')
def insert(self):
    """Upsert cases, prices, keys and description rows from extracted_data.

    Each `case` dict carries: name, sale_price, total, timestamp,
    asset_description{icon_url, descriptions[]}, and optionally
    key{name, asset_description{...}}.
    """
    def transform_icon(icon_url):
        # Bare asset hashes become full Steam CDN URLs; absolute URLs pass through.
        if 'http' not in icon_url:
            return 'https://community.cloudflare.steamstatic.com/economy/image/' + icon_url
        else:
            return icon_url
    print('Upsert started...')
    for case in self.extracted_data:
        self.cursor.execute("INSERT INTO cases (name, icon_url) VALUES (%s, %s) ON DUPLICATE KEY UPDATE icon_url=VALUES(icon_url)", (case['name'], transform_icon(case['asset_description']['icon_url'])))
        # lastrowid is 0 when the ON DUPLICATE KEY path fired; fetch the
        # existing row's id instead.
        caseId = self.cursor.lastrowid
        if caseId == 0:
            self.cursor.execute("SELECT id FROM cases WHERE name=%s", [case['name']])
            caseId = list(self.cursor)[0][0]
        self.cursor.execute("INSERT IGNORE INTO prices (caseId, sale_price, total, timestamp) VALUES (%s, %s, %s, %s)",
                            (caseId, case['sale_price'], case['total'], int(case['timestamp'])))
        if 'key' in case.keys():
            self.cursor.execute("INSERT INTO caseKeys (caseId, name, icon_url) VALUES (%s, %s, %s) ON DUPLICATE KEY UPDATE name=VALUES(name), icon_url=VALUES(icon_url)", (caseId, case['key']['name'], transform_icon(case['key']['asset_description']['icon_url'])))
            self.cursor.execute("SELECT id FROM caseKeys WHERE name=%s", [case['key']['name']])
            keyId = list(self.cursor)[0][0]
        # Case description
        for i, descField in enumerate(case['asset_description']['descriptions']):
            # Skip blank placeholder rows emitted by Steam.
            if descField['value'] == ' ':
                continue
            if 'color' not in descField.keys():
                descField['color'] = 'NULL'
            ins0 = "INSERT INTO descriptionFields (caseId, ind, value, color) VALUES (%s, %s, %s, %s) ON DUPLICATE KEY UPDATE value=VALUES(value), color=VALUES(color)"
            self.cursor.execute(ins0, (caseId, i, descField['value'], descField['color']))
            # Only description rows that carry a 'total' get a price record.
            if 'total' not in descField.keys():
                continue
            descriptionFieldId = self.cursor.lastrowid
            if descriptionFieldId == 0:
                self.cursor.execute("SELECT id FROM descriptionFields WHERE caseId=%s and ind=%s", [caseId, i])
                descriptionFieldId = list(self.cursor)[0][0]
            ins1 = "INSERT IGNORE INTO descriptionPrices (descriptionFieldId, total, timestamp) VALUES (%s, %s, %s)"
            self.cursor.execute(ins1, (descriptionFieldId, descField['total'], int(case['timestamp'])))
        # Key description
        if 'key' in case.keys():
            for i, descField in enumerate(case['key']['asset_description']['descriptions']):
                if descField['value'] == ' ':
                    continue
                if 'color' not in descField.keys():
                    descField['color'] = 'NULL'
                ins0 = "INSERT INTO keysDescriptionFields (caseKeyId, ind, value, color) VALUES (%s, %s, %s, %s) ON DUPLICATE KEY UPDATE value=VALUES(value), color=VALUES(color)"
                self.cursor.execute(ins0, (keyId, i, descField['value'], descField['color']))
    # Single commit at the end: the whole upsert is one transaction.
    self.cnx.commit()
    print('Upsert done.')
def close(self):
    """Close the cursor and connection; drop the reference so reuse fails loudly.

    FIX: the final line of this method had unrelated dataset-concatenation
    junk fused onto it, which broke the syntax; the statement itself is
    unchanged.
    """
    self.cursor.close()
    self.cnx.close()
    self.cnx = None
    print('Connection to database successfully closed.')
import os
class Uploader:
def __init__(self, extracted_data):
self.extracted_data = extracted_data
def upload(self):
self.connect()
self.insert()
self.close()
def connect(self):
root_pass = os.environ.get('MARIADB_ROOT_PASSWORD')
# root_pass is defined, if process run from docker-compose
if root_pass:
host = 'db'
root_pass = <PASSWORD>
else:
host = 'localhost'
root_pass = '<PASSWORD>'
self.cnx = mysql.connector.connect(user='root',
password=root_<PASSWORD>,
host=host,
database='extracted')
self.cursor = self.cnx.cursor()
print('Connected to database.')
def insert(self):
def transform_icon(icon_url):
if 'http' not in icon_url:
return 'https://community.cloudflare.steamstatic.com/economy/image/' + icon_url
else:
return icon_url
print('Upsert started...')
for case in self.extracted_data:
self.cursor.execute("INSERT INTO cases (name, icon_url) VALUES (%s, %s) ON DUPLICATE KEY UPDATE icon_url=VALUES(icon_url)", (case['name'], transform_icon(case['asset_description']['icon_url'])))
caseId = self.cursor.lastrowid
if caseId == 0:
self.cursor.execute("SELECT id FROM cases WHERE name=%s", [case['name']])
caseId = list(self.cursor)[0][0]
self.cursor.execute("INSERT IGNORE INTO prices (caseId, sale_price, total, timestamp) VALUES (%s, %s, %s, %s)",
(caseId, case['sale_price'], case['total'], int(case['timestamp'])))
if 'key' in case.keys():
self.cursor.execute("INSERT INTO caseKeys (caseId, name, icon_url) VALUES (%s, %s, %s) ON DUPLICATE KEY UPDATE name=VALUES(name), icon_url=VALUES(icon_url)", (caseId, case['key']['name'], transform_icon(case['key']['asset_description']['icon_url'])))
self.cursor.execute("SELECT id FROM caseKeys WHERE name=%s", [case['key']['name']])
keyId = list(self.cursor)[0][0]
# Case description
for i, descField in enumerate(case['asset_description']['descriptions']):
if descField['value'] == ' ':
continue
if 'color' not in descField.keys():
descField['color'] = 'NULL'
ins0 = "INSERT INTO descriptionFields (caseId, ind, value, color) VALUES (%s, %s, %s, %s) ON DUPLICATE KEY UPDATE value=VALUES(value), color=VALUES(color)"
self.cursor.execute(ins0, (caseId, i, descField['value'], descField['color']))
if 'total' not in descField.keys():
continue
descriptionFieldId = self.cursor.lastrowid
if descriptionFieldId == 0:
self.cursor.execute("SELECT id FROM descriptionFields WHERE caseId=%s and ind=%s", [caseId, i])
descriptionFieldId = list(self.cursor)[0][0]
ins1 = "INSERT IGNORE INTO descriptionPrices (descriptionFieldId, total, timestamp) VALUES (%s, %s, %s)"
self.cursor.execute(ins1, (descriptionFieldId, descField['total'], int(case['timestamp'])))
# Key description
if 'key' in case.keys():
for i, descField in enumerate(case['key']['asset_description']['descriptions']):
if descField['value'] == ' ':
continue
if 'color' not in descField.keys():
descField['color'] = 'NULL'
ins0 = "INSERT INTO keysDescriptionFields (caseKeyId, ind, value, color) VALUES (%s, %s, %s, %s) ON DUPLICATE KEY UPDATE value=VALUES(value), color=VALUES(color)"
self.cursor.execute(ins0, (keyId, i, descField['value'], descField['color']))
self.cnx.commit()
print('Upsert done.')
def close(self):
self.cursor.close()
self.cnx.close()
self.cnx = None
print('Connection to database successfully closed.') | 0.252292 | 0.069542 |
"""DeeWebDemo: web server and front-end for Dee demoCluster"""
__version__ = "0.12"
__author__ = "<NAME>"
__copyright__ = "Copyright (C) 2007 <NAME>"
__license__ = "MIT" #see Licence.txt for licence information
import re
import webbrowser
import mimetypes
from Dee import *
from demoCluster import *
import web #Public domain: see web.py for details
# Directories served verbatim by the `static` handler below.
STATIC_DIRS = ('css', 'js', 'images', 'media')
# web.py routing table: static assets first, then the single-page console.
urls = (
    '/(' + '|'.join(STATIC_DIRS) + ')/.*', 'static',
    '/', 'index',
)
class session:
    """Per-browser UI state: current input/output text, history, database."""
    def __init__(self):
        self.input=""
        self.output=""
        self.history=[]  # every expression ever submitted for evaluation
        self.history_cursor=len(self.history)
        # Python 2: dict.values() returns a list, so [0] picks an arbitrary
        # database from the cluster as the default.
        self.database=demoCluster.values()[0]
# All live sessions, indexed by the integer id stored in the browser cookie.
sessions = []
nextSessionId = 0
# Matches assignment statements (`name = expr`, `name |= expr`, `name -= expr`)
# so POST can decide between exec (assignment) and eval (expression).
assign_pattern = re.compile("^(\w+)(\s*)(=|\|=|\-=)(\s*)[^=](.*)")
def getSession():
    """Return the session identified by the request cookie, creating one if needed.

    FIX: the lookup used a bare ``except:`` which swallowed *every*
    exception (including KeyboardInterrupt/SystemExit).  It now catches
    exactly what the guarded lines can raise: a missing cookie attribute
    (AttributeError), a non-numeric id (ValueError) and a stale index
    (IndexError).
    """
    global nextSessionId
    res = None
    sessionref = web.cookies()
    if sessionref:
        try:
            web.debugwrite("Using existing session %s" % sessionref.id)
            res = sessions[int(sessionref.id)]
        except (AttributeError, ValueError, IndexError):
            # Cookie absent/garbled, or the session list no longer has that id.
            web.debugwrite(" - session no longer valid")
    if not res:
        web.debugwrite("Creating new session %s" % nextSessionId)
        if len(sessions) == nextSessionId:
            sessions.append(session())
        else:
            assert False, "Sessions out of sync. with nextSessionId"
        res = sessions[nextSessionId]
        web.setcookie('id', nextSessionId)
        nextSessionId += 1 #todo random!
    return res
class index:
    """Single-page console UI: GET renders it, POST evaluates/handles commands."""
    def GET(self):
        """Render the expression console for the current session's database."""
        s = getSession()
        # web.py 0.2-style handler: the page is emitted via Python 2 `print`.
        print """<html>
<head>
<title>Dee</title>
<link rel="stylesheet" type="text/css" href="css/plainold.css" media="screen"/>
</head>
<body>
<font face=verdana,tahoma,arial,helvetica,sans>
<h1>%(current_database)s</h1>
<form method="post" action="/">
<p>
<p>Default database:
<select name="database_name">%(database)s</select> <input type="submit" name="command" value="Change database" />
</p>
<input type="submit" name="command" value="<<" />
<input type="submit" name="command" value=">>" />
<input type="submit" name="command" value="Paste Relation template" />
<input type="submit" name="command" value="Paste catalog query" />
<br />
<label for="expression">Expression:</label><br />
<font face=courier>
<textarea name="expression" cols=100 rows=10>%(input)s</textarea>
</font>
<input type="submit" name="command" value="Evaluate" />
</p>
<p>
<font face=courier>
%(output)s
</font>
</p>
</form>
</font>
</body>
</html>
""" % {"current_database":s.database.name,
       "database": "\n".join(['<option value="%(database_name)s" %(selected)s>%(database_name)s' % t for t in demoCluster.databases(['database_name']).extend(['selected'], lambda t:{'selected':t.database_name==s.database.name and "selected" or ""}).toTupleList(sort=(True,['database_name']))]),
       "input":s.input, "output":s.output}
    def POST(self):
        """Dispatch the submitted command: evaluate, history navigation, paste, etc."""
        s = getSession()
        i = web.input()
        if i.command == "Evaluate":
            inp = ""
            exp = i.expression.rstrip()
            s.history.append(exp)
            s.history_cursor=len(s.history)
            # Collapse the textarea contents to one line before exec/eval.
            exp = exp.replace('\n', ' ').replace('\r', '')
            if assign_pattern.match(exp):
                # Assignment: execute inside the session's transaction namespace.
                try:
                    exec(exp, globals(), s.database.transactions[s.database.transactionId])
                    r=""
                except Exception, e:
                    r=e
                    inp=i.expression
            else:
                # Expression: evaluate and render the result (Relations as HTML).
                try:
                    r=eval(exp, globals(), s.database.transactions[s.database.transactionId])
                    if isinstance(r, Relation):
                        r="""<div id="itsthetable">%s</div>""" % r.renderHTML()
                    else:
                        r=str(web.websafe(r))
                except Exception, e:
                    r=e
                    inp=i.expression
            s.input = inp
            # Prepend the newest result so the transcript reads newest-first.
            s.output = "<b>>>> %s</b><br />%s<br />%s" % (exp, r, s.output)
            web.redirect('/')
        else:
            if i.command == "Paste Relation template":
                s.input = """Relation(["a", "b"],
[('one', 1),
('two', 2),
('three', 3),
])"""
            elif i.command == "Paste catalog query":
                s.input = """relations"""
            elif i.command == "<<":
                # Step back through evaluation history.
                if s.history_cursor>0:
                    s.history_cursor-=1
                    s.input = s.history[s.history_cursor]
                else:
                    s.input = i.expression
            elif i.command == ">>":
                # Step forward through evaluation history.
                if s.history_cursor < len(s.history)-1:
                    s.history_cursor+=1
                    s.input = s.history[s.history_cursor]
                else:
                    s.input = i.expression
            elif i.command == "Shutdown":
                s.database._dump()
                sys.exit() #todo better way?
            elif i.command == "Change database":
                s.database = demoCluster[i.database_name]
            else:
                assert False, "Unexpected command"
            web.redirect('/')
        return
def mime_type(filename):
    """Best-guess MIME type for *filename*, defaulting to a generic binary type."""
    guessed, _encoding = mimetypes.guess_type(filename)
    return guessed if guessed else 'application/octet-stream'
class static:
    """Serves files under STATIC_DIRS straight from the working directory."""
    def GET(self, static_dir=''):
        try:
            # The last path component decides the Content-type header.
            static_file_name = web.context.path.split('/')[-1]
            web.header('Content-type', mime_type(static_file_name))
            static_file = open('.' + web.context.path, 'rb')
            web.ctx.output = static_file
        except IOError:
            # Missing/unreadable file -> 404.
            web.notfound()
# For debugging use only
web.internalerror = web.debugerror

if __name__ == "__main__":
    # Write a local splash page whose meta-refresh forwards the browser to
    # the web.py server once it is up, then try to pop a browser at it.
    open("startPage.html", 'w').write("""
<html>
<head>
<title>Starting</title>
</head>
<body>
<meta HTTP-EQUIV="Refresh" CONTENT="1; URL=http://127.0.0.1:8080">
<h1 align="center">Starting</h1>
</body>
</html>
""")
    try:
        webbrowser.open("startPage.html", new=0, autoraise=1)
    except:
        print "Point your browser at http://localhost:8080"
web.run(urls, web.reloader) | DeeWebDemo.py | """DeeWebDemo: web server and front-end for Dee demoCluster"""
__version__ = "0.12"
__author__ = "<NAME>"
__copyright__ = "Copyright (C) 2007 <NAME>"
__license__ = "MIT" #see Licence.txt for licence information
import re
import webbrowser
import mimetypes
from Dee import *
from demoCluster import *
import web #Public domain: see web.py for details
STATIC_DIRS = ('css', 'js', 'images', 'media')
urls = (
'/(' + '|'.join(STATIC_DIRS) + ')/.*', 'static',
'/', 'index',
)
class session:
def __init__(self):
self.input=""
self.output=""
self.history=[]
self.history_cursor=len(self.history)
self.database=demoCluster.values()[0]
sessions = []
nextSessionId = 0
assign_pattern = re.compile("^(\w+)(\s*)(=|\|=|\-=)(\s*)[^=](.*)")
def getSession():
global nextSessionId
res = None
sessionref = web.cookies()
#web.debugwrite("Before:"+str(sessions))
if sessionref:
try:
web.debugwrite("Using existing session %s" % sessionref.id)
res = sessions[int(sessionref.id)]
except:
web.debugwrite(" - session no longer valid")
pass
if not res:
web.debugwrite("Creating new session %s" % nextSessionId)
if len(sessions) == nextSessionId:
sessions.append(session())
else:
assert False, "Sessions out of sync. with nextSessionId"
res = sessions[nextSessionId]
web.setcookie('id', nextSessionId)
nextSessionId += 1 #todo random!
#web.debugwrite("After:"+str(sessions))
return res
class index:
def GET(self):
s = getSession()
print """<html>
<head>
<title>Dee</title>
<link rel="stylesheet" type="text/css" href="css/plainold.css" media="screen"/>
</head>
<body>
<font face=verdana,tahoma,arial,helvetica,sans>
<h1>%(current_database)s</h1>
<form method="post" action="/">
<p>
<p>Default database:
<select name="database_name">%(database)s</select> <input type="submit" name="command" value="Change database" />
</p>
<input type="submit" name="command" value="<<" />
<input type="submit" name="command" value=">>" />
<input type="submit" name="command" value="Paste Relation template" />
<input type="submit" name="command" value="Paste catalog query" />
<br />
<label for="expression">Expression:</label><br />
<font face=courier>
<textarea name="expression" cols=100 rows=10>%(input)s</textarea>
</font>
<input type="submit" name="command" value="Evaluate" />
</p>
<p>
<font face=courier>
%(output)s
</font>
</p>
</form>
</font>
</body>
</html>
""" % {"current_database":s.database.name,
"database": "\n".join(['<option value="%(database_name)s" %(selected)s>%(database_name)s' % t for t in demoCluster.databases(['database_name']).extend(['selected'], lambda t:{'selected':t.database_name==s.database.name and "selected" or ""}).toTupleList(sort=(True,['database_name']))]),
"input":s.input, "output":s.output}
def POST(self):
s = getSession()
i = web.input()
if i.command == "Evaluate":
inp = ""
exp = i.expression.rstrip()
s.history.append(exp)
s.history_cursor=len(s.history)
exp = exp.replace('\n', ' ').replace('\r', '')
if assign_pattern.match(exp):
try:
exec(exp, globals(), s.database.transactions[s.database.transactionId])
r=""
except Exception, e:
r=e
inp=i.expression
else:
try:
r=eval(exp, globals(), s.database.transactions[s.database.transactionId])
if isinstance(r, Relation):
r="""<div id="itsthetable">%s</div>""" % r.renderHTML()
else:
r=str(web.websafe(r))
except Exception, e:
r=e
inp=i.expression
s.input = inp
s.output = "<b>>>> %s</b><br />%s<br />%s" % (exp, r, s.output)
web.redirect('/')
else:
if i.command == "Paste Relation template":
s.input = """Relation(["a", "b"],
[('one', 1),
('two', 2),
('three', 3),
])"""
elif i.command == "Paste catalog query":
s.input = """relations"""
elif i.command == "<<":
if s.history_cursor>0:
s.history_cursor-=1
s.input = s.history[s.history_cursor]
else:
s.input = i.expression
elif i.command == ">>":
if s.history_cursor < len(s.history)-1:
s.history_cursor+=1
s.input = s.history[s.history_cursor]
else:
s.input = i.expression
elif i.command == "Shutdown":
s.database._dump()
sys.exit() #todo better way?
elif i.command == "Change database":
s.database = demoCluster[i.database_name]
else:
assert False, "Unexpected command"
web.redirect('/')
return
def mime_type(filename):
return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
class static:
def GET(self, static_dir=''):
try:
static_file_name = web.context.path.split('/')[-1]
web.header('Content-type', mime_type(static_file_name))
static_file = open('.' + web.context.path, 'rb')
web.ctx.output = static_file
except IOError:
web.notfound()
# For debugging use only
web.internalerror = web.debugerror
if __name__ == "__main__":
open("startPage.html", 'w').write("""
<html>
<head>
<title>Starting</title>
</head>
<body>
<meta HTTP-EQUIV="Refresh" CONTENT="1; URL=http://127.0.0.1:8080">
<h1 align="center">Starting</h1>
</body>
</html>
""")
try:
webbrowser.open("startPage.html", new=0, autoraise=1)
except:
print "Point your browser at http://localhost:8080"
web.run(urls, web.reloader) | 0.170854 | 0.099121 |
import urllib
import requests
import os
import base64
import uuid
import json
import asyncio
import eth_keys
import binascii
from Crypto.Hash import keccak
from .config import EdenConfig
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# Utility functions
# Request for Gets
def requestsGet(url='', data='' ):
headers = {'content-type': 'application/json'}
return requests.get(url=url, data=data , headers=headers, verify= False)
# Request for Post
def requestsPost(url='', data ='' ):
headers = {'content-type': 'application/json'}
return requests.post(url=url, data = data, headers=headers, verify = False)
# Json RPC Request Methods
API_SIGN_IN_USER = 'user.signin'
API_GET_USER_INFO='user.get_info'
API_GET_USER_BALANCE ='user.getbalance'
API_GET_USER_TRANSACTION='user.lstransaction'
API_GET_COIN_SERVER_ADDRESS='server.coinhdaddress'
API_ADD_ETH_ADDRESS='eth.add_address'
API_DEL_ETH_ADDRESS='eth.del_address'
API_DEPOSIT_TOKEN='user.deposit'
API_WITHDRAW_TOKEN='user.withdraw'
API_TRANSFER_TOKEN='user.transfer'
"""
API user sdk default class
"""
class EdenClientApi:
# Network Constant
EDENCHAIN_MAINNET_NETWORK = 0
EDENCHAIN_BETA_RELEASE = 1
EDENCHAIN_CANDIDATE_RELEASE = 2
def __init__(self, network):
(result, config) = EdenConfig().getConfig(network)
if result == False:
raise Exception('Network is invalid')
else:
self.config = config
"""
create default JsonRpc Requests Objects.
"""
def makeJsonRpcRequest(self, method, token):
id = str(uuid.uuid4())
params = {}
params["iamtoken"] = token
payload = { "method": method, "params": params , "jsonrpc": "2.0", "id": id , }
return payload
"""
Authenticate
"""
def authenticate_user(self, email, password):
payload = {'email': email , 'password': password , 'returnSecureToken':'true'}
auth_url = 'https://www.googleapis.com/identitytoolkit/v3/relyingparty/verifyPassword?key='+self.config['api_key']
user_auth = requests.post( auth_url, data=json.dumps(payload)).json()
token=user_auth['idToken']
if token is None or token == '':
return None
if self.sign_in_user(token):
return token
return None
"""
Sign In
"""
async def sign_in_user_async(self, token):
res = await asyncio.get_event_loop().run_in_executor(None, self.sign_in_user, token)
return res
def sign_in_user(self, token=''):
payload = self.makeJsonRpcRequest(API_SIGN_IN_USER, token)
res = requestsPost( self.config['api_end_point'], data=json.dumps(payload))
if res.status_code != 200:
return None
data = res.json()
if data['id'] != payload['id']:
return None
if data["result"]["err_code"] == 0:
return True
else:
return False
"""
Get user info from IAM
"""
async def get_user_info_async(self, token):
res = await asyncio.get_event_loop().run_in_executor(None, self.get_user_info, token)
return res
def get_user_info(self, token=''):
payload = self.makeJsonRpcRequest(API_GET_USER_INFO, token)
res = requestsPost( self.config['api_end_point'], data=json.dumps(payload))
if res.status_code != 200:
return None
data = res.json()
if data['id'] != payload['id']:
return None
else:
if data['result'] is None:
return None
else:
return data["result"]["data"]
"""
Token which I have is valid or not?
"""
async def is_token_valid_async(self, token):
res = await asyncio.get_event_loop().run_in_executor(None, self.is_token_valid, token)
return res
def is_token_valid(self, token):
res = self.get_user_info(token)
if res is not None and res.get('tedn_public_key'):
return True
else:
return False
"""
Get User Balance
"""
async def get_balance_async(self, token=''):
res = await asyncio.get_event_loop().run_in_executor(None, self.get_user_balance, token)
return res
def get_user_balance(self, token=''):
payload = self.makeJsonRpcRequest(API_GET_USER_BALANCE, token)
res = requestsPost( self.config['api_end_point'], data=json.dumps(payload))
if res.status_code != 200:
return None
data = res.json()
if data['id'] != payload['id']:
return None
return data["result"]["data"]["amount"]
"""
Get User Transaction
"""
async def get_user_transaction_async(self, token='',page=0,countperpage=0):
res = await asyncio.get_event_loop().run_in_executor(None, self.get_user_transaction, token, page, countperpage)
return res
def get_user_transaction(self,token='', page = 0, countperpage = 0):
payload = self.makeJsonRpcRequest(API_GET_USER_TRANSACTION, token)
payload["params"]["page"] = page
payload["params"]["countperpage"] = countperpage
res = requestsPost( self.config['api_end_point'], data=json.dumps(payload))
if res.status_code != 200:
return None
data = res.json()
if data['id'] != payload['id']:
return None
return data["result"]["data"]
"""
Get Coin Server Address
"""
async def get_coin_server_address_async(self, token=''):
res = await asyncio.get_event_loop().run_in_executor(None, self. get_coin_server_address, token)
return res
def get_coin_server_address(self, token=''):
payload = self.makeJsonRpcRequest(API_GET_COIN_SERVER_ADDRESS, token)
res = requestsPost( self.config['api_end_point'], data=json.dumps(payload))
if res.status_code != 200:
return None
data = res.json()
if data['id'] != payload['id']:
return None
return data["result"]["data"]["hdaddress"]
def remove0xHeader(self, hexString):
if hexString[:2] == '0x':
return hexString[2:]
else:
return hexString
def formSignature(self, hexString):
if hexString[-2:] == '01':
hexString = hexString[:-2]+'1c'
else:
hexString = hexString[:-2]+'1b'
return hexString
"""
Add Eth Address to iam
"""
async def add_eth_address_async(self, token='', private_key=''):
res = await asyncio.get_event_loop().run_in_executor(None, self. add_eth_address, token, private_key)
return res
def add_eth_address(self, token='', private_key=''):
# Create Address Object.
private_key = self.remove0xHeader(private_key)
privKey = eth_keys.keys.PrivateKey(binascii.unhexlify(private_key))
address = privKey.public_key.to_checksum_address()
keccak_hash = keccak.new(digest_bits=256)
keccak_hash.update(address.encode())
hash_msg = keccak_hash.digest()
signature = privKey.sign_msg_hash(hash_msg)
payload = self.makeJsonRpcRequest(API_ADD_ETH_ADDRESS, token)
payload["params"]["address"] = address
payload["params"]["public_key"] = self.remove0xHeader(privKey.public_key.to_hex())
payload["params"]["signature"] = self.formSignature(signature.to_hex())
res = requestsPost( self.config['api_end_point'], data=json.dumps(payload))
if res.status_code != 200:
return False
data = res.json()
if data['id'] != payload['id']:
return False
if data["result"]["err_code"] == 0:
return True
else:
return False
"""
Del Eth Address to iam
"""
async def del_eth_address_async(self, token='', private_key=''):
res = await asyncio.get_event_loop().run_in_executor(None, self. del_eth_address, token, private_key )
return res
def del_eth_address(self, token='', private_key=''):
# Create Address Object.
private_key = self.remove0xHeader(private_key)
privKey = eth_keys.keys.PrivateKey(binascii.unhexlify(private_key))
address = privKey.public_key.to_checksum_address()
keccak_hash = keccak.new(digest_bits=256)
keccak_hash.update(address.encode())
hash_msg = keccak_hash.digest()
signature = privKey.sign_msg_hash(hash_msg)
payload = self.makeJsonRpcRequest(API_DEL_ETH_ADDRESS, token)
payload["params"]["address"] = address
payload["params"]["public_key"] = self.remove0xHeader(privKey.public_key.to_hex())
payload["params"]["signature"] = self.formSignature(signature.to_hex())
res = requestsPost( self.config['api_end_point'], data=json.dumps(payload))
if res.status_code != 200:
return False
data = res.json()
if data['id'] != payload['id']:
return False
if data["result"]["err_code"] == 0:
return True
else:
return False
"""
Deposit Etn Token from ERC20
"""
async def deposit_token_async(self, token='',txhash=''):
res = await asyncio.get_event_loop().run_in_executor(None, self.deposit_token, token, txhash)
return res
def deposit_token(self,token='', txhash=''):
payload = self.makeJsonRpcRequest(API_DEPOSIT_TOKEN, token)
payload["params"]["txhash"] = txhash
res = requestsPost( self.config['api_end_point'], data=json.dumps(payload))
if res.status_code != 200:
return False
data = res.json()
if data['id'] != payload['id']:
return False
if data["result"]["err_code"] == 0:
return True
else:
return False
"""
Withdraw TEDN Token to ERC20
"""
async def withdraw_token_async(self, token='',ethaddress='',amount='0'):
res = await asyncio.get_event_loop().run_in_executor(None, self.withdraw_token, token, ethaddress, amount)
return res
def withdraw_token(self,token='', ethaddress='',amount='0'):
payload = self.makeJsonRpcRequest(API_WITHDRAW_TOKEN, token)
payload["params"]["ethaddress"] = ethaddress
payload["params"]["amount"] = amount
res = requestsPost( self.config['api_end_point'], data=json.dumps(payload))
if res.status_code != 200:
return False
data = res.json()
if data['id'] != payload['id']:
return False
if data["result"]["err_code"] == 0:
return data["result"]["data"]["txhash"]
else:
return False
"""
Transfer TEDN Token to ERC20
"""
async def transfer_token_async(self, token='',tedn_address='',amount='0'):
res = await asyncio.get_event_loop().run_in_executor(None, self.transfer_token, token, tedn_address, amount)
return res
def transfer_token(self,token='', tedn_address='',amount='0'):
payload = self.makeJsonRpcRequest(API_TRANSFER_TOKEN, token)
payload["params"]["receive_tedn_address"] = tedn_address
payload["params"]["amount"] = amount
res = requestsPost( self.config['api_end_point'], data=json.dumps(payload))
if res.status_code != 200:
return False
data = res.json()
if data['id'] != payload['id']:
return False
if data["result"]["err_code"] == 0:
return data["result"]["data"]["tx_id"]
else:
return False | eden_client_api/api.py | import urllib
import requests
import os
import base64
import uuid
import json
import asyncio
import eth_keys
import binascii
from Crypto.Hash import keccak
from .config import EdenConfig
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# Utility functions
# Request for Gets
def requestsGet(url='', data='' ):
headers = {'content-type': 'application/json'}
return requests.get(url=url, data=data , headers=headers, verify= False)
# Request for Post
def requestsPost(url='', data ='' ):
headers = {'content-type': 'application/json'}
return requests.post(url=url, data = data, headers=headers, verify = False)
# Json RPC Request Methods
API_SIGN_IN_USER = 'user.signin'
API_GET_USER_INFO='user.get_info'
API_GET_USER_BALANCE ='user.getbalance'
API_GET_USER_TRANSACTION='user.lstransaction'
API_GET_COIN_SERVER_ADDRESS='server.coinhdaddress'
API_ADD_ETH_ADDRESS='eth.add_address'
API_DEL_ETH_ADDRESS='eth.del_address'
API_DEPOSIT_TOKEN='user.deposit'
API_WITHDRAW_TOKEN='user.withdraw'
API_TRANSFER_TOKEN='user.transfer'
"""
API user sdk default class
"""
class EdenClientApi:
# Network Constant
EDENCHAIN_MAINNET_NETWORK = 0
EDENCHAIN_BETA_RELEASE = 1
EDENCHAIN_CANDIDATE_RELEASE = 2
def __init__(self, network):
(result, config) = EdenConfig().getConfig(network)
if result == False:
raise Exception('Network is invalid')
else:
self.config = config
"""
create default JsonRpc Requests Objects.
"""
def makeJsonRpcRequest(self, method, token):
id = str(uuid.uuid4())
params = {}
params["iamtoken"] = token
payload = { "method": method, "params": params , "jsonrpc": "2.0", "id": id , }
return payload
"""
Authenticate
"""
def authenticate_user(self, email, password):
payload = {'email': email , 'password': password , 'returnSecureToken':'true'}
auth_url = 'https://www.googleapis.com/identitytoolkit/v3/relyingparty/verifyPassword?key='+self.config['api_key']
user_auth = requests.post( auth_url, data=json.dumps(payload)).json()
token=user_auth['idToken']
if token is None or token == '':
return None
if self.sign_in_user(token):
return token
return None
"""
Sign In
"""
async def sign_in_user_async(self, token):
res = await asyncio.get_event_loop().run_in_executor(None, self.sign_in_user, token)
return res
def sign_in_user(self, token=''):
payload = self.makeJsonRpcRequest(API_SIGN_IN_USER, token)
res = requestsPost( self.config['api_end_point'], data=json.dumps(payload))
if res.status_code != 200:
return None
data = res.json()
if data['id'] != payload['id']:
return None
if data["result"]["err_code"] == 0:
return True
else:
return False
"""
Get user info from IAM
"""
async def get_user_info_async(self, token):
res = await asyncio.get_event_loop().run_in_executor(None, self.get_user_info, token)
return res
def get_user_info(self, token=''):
payload = self.makeJsonRpcRequest(API_GET_USER_INFO, token)
res = requestsPost( self.config['api_end_point'], data=json.dumps(payload))
if res.status_code != 200:
return None
data = res.json()
if data['id'] != payload['id']:
return None
else:
if data['result'] is None:
return None
else:
return data["result"]["data"]
"""
Token which I have is valid or not?
"""
async def is_token_valid_async(self, token):
res = await asyncio.get_event_loop().run_in_executor(None, self.is_token_valid, token)
return res
def is_token_valid(self, token):
res = self.get_user_info(token)
if res is not None and res.get('tedn_public_key'):
return True
else:
return False
"""
Get User Balance
"""
async def get_balance_async(self, token=''):
res = await asyncio.get_event_loop().run_in_executor(None, self.get_user_balance, token)
return res
def get_user_balance(self, token=''):
payload = self.makeJsonRpcRequest(API_GET_USER_BALANCE, token)
res = requestsPost( self.config['api_end_point'], data=json.dumps(payload))
if res.status_code != 200:
return None
data = res.json()
if data['id'] != payload['id']:
return None
return data["result"]["data"]["amount"]
"""
Get User Transaction
"""
async def get_user_transaction_async(self, token='',page=0,countperpage=0):
res = await asyncio.get_event_loop().run_in_executor(None, self.get_user_transaction, token, page, countperpage)
return res
def get_user_transaction(self,token='', page = 0, countperpage = 0):
payload = self.makeJsonRpcRequest(API_GET_USER_TRANSACTION, token)
payload["params"]["page"] = page
payload["params"]["countperpage"] = countperpage
res = requestsPost( self.config['api_end_point'], data=json.dumps(payload))
if res.status_code != 200:
return None
data = res.json()
if data['id'] != payload['id']:
return None
return data["result"]["data"]
"""
Get Coin Server Address
"""
async def get_coin_server_address_async(self, token=''):
res = await asyncio.get_event_loop().run_in_executor(None, self. get_coin_server_address, token)
return res
def get_coin_server_address(self, token=''):
payload = self.makeJsonRpcRequest(API_GET_COIN_SERVER_ADDRESS, token)
res = requestsPost( self.config['api_end_point'], data=json.dumps(payload))
if res.status_code != 200:
return None
data = res.json()
if data['id'] != payload['id']:
return None
return data["result"]["data"]["hdaddress"]
def remove0xHeader(self, hexString):
if hexString[:2] == '0x':
return hexString[2:]
else:
return hexString
def formSignature(self, hexString):
if hexString[-2:] == '01':
hexString = hexString[:-2]+'1c'
else:
hexString = hexString[:-2]+'1b'
return hexString
"""
Add Eth Address to iam
"""
async def add_eth_address_async(self, token='', private_key=''):
res = await asyncio.get_event_loop().run_in_executor(None, self. add_eth_address, token, private_key)
return res
def add_eth_address(self, token='', private_key=''):
# Create Address Object.
private_key = self.remove0xHeader(private_key)
privKey = eth_keys.keys.PrivateKey(binascii.unhexlify(private_key))
address = privKey.public_key.to_checksum_address()
keccak_hash = keccak.new(digest_bits=256)
keccak_hash.update(address.encode())
hash_msg = keccak_hash.digest()
signature = privKey.sign_msg_hash(hash_msg)
payload = self.makeJsonRpcRequest(API_ADD_ETH_ADDRESS, token)
payload["params"]["address"] = address
payload["params"]["public_key"] = self.remove0xHeader(privKey.public_key.to_hex())
payload["params"]["signature"] = self.formSignature(signature.to_hex())
res = requestsPost( self.config['api_end_point'], data=json.dumps(payload))
if res.status_code != 200:
return False
data = res.json()
if data['id'] != payload['id']:
return False
if data["result"]["err_code"] == 0:
return True
else:
return False
"""
Del Eth Address to iam
"""
async def del_eth_address_async(self, token='', private_key=''):
res = await asyncio.get_event_loop().run_in_executor(None, self. del_eth_address, token, private_key )
return res
def del_eth_address(self, token='', private_key=''):
# Create Address Object.
private_key = self.remove0xHeader(private_key)
privKey = eth_keys.keys.PrivateKey(binascii.unhexlify(private_key))
address = privKey.public_key.to_checksum_address()
keccak_hash = keccak.new(digest_bits=256)
keccak_hash.update(address.encode())
hash_msg = keccak_hash.digest()
signature = privKey.sign_msg_hash(hash_msg)
payload = self.makeJsonRpcRequest(API_DEL_ETH_ADDRESS, token)
payload["params"]["address"] = address
payload["params"]["public_key"] = self.remove0xHeader(privKey.public_key.to_hex())
payload["params"]["signature"] = self.formSignature(signature.to_hex())
res = requestsPost( self.config['api_end_point'], data=json.dumps(payload))
if res.status_code != 200:
return False
data = res.json()
if data['id'] != payload['id']:
return False
if data["result"]["err_code"] == 0:
return True
else:
return False
"""
Deposit Etn Token from ERC20
"""
async def deposit_token_async(self, token='',txhash=''):
res = await asyncio.get_event_loop().run_in_executor(None, self.deposit_token, token, txhash)
return res
def deposit_token(self,token='', txhash=''):
payload = self.makeJsonRpcRequest(API_DEPOSIT_TOKEN, token)
payload["params"]["txhash"] = txhash
res = requestsPost( self.config['api_end_point'], data=json.dumps(payload))
if res.status_code != 200:
return False
data = res.json()
if data['id'] != payload['id']:
return False
if data["result"]["err_code"] == 0:
return True
else:
return False
"""
Withdraw TEDN Token to ERC20
"""
async def withdraw_token_async(self, token='',ethaddress='',amount='0'):
res = await asyncio.get_event_loop().run_in_executor(None, self.withdraw_token, token, ethaddress, amount)
return res
def withdraw_token(self,token='', ethaddress='',amount='0'):
payload = self.makeJsonRpcRequest(API_WITHDRAW_TOKEN, token)
payload["params"]["ethaddress"] = ethaddress
payload["params"]["amount"] = amount
res = requestsPost( self.config['api_end_point'], data=json.dumps(payload))
if res.status_code != 200:
return False
data = res.json()
if data['id'] != payload['id']:
return False
if data["result"]["err_code"] == 0:
return data["result"]["data"]["txhash"]
else:
return False
"""
Transfer TEDN Token to ERC20
"""
async def transfer_token_async(self, token='',tedn_address='',amount='0'):
res = await asyncio.get_event_loop().run_in_executor(None, self.transfer_token, token, tedn_address, amount)
return res
def transfer_token(self,token='', tedn_address='',amount='0'):
payload = self.makeJsonRpcRequest(API_TRANSFER_TOKEN, token)
payload["params"]["receive_tedn_address"] = tedn_address
payload["params"]["amount"] = amount
res = requestsPost( self.config['api_end_point'], data=json.dumps(payload))
if res.status_code != 200:
return False
data = res.json()
if data['id'] != payload['id']:
return False
if data["result"]["err_code"] == 0:
return data["result"]["data"]["tx_id"]
else:
return False | 0.335677 | 0.080828 |
from rich import console
import untangle
import click
from typing import Tuple, List
from tempfile import TemporaryDirectory
from shutil import unpack_archive, copyfile
from os.path import join, isdir
from os import makedirs, getcwd
from rich.progress import track
from rich.console import Console
def shortname(fname: str) -> str:
"""
shortname(fname: str) -> str
Extract the short-name of the course from the file `fname`.
ARGUMENTS:
fname: String giving the path to the XML file. This file is typically
called moodle_backup.xml
RETURNS:
A string containing the short-name of the course.
"""
obj = untangle.parse(fname)
sname = obj.moodle_backup.information.original_course_shortname.cdata
return sname
def parse(fname: str) -> List[Tuple[str,str]]:
"""
parse(fname: str) -> List[Tuple[str,str]]
Parses the XML file `fname` and extracts paths to files in the archive and
the original file name. These are then processed into input and output
paths for copying extracted files.
ARGUMENTS:
fname: String giving the path to the XML file. This file is typically
called files.xml
RETURNS:
A list of tuples, where each tuple contains the source and destination
path for files in the archive.
"""
obj = untangle.parse(fname)
results = [(
file.contenthash.cdata, # defines both dir and file name in archive
file.filearea.cdata, # logical to use this as a new directory
file.filename.cdata # original file name
) for file in obj.files.file]
ans = [(
'files/{}/{}'.format(x[0][0:2], x[0]), # path to file in archive
'{}/{}'.format(x[1], x[2]) # output path
) for x in results]
return ans
@click.command()
@click.argument('fname')
def moodle_extract(fname: str):
"""
moodle_extract(fname: str)
Extacts the files inside of the Moodle backup file, `fname`.
ARGUMENTS:
fname: String giving the path to the XML file. This file is typically
called files.xml
DETAILS:
The function creates a directory to which it extacts the files.
The name of this directory is the shortname of the course. Files are
located within subdirectories. The names for the subdirectories are
determined from the "filearea" element associated with each file in the
archive. This does not necessarily result in a logical directory
structure.
"""
console = Console()
cwd = getcwd()
with TemporaryDirectory() as tmp_dir:
# Extract the files to temporary directory.
unpack_archive(fname, extract_dir=tmp_dir, format='gztar')
sname = shortname(join(tmp_dir, 'moodle_backup.xml'))
paths = parse(join(tmp_dir, 'files.xml'))
count = 0
for path in track(paths):
path = (path[0], f'{sname}/{path[1]}')
dname = path[1].split('/')
dname = f'{dname[0]}/{dname[1]}'
if not isdir(dname):
makedirs(join(cwd, dname))
try:
copyfile(join(tmp_dir, path[0]), path[1])
except FileNotFoundError:
count += 1
if count > 0:
msg = f'{count} file/s not found.'
console.print(
f'\n[bold red]WARNING![/bold red]\t{msg}\nThese are often just empty files, so you probably don\'t have to worry about it.\n')
msg = f'Extracted {len(paths)-count} of {len(paths)} files'
console.print(f'[bold green]DONE![/bold green]\t\t{msg} :smile:') | moodle_extract/main.py | from rich import console
import untangle
import click
from typing import Tuple, List
from tempfile import TemporaryDirectory
from shutil import unpack_archive, copyfile
from os.path import join, isdir
from os import makedirs, getcwd
from rich.progress import track
from rich.console import Console
def shortname(fname: str) -> str:
"""
shortname(fname: str) -> str
Extract the short-name of the course from the file `fname`.
ARGUMENTS:
fname: String giving the path to the XML file. This file is typically
called moodle_backup.xml
RETURNS:
A string containing the short-name of the course.
"""
obj = untangle.parse(fname)
sname = obj.moodle_backup.information.original_course_shortname.cdata
return sname
def parse(fname: str) -> List[Tuple[str,str]]:
"""
parse(fname: str) -> List[Tuple[str,str]]
Parses the XML file `fname` and extracts paths to files in the archive and
the original file name. These are then processed into input and output
paths for copying extracted files.
ARGUMENTS:
fname: String giving the path to the XML file. This file is typically
called files.xml
RETURNS:
A list of tuples, where each tuple contains the source and destination
path for files in the archive.
"""
obj = untangle.parse(fname)
results = [(
file.contenthash.cdata, # defines both dir and file name in archive
file.filearea.cdata, # logical to use this as a new directory
file.filename.cdata # original file name
) for file in obj.files.file]
ans = [(
'files/{}/{}'.format(x[0][0:2], x[0]), # path to file in archive
'{}/{}'.format(x[1], x[2]) # output path
) for x in results]
return ans
@click.command()
@click.argument('fname')
def moodle_extract(fname: str):
"""
moodle_extract(fname: str)
Extacts the files inside of the Moodle backup file, `fname`.
ARGUMENTS:
fname: String giving the path to the XML file. This file is typically
called files.xml
DETAILS:
The function creates a directory to which it extacts the files.
The name of this directory is the shortname of the course. Files are
located within subdirectories. The names for the subdirectories are
determined from the "filearea" element associated with each file in the
archive. This does not necessarily result in a logical directory
structure.
"""
console = Console()
cwd = getcwd()
with TemporaryDirectory() as tmp_dir:
# Extract the files to temporary directory.
unpack_archive(fname, extract_dir=tmp_dir, format='gztar')
sname = shortname(join(tmp_dir, 'moodle_backup.xml'))
paths = parse(join(tmp_dir, 'files.xml'))
count = 0
for path in track(paths):
path = (path[0], f'{sname}/{path[1]}')
dname = path[1].split('/')
dname = f'{dname[0]}/{dname[1]}'
if not isdir(dname):
makedirs(join(cwd, dname))
try:
copyfile(join(tmp_dir, path[0]), path[1])
except FileNotFoundError:
count += 1
if count > 0:
msg = f'{count} file/s not found.'
console.print(
f'\n[bold red]WARNING![/bold red]\t{msg}\nThese are often just empty files, so you probably don\'t have to worry about it.\n')
msg = f'Extracted {len(paths)-count} of {len(paths)} files'
console.print(f'[bold green]DONE![/bold green]\t\t{msg} :smile:') | 0.602529 | 0.279085 |
import asyncio, itertools, random, math
from app.utils.misc import convert_to_equiv_emoji_digits
from app.music.music import Music
from app.music.embed import MusicEmbed
class Playlist(asyncio.Queue):
def __init__(self, music_list: list=[], **kwargs):
super().__init__(**kwargs)
self.__page_queue = music_list # creates own list if its a pagination
self.__pagination_details = {"prev_page": "None", "next_page": "None", "start_at": 0, "curr_page": 1}
def __getitem__(self, index: int or slice):
queue = self.__page_queue or self._queue
if isinstance(index, slice):
item = Playlist(list(itertools.islice(queue, index.start, index.stop, index.step)))
elif isinstance(index, int):
idx = PlaylistError.check_index(index, self.size)
item = queue[idx]
else:
raise PlaylistError("Index type should be of type int or slice.")
return item
@property
def pagination_details(self):
return self.__pagination_details
@pagination_details.setter
def pagination_details(self, value: dict = {}):
if not value:
raise PlaylistError("Pagination details must be set!")
self.__pagination_details = value
@property
def size(self):
return len(self.__page_queue or self._queue)
def next(self):
return self.get()
def shuffle(self):
random.shuffle(self._queue)
def add(self, music: Music):
return self.put(music)
def remove(self, index: int):
idx = PlaylistError.check_index(index, self.size)
music = self._queue[idx]
del self._queue[idx]
return music
def clear(self):
return self._queue.clear()
def paginate(self, size: int = 0, page: int = 1):
queue = self
if size < 0:
raise PlaylistError("Size of pagination can not be negative.")
max_page = 1 if self.size <= size or size == 0 else math.ceil(self.size / size)
if page > max_page or page < 1:
raise PlaylistError("Page out of range.")
else:
start = (page - 1) * size
stop = page * size
queue = self[start:stop]
queue.pagination_details = {
"prev_page": page - 1 if page > 1 else "None",
"next_page": page + 1 if stop + size <= self.size else "None",
"start_at": start,
"curr_page": page,
}
return queue
def embed(self):
if self.size == 0:
raise PlaylistError("Did you mean to create an empty embed for playlist instead?")
embed = (MusicEmbed(title="", description="Here are the list of songs that are currently on queue.")
.add_header(header="🎶 Music Queue")
.add_footer())
# add music queued fields
for i in range(self.size):
music = str(self[i])
details = music.split("|")
title = str(details[0]).strip()
desc = "|".join(details[1:]).strip()
music_number = convert_to_equiv_emoji_digits(self.__pagination_details["start_at"] + i + 1)
embed.add_field(name=f"{music_number} {title}", value=desc, inline=False)
embed.add_fields({
"⏮️ Prev Page": self.__pagination_details["prev_page"],
"Current Page": self.__pagination_details["curr_page"],
"Next Page ⏭️": self.__pagination_details["next_page"]
})
return embed
class PlaylistError(Exception):
def __init__(self, *args):
self.message = args[0] if args else None
def __str__(self):
return f"PLAYLIST ERROR: {self.message}" if self.message else f"PLAYLIST ERROR has been raised!"
@classmethod
def check_index(self, index: int, length: int = 0):
if index < 0:
index += length
if index >= length or index < 0:
raise self("Index out of range!")
return index | app/music/playlist.py | import asyncio, itertools, random, math
from app.utils.misc import convert_to_equiv_emoji_digits
from app.music.music import Music
from app.music.embed import MusicEmbed
class Playlist(asyncio.Queue):
def __init__(self, music_list: list=[], **kwargs):
super().__init__(**kwargs)
self.__page_queue = music_list # creates own list if its a pagination
self.__pagination_details = {"prev_page": "None", "next_page": "None", "start_at": 0, "curr_page": 1}
def __getitem__(self, index: int or slice):
queue = self.__page_queue or self._queue
if isinstance(index, slice):
item = Playlist(list(itertools.islice(queue, index.start, index.stop, index.step)))
elif isinstance(index, int):
idx = PlaylistError.check_index(index, self.size)
item = queue[idx]
else:
raise PlaylistError("Index type should be of type int or slice.")
return item
@property
def pagination_details(self):
return self.__pagination_details
@pagination_details.setter
def pagination_details(self, value: dict = {}):
if not value:
raise PlaylistError("Pagination details must be set!")
self.__pagination_details = value
@property
def size(self):
return len(self.__page_queue or self._queue)
def next(self):
return self.get()
def shuffle(self):
random.shuffle(self._queue)
def add(self, music: Music):
return self.put(music)
def remove(self, index: int):
idx = PlaylistError.check_index(index, self.size)
music = self._queue[idx]
del self._queue[idx]
return music
def clear(self):
return self._queue.clear()
def paginate(self, size: int = 0, page: int = 1):
queue = self
if size < 0:
raise PlaylistError("Size of pagination can not be negative.")
max_page = 1 if self.size <= size or size == 0 else math.ceil(self.size / size)
if page > max_page or page < 1:
raise PlaylistError("Page out of range.")
else:
start = (page - 1) * size
stop = page * size
queue = self[start:stop]
queue.pagination_details = {
"prev_page": page - 1 if page > 1 else "None",
"next_page": page + 1 if stop + size <= self.size else "None",
"start_at": start,
"curr_page": page,
}
return queue
def embed(self):
if self.size == 0:
raise PlaylistError("Did you mean to create an empty embed for playlist instead?")
embed = (MusicEmbed(title="", description="Here are the list of songs that are currently on queue.")
.add_header(header="🎶 Music Queue")
.add_footer())
# add music queued fields
for i in range(self.size):
music = str(self[i])
details = music.split("|")
title = str(details[0]).strip()
desc = "|".join(details[1:]).strip()
music_number = convert_to_equiv_emoji_digits(self.__pagination_details["start_at"] + i + 1)
embed.add_field(name=f"{music_number} {title}", value=desc, inline=False)
embed.add_fields({
"⏮️ Prev Page": self.__pagination_details["prev_page"],
"Current Page": self.__pagination_details["curr_page"],
"Next Page ⏭️": self.__pagination_details["next_page"]
})
return embed
class PlaylistError(Exception):
def __init__(self, *args):
self.message = args[0] if args else None
def __str__(self):
return f"PLAYLIST ERROR: {self.message}" if self.message else f"PLAYLIST ERROR has been raised!"
@classmethod
def check_index(self, index: int, length: int = 0):
if index < 0:
index += length
if index >= length or index < 0:
raise self("Index out of range!")
return index | 0.646349 | 0.18054 |
"""Unit tests for the "create_workspace_activity_feed" module."""
import unittest
from unittest import mock
from google.auth.transport import requests
from . import create_workspace_activity_feed
class CreateFeedTest(unittest.TestCase):
    """Unit tests for create_workspace_activity_feed()."""

    @staticmethod
    def _invoke(session):
        # Single home for the long positional argument list shared by all tests.
        return create_workspace_activity_feed.create_workspace_activity_feed(
            session, "hostname.example.com", "issuer_example",
            "subject_example", "audience_example", "privatekey_example",
            "customerid_example", "applications_example")

    @mock.patch.object(requests, "AuthorizedSession", autospec=True)
    @mock.patch.object(requests.requests, "Response", autospec=True)
    def test_http_error(self, mock_response, mock_session):
        # Simulate a 400 response whose raise_for_status() raises; the error
        # must propagate to the caller.
        mock_session.request.return_value = mock_response
        type(mock_response).status_code = mock.PropertyMock(return_value=400)
        mock_response.raise_for_status.side_effect = (
            requests.requests.exceptions.HTTPError())
        with self.assertRaises(requests.requests.exceptions.HTTPError):
            self._invoke(mock_session)

    @mock.patch.object(requests, "AuthorizedSession", autospec=True)
    @mock.patch.object(requests.requests, "Response", autospec=True)
    def test_happy_path(self, mock_response, mock_session):
        # A 200 response should come back to the caller as parsed JSON.
        mock_session.request.return_value = mock_response
        type(mock_response).status_code = mock.PropertyMock(return_value=200)
        expected_feed = {
            "name": "feeds/cf91de35-1256-48f5-8a36-9503e532b879",
            "details": {
                "logType": "WORKSPACE_ACTIVITY",
                "feedSourceType": "API",
                "workspaceActivitySettings": {
                    "authentication": {
                        "tokenEndpoint": "endpoint.example.com",
                        "claims": {
                            "issuer": "issuer_example",
                            "subject": "subject_example",
                            "audience": "audience_example",
                        },
                        "rsCredentials": {
                            "privateKey": "privatekey_example",
                        },
                    },
                    "workspaceCustomerId": "customerid_example",
                    "applications": ["applications_example"],
                },
            },
            "feedState": "PENDING_ENABLEMENT",
        }
        mock_response.json.return_value = expected_feed
        self.assertEqual(self._invoke(mock_session), expected_feed)
if __name__ == "__main__":
    # Allow running this test module directly (outside a test runner).
    unittest.main()
import unittest
from unittest import mock
from google.auth.transport import requests
from . import create_workspace_activity_feed
class CreateFeedTest(unittest.TestCase):
@mock.patch.object(requests, "AuthorizedSession", autospec=True)
@mock.patch.object(requests.requests, "Response", autospec=True)
def test_http_error(self, mock_response, mock_session):
mock_session.request.return_value = mock_response
type(mock_response).status_code = mock.PropertyMock(return_value=400)
mock_response.raise_for_status.side_effect = (
requests.requests.exceptions.HTTPError())
with self.assertRaises(requests.requests.exceptions.HTTPError):
create_workspace_activity_feed.create_workspace_activity_feed(
mock_session, "hostname.example.com", "issuer_example",
"subject_example", "audience_example", "privatekey_example",
"customerid_example", "applications_example")
@mock.patch.object(requests, "AuthorizedSession", autospec=True)
@mock.patch.object(requests.requests, "Response", autospec=True)
def test_happy_path(self, mock_response, mock_session):
mock_session.request.return_value = mock_response
type(mock_response).status_code = mock.PropertyMock(return_value=200)
expected_feed = {
"name": "feeds/cf91de35-1256-48f5-8a36-9503e532b879",
"details": {
"logType": "WORKSPACE_ACTIVITY",
"feedSourceType": "API",
"workspaceActivitySettings": {
"authentication": {
"tokenEndpoint": "endpoint.example.com",
"claims": {
"issuer": "issuer_example",
"subject": "subject_example",
"audience": "audience_example"
},
"rsCredentials": {
"privateKey": "privatekey_example"
},
},
"workspaceCustomerId": "customerid_example",
"applications": ["applications_example"]
},
},
"feedState": "PENDING_ENABLEMENT"
}
mock_response.json.return_value = expected_feed
actual_feed = create_workspace_activity_feed.create_workspace_activity_feed(
mock_session, "hostname.example.com", "issuer_example",
"subject_example", "audience_example", "privatekey_example",
"customerid_example", "applications_example")
self.assertEqual(actual_feed, expected_feed)
if __name__ == "__main__":
unittest.main() | 0.751922 | 0.480296 |
from __future__ import print_function
import requests
import getpass
import time
import StringIO
import subprocess
import numpy as np
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
def requests_retry_session(
    retries=10,
    backoff_factor=0.3,
    status_forcelist=(500, 502, 504),
    session=None,
):
    '''
    Return a requests session configured to retry failed requests.

    Connection and read failures, and the HTTP statuses listed in
    ``status_forcelist``, are retried up to ``retries`` times with
    exponential backoff; we are willing to wait relatively long to give
    the server a chance to reply. Pass an existing ``session`` to
    configure it in place, otherwise a fresh one is created.

    Adapted from
    https://www.peterbe.com/plog/best-practice-with-retries-with-requests
    '''
    if session is None:
        session = requests.Session()
    retry_policy = Retry(
        total=retries,
        read=retries,
        connect=retries,
        backoff_factor=backoff_factor,
        status_forcelist=status_forcelist,
    )
    retry_adapter = HTTPAdapter(max_retries=retry_policy)
    for scheme in ('http://', 'https://'):
        session.mount(scheme, retry_adapter)
    return session
class StanfordNetworkAccessDeniedException(Exception):
    """Raised when mHealth is accessed over an insecure network connection."""
class mHealthClient(object):
    '''
    Client for the Stanford mHealth research data API (Python 2 codebase:
    note the StringIO usage).

    Default settings:
    mHealthClient(
        host='https://mhealth-data-qa.stanford.edu/data-KnRJe654r9xkA5tX',
        portal_url='https://mhealth-access-qa.stanford.edu/researcher/',
    )
    '''
    def __init__(self, host, portal_url=None, credentials_obj=None, store_credentials=None):
        # host: base URL of the data API.
        # portal_url: researcher portal opened to obtain a refresh token.
        # credentials_obj: previously persisted token dict (see auth_flow).
        # store_credentials: optional callback invoked with refreshed
        #   credentials so the caller can persist them.
        self.host = host
        self.portal_url = portal_url
        self.credentials_obj = credentials_obj
        self.store_credentials_callback = store_credentials
        # Separate retrying session used only for downloads.
        self.download_session = requests_retry_session()
    def auth_flow(self):
        '''
        Starts the authentication flow. If the credentials aren't expired, this is a noop. If the credentials are
        expired or unset, this will open the portal and request a refresh token.
        '''
        creds = self.credentials_obj
        expired = creds and time.time() > creds['expire_time']
        if expired or not creds:
            if creds:
                # Expired but present: reuse the stored refresh token.
                token = creds['refresh_token']
            else:
                # No credentials at all: open the portal (macOS `open` —
                # NOTE(review): this is Darwin-specific) and prompt for the
                # refresh token it displays.
                if self.portal_url:
                    subprocess.check_call(['open', self.portal_url])
                    print('''
Your browser has been opened to visit:
{}
'''.format(self.portal_url))
                token = getpass.getpass('Refresh Token: ')
            creds = self._refresh(token)
            # Convert the relative expires_in into an absolute timestamp so
            # the expiry test above is a plain comparison against time.time().
            creds['expire_time'] = creds['expires_in'] + time.time()
            self.credentials_obj = creds
            if self.store_credentials_callback:
                self.store_credentials_callback(creds)
    def _refresh(self, token):
        '''Exchange a refresh token for fresh credentials (JSON dict).'''
        r = requests.post(
            self.host + '/api/v1/token',
            data=dict(grant_type='refresh_token', refresh_token=token))
        r.raise_for_status()
        return r.json()
    def api_request(self, url, method='GET', params=None, session=None):
        '''
        A method that can make authenticated requests to the API.
        '''
        assert self.credentials_obj, 'Need to be authenticated to make request to API'
        # Accept either a host-relative path or an absolute URL on our host.
        if url[0] == '/':
            url = self.host + url
        else:
            assert url.startswith(self.host), 'URL for request {} did not start with host {}'.format(url, self.host)
        params = params or {}
        r = (session or requests).request(
            method,
            url,
            params=params,
            headers={'Authorization': 'Bearer {}'.format(self.credentials_obj['access_token'])})
        # Insecure-network rejections are signalled by marker text in the
        # body (see StanfordNetworkAccessDeniedException) rather than by an
        # HTTP status code, so check the body before raise_for_status.
        if 'Network Access Denied' in r.text:
            raise StanfordNetworkAccessDeniedException()
        r.raise_for_status()
        return r
    def files(self, since=None, order=None, pg=None):
        '''
        Request a list of files. Refer to mHealth docs for more details.
        '''
        # Only send parameters that were actually supplied; NaN is skipped
        # like None — presumably these values can come from a DataFrame
        # column; confirm against callers.
        p = {}
        if since is not None and not np.isnan(since):
            p['since'] = since
        if order is not None:
            p['order'] = order
        if pg is not None and not np.isnan(pg):
            p['pg'] = pg
        return self.api_request(self.host + '/api/v1/files', params=p).json()
    def download_file(self, url, origfileobj=None):
        '''Download `url` into origfileobj, or return the content when none is given.'''
        r = self.api_request(url, session=self.download_session)
        fileobj = origfileobj or StringIO.StringIO()
        # Stream in 4 KiB chunks to avoid holding the response in one piece.
        for chunk in r.iter_content(4096):
            fileobj.write(chunk)
        # When no fileobj is passed in, we return the string value of the file.
        if not origfileobj:
            return fileobj.getvalue()
    def files_iter(self, since=None, yield_pages=False):
        '''
        This is used to iterate over all files following the supplied sequence number `since`. We force
        the file iteration order to be ascending, so any modifications to state based on this will result
        in consistent computations.
        Although the mHealth API permits pagination via the pg parameter, it seems to have occasional
        bugs where a page is repeated when requesting the consecutive page. This method instead always
        requests pg=1, but changes the `since` parameter to be the largest value from the prior page.
        The yield_pages parameter yields an entire page of files at a time, as opposed to a single file at
        a time when false.
        '''
        while True:
            files = self.files(pg=1, order='asc', since=since)
            if yield_pages:
                yield files['dataUrls']
            else:
                for dataUrl in files['dataUrls']:
                    yield dataUrl
            if files['nextPage']:
                # Advance the cursor past everything seen on this page (see
                # docstring: avoids the server's pg-pagination repeat bug).
                since = max(f['sequence'] for f in files['dataUrls'])
            else:
                break
if __name__ == '__main__':
    client = mHealthClient(
        host='https://mhealth-data-qa.stanford.edu/data-KnRJe654r9xkA5tX',
        portal_url='https://mhealth-access-qa.stanford.edu/researcher/',
    )
    client.auth_flow()
    # Exercise a few representative pagination queries against the QA server,
    # printing each response with dataUrls reduced to their sequence numbers.
    demo_queries = [
        ('page 1 asc', dict(pg=1, order='asc')),
        ('page 1 asc since=487', dict(pg=1, order='asc', since=487)),
        ('page 1 desc', dict(pg=1, order='desc')),
        ('page 1 desc since=19021', dict(pg=1, order='desc', since=19021)),
        ('page 1 asc since=19021', dict(pg=1, order='asc', since=19021)),
    ]
    for label, query in demo_queries:
        print(label)
        files = client.files(**query)
        print('response', dict(files, dataUrls=[f['sequence'] for f in files['dataUrls']]))
import requests
import getpass
import time
import StringIO
import subprocess
import numpy as np
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
def requests_retry_session(
retries=10,
backoff_factor=0.3,
status_forcelist=(500, 502, 504),
session=None,
):
'''
Creates a requests session that will retry when the server has errors.
We're willing to wait relatively long periods of time to give the server a
chance to reply.
Copied from https://www.peterbe.com/plog/best-practice-with-retries-with-requests
'''
session = session or requests.Session()
retry = Retry(
total=retries,
read=retries,
connect=retries,
backoff_factor=backoff_factor,
status_forcelist=status_forcelist,
)
adapter = HTTPAdapter(max_retries=retry)
session.mount('http://', adapter)
session.mount('https://', adapter)
return session
class StanfordNetworkAccessDeniedException(Exception):
'''
Error thrown when accessing mHealth via an insecure network connection.
'''
pass
class mHealthClient(object):
'''
Default settings:
mHealthClient(
host='https://mhealth-data-qa.stanford.edu/data-KnRJe654r9xkA5tX',
portal_url='https://mhealth-access-qa.stanford.edu/researcher/',
)
'''
def __init__(self, host, portal_url=None, credentials_obj=None, store_credentials=None):
self.host = host
self.portal_url = portal_url
self.credentials_obj = credentials_obj
self.store_credentials_callback = store_credentials
self.download_session = requests_retry_session()
def auth_flow(self):
'''
Starts the authentication flow. If the credentials aren't expired, this is a noop. If the credentials are
expired or unset, this will open the portal and request a refresh token.
'''
creds = self.credentials_obj
expired = creds and time.time() > creds['expire_time']
if expired or not creds:
if creds:
token = creds['refresh_token']
else:
if self.portal_url:
subprocess.check_call(['open', self.portal_url])
print('''
Your browser has been opened to visit:
{}
'''.format(self.portal_url))
token = getpass.getpass('Refresh Token: ')
creds = self._refresh(token)
creds['expire_time'] = creds['expires_in'] + time.time()
self.credentials_obj = creds
if self.store_credentials_callback:
self.store_credentials_callback(creds)
def _refresh(self, token):
r = requests.post(
self.host + '/api/v1/token',
data=dict(grant_type='refresh_token', refresh_token=token))
r.raise_for_status()
return r.json()
def api_request(self, url, method='GET', params=None, session=None):
'''
A method that can make authenticated requests to the API.
'''
assert self.credentials_obj, 'Need to be authenticated to make request to API'
if url[0] == '/':
url = self.host + url
else:
assert url.startswith(self.host), 'URL for request {} did not start with host {}'.format(url, self.host)
params = params or {}
r = (session or requests).request(
method,
url,
params=params,
headers={'Authorization': 'Bearer {}'.format(self.credentials_obj['access_token'])})
if 'Network Access Denied' in r.text:
raise StanfordNetworkAccessDeniedException()
r.raise_for_status()
return r
def files(self, since=None, order=None, pg=None):
'''
Request a list of files. Refer to mHealth docs for more details.
'''
p = {}
if since is not None and not np.isnan(since):
p['since'] = since
if order is not None:
p['order'] = order
if pg is not None and not np.isnan(pg):
p['pg'] = pg
return self.api_request(self.host + '/api/v1/files', params=p).json()
def download_file(self, url, origfileobj=None):
r = self.api_request(url, session=self.download_session)
fileobj = origfileobj or StringIO.StringIO()
for chunk in r.iter_content(4096):
fileobj.write(chunk)
# When no fileobj is passed in, we return the string value of the file.
if not origfileobj:
return fileobj.getvalue()
def files_iter(self, since=None, yield_pages=False):
'''
This is used to iterate over all files following the supplied sequence number `since`. We force
the file iteration order to be ascending, so any modifications to state based on this will result
in consistent computations.
Although the mHealth API permits pagination via the pg parameter, it seems to have occasional
bugs where a page is repeated when requesting the consecutive page. This method instead always
requests pg=1, but changes the `since` parameter to be the largest value from the prior page.
The yield_pages parameter yields an entire page of files at a time, as opposed to a single file at
a time when false.
'''
while True:
files = self.files(pg=1, order='asc', since=since)
if yield_pages:
yield files['dataUrls']
else:
for dataUrl in files['dataUrls']:
yield dataUrl
if files['nextPage']:
since = max(f['sequence'] for f in files['dataUrls'])
else:
break
if __name__ == '__main__':
client = mHealthClient(
host='https://mhealth-data-qa.stanford.edu/data-KnRJe654r9xkA5tX',
portal_url='https://mhealth-access-qa.stanford.edu/researcher/',
)
client.auth_flow()
print('page 1 asc')
files = client.files(pg=1, order='asc')
print('response', dict(files, dataUrls=[f['sequence'] for f in files['dataUrls']]))
print('page 1 asc since=487')
files = client.files(pg=1, order='asc', since=487)
print('response', dict(files, dataUrls=[f['sequence'] for f in files['dataUrls']]))
print('page 1 desc')
files = client.files(pg=1, order='desc')
print('response', dict(files, dataUrls=[f['sequence'] for f in files['dataUrls']]))
print('page 1 desc since=19021')
files = client.files(pg=1, order='desc', since=19021)
print('response', dict(files, dataUrls=[f['sequence'] for f in files['dataUrls']]))
print('page 1 asc since=19021')
files = client.files(pg=1, order='asc', since=19021)
print('response', dict(files, dataUrls=[f['sequence'] for f in files['dataUrls']])) | 0.604399 | 0.138753 |
from __future__ import absolute_import, unicode_literals, print_function
from email.utils import parseaddr
from zope.component import createObject
from gs.group.list.command import CommandResult, CommandABC
from gs.group.member.base import user_member_of_group
from gs.group.member.leave.base import leave_group
from Products.CustomUserFolder.interfaces import IGSUserInfo
from .audit import (LeaveAuditor, LEAVE_COMMAND, LEAVE_COMMAND_NOT_MEMBER,
LEAVE_COMMAND_NO_PROFILE, )
from .notifier import (NotMemberNotifier, NoProfileNotifier)
class LeaveCommand(CommandABC):
    'The ``unsubscribe`` command.'

    def process(self, email, request):
        'Process the email command ``unsubscribe``'
        components = self.get_command_components(email)
        if components[0] != 'unsubscribe':
            raise ValueError(
                'Not a unsubscribe command: {0}'.format(email['Subject']))
        if len(components) != 1:
            # "unsubscribe" followed by extra words is not treated as a command.
            return CommandResult.notACommand
        addr = self.get_email_addr(email)
        userInfo = self.get_user(email)  # May be None. The auditor will deal.
        auditor = LeaveAuditor(self.group, userInfo, self.groupInfo)
        if not userInfo:
            # No profile for this address: audit and tell the sender.
            auditor.info(LEAVE_COMMAND_NO_PROFILE, addr)
            notifier = NoProfileNotifier(self.group.aq_parent, request)
            notifier.notify(self.groupInfo, addr)
        elif user_member_of_group(userInfo, self.groupInfo):
            # Known member: audit and remove from the group.
            auditor.info(LEAVE_COMMAND, addr)
            leave_group(self.groupInfo, userInfo, request)
        else:
            # Known profile, but not a member of this group.
            auditor.info(LEAVE_COMMAND_NOT_MEMBER, addr)
            notifier = NotMemberNotifier(self.group.aq_parent, request)
            notifier.notify(self.groupInfo, userInfo, addr)
        return CommandResult.commandStop

    @property
    def groupInfo(self):
        'Info object for the group this command operates on.'
        return createObject('groupserver.GroupInfo', self.group)

    @staticmethod
    def get_email_addr(emailMessage):
        'Extract the bare address from the From header.'
        realname, address = parseaddr(emailMessage['From'])
        return address

    def get_user(self, email):
        'Look up the sender by email address; None when no profile exists.'
        siteRoot = self.group.site_root()
        user = siteRoot.acl_users.get_userByEmail(self.get_email_addr(email))
        return IGSUserInfo(user) if user else None
from email.utils import parseaddr
from zope.component import createObject
from gs.group.list.command import CommandResult, CommandABC
from gs.group.member.base import user_member_of_group
from gs.group.member.leave.base import leave_group
from Products.CustomUserFolder.interfaces import IGSUserInfo
from .audit import (LeaveAuditor, LEAVE_COMMAND, LEAVE_COMMAND_NOT_MEMBER,
LEAVE_COMMAND_NO_PROFILE, )
from .notifier import (NotMemberNotifier, NoProfileNotifier)
class LeaveCommand(CommandABC):
'The ``unsubscribe`` command.'
def process(self, email, request):
'Process the email command ``unsubscribe``'
components = self.get_command_components(email)
if components[0] != 'unsubscribe':
m = 'Not a unsubscribe command: {0}'.format(email['Subject'])
raise ValueError(m)
addr = self.get_email_addr(email)
retval = CommandResult.notACommand
if (len(components) == 1):
userInfo = self.get_user(email) # May be None. The auditor will deal.
auditor = LeaveAuditor(self.group, userInfo, self.groupInfo)
if userInfo:
if user_member_of_group(userInfo, self.groupInfo):
auditor.info(LEAVE_COMMAND, addr)
leave_group(self.groupInfo, userInfo, request)
else: # Not a member
auditor.info(LEAVE_COMMAND_NOT_MEMBER, addr)
context = self.group.aq_parent
notifier = NotMemberNotifier(context, request)
notifier.notify(self.groupInfo, userInfo, addr)
else: # No profile
auditor.info(LEAVE_COMMAND_NO_PROFILE, addr)
context = self.group.aq_parent
notifier = NoProfileNotifier(context, request)
notifier.notify(self.groupInfo, addr)
retval = CommandResult.commandStop
return retval
@property
def groupInfo(self):
retval = createObject('groupserver.GroupInfo', self.group)
return retval
@staticmethod
def get_email_addr(emailMessage):
retval = parseaddr(emailMessage['From'])[1]
return retval
def get_user(self, email):
retval = None
sr = self.group.site_root()
addr = self.get_email_addr(email)
user = sr.acl_users.get_userByEmail(addr)
if user:
retval = IGSUserInfo(user)
return retval | 0.521715 | 0.056731 |
import asyncio
from typing import List, TYPE_CHECKING, Any, Dict
from txdbus import client # type: ignore
from txdbus.objects import ( # type: ignore
DBusObject,
DBusProperty,
RemoteDBusObject
)
from txdbus.interface import DBusInterface, Property # type: ignore
from bleak.backends.bluezdbus import defs # type: ignore
from .characteristic import BlueZGattCharacteristic, Flags # type: ignore
if TYPE_CHECKING:
from bless.backends.bluezdbus.dbus.application import ( # type: ignore
BlueZGattApplication,
)
class BlueZGattService(DBusObject):
    """
    org.bluez.GattService1 interface implementation
    """
    # DBus interface exposing the UUID and Primary properties of the service.
    interface_name: str = defs.GATT_SERVICE_INTERFACE
    iface: DBusInterface = DBusInterface(
        interface_name,
        Property("UUID", "s"),
        Property("Primary", "b"),
    )
    dbusInterfaces: List[DBusInterface] = [iface]
    # NOTE(review): these are txdbus descriptor objects at class level; the
    # plain assignments in __init__ below go through txdbus's property
    # machinery — confirm ordering requirements relative to super().__init__.
    uuid: DBusProperty = DBusProperty("UUID")
    primary: DBusProperty = DBusProperty("Primary")
    def __init__(
        self,
        uuid: str,
        primary: bool,
        index: int,
        app: "BlueZGattApplication",  # noqa: F821
    ):
        """
        Initialize the DBusObject
        Parameters
        ----------
        uuid : str
            A string representation of the unique identifier
        primary : bool
            Whether the service is the primary service for the application it
            belongs to
        index : int
            The index of the service amongst the other service of the
            application
        app : BlueZApp
            A BlueZApp object that owns this service
        """
        # Object path is the app's base path plus a zero-padded hex suffix,
        # e.g. ".../service000a" for index 10.
        hex_index: str = hex(index)[2:].rjust(4, "0")
        self.path: str = app.base_path + "/service" + hex_index
        self.bus: client = app.bus
        self.destination: str = app.destination
        self.uuid: str = uuid
        self.primary: bool = primary
        self.loop: asyncio.AbstractEventLoop = app.loop
        self.app: "BlueZGattApplication" = app  # noqa: F821
        self.characteristics: List[BlueZGattCharacteristic] = []
        super(BlueZGattService, self).__init__(self.path)
    async def add_characteristic(
        self, uuid: str, flags: List[Flags], value: Any
    ) -> BlueZGattCharacteristic:
        """
        Adds a BlueZGattCharacteristic to the service.
        Parameters
        ----------
        uuid : str
            The string representation of the UUID for the characteristic
        flags : List[Flags],
            A list of flags to apply to the characteristic
        value : Any
            The characteristic's value
        """
        # Characteristic indexes are 1-based within this service.
        index: int = len(self.characteristics) + 1
        characteristic: BlueZGattCharacteristic = BlueZGattCharacteristic(
            uuid, flags, index, self
        )
        characteristic.value = value
        self.characteristics.append(characteristic)
        # Register the new characteristic on the DBus so BlueZ can see it.
        await self.app._register_object(characteristic)
        return characteristic
    async def get_obj(self) -> Dict:
        """
        Obtain the underlying dictionary within the BlueZ API that describes
        the service
        Returns
        -------
        Dict
            The dictionary that describes the service
        """
        dbus_obj: RemoteDBusObject = await self.app.bus.getRemoteObject(
            self.app.destination, self.path
        ).asFuture(self.app.loop)
        # GetAll on the Properties interface returns every property of the
        # GattService1 interface as a plain dict.
        dict_obj: Dict = await dbus_obj.callRemote(
            "GetAll",
            defs.GATT_SERVICE_INTERFACE,
            interface=defs.PROPERTIES_INTERFACE,
        ).asFuture(self.app.loop)
        return dict_obj
from typing import List, TYPE_CHECKING, Any, Dict
from txdbus import client # type: ignore
from txdbus.objects import ( # type: ignore
DBusObject,
DBusProperty,
RemoteDBusObject
)
from txdbus.interface import DBusInterface, Property # type: ignore
from bleak.backends.bluezdbus import defs # type: ignore
from .characteristic import BlueZGattCharacteristic, Flags # type: ignore
if TYPE_CHECKING:
from bless.backends.bluezdbus.dbus.application import ( # type: ignore
BlueZGattApplication,
)
class BlueZGattService(DBusObject):
"""
org.bluez.GattService1 interface implementation
"""
interface_name: str = defs.GATT_SERVICE_INTERFACE
iface: DBusInterface = DBusInterface(
interface_name,
Property("UUID", "s"),
Property("Primary", "b"),
)
dbusInterfaces: List[DBusInterface] = [iface]
uuid: DBusProperty = DBusProperty("UUID")
primary: DBusProperty = DBusProperty("Primary")
def __init__(
self,
uuid: str,
primary: bool,
index: int,
app: "BlueZGattApplication", # noqa: F821
):
"""
Initialize the DBusObject
Parameters
----------
uuid : str
A string representation of the unique identifier
primary : bool
Whether the service is the primary service for the application it
belongs to
index : int
The index of the service amongst the other service of the
application
app : BlueZApp
A BlueZApp object that owns this service
"""
hex_index: str = hex(index)[2:].rjust(4, "0")
self.path: str = app.base_path + "/service" + hex_index
self.bus: client = app.bus
self.destination: str = app.destination
self.uuid: str = uuid
self.primary: bool = primary
self.loop: asyncio.AbstractEventLoop = app.loop
self.app: "BlueZGattApplication" = app # noqa: F821
self.characteristics: List[BlueZGattCharacteristic] = []
super(BlueZGattService, self).__init__(self.path)
async def add_characteristic(
self, uuid: str, flags: List[Flags], value: Any
) -> BlueZGattCharacteristic:
"""
Adds a BlueZGattCharacteristic to the service.
Parameters
----------
uuid : str
The string representation of the UUID for the characteristic
flags : List[Flags],
A list of flags to apply to the characteristic
value : Any
The characteristic's value
"""
index: int = len(self.characteristics) + 1
characteristic: BlueZGattCharacteristic = BlueZGattCharacteristic(
uuid, flags, index, self
)
characteristic.value = value
self.characteristics.append(characteristic)
await self.app._register_object(characteristic)
return characteristic
async def get_obj(self) -> Dict:
"""
Obtain the underlying dictionary within the BlueZ API that describes
the service
Returns
-------
Dict
The dictionary that describes the service
"""
dbus_obj: RemoteDBusObject = await self.app.bus.getRemoteObject(
self.app.destination, self.path
).asFuture(self.app.loop)
dict_obj: Dict = await dbus_obj.callRemote(
"GetAll",
defs.GATT_SERVICE_INTERFACE,
interface=defs.PROPERTIES_INTERFACE,
).asFuture(self.app.loop)
return dict_obj | 0.817137 | 0.097734 |
import argparse
from pathlib import Path
import re
import subprocess
import sys
CPP_EXTENSIONS = ('.cpp', '.cc', '.cxx', '.hpp', '.hh', '.hxx', '.h')
PY_EXTENSIONS = ('.py',)
def parse_args():
    """Parse command-line options selecting which checkers/formatters run.

    ``--py`` and ``--cpp`` are umbrella switches that enable the default
    tools for their language (yapf + flake8, clang-format respectively).

    Returns:
        argparse.Namespace with attributes ``py``, ``yapf``, ``flake8``,
        ``cpp``, ``clang_format``, ``ref`` and ``prefix`` (an int).
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--py', action='store_true', default=False,
                        help='Enable all Python checks')
    parser.add_argument('--yapf', nargs='?', default=None, const='yapf',
                        help='Reformat Python files')
    parser.add_argument('--flake8', nargs='?', default=None, const='flake8',
                        help='Check Python files with flake8')
    parser.add_argument('--cpp', action='store_true', default=False,
                        help='Enable all C++ checks')
    parser.add_argument('--clang-format', nargs='?', default=None, const='clang-format',
                        help='Reformat C++ code')
    parser.add_argument('--ref', default='main',
                        help='Name / hash of the reference branch / commit')
    # type=int: without it a user-supplied value stayed a string and a
    # non-numeric value only surfaced later as an invalid regex inside
    # parse_diff; argparse now rejects it up front.
    parser.add_argument('--prefix', metavar='NUM', type=int, default=0,
                        help='Strip this number of directories from file paths')
    args = parser.parse_args()
    if not any((args.py, args.yapf, args.flake8, args.cpp)):
        print('WARNING no checkers are enabled.')
    # Expand the umbrella switches into their per-tool defaults without
    # overriding an explicitly chosen tool binary.
    if args.py:
        if not args.yapf:
            args.yapf = 'yapf'
        if not args.flake8:
            args.flake8 = 'flake8'
    if args.cpp:
        if not args.clang_format:
            args.clang_format = 'clang-format'
    return args
def call_pipe(cmd, cwd=None):
    """Run *cmd* (raising on non-zero exit) and return its stripped stdout as text."""
    completed = subprocess.run(cmd, cwd=cwd, check=True, capture_output=True)
    return completed.stdout.decode('utf-8').strip()
def find_repo_root():
    """Return the absolute path of the git working-tree root, or exit(1) with a message."""
    try:
        return call_pipe(['git', 'rev-parse', '--show-toplevel'])
    except subprocess.CalledProcessError:
        # Not inside a git repository (or git itself failed).
        print('Failed to determine git root directory. Is this a git repository?')
        sys.exit(1)
def get_diff(repo_root, ref):
    """Return a zero-context diff of the current branch against its merge-base with *ref*."""
    current_branch = call_pipe(['git', 'branch', '--show-current'], cwd=repo_root)
    base_commit = call_pipe(['git', 'merge-base', ref, current_branch], cwd=repo_root)
    diff_cmd = ['git', 'diff', '-U0', '--no-color', '--relative', base_commit]
    return call_pipe(diff_cmd, cwd=repo_root)
def parse_diff(diff, n_path_strip):
    """Map each file touched in *diff* to the line ranges it adds or changes.

    *n_path_strip* leading path components are removed from the '+++' file
    names (e.g. 1 strips git's 'b/' prefix). Returns
    ``{Path: [slice(start, stop, 1), ...]}`` over new-file line numbers.
    """
    file_pattern = re.compile(rf'^\+\+\+ (.*?/){{{n_path_strip}}}(\S*)')
    hunk_pattern = re.compile(r'^@@.*?\+(\d+)(,(\d+))?')
    changed = dict()
    current = None
    for raw_line in diff.splitlines():
        file_match = file_pattern.match(raw_line)
        if file_match:
            current = Path(file_match[2])
        if current is None:
            # No '+++' header seen yet.
            continue
        hunk = hunk_pattern.match(raw_line)
        if not hunk:
            continue
        first = int(hunk[1])
        # "@@ -a,b +c @@" (no count) means exactly one line; a count of 0
        # means pure deletion, which touches no new-file lines.
        count = int(hunk[3]) if hunk[3] else 1
        if count:
            changed.setdefault(current, []).append(slice(first, first + count, 1))
    return changed
def run_formatter(cmd, modified_lines, extensions, line_separator, cwd):
    """Run formatter *cmd* in-place on each matching file, limited to its changed line ranges."""
    for path, ranges in modified_lines.items():
        if path.suffix not in extensions:
            continue
        # Each formatter takes one --lines=START<sep>STOP argument per range.
        line_args = [f'--lines={r.start}{line_separator}{r.stop}' for r in ranges]
        subprocess.check_call([cmd, str(path), '-i'] + line_args, cwd=cwd)
def run_flake8(cmd, modified_lines, cwd):
    """Run flake8 (*cmd*) once per modified Python file; findings do not abort the script."""
    python_files = (p for p in modified_lines if p.suffix in PY_EXTENSIONS)
    for path in python_files:
        # Deliberately not check=True: flake8 exits non-zero when it reports.
        subprocess.run([cmd, str(path)], cwd=cwd)
def main():
    """Diff against the reference branch and run the enabled tools on the changed lines."""
    args = parse_args()
    repo_root = find_repo_root()
    modified_lines = parse_diff(get_diff(repo_root, args.ref), args.prefix)
    # clang-format and yapf differ only in the extensions they handle and the
    # separator their --lines argument uses (1:5 vs 1-5).
    if args.clang_format:
        run_formatter(args.clang_format, modified_lines, CPP_EXTENSIONS, ':', repo_root)
    if args.yapf:
        run_formatter(args.yapf, modified_lines, PY_EXTENSIONS, '-', repo_root)
    if args.flake8:
        run_flake8(args.flake8, modified_lines, repo_root)


if __name__ == '__main__':
    main()
from pathlib import Path
import re
import subprocess
import sys
CPP_EXTENSIONS = ('.cpp', '.cc', '.cxx', '.hpp', '.hh', '.hxx', '.h')
PY_EXTENSIONS = ('.py',)
def parse_args():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--py', action='store_true', default=False,
help='Enable all Python checks')
parser.add_argument('--yapf', nargs='?', default=None, const='yapf',
help='Reformat Python files')
parser.add_argument('--flake8', nargs='?', default=None, const='flake8',
help='Check Python files with flake8')
parser.add_argument('--cpp', action='store_true', default=False,
help='Enable all C++ checks')
parser.add_argument('--clang-format', nargs='?', default=None, const='clang-format',
help='Reformat C++ code')
parser.add_argument('--ref', default='main',
help='Name / hash of the reference branch / commit')
parser.add_argument('--prefix', metavar='NUM', default=0,
help='Strip this number of directories from file paths')
args = parser.parse_args()
if not any((args.py, args.yapf, args.flake8, args.cpp)):
print('WARNING no checkers are enabled.')
if args.py:
if not args.yapf:
args.yapf = 'yapf'
if not args.flake8:
args.flake8 = 'flake8'
if args.cpp:
if not args.clang_format:
args.clang_format = 'clang-format'
return args
def call_pipe(cmd, cwd=None):
return subprocess.run(cmd, cwd=cwd, check=True, capture_output=True).stdout.decode('utf-8').strip()
def find_repo_root():
try:
return call_pipe(['git', 'rev-parse', '--show-toplevel'])
except subprocess.CalledProcessError:
print('Failed to determine git root directory. Is this a git repository?')
sys.exit(1)
def get_diff(repo_root, ref):
current_branch = call_pipe(['git', 'branch', '--show-current'], cwd=repo_root)
base_commit = call_pipe(['git', 'merge-base', ref, current_branch], cwd=repo_root)
return call_pipe(['git', 'diff', '-U0', '--no-color', '--relative', base_commit], cwd=repo_root)
def parse_diff(diff, n_path_strip):
filename_regex = re.compile(rf'^\+\+\+ (.*?/){{{n_path_strip}}}(\S*)')
lineno_regex = re.compile(r'^@@.*?\+(\d+)(,(\d+))?')
lines = dict()
current_file = None
for line in diff.splitlines():
match = filename_regex.match(line)
if match:
current_file = Path(match[2])
if current_file is None:
continue # did not find a file yet or file name is empty
match = lineno_regex.match(line)
if match:
start_line = int(match[1])
n_lines = int(match[3]) if match[3] else 1
if n_lines == 0:
continue
end_line = start_line + n_lines
lines.setdefault(current_file, []).append(slice(start_line, end_line, 1))
return lines
def run_formatter(cmd, modified_lines, extensions, line_separator, cwd):
for fname, lines in filter(lambda t: t[0].suffix in extensions, modified_lines.items()):
subprocess.check_call([cmd, str(fname), '-i', *[f'--lines={l.start}{line_separator}{l.stop}' for l in lines]], cwd=cwd)
def run_flake8(cmd, modified_lines, cwd):
for fname in filter(lambda fn: fn.suffix in PY_EXTENSIONS, modified_lines):
subprocess.run([cmd, str(fname)], cwd=cwd)
def main():
    """Entry point: format/lint only the lines changed relative to a base ref."""
    args = parse_args()
    repo_root = find_repo_root()
    diff = get_diff(repo_root, args.ref)
    modified_lines = parse_diff(diff, args.prefix)
    if args.clang_format:
        # clang-format separates --lines start/end with ':'
        run_formatter(args.clang_format, modified_lines, CPP_EXTENSIONS, ':', repo_root)
    if args.yapf:
        # yapf separates --lines start/end with '-'
        run_formatter(args.yapf, modified_lines, PY_EXTENSIONS, '-', repo_root)
    if args.flake8:
        run_flake8(args.flake8, modified_lines, repo_root)
if __name__ == '__main__':
main() | 0.294316 | 0.073364 |
import argparse
import logging
import textwrap
import solcx
import sys
from cliff.show import ShowOne
class SolcShow(ShowOne):
    """Show solc compiler information"""

    log = logging.getLogger(__name__)

    def get_parser(self, prog_name):
        """Build the argparse parser for this cliff subcommand."""
        parser = super().get_parser(prog_name)
        # Raw formatter so the example table in the epilog keeps its layout.
        parser.formatter_class = argparse.RawDescriptionHelpFormatter
        parser.add_argument(
            'field',
            metavar='FIELD',
            nargs='?',
            # NOTE(review): default is a *list* although FIELD is a single
            # optional string, and take_action() never reads it — confirm intent.
            default=[],
            help="Solidity compiler metadata field",
        )
        parser.epilog = textwrap.dedent("""\
            Show information about the active ``solc`` compiler.
            ::
                $ ether-py solc show
                +---------------------+------------------------------------+
                | Field               | Value                              |
                +---------------------+------------------------------------+
                | active_version      | 0.8.0                              |
                | active_version_hash | 0.8.0+commit.c7dfd78e              |
                | executable          | /Users/dittrich/.solcx/solc-v0.8.0 |
                | installed_versions  | 0.8.0,0.7.6                        |
                +---------------------+------------------------------------+
            """)  # noqa
        return parser

    def take_action(self, parsed_args):
        """Collect solcx metadata and return it as a cliff (columns, data) pair."""
        self.log.debug('[+] showing solc compiler information')
        try:
            solc_version = str(solcx.get_solc_version(with_commit_hash=False))
            solc_version_with_hash = str(solcx.get_solc_version(with_commit_hash=True))  # noqa
            solc_executable = str(solcx.install.get_executable())
            solc_installed_versions = ",".join(
                [
                    str(v)
                    for v in solcx.get_installed_solc_versions()
                ]
            )
        except Exception as err:
            # Any solcx failure (e.g. no compiler installed) aborts cleanly.
            # NOTE(review): err.args items may not all be str — join could raise.
            sys.exit(" ".join(err.args))
        columns = [
            'active_version',
            'active_version_hash',
            'executable',
            'installed_versions'
        ]
        data = [
            solc_version,
            solc_version_with_hash,
            solc_executable,
            solc_installed_versions,
        ]
        return (columns, data)
# vim: set ts=4 sw=4 tw=0 et : | ether_py/solc/show.py |
import argparse
import logging
import textwrap
import solcx
import sys
from cliff.show import ShowOne
class SolcShow(ShowOne):
    """Show solc compiler information"""

    log = logging.getLogger(__name__)

    def get_parser(self, prog_name):
        """Build the argparse parser for this cliff subcommand."""
        parser = super().get_parser(prog_name)
        # Raw formatter so the example table in the epilog keeps its layout.
        parser.formatter_class = argparse.RawDescriptionHelpFormatter
        parser.add_argument(
            'field',
            metavar='FIELD',
            nargs='?',
            # NOTE(review): default is a *list* although FIELD is a single
            # optional string, and take_action() never reads it — confirm intent.
            default=[],
            help="Solidity compiler metadata field",
        )
        parser.epilog = textwrap.dedent("""\
            Show information about the active ``solc`` compiler.
            ::
                $ ether-py solc show
                +---------------------+------------------------------------+
                | Field               | Value                              |
                +---------------------+------------------------------------+
                | active_version      | 0.8.0                              |
                | active_version_hash | 0.8.0+commit.c7dfd78e              |
                | executable          | /Users/dittrich/.solcx/solc-v0.8.0 |
                | installed_versions  | 0.8.0,0.7.6                        |
                +---------------------+------------------------------------+
            """)  # noqa
        return parser

    def take_action(self, parsed_args):
        """Collect solcx metadata and return it as a cliff (columns, data) pair."""
        self.log.debug('[+] showing solc compiler information')
        try:
            solc_version = str(solcx.get_solc_version(with_commit_hash=False))
            solc_version_with_hash = str(solcx.get_solc_version(with_commit_hash=True))  # noqa
            solc_executable = str(solcx.install.get_executable())
            solc_installed_versions = ",".join(
                [
                    str(v)
                    for v in solcx.get_installed_solc_versions()
                ]
            )
        except Exception as err:
            # Any solcx failure (e.g. no compiler installed) aborts cleanly.
            # NOTE(review): err.args items may not all be str — join could raise.
            sys.exit(" ".join(err.args))
        columns = [
            'active_version',
            'active_version_hash',
            'executable',
            'installed_versions'
        ]
        data = [
            solc_version,
            solc_version_with_hash,
            solc_executable,
            solc_installed_versions,
        ]
        return (columns, data)
# vim: set ts=4 sw=4 tw=0 et : | 0.298185 | 0.097519 |
from typing import Sequence, Any, Union
from pyexlatex.logic.format.sizing import adjust_to_full_size_and_center, adjust_to_size
from pyexlatex.presentation.beamer.frame.frame import Frame
from pyexlatex.figure.models.graphic import Graphic
from pyexlatex.models.format.fills import VFill, HFill
from pyexlatex.models.format.centering import Center
class FullWidthFrame(Frame):
    """
    Resizes passed latex object to take up entire frame.
    """

    def __init__(self, content: Sequence[Sequence[Any]], **kwargs):
        # Scale the content to the frame's full size and centre it before
        # handing off to the base Frame.
        content = adjust_to_full_size_and_center(content)
        super().__init__(content, **kwargs)
class GraphicFrame(FullWidthFrame):
    """
    Resizes passed graphic to take up entire frame. Can pass a file path or a latex object.
    """

    def __init__(self, content: Any, **kwargs):
        # A plain string is interpreted as a path to an image file.
        if isinstance(content, str):
            content = Graphic(content)
        super().__init__(content, **kwargs)
class MultiGraphicFrame(Frame):
    """
    Resizes each graphic to full width and puts vertical space in between graphics.
    Can pass a file path or a latex object.
    """
    # NOTE(review): "HORIZTONAL" is misspelled but is a public class attribute;
    # renaming would break subclass overrides, so it is kept.
    HORIZTONAL_SPACING = 0.05
    VERTICAL_SPACING = 0.05
    MAX_WIDTH = 0.9    # fraction of frame width available to graphics
    MAX_HEIGHT = 0.8   # fraction of frame height available to graphics

    def __init__(self, content: Sequence[Sequence[Any]], vertical: bool = True, **kwargs):
        self.vertical = vertical
        self.num_contents = len(content)
        all_content = []
        for cont in content:
            if isinstance(cont, str):
                # A plain string is interpreted as a path to an image file.
                cont = Graphic(cont)
            cont = adjust_to_size(cont, width=self._graphic_width, height=self._graphic_height)
            if self.vertical:
                cont = Center(cont)
            all_content.extend([cont, self._spacer_obj])
        all_content = all_content[:-1]  # strip final spacer
        super().__init__(all_content, **kwargs)

    @property
    def _graphic_width(self) -> float:
        # In vertical layout every graphic gets the full width; otherwise the
        # width (minus inter-graphic spacers) is shared equally.
        if self.vertical:
            return self.MAX_WIDTH
        num_spacers = self.num_contents - 1
        spacer_space = self.HORIZTONAL_SPACING * num_spacers
        available_space = self.MAX_WIDTH - spacer_space
        space_per_graphic = available_space / self.num_contents
        return space_per_graphic

    @property
    def _graphic_height(self) -> float:
        # Mirror image of _graphic_width for the vertical direction.
        if not self.vertical:
            return self.MAX_HEIGHT
        num_spacers = self.num_contents - 1
        spacer_space = self.VERTICAL_SPACING * num_spacers
        available_space = self.MAX_HEIGHT - spacer_space
        space_per_graphic = available_space / self.num_contents
        return space_per_graphic

    @property
    def _spacer_obj(self) -> Union[VFill, HFill]:
        # Spacer matching the layout direction.
        if self.vertical:
            return VFill()
return HFill() | pyexlatex/presentation/beamer/templates/frames/graphic.py | from typing import Sequence, Any, Union
from pyexlatex.logic.format.sizing import adjust_to_full_size_and_center, adjust_to_size
from pyexlatex.presentation.beamer.frame.frame import Frame
from pyexlatex.figure.models.graphic import Graphic
from pyexlatex.models.format.fills import VFill, HFill
from pyexlatex.models.format.centering import Center
class FullWidthFrame(Frame):
    """
    Resizes passed latex object to take up entire frame.
    """

    def __init__(self, content: Sequence[Sequence[Any]], **kwargs):
        # Scale the content to the frame's full size and centre it before
        # handing off to the base Frame.
        content = adjust_to_full_size_and_center(content)
        super().__init__(content, **kwargs)
class GraphicFrame(FullWidthFrame):
    """
    Resizes passed graphic to take up entire frame. Can pass a file path or a latex object.
    """

    def __init__(self, content: Any, **kwargs):
        # A plain string is interpreted as a path to an image file.
        if isinstance(content, str):
            content = Graphic(content)
        super().__init__(content, **kwargs)
class MultiGraphicFrame(Frame):
    """
    Resizes each graphic to full width and puts vertical space in between graphics.
    Can pass a file path or a latex object.
    """
    # NOTE(review): "HORIZTONAL" is misspelled but is a public class attribute;
    # renaming would break subclass overrides, so it is kept.
    HORIZTONAL_SPACING = 0.05
    VERTICAL_SPACING = 0.05
    MAX_WIDTH = 0.9    # fraction of frame width available to graphics
    MAX_HEIGHT = 0.8   # fraction of frame height available to graphics

    def __init__(self, content: Sequence[Sequence[Any]], vertical: bool = True, **kwargs):
        self.vertical = vertical
        self.num_contents = len(content)
        all_content = []
        for cont in content:
            if isinstance(cont, str):
                # A plain string is interpreted as a path to an image file.
                cont = Graphic(cont)
            cont = adjust_to_size(cont, width=self._graphic_width, height=self._graphic_height)
            if self.vertical:
                cont = Center(cont)
            all_content.extend([cont, self._spacer_obj])
        all_content = all_content[:-1]  # strip final spacer
        super().__init__(all_content, **kwargs)

    @property
    def _graphic_width(self) -> float:
        # In vertical layout every graphic gets the full width; otherwise the
        # width (minus inter-graphic spacers) is shared equally.
        if self.vertical:
            return self.MAX_WIDTH
        num_spacers = self.num_contents - 1
        spacer_space = self.HORIZTONAL_SPACING * num_spacers
        available_space = self.MAX_WIDTH - spacer_space
        space_per_graphic = available_space / self.num_contents
        return space_per_graphic

    @property
    def _graphic_height(self) -> float:
        # Mirror image of _graphic_width for the vertical direction.
        if not self.vertical:
            return self.MAX_HEIGHT
        num_spacers = self.num_contents - 1
        spacer_space = self.VERTICAL_SPACING * num_spacers
        available_space = self.MAX_HEIGHT - spacer_space
        space_per_graphic = available_space / self.num_contents
        return space_per_graphic

    @property
    def _spacer_obj(self) -> Union[VFill, HFill]:
        # Spacer matching the layout direction.
        if self.vertical:
            return VFill()
        return HFill()
from __future__ import annotations
from dataclasses import dataclass, field
from typing import List
from reamber.osu.OsuSample import OsuSample
from reamber.osu.OsuSampleSet import OsuSampleSet
from reamber.osu.lists.OsuSampleList import OsuSampleList
class OsuMapMode:
    """ This determines the mode of the map.
    Note that only MANIA is supported for now.
    """
    # Integer codes as stored in the .osu "Mode:" field.
    STANDARD: int = 0
    TAIKO: int = 1
    CATCH: int = 2
    MANIA: int = 3
@dataclass
class OsuMapMetaGeneral:
    """ All meta under [General] """
    audioFileName: str = ""
    audioLeadIn: int = 0       # ms of silence before the audio starts
    previewTime: int = -1      # ms; -1 appears to mean "unset" — TODO confirm
    countdown: bool = False
    sampleSet: int = OsuSampleSet.AUTO
    stackLeniency: float = 0.7
    mode: int = OsuMapMode.MANIA  # see OsuMapMode; only MANIA is supported
    letterboxInBreaks: bool = False
    specialStyle: bool = False
    widescreenStoryboard: bool = True
@dataclass
class OsuMapMetaEditor:
    """ All meta under [Editor] """
    # Editor view settings only; they do not affect gameplay.
    distanceSpacing: float = 4
    beatDivisor: int = 4
    gridSize: int = 8
    timelineZoom: float = 0.3
@dataclass
class OsuMapMetaMetadata:
    """ All meta under [Metadata] """
    title: str = ""
    titleUnicode: str = ""
    artist: str = ""
    artistUnicode: str = ""
    creator: str = ""
    version: str = ""
    source: str = ""
    # Fixed: the default used to be the *string* "" rather than a list.
    # A per-instance empty list is the correct type (mutable defaults must
    # use default_factory); ', '.join still yields "" for the empty case.
    tags: List[str] = field(default_factory=list)
    beatmapID: int = 0
    beatmapSetID: int = -1
@dataclass
class OsuMapMetaDifficulty:
    """ All meta under [Difficulty] """
    hpDrainRate: float = 5.0
    # presumably circleSize doubles as the key count in mania — TODO confirm
    circleSize: float = 4.0
    overallDifficulty: float = 5.0
    approachRate: float = 5.0
    sliderMultiplier: float = 1.4
    sliderTickRate: int = 1
@dataclass
class OsuMapMetaEvents:
    """ All meta under [Events], Excludes Storyboard. """
    backgroundFileName: str = ""
    # default_factory gives every instance its own mutable sample list
    samples: OsuSampleList = field(default_factory=lambda: OsuSampleList())
@dataclass
class OsuMapMeta(OsuMapMetaGeneral,
                 OsuMapMetaEditor,
                 OsuMapMetaMetadata,
                 OsuMapMetaDifficulty,
                 OsuMapMetaEvents):
    """ The umbrella class that holds everything not included in HitObjects and TimingPoints """

    def readStringList(self, lines: List[str]):
        """ Reads everything Meta

        Parses the key/value and event lines of a .osu file into the
        attributes declared on the meta dataclasses above.
        """
        for index, line in enumerate(lines):
            if line == "":
                continue
            # Fixed: split on the first ':' only, so values that themselves
            # contain colons (song titles, Windows paths, ...) are not truncated.
            s = line.split(":", 1)
            if s[0] == "AudioFilename": self.audioFileName = s[1].strip()
            elif s[0] == "AudioLeadIn": self.audioLeadIn = int(s[1])
            elif s[0] == "PreviewTime": self.previewTime = int(s[1])
            # Fixed: the file stores 0/1 and bool(" 0") was always True;
            # go through int() so "0" really parses as False.
            elif s[0] == "Countdown": self.countdown = bool(int(s[1]))
            elif s[0] == "SampleSet": self.sampleSet = OsuSampleSet.fromString(s[1].strip())
            elif s[0] == "StackLeniency": self.stackLeniency = float(s[1])
            elif s[0] == "Mode": self.mode = int(s[1])
            elif s[0] == "LetterboxInBreaks": self.letterboxInBreaks = bool(int(s[1]))
            elif s[0] == "SpecialStyle": self.specialStyle = bool(int(s[1]))
            elif s[0] == "WidescreenStoryboard": self.widescreenStoryboard = bool(int(s[1]))
            elif s[0] == "DistanceSpacing": self.distanceSpacing = float(s[1])
            elif s[0] == "BeatDivisor": self.beatDivisor = int(s[1])
            elif s[0] == "GridSize": self.gridSize = int(s[1])
            elif s[0] == "TimelineZoom": self.timelineZoom = float(s[1])
            elif s[0] == "Title": self.title = s[1].strip()
            elif s[0] == "TitleUnicode": self.titleUnicode = s[1].strip()
            elif s[0] == "Artist": self.artist = s[1].strip()
            elif s[0] == "ArtistUnicode": self.artistUnicode = s[1].strip()
            elif s[0] == "Creator": self.creator = s[1].strip()
            elif s[0] == "Version": self.version = s[1].strip()
            elif s[0] == "Source": self.source = s[1].strip()
            elif s[0] == "Tags": self.tags = [i.strip() for i in s[1].split(",")]
            elif s[0] == "BeatmapID": self.beatmapID = int(s[1])
            elif s[0] == "BeatmapSetID": self.beatmapSetID = int(s[1])
            elif s[0] == "HPDrainRate": self.hpDrainRate = float(s[1])
            elif s[0] == "CircleSize": self.circleSize = float(s[1])
            elif s[0] == "OverallDifficulty": self.overallDifficulty = float(s[1])
            elif s[0] == "ApproachRate": self.approachRate = float(s[1])
            elif s[0] == "SliderMultiplier": self.sliderMultiplier = float(s[1])
            elif s[0] == "SliderTickRate": self.sliderTickRate = int(s[1])
            # Event section markers carry no ':' so the whole line is s[0].
            if s[0] == "//Background and Video events":
                # The background file name is the quoted field on the next line.
                line = lines[index + 1]
                self.backgroundFileName = line[line.find('"') + 1:line.rfind('"')]
            if s[0] == "//Storyboard Sound Samples":
                for sampLine in lines[index + 1:]:
                    if not sampLine.startswith('Sample'): break
                    self.samples.append(OsuSample.readString(sampLine))
                break

    def writeStringList(self) -> List[str]:
        """ Writes everything Meta """
        return [
            "osu file format v14",
            "",
            "[General]",
            f"AudioFilename: {self.audioFileName}",
            f"AudioLeadIn: {self.audioLeadIn}",
            f"PreviewTime: {int(self.previewTime)}",
            f"Countdown: {int(self.countdown)}",
            f"SampleSet: {self.sampleSet}",
            f"StackLeniency: {self.stackLeniency}",
            f"Mode: {self.mode}",
            f"LetterboxInBreaks: {int(self.letterboxInBreaks)}",
            f"SpecialStyle: {int(self.specialStyle)}",
            f"WidescreenStoryboard: {int(self.widescreenStoryboard)}",
            "",
            "[Editor]",
            f"DistanceSpacing: {self.distanceSpacing}",
            f"BeatDivisor: {self.beatDivisor}",
            f"GridSize: {self.gridSize}",
            f"TimelineZoom: {self.timelineZoom}",
            "",
            "[Metadata]",
            f"Title:{self.title}",
            f"TitleUnicode:{self.titleUnicode}",
            f"Artist:{self.artist}",
            f"ArtistUnicode:{self.artistUnicode}",
            f"Creator:{self.creator}",
            f"Version:{self.version}",
            f"Source:{self.source}",
            f"Tags:{', '.join(self.tags)}",
            f"BeatmapID:{self.beatmapID}",
            f"BeatmapSetID:{self.beatmapSetID}",
            "",
            "[Difficulty]",
            f"HPDrainRate:{self.hpDrainRate}",
            f"CircleSize:{self.circleSize}",
            f"OverallDifficulty:{self.overallDifficulty}",
            f"ApproachRate:{self.approachRate}",
            f"SliderMultiplier:{self.sliderMultiplier}",
            f"SliderTickRate:{self.sliderTickRate}",
            "",
            "[Events]",
            "//Background and Video events",
            f"0,0,\"{self.backgroundFileName}\",0,0",
            "//Break Periods",
            "//Storyboard Layer 0 (Background)",
            "//Storyboard Layer 1 (Fail)",
            "//Storyboard Layer 2 (Pass)",
            "//Storyboard Layer 3 (Foreground)",
            "//Storyboard Layer 4 (Overlay)",
            "//Storyboard Sound Samples",
            *[sample.writeString() for sample in self.samples]  # Unpacks all samples
        ]
from dataclasses import dataclass, field
from typing import List
from reamber.osu.OsuSample import OsuSample
from reamber.osu.OsuSampleSet import OsuSampleSet
from reamber.osu.lists.OsuSampleList import OsuSampleList
class OsuMapMode:
    """ This determines the mode of the map.
    Note that only MANIA is supported for now.
    """
    # Integer codes as stored in the .osu "Mode:" field.
    STANDARD: int = 0
    TAIKO: int = 1
    CATCH: int = 2
    MANIA: int = 3
@dataclass
class OsuMapMetaGeneral:
    """ All meta under [General] """
    audioFileName: str = ""
    audioLeadIn: int = 0       # ms of silence before the audio starts
    previewTime: int = -1      # ms; -1 appears to mean "unset" — TODO confirm
    countdown: bool = False
    sampleSet: int = OsuSampleSet.AUTO
    stackLeniency: float = 0.7
    mode: int = OsuMapMode.MANIA  # see OsuMapMode; only MANIA is supported
    letterboxInBreaks: bool = False
    specialStyle: bool = False
    widescreenStoryboard: bool = True
@dataclass
class OsuMapMetaEditor:
    """ All meta under [Editor] """
    # Editor view settings only; they do not affect gameplay.
    distanceSpacing: float = 4
    beatDivisor: int = 4
    gridSize: int = 8
    timelineZoom: float = 0.3
@dataclass
class OsuMapMetaMetadata:
    """ All meta under [Metadata] """
    title: str = ""
    titleUnicode: str = ""
    artist: str = ""
    artistUnicode: str = ""
    creator: str = ""
    version: str = ""
    source: str = ""
    # Fixed: the default used to be the *string* "" rather than a list.
    # A per-instance empty list is the correct type (mutable defaults must
    # use default_factory); ', '.join still yields "" for the empty case.
    tags: List[str] = field(default_factory=list)
    beatmapID: int = 0
    beatmapSetID: int = -1
@dataclass
class OsuMapMetaDifficulty:
    """ All meta under [Difficulty] """
    hpDrainRate: float = 5.0
    # presumably circleSize doubles as the key count in mania — TODO confirm
    circleSize: float = 4.0
    overallDifficulty: float = 5.0
    approachRate: float = 5.0
    sliderMultiplier: float = 1.4
    sliderTickRate: int = 1
@dataclass
class OsuMapMetaEvents:
    """ All meta under [Events], Excludes Storyboard. """
    backgroundFileName: str = ""
    # default_factory gives every instance its own mutable sample list
    samples: OsuSampleList = field(default_factory=lambda: OsuSampleList())
@dataclass
class OsuMapMeta(OsuMapMetaGeneral,
                 OsuMapMetaEditor,
                 OsuMapMetaMetadata,
                 OsuMapMetaDifficulty,
                 OsuMapMetaEvents):
    """ The umbrella class that holds everything not included in HitObjects and TimingPoints """

    def readStringList(self, lines: List[str]):
        """ Reads everything Meta

        Parses the key/value and event lines of a .osu file into the
        attributes declared on the meta dataclasses above.
        """
        for index, line in enumerate(lines):
            if line == "":
                continue
            # Fixed: split on the first ':' only, so values that themselves
            # contain colons (song titles, Windows paths, ...) are not truncated.
            s = line.split(":", 1)
            if s[0] == "AudioFilename": self.audioFileName = s[1].strip()
            elif s[0] == "AudioLeadIn": self.audioLeadIn = int(s[1])
            elif s[0] == "PreviewTime": self.previewTime = int(s[1])
            # Fixed: the file stores 0/1 and bool(" 0") was always True;
            # go through int() so "0" really parses as False.
            elif s[0] == "Countdown": self.countdown = bool(int(s[1]))
            elif s[0] == "SampleSet": self.sampleSet = OsuSampleSet.fromString(s[1].strip())
            elif s[0] == "StackLeniency": self.stackLeniency = float(s[1])
            elif s[0] == "Mode": self.mode = int(s[1])
            elif s[0] == "LetterboxInBreaks": self.letterboxInBreaks = bool(int(s[1]))
            elif s[0] == "SpecialStyle": self.specialStyle = bool(int(s[1]))
            elif s[0] == "WidescreenStoryboard": self.widescreenStoryboard = bool(int(s[1]))
            elif s[0] == "DistanceSpacing": self.distanceSpacing = float(s[1])
            elif s[0] == "BeatDivisor": self.beatDivisor = int(s[1])
            elif s[0] == "GridSize": self.gridSize = int(s[1])
            elif s[0] == "TimelineZoom": self.timelineZoom = float(s[1])
            elif s[0] == "Title": self.title = s[1].strip()
            elif s[0] == "TitleUnicode": self.titleUnicode = s[1].strip()
            elif s[0] == "Artist": self.artist = s[1].strip()
            elif s[0] == "ArtistUnicode": self.artistUnicode = s[1].strip()
            elif s[0] == "Creator": self.creator = s[1].strip()
            elif s[0] == "Version": self.version = s[1].strip()
            elif s[0] == "Source": self.source = s[1].strip()
            elif s[0] == "Tags": self.tags = [i.strip() for i in s[1].split(",")]
            elif s[0] == "BeatmapID": self.beatmapID = int(s[1])
            elif s[0] == "BeatmapSetID": self.beatmapSetID = int(s[1])
            elif s[0] == "HPDrainRate": self.hpDrainRate = float(s[1])
            elif s[0] == "CircleSize": self.circleSize = float(s[1])
            elif s[0] == "OverallDifficulty": self.overallDifficulty = float(s[1])
            elif s[0] == "ApproachRate": self.approachRate = float(s[1])
            elif s[0] == "SliderMultiplier": self.sliderMultiplier = float(s[1])
            elif s[0] == "SliderTickRate": self.sliderTickRate = int(s[1])
            # Event section markers carry no ':' so the whole line is s[0].
            if s[0] == "//Background and Video events":
                # The background file name is the quoted field on the next line.
                line = lines[index + 1]
                self.backgroundFileName = line[line.find('"') + 1:line.rfind('"')]
            if s[0] == "//Storyboard Sound Samples":
                for sampLine in lines[index + 1:]:
                    if not sampLine.startswith('Sample'): break
                    self.samples.append(OsuSample.readString(sampLine))
                break

    def writeStringList(self) -> List[str]:
        """ Writes everything Meta """
        return [
            "osu file format v14",
            "",
            "[General]",
            f"AudioFilename: {self.audioFileName}",
            f"AudioLeadIn: {self.audioLeadIn}",
            f"PreviewTime: {int(self.previewTime)}",
            f"Countdown: {int(self.countdown)}",
            f"SampleSet: {self.sampleSet}",
            f"StackLeniency: {self.stackLeniency}",
            f"Mode: {self.mode}",
            f"LetterboxInBreaks: {int(self.letterboxInBreaks)}",
            f"SpecialStyle: {int(self.specialStyle)}",
            f"WidescreenStoryboard: {int(self.widescreenStoryboard)}",
            "",
            "[Editor]",
            f"DistanceSpacing: {self.distanceSpacing}",
            f"BeatDivisor: {self.beatDivisor}",
            f"GridSize: {self.gridSize}",
            f"TimelineZoom: {self.timelineZoom}",
            "",
            "[Metadata]",
            f"Title:{self.title}",
            f"TitleUnicode:{self.titleUnicode}",
            f"Artist:{self.artist}",
            f"ArtistUnicode:{self.artistUnicode}",
            f"Creator:{self.creator}",
            f"Version:{self.version}",
            f"Source:{self.source}",
            f"Tags:{', '.join(self.tags)}",
            f"BeatmapID:{self.beatmapID}",
            f"BeatmapSetID:{self.beatmapSetID}",
            "",
            "[Difficulty]",
            f"HPDrainRate:{self.hpDrainRate}",
            f"CircleSize:{self.circleSize}",
            f"OverallDifficulty:{self.overallDifficulty}",
            f"ApproachRate:{self.approachRate}",
            f"SliderMultiplier:{self.sliderMultiplier}",
            f"SliderTickRate:{self.sliderTickRate}",
            "",
            "[Events]",
            "//Background and Video events",
            f"0,0,\"{self.backgroundFileName}\",0,0",
            "//Break Periods",
            "//Storyboard Layer 0 (Background)",
            "//Storyboard Layer 1 (Fail)",
            "//Storyboard Layer 2 (Pass)",
            "//Storyboard Layer 3 (Foreground)",
            "//Storyboard Layer 4 (Overlay)",
            "//Storyboard Sound Samples",
            *[sample.writeString() for sample in self.samples]  # Unpacks all samples
        ]
import dolfinx.fem as _fem
import numpy as np
import pytest
from dolfinx.graph import create_adjacencylist
from dolfinx.io import XDMFFile
from dolfinx.mesh import meshtags, locate_entities_boundary
from mpi4py import MPI
import dolfinx_contact
import dolfinx_contact.cpp
from dolfinx_contact.meshing import convert_mesh, create_box_mesh_2D, create_box_mesh_3D
@pytest.mark.parametrize("q_deg", range(1, 4))
@pytest.mark.parametrize("surf", [0, 1])
@pytest.mark.parametrize("dim", [2, 3])
def test_projection(q_deg, surf, dim):
    """Check the closest-point projection of the contact pair: for every
    quadrature point the packed gap vector must be (nearly) anti-parallel
    to the packed surface normal, for either surface acting as origin.
    """
    # Create mesh (gmsh .msh written, then converted to XDMF for dolfinx)
    if dim == 2:
        fname = "box_2D"
        create_box_mesh_2D(filename=f"{fname}.msh", res=1.0)
        convert_mesh(fname, fname, "triangle", prune_z=True)
        convert_mesh(f"{fname}", f"{fname}_facets", "line", prune_z=True)
    else:
        fname = "box_3D"
        create_box_mesh_3D(filename=f"{fname}.msh", res=1.0)
        convert_mesh(fname, fname, "tetra")
        convert_mesh(f"{fname}", f"{fname}_facets", "triangle")
    # Read in mesh
    with XDMFFile(MPI.COMM_WORLD, f"{fname}.xdmf", "r") as xdmf:
        mesh = xdmf.read_mesh(name="Grid")
        tdim = mesh.topology.dim
        gdim = mesh.geometry.dim
        mesh.topology.create_connectivity(tdim - 1, 0)
        mesh.topology.create_connectivity(tdim - 1, tdim)
    # Surface paramters see contact_meshes.py
    L = 0.5
    delta = 0.1
    disp = -0.6
    H = 0.5

    # Define surfaces
    def surface_0(x):
        # 2D: slanted bottom edge; 3D: z == 0 plane
        if dim == 2:
            return np.logical_and(np.isclose(x[1], delta * (x[0] + delta) / L), x[1] < delta + 1e-5)
        else:
            return np.isclose(x[2], 0)

    def surface_1(x):
        # Top surface of the displaced upper box
        return(np.isclose(x[dim - 1], disp + H))

    # define restriced range for x coordinate to ensure closest point is on interior of opposite surface
    def x_range(x):
        return(np.logical_and(x[0] > delta, x[0] < L - delta))
    surface_0_val = 1
    surface_1_val = 2
    # Create meshtags for surfaces
    # restrict range of x coordinate for origin surface
    if surf == 0:
        facets_0 = locate_entities_boundary(mesh, tdim - 1, lambda x: np.logical_and(surface_0(x), x_range(x)))
        facets_1 = locate_entities_boundary(mesh, tdim - 1, surface_1)
    else:
        facets_0 = locate_entities_boundary(mesh, tdim - 1, surface_0)
        facets_1 = locate_entities_boundary(mesh, tdim - 1, lambda x: np.logical_and(surface_1(x), x_range(x)))
    values_0 = np.full(len(facets_0), surface_0_val, dtype=np.int32)
    values_1 = np.full(len(facets_1), surface_1_val, dtype=np.int32)
    indices = np.concatenate([facets_0, facets_1])
    values = np.hstack([values_0, values_1])
    # meshtags requires sorted entity indices
    sorted_ind = np.argsort(indices)
    facet_marker = meshtags(mesh, tdim - 1, indices[sorted_ind], values[sorted_ind])
    # Functions space
    V = _fem.VectorFunctionSpace(mesh, ("CG", 1))
    # Create contact class, gap function and normals
    data = np.array([surface_0_val, surface_1_val], dtype=np.int32)
    offsets = np.array([0, 2], dtype=np.int32)
    surfaces = create_adjacencylist(data, offsets)
    contact = dolfinx_contact.cpp.Contact([facet_marker], surfaces, [(0, 1), (1, 0)],
                                          V._cpp_object, quadrature_degree=q_deg)
    contact.create_distance_map(surf)
    # gap/normals are packed per facet as gdim-vectors per quadrature point
    # — assumed layout; confirm against Contact.pack_gap docs
    gap = contact.pack_gap(surf)
    normals = contact.pack_ny(surf, gap)
    # Compute dot product and normalise
    n_dot = np.zeros((gap.shape[0], gap.shape[1] // gdim))
    for facet in range(gap.shape[0]):
        for q in range(gap.shape[1] // gdim):
            g = gap[facet, q * gdim:(q + 1) * gdim]
            n = -normals[facet, q * gdim:(q + 1) * gdim]
            n_norm = np.linalg.norm(n)
            g_norm = np.linalg.norm(g)
            for i in range(gdim):
                n_dot[facet, q] += g[i] * n[i] / (n_norm * g_norm)
    # Test if angle between -normal and gap function is less than 6.5 degrees
    # Is better accuracy needed?
    assert(np.allclose(n_dot, np.ones(n_dot.shape)))
import dolfinx.fem as _fem
import numpy as np
import pytest
from dolfinx.graph import create_adjacencylist
from dolfinx.io import XDMFFile
from dolfinx.mesh import meshtags, locate_entities_boundary
from mpi4py import MPI
import dolfinx_contact
import dolfinx_contact.cpp
from dolfinx_contact.meshing import convert_mesh, create_box_mesh_2D, create_box_mesh_3D
@pytest.mark.parametrize("q_deg", range(1, 4))
@pytest.mark.parametrize("surf", [0, 1])
@pytest.mark.parametrize("dim", [2, 3])
def test_projection(q_deg, surf, dim):
    """Check the closest-point projection of the contact pair: for every
    quadrature point the packed gap vector must be (nearly) anti-parallel
    to the packed surface normal, for either surface acting as origin.
    """
    # Create mesh (gmsh .msh written, then converted to XDMF for dolfinx)
    if dim == 2:
        fname = "box_2D"
        create_box_mesh_2D(filename=f"{fname}.msh", res=1.0)
        convert_mesh(fname, fname, "triangle", prune_z=True)
        convert_mesh(f"{fname}", f"{fname}_facets", "line", prune_z=True)
    else:
        fname = "box_3D"
        create_box_mesh_3D(filename=f"{fname}.msh", res=1.0)
        convert_mesh(fname, fname, "tetra")
        convert_mesh(f"{fname}", f"{fname}_facets", "triangle")
    # Read in mesh
    with XDMFFile(MPI.COMM_WORLD, f"{fname}.xdmf", "r") as xdmf:
        mesh = xdmf.read_mesh(name="Grid")
        tdim = mesh.topology.dim
        gdim = mesh.geometry.dim
        mesh.topology.create_connectivity(tdim - 1, 0)
        mesh.topology.create_connectivity(tdim - 1, tdim)
    # Surface paramters see contact_meshes.py
    L = 0.5
    delta = 0.1
    disp = -0.6
    H = 0.5

    # Define surfaces
    def surface_0(x):
        # 2D: slanted bottom edge; 3D: z == 0 plane
        if dim == 2:
            return np.logical_and(np.isclose(x[1], delta * (x[0] + delta) / L), x[1] < delta + 1e-5)
        else:
            return np.isclose(x[2], 0)

    def surface_1(x):
        # Top surface of the displaced upper box
        return(np.isclose(x[dim - 1], disp + H))

    # define restriced range for x coordinate to ensure closest point is on interior of opposite surface
    def x_range(x):
        return(np.logical_and(x[0] > delta, x[0] < L - delta))
    surface_0_val = 1
    surface_1_val = 2
    # Create meshtags for surfaces
    # restrict range of x coordinate for origin surface
    if surf == 0:
        facets_0 = locate_entities_boundary(mesh, tdim - 1, lambda x: np.logical_and(surface_0(x), x_range(x)))
        facets_1 = locate_entities_boundary(mesh, tdim - 1, surface_1)
    else:
        facets_0 = locate_entities_boundary(mesh, tdim - 1, surface_0)
        facets_1 = locate_entities_boundary(mesh, tdim - 1, lambda x: np.logical_and(surface_1(x), x_range(x)))
    values_0 = np.full(len(facets_0), surface_0_val, dtype=np.int32)
    values_1 = np.full(len(facets_1), surface_1_val, dtype=np.int32)
    indices = np.concatenate([facets_0, facets_1])
    values = np.hstack([values_0, values_1])
    # meshtags requires sorted entity indices
    sorted_ind = np.argsort(indices)
    facet_marker = meshtags(mesh, tdim - 1, indices[sorted_ind], values[sorted_ind])
    # Functions space
    V = _fem.VectorFunctionSpace(mesh, ("CG", 1))
    # Create contact class, gap function and normals
    data = np.array([surface_0_val, surface_1_val], dtype=np.int32)
    offsets = np.array([0, 2], dtype=np.int32)
    surfaces = create_adjacencylist(data, offsets)
    contact = dolfinx_contact.cpp.Contact([facet_marker], surfaces, [(0, 1), (1, 0)],
                                          V._cpp_object, quadrature_degree=q_deg)
    contact.create_distance_map(surf)
    # gap/normals are packed per facet as gdim-vectors per quadrature point
    # — assumed layout; confirm against Contact.pack_gap docs
    gap = contact.pack_gap(surf)
    normals = contact.pack_ny(surf, gap)
    # Compute dot product and normalise
    n_dot = np.zeros((gap.shape[0], gap.shape[1] // gdim))
    for facet in range(gap.shape[0]):
        for q in range(gap.shape[1] // gdim):
            g = gap[facet, q * gdim:(q + 1) * gdim]
            n = -normals[facet, q * gdim:(q + 1) * gdim]
            n_norm = np.linalg.norm(n)
            g_norm = np.linalg.norm(g)
            for i in range(gdim):
                n_dot[facet, q] += g[i] * n[i] / (n_norm * g_norm)
    # Test if angle between -normal and gap function is less than 6.5 degrees
    # Is better accuracy needed?
    assert(np.allclose(n_dot, np.ones(n_dot.shape)))
import scipy.optimize as spo
import pandas as pd
import numpy as np
import yfinance as yf
import matplotlib.pyplot as plt
from datetime import datetime as dt
plt.style.use("ggplot")
# selected equities and time frame
stocks = ["AAPL", "GOOG", "TSLA", "BABA", "ETH-USD"]
start = dt(2017, 12, 31)
end = dt(2021, 1, 1)
# fetch stock prices (adjusted close), dropping days with any missing data
stock_prices = yf.download(stocks, start, end)["Adj Close"].dropna(axis=0)
stock_returns = stock_prices.pct_change().dropna(axis=0)
# fetch risk free rate (US 10-year T-bills); ^TNX quotes the yield in percent
risk_free = yf.download("^TNX", start, end)["Adj Close"].mean() / 100
# generate random weights for each equity
# np.random.seed(1000)
portfolio_returns = []
portfolio_volatilities = []
for _ in range(2500):
    weights = np.random.random(len(stocks))
    weights /= sum(weights)  # normalise so weights sum to 1
    # construct the portfolio (annualised with 252 trading days)
    rt = np.sum(stock_returns.mean() * weights) * 252
    var = np.dot(np.dot(weights.T, stock_returns.cov() * 252), weights)
    std = np.sqrt(var)
    portfolio_returns.append(rt)
    portfolio_volatilities.append(std)
portfolio_returns = np.array(portfolio_returns)
portfolio_volatilities = np.array(portfolio_volatilities)
# visualise all possible portfolio combinations, coloured by Sharpe ratio
plt.figure(figsize=(12, 9))
plt.scatter(
    portfolio_volatilities, portfolio_returns, c=(portfolio_returns - risk_free) / portfolio_volatilities, marker="o"
)
plt.xlabel("Risk")
plt.ylabel("Return")
plt.title("Market Portfolio")
plt.colorbar(label="Sharpe Ratio")
plt.show()
# optimisation
def portfolio_stats(weights, rf):
    """Returns an array of portfolio statistics, including
    portfolio return, volatility and sharpe ratio."""
    w = np.array(weights)
    annual_cov = stock_returns.cov() * 252
    p_rt = np.sum(stock_returns.mean() * w) * 252
    p_std = np.sqrt(np.dot(w.T, np.dot(annual_cov, w)))
    sharpe = (p_rt - rf) / p_std
    return np.array([p_rt, p_std, sharpe])
def get_sharpe(weights):
    """Returns the negative sharpe ratio."""
    _, _, sharpe = portfolio_stats(weights, risk_free)
    return -sharpe
def get_variance(weights):
    """Returns the portfolio variance."""
    volatility = portfolio_stats(weights, risk_free)[1]
    return volatility * volatility
# portfolio with the highest sharpe ratio
cons = {"type": "eq", "fun": lambda x: np.sum(x) - 1}  # constraints for weights
bnds = [(0, 1) for _ in range(len(stocks))]  # long-only: no short positions
equal_weights = len(stocks) * [1.0 / len(stocks)]  # optimiser starting guess
sharpe_opt = spo.minimize(get_sharpe, equal_weights, method="SLSQP", constraints=cons, bounds=bnds)
variance_opt = spo.minimize(get_variance, equal_weights, method="SLSQP", constraints=cons, bounds=bnds)
print("Optimal portfolio with the maximum sharpe ratio")
print("=" * 50)
print(sharpe_opt)
print("\n")
print("Optimal portfolio with the minimum variance")
print("=" * 50)
print(variance_opt)
sharpe_opt_weights = sharpe_opt["x"]
variance_opt_weights = variance_opt["x"]
# NOTE(review): the ":2%" format specs below are likely meant to be ":.2%"
# (two decimal places); as written "2" is a minimum field width.
print(
    f"""
Market portfolio information:
Expected return: {portfolio_stats(sharpe_opt_weights, risk_free)[0]:2%}
Volatility: {portfolio_stats(sharpe_opt_weights, risk_free)[1]:2%}
Minimum variance portfolio information:
Expected return: {portfolio_stats(variance_opt_weights, risk_free)[0]:2%}
Volatility: {portfolio_stats(variance_opt_weights, risk_free)[1]:2%}
"""
)
# calculate the portfolios on efficient frontier
target_rt = np.linspace(0.39, 0.8, num=100)
target_std = []
for rt in target_rt:
    # 1. portfolio return equals the target return
    #    (fixed: the old constraint compared sqrt(return) against the target)
    # 2. weights sum up to 1
    # rt is bound as a lambda default so each constraint captures the current
    # target rather than the loop variable (late-binding pitfall).
    cons = (
        {"type": "eq", "fun": lambda x, rt=rt: portfolio_stats(x, risk_free)[0] - rt},
        {"type": "eq", "fun": lambda x: np.sum(x) - 1},
    )
    ef_port = spo.minimize(get_variance, equal_weights, method="SLSQP", constraints=cons, bounds=bnds)
    # ef_port["fun"] is the minimised *variance*; take the square root so the
    # "Risk" axis and Sharpe-ratio colouring below really use volatility.
    target_std.append(np.sqrt(ef_port["fun"]))
target_std = np.array(target_std)
# exhibit the efficient frontier
plt.figure(figsize=(12, 9))
plt.scatter(target_std, target_rt, c=(target_rt - risk_free) / target_std, marker="o")
plt.xlabel("Risk")
plt.ylabel("Return")
plt.title("Efficient Frontier")
plt.colorbar(label="Sharpe Ratio")
plt.show() | portfolio_optimiser/optimiser.py | import scipy.optimize as spo
import pandas as pd
import numpy as np
import yfinance as yf
import matplotlib.pyplot as plt
from datetime import datetime as dt
plt.style.use("ggplot")
# selected equities and time frame
stocks = ["AAPL", "GOOG", "TSLA", "BABA", "ETH-USD"]
start = dt(2017, 12, 31)
end = dt(2021, 1, 1)
# fetch stock prices (adjusted close), dropping days with any missing data
stock_prices = yf.download(stocks, start, end)["Adj Close"].dropna(axis=0)
stock_returns = stock_prices.pct_change().dropna(axis=0)
# fetch risk free rate (US 10-year T-bills); ^TNX quotes the yield in percent
risk_free = yf.download("^TNX", start, end)["Adj Close"].mean() / 100
# generate random weights for each equity
# np.random.seed(1000)
portfolio_returns = []
portfolio_volatilities = []
for _ in range(2500):
    weights = np.random.random(len(stocks))
    weights /= sum(weights)  # normalise so weights sum to 1
    # construct the portfolio (annualised with 252 trading days)
    rt = np.sum(stock_returns.mean() * weights) * 252
    var = np.dot(np.dot(weights.T, stock_returns.cov() * 252), weights)
    std = np.sqrt(var)
    portfolio_returns.append(rt)
    portfolio_volatilities.append(std)
portfolio_returns = np.array(portfolio_returns)
portfolio_volatilities = np.array(portfolio_volatilities)
# visualise all possible portfolio combinations, coloured by Sharpe ratio
plt.figure(figsize=(12, 9))
plt.scatter(
    portfolio_volatilities, portfolio_returns, c=(portfolio_returns - risk_free) / portfolio_volatilities, marker="o"
)
plt.xlabel("Risk")
plt.ylabel("Return")
plt.title("Market Portfolio")
plt.colorbar(label="Sharpe Ratio")
plt.show()
# optimisation
def portfolio_stats(weights, rf):
    """Return [annualised return, annualised volatility, sharpe ratio]
    for the portfolio defined by *weights*, with *rf* the risk-free rate."""
    w = np.array(weights)
    annual_cov = stock_returns.cov() * 252
    p_rt = np.sum(stock_returns.mean() * w) * 252
    p_std = np.sqrt(np.dot(np.dot(w.T, annual_cov), w))
    sharpe = (p_rt - rf) / p_std
    return np.array([p_rt, p_std, sharpe])
def get_sharpe(weights):
    """Minimisation objective: the negated sharpe ratio of *weights*."""
    sharpe = portfolio_stats(weights, risk_free)[2]
    return -sharpe
def get_variance(weights):
    """Minimisation objective: the portfolio variance (volatility squared)."""
    volatility = portfolio_stats(weights, risk_free)[1]
    return volatility * volatility
# portfolio with the highest sharpe ratio
cons = {"type": "eq", "fun": lambda x: np.sum(x) - 1}  # weights must sum to 1
bnds = [(0, 1) for _ in range(len(stocks))]  # long-only: each weight in [0, 1]
equal_weights = len(stocks) * [1.0 / len(stocks)]  # equal-weight starting point
sharpe_opt = spo.minimize(get_sharpe, equal_weights, method="SLSQP", constraints=cons, bounds=bnds)
variance_opt = spo.minimize(get_variance, equal_weights, method="SLSQP", constraints=cons, bounds=bnds)
print("Optimal portfolio with the maximum sharpe ratio")
print("=" * 50)
print(sharpe_opt)
print("\n")
print("Optimal portfolio with the minimum variance")
print("=" * 50)
print(variance_opt)
sharpe_opt_weights = sharpe_opt["x"]
variance_opt_weights = variance_opt["x"]
# Bug fix: the specs below were ":2%" (minimum width 2, six default decimal
# places); ":.2%" prints two decimal places as the report intends.
print(
    f"""
Market portfolio information:
Expected return: {portfolio_stats(sharpe_opt_weights, risk_free)[0]:.2%}
Volatility: {portfolio_stats(sharpe_opt_weights, risk_free)[1]:.2%}
Minimum variance portfolio information:
Expected return: {portfolio_stats(variance_opt_weights, risk_free)[0]:.2%}
Volatility: {portfolio_stats(variance_opt_weights, risk_free)[1]:.2%}
"""
)
# calculate the portfolios on efficient frontier
target_rt = np.linspace(0.39, 0.8, num=100)
target_std = []
for rt in target_rt:
    # For each target return level, find the minimum-variance portfolio s.t.:
    # 1. portfolio return equals the target
    # 2. weights sum up to 1
    cons = (
        # Bug fix: the original constrained sqrt(portfolio return) to the
        # target, so the frontier was solved at the wrong return levels;
        # the equality must hold on the return itself.
        {"type": "eq", "fun": lambda x, rt=rt: portfolio_stats(x, risk_free)[0] - rt},
        {"type": "eq", "fun": lambda x: np.sum(x) - 1},
    )
    ef_port = spo.minimize(get_variance, equal_weights, method="SLSQP", constraints=cons, bounds=bnds)
    # Bug fix: get_variance returns the variance (std**2); take the square
    # root so the x-axis is volatility, consistent with the first scatter.
    target_std.append(np.sqrt(ef_port["fun"]))
target_std = np.array(target_std)
# exhibit the efficient frontier, coloured by sharpe ratio
plt.figure(figsize=(12, 9))
plt.scatter(target_std, target_rt, c=(target_rt - risk_free) / target_std, marker="o")
plt.xlabel("Risk")
plt.ylabel("Return")
plt.title("Efficient Frontier")
plt.colorbar(label="Sharpe Ratio")
plt.show() | 0.796055 | 0.654577 |
class Hash:
    """Minimal hash table using separate chaining over a fixed number of
    buckets; keys hash to the sum of their characters' code points mod size."""

    def __init__(self):
        # Fixed bucket count, deliberately small for demonstration purposes.
        self.size = 5
        self.map = [None] * self.size

    def _get_pos(self, key):
        """Return the bucket index for *key*.

        The key is stringified so non-string keys also work; the index is
        the sum of the characters' code points modulo the table size.
        """
        total = 0  # renamed from `hash`, which shadowed the builtin
        for char in str(key):
            total += ord(char)
        return total % self.size

    def add(self, key, value):
        """Insert *key* -> *value*, overwriting an existing key in place.

        Always returns True once the pair has been stored.
        """
        bucket_index = self._get_pos(key)
        entry = [key, value]
        if self.map[bucket_index] is None:
            # Empty bucket: start a new chain.
            self.map[bucket_index] = [entry]
            return True
        for pair in self.map[bucket_index]:
            if pair[0] == key:
                # Key already present: update its value in place.
                pair[1] = value
                return True
        # Collision between distinct keys: append to the chain.
        self.map[bucket_index].append(entry)
        return True

    def delete(self, key):
        """Remove *key*; return True if it was found, False otherwise."""
        bucket_index = self._get_pos(key)
        if self.map[bucket_index] is None:
            return False
        for i in range(len(self.map[bucket_index])):
            if self.map[bucket_index][i][0] == key:
                self.map[bucket_index].pop(i)
                return True
        # Bug fix: the original fell off the end and returned None when the
        # bucket existed but the key was absent; report failure explicitly.
        return False

    def print(self):
        """Print every non-empty bucket."""
        for item in self.map:
            if item is not None:
                print(str(item))
class Hash:
    """Minimal hash table using separate chaining over a fixed number of
    buckets; keys hash to the sum of their characters' code points mod size."""

    def __init__(self):
        # Fixed bucket count, deliberately small for demonstration purposes.
        self.size = 5
        self.map = [None] * self.size

    def _get_pos(self, key):
        """Return the bucket index for *key*.

        The key is stringified so non-string keys also work; the index is
        the sum of the characters' code points modulo the table size.
        """
        total = 0  # renamed from `hash`, which shadowed the builtin
        for char in str(key):
            total += ord(char)
        return total % self.size

    def add(self, key, value):
        """Insert *key* -> *value*, overwriting an existing key in place.

        Always returns True once the pair has been stored.
        """
        bucket_index = self._get_pos(key)
        entry = [key, value]
        if self.map[bucket_index] is None:
            # Empty bucket: start a new chain.
            self.map[bucket_index] = [entry]
            return True
        for pair in self.map[bucket_index]:
            if pair[0] == key:
                # Key already present: update its value in place.
                pair[1] = value
                return True
        # Collision between distinct keys: append to the chain.
        self.map[bucket_index].append(entry)
        return True

    def delete(self, key):
        """Remove *key*; return True if it was found, False otherwise."""
        bucket_index = self._get_pos(key)
        if self.map[bucket_index] is None:
            return False
        for i in range(len(self.map[bucket_index])):
            if self.map[bucket_index][i][0] == key:
                self.map[bucket_index].pop(i)
                return True
        # Bug fix: the original fell off the end and returned None when the
        # bucket existed but the key was absent; report failure explicitly.
        return False

    def print(self):
        """Print every non-empty bucket."""
        for item in self.map:
            if item is not None:
                print(str(item))
from Common.hive_connection import HiveConnection
import time
from Common.helper import format_two_point_time, sql_to_string
class Interpolation:
    """Hive pipeline that interpolates CDR (call detail record) traces.

    Each step drops, recreates and repopulates one Hive table derived from
    the provider-prefixed CDR data; the final step exports the interpolated
    routes to CSV (written by Hive under /tmp/hive/cdr_interpolation).
    """

    def __init__(self, config):
        # config supplies provider_prefix, size limits and the external
        # file locations (POI / OSM / Voronoi) used by the SQL templates.
        self.config = config
        self.hc = HiveConnection()

    def calculate_interpolation(self):
        """Run the whole interpolation pipeline in dependency order."""
        self.convert_cdr_to_array_format()
        self.create_trip_format()
        self.create_trip_24hr_padding()
        self.create_poi_relocation()
        self.create_route_interpolation()
        self.export_to_csv()

    def _rebuild_table(self, start_banner, end_banner, table_suffix,
                       create_sql, insert_sql, insert_kwargs=None):
        """Drop, recreate and populate one pipeline table.

        Factors out the drop/create/insert sequence (and its progress
        logging) that was previously copy-pasted into every step method.

        :param start_banner: text of the opening '#####' banner line
        :param end_banner: text of the closing '#####' banner line
        :param table_suffix: table name suffix appended to provider_prefix
        :param create_sql: path of the CREATE TABLE SQL template
        :param insert_sql: path of the INSERT SQL template
        :param insert_kwargs: extra .format() kwargs for the INSERT template
        """
        provider_prefix = self.config.provider_prefix
        cursor = self.hc.cursor
        table = '{prefix}_{suffix}'.format(prefix=provider_prefix, suffix=table_suffix)
        print('########## {banner} ##########'.format(banner=start_banner))
        timer = time.time()
        print('Checking and dropping {table} table if existing.'.format(table=table))
        cursor.execute('DROP TABLE IF EXISTS {table}'.format(table=table))
        print('Checked and dropped {table} table if existing. '
              'Elapsed time: {time} seconds'
              .format(table=table, time=format_two_point_time(timer, time.time())))
        timer = time.time()
        print('Creating {table} table'.format(table=table))
        raw_sql = sql_to_string(create_sql)
        cursor.execute(raw_sql.format(provider_prefix=provider_prefix))
        print('Created {table} table. Elapsed time: {time} seconds'
              .format(table=table, time=format_two_point_time(timer, time.time())))
        timer = time.time()
        raw_sql = sql_to_string(insert_sql)
        print('Inserting into {table} table'.format(table=table))
        query = raw_sql.format(provider_prefix=provider_prefix, **(insert_kwargs or {}))
        cursor.execute(query)
        print('Inserted into {table} table. Elapsed time: {time} seconds'
              .format(table=table, time=format_two_point_time(timer, time.time())))
        print('########## {banner} ##########'.format(banner=end_banner))

    def convert_cdr_to_array_format(self):
        """Collapse raw CDR rows into one array-format row per uid."""
        self._rebuild_table(
            'CREATE CDR BY UID ARRAY FORMAT TABLE',
            'FINISHED CREATING CDR BY UID TABLE',
            'cdr_by_uid',
            'interpolation/create_cdr_by_uid.sql',
            'interpolation/insert_cdr_by_uid.sql',
            insert_kwargs={'max_size_cdr_by_uid': self.config.max_size_cdr_by_uid})

    def create_trip_format(self):
        """Reshape the per-uid arrays into trip format."""
        self._rebuild_table(
            'CREATE CDR BY UID ARRAY TRIP FORMAT TABLE',
            'FINISHED CREATING CDR BY UID TRIP FORMAT TABLE',
            'cdr_by_uid_trip',
            'interpolation/create_trip_format.sql',
            'interpolation/insert_trip_format.sql')

    def create_trip_24hr_padding(self):
        """Pad each uid's trips out to a full 24-hour window."""
        self._rebuild_table(
            'CREATE TRIP 24 HR PADDING TABLE',
            'FINISHED TRIP 24 HR PADDING TABLE',
            'cdr_by_uid_trip_organized_array_apd',
            'interpolation/create_trip_24_hr_padding.sql',
            'interpolation/insert_trip_24_hr_padding.sql')

    def create_poi_relocation(self):
        """Relocate trip anchor points onto points of interest (POI)."""
        self._rebuild_table(
            'CREATE POI RELOCATION TABLE',
            'FINISHED CREATING POI RELOCATION TABLE',
            'cdr_by_uid_trip_realloc_array_apd',
            'interpolation/create_poi_relocation.sql',
            'interpolation/insert_poi_relocation.sql',
            insert_kwargs={'poi': self.config.interpolation_poi_file_location.split('/')[-1]})

    def create_route_interpolation(self):
        """Interpolate routes between the relocated trip points."""
        self._rebuild_table(
            'CREATE ROUTE INTERPOLATION TABLE',
            'FINISHED ROUTE INTERPOLATION TABLE',
            'cdr_by_uid_trip_routing_array_apd',
            'interpolation/create_route_interpolation.sql',
            'interpolation/insert_route_interpolation.sql',
            insert_kwargs={
                'max_size_interpolation': self.config.max_size_interpolation,
                'osm': self.config.interpolation_osm_file_location.split('/')[-1],
                'voronoi': self.config.interpolation_voronoi_file_location.split('/')[-1]})

    def export_to_csv(self):
        """Export interpolated routes as GPS-format CSV via Hive."""
        provider_prefix = self.config.provider_prefix
        cursor = self.hc.cursor
        print('########## Exporting route interpolation to CSV ##########')
        timer = time.time()
        raw_sql = sql_to_string('interpolation/export_to_gps_format.sql')
        cursor.execute(raw_sql.format(provider_prefix=provider_prefix))
        # Cleanup: dropped the unused provider_prefix kwarg the original
        # passed to a format string that never referenced it.
        print('Exported to CSV. Elapsed time: {time} seconds'
              .format(time=format_two_point_time(timer, time.time())))
        print('########## FINISHED EXPORTING, FILE LOCATED IN /tmp/hive/cdr_interpolation ##########')
import time
from Common.helper import format_two_point_time, sql_to_string
class Interpolation:
    """Hive pipeline that interpolates CDR (call detail record) traces.

    Each step drops, recreates and repopulates one Hive table derived from
    the provider-prefixed CDR data; the final step exports the interpolated
    routes to CSV (written by Hive under /tmp/hive/cdr_interpolation).
    """

    def __init__(self, config):
        # config supplies provider_prefix, size limits and the external
        # file locations (POI / OSM / Voronoi) used by the SQL templates.
        self.config = config
        self.hc = HiveConnection()

    def calculate_interpolation(self):
        """Run the whole interpolation pipeline in dependency order."""
        self.convert_cdr_to_array_format()
        self.create_trip_format()
        self.create_trip_24hr_padding()
        self.create_poi_relocation()
        self.create_route_interpolation()
        self.export_to_csv()

    def _rebuild_table(self, start_banner, end_banner, table_suffix,
                       create_sql, insert_sql, insert_kwargs=None):
        """Drop, recreate and populate one pipeline table.

        Factors out the drop/create/insert sequence (and its progress
        logging) that was previously copy-pasted into every step method.

        :param start_banner: text of the opening '#####' banner line
        :param end_banner: text of the closing '#####' banner line
        :param table_suffix: table name suffix appended to provider_prefix
        :param create_sql: path of the CREATE TABLE SQL template
        :param insert_sql: path of the INSERT SQL template
        :param insert_kwargs: extra .format() kwargs for the INSERT template
        """
        provider_prefix = self.config.provider_prefix
        cursor = self.hc.cursor
        table = '{prefix}_{suffix}'.format(prefix=provider_prefix, suffix=table_suffix)
        print('########## {banner} ##########'.format(banner=start_banner))
        timer = time.time()
        print('Checking and dropping {table} table if existing.'.format(table=table))
        cursor.execute('DROP TABLE IF EXISTS {table}'.format(table=table))
        print('Checked and dropped {table} table if existing. '
              'Elapsed time: {time} seconds'
              .format(table=table, time=format_two_point_time(timer, time.time())))
        timer = time.time()
        print('Creating {table} table'.format(table=table))
        raw_sql = sql_to_string(create_sql)
        cursor.execute(raw_sql.format(provider_prefix=provider_prefix))
        print('Created {table} table. Elapsed time: {time} seconds'
              .format(table=table, time=format_two_point_time(timer, time.time())))
        timer = time.time()
        raw_sql = sql_to_string(insert_sql)
        print('Inserting into {table} table'.format(table=table))
        query = raw_sql.format(provider_prefix=provider_prefix, **(insert_kwargs or {}))
        cursor.execute(query)
        print('Inserted into {table} table. Elapsed time: {time} seconds'
              .format(table=table, time=format_two_point_time(timer, time.time())))
        print('########## {banner} ##########'.format(banner=end_banner))

    def convert_cdr_to_array_format(self):
        """Collapse raw CDR rows into one array-format row per uid."""
        self._rebuild_table(
            'CREATE CDR BY UID ARRAY FORMAT TABLE',
            'FINISHED CREATING CDR BY UID TABLE',
            'cdr_by_uid',
            'interpolation/create_cdr_by_uid.sql',
            'interpolation/insert_cdr_by_uid.sql',
            insert_kwargs={'max_size_cdr_by_uid': self.config.max_size_cdr_by_uid})

    def create_trip_format(self):
        """Reshape the per-uid arrays into trip format."""
        self._rebuild_table(
            'CREATE CDR BY UID ARRAY TRIP FORMAT TABLE',
            'FINISHED CREATING CDR BY UID TRIP FORMAT TABLE',
            'cdr_by_uid_trip',
            'interpolation/create_trip_format.sql',
            'interpolation/insert_trip_format.sql')

    def create_trip_24hr_padding(self):
        """Pad each uid's trips out to a full 24-hour window."""
        self._rebuild_table(
            'CREATE TRIP 24 HR PADDING TABLE',
            'FINISHED TRIP 24 HR PADDING TABLE',
            'cdr_by_uid_trip_organized_array_apd',
            'interpolation/create_trip_24_hr_padding.sql',
            'interpolation/insert_trip_24_hr_padding.sql')

    def create_poi_relocation(self):
        """Relocate trip anchor points onto points of interest (POI)."""
        self._rebuild_table(
            'CREATE POI RELOCATION TABLE',
            'FINISHED CREATING POI RELOCATION TABLE',
            'cdr_by_uid_trip_realloc_array_apd',
            'interpolation/create_poi_relocation.sql',
            'interpolation/insert_poi_relocation.sql',
            insert_kwargs={'poi': self.config.interpolation_poi_file_location.split('/')[-1]})

    def create_route_interpolation(self):
        """Interpolate routes between the relocated trip points."""
        self._rebuild_table(
            'CREATE ROUTE INTERPOLATION TABLE',
            'FINISHED ROUTE INTERPOLATION TABLE',
            'cdr_by_uid_trip_routing_array_apd',
            'interpolation/create_route_interpolation.sql',
            'interpolation/insert_route_interpolation.sql',
            insert_kwargs={
                'max_size_interpolation': self.config.max_size_interpolation,
                'osm': self.config.interpolation_osm_file_location.split('/')[-1],
                'voronoi': self.config.interpolation_voronoi_file_location.split('/')[-1]})

    def export_to_csv(self):
        """Export interpolated routes as GPS-format CSV via Hive."""
        provider_prefix = self.config.provider_prefix
        cursor = self.hc.cursor
        print('########## Exporting route interpolation to CSV ##########')
        timer = time.time()
        raw_sql = sql_to_string('interpolation/export_to_gps_format.sql')
        cursor.execute(raw_sql.format(provider_prefix=provider_prefix))
        # Cleanup: dropped the unused provider_prefix kwarg the original
        # passed to a format string that never referenced it.
        print('Exported to CSV. Elapsed time: {time} seconds'
              .format(time=format_two_point_time(timer, time.time())))
        print('########## FINISHED EXPORTING, FILE LOCATED IN /tmp/hive/cdr_interpolation ##########')
from PyQt5.QtCore import QObject, pyqtSignal
from deriva.core import format_exception
from deriva.transfer import DerivaUpload
from deriva.qt import async_execute, Task
class UploadTask(QObject):
    """Base Qt wrapper that runs a DerivaUpload operation asynchronously.

    Emits status_update_signal(success, status, detail, result) when the
    wrapped task completes, and progress_update_signal(current, maximum)
    while it runs.
    """
    status_update_signal = pyqtSignal(bool, str, str, object)
    progress_update_signal = pyqtSignal(int, int)

    def __init__(self, uploader, parent=None):
        super(UploadTask, self).__init__(parent)
        assert (uploader is not None and isinstance(uploader, DerivaUpload))
        self.uploader = uploader
        self.task = None

    def start(self):
        # Schedule the prepared task on the async executor.
        async_execute(self.task)

    def cancel(self):
        self.task.cancel()

    def set_status(self, success, status, detail, result):
        self.status_update_signal.emit(success, status, detail, result)

    def result_callback(self, success, result):
        # Bug fix: the original referenced an undefined name `status`,
        # raising NameError whenever this default callback fired; report
        # the result itself. Subclasses override this with richer text.
        self.set_status(success, str(result), "", result)

    def progress_callback(self, current, maximum):
        # Returning False tells the worker to stop once the task is canceled.
        if self.task.canceled:
            return False
        self.progress_update_signal.emit(current, maximum)
        return True
class SessionQueryTask(UploadTask):
    """Asynchronously queries the catalog's authentication session."""

    def __init__(self, parent=None):
        super(SessionQueryTask, self).__init__(parent)

    def result_callback(self, success, result):
        # On success forward the decoded JSON session; on failure forward
        # the formatted exception text and no payload.
        if success:
            self.set_status(success, "Session query success", "", result.json())
        else:
            self.set_status(success, "Session query failure", format_exception(result), None)

    def query(self):
        self.task = Task(self.uploader.catalog.get_authn_session, [], self.result_callback)
        self.start()
class ConfigUpdateTask(UploadTask):
    """Asynchronously refreshes the uploader's configuration."""

    def __init__(self, parent=None):
        super(ConfigUpdateTask, self).__init__(parent)

    def result_callback(self, success, result):
        # On success forward the updated configuration object; on failure
        # forward the formatted exception text and no payload.
        if success:
            self.set_status(success, "Configuration update success", "", result)
        else:
            self.set_status(success, "Configuration update failure", format_exception(result), None)

    def update_config(self):
        self.task = Task(self.uploader.getUpdatedConfig, [], self.result_callback)
        self.start()
class ScanDirectoryTask(UploadTask):
    """Asynchronously scans a directory tree for uploadable files."""

    def __init__(self, parent=None):
        super(ScanDirectoryTask, self).__init__(parent)

    def result_callback(self, success, result):
        # The scan produces no payload; only a status/detail pair is emitted.
        if success:
            self.set_status(success, "Directory scan success", "", None)
        else:
            self.set_status(success, "Directory scan failure.", format_exception(result), None)

    def scan(self, path):
        self.task = Task(self.uploader.scanDirectory, [path], self.result_callback)
        self.start()
class UploadFilesTask(UploadTask):
    """Asynchronously uploads the files discovered by a directory scan."""

    def __init__(self, parent=None):
        super(UploadFilesTask, self).__init__(parent)

    def result_callback(self, success, result):
        # The upload produces no payload; only a status/detail pair is emitted.
        if success:
            self.set_status(success, "File upload success", "", None)
        else:
            self.set_status(success, "File upload failure", format_exception(result), None)

    def upload(self, status_callback=None, file_callback=None):
        # The callbacks are forwarded to uploadFiles so the GUI can track
        # overall and per-file progress.
        self.task = Task(self.uploader.uploadFiles, [status_callback, file_callback], self.result_callback)
        self.start()
from deriva.core import format_exception
from deriva.transfer import DerivaUpload
from deriva.qt import async_execute, Task
class UploadTask(QObject):
    """Base Qt wrapper that runs a DerivaUpload operation asynchronously.

    Emits status_update_signal(success, status, detail, result) when the
    wrapped task completes, and progress_update_signal(current, maximum)
    while it runs.
    """
    status_update_signal = pyqtSignal(bool, str, str, object)
    progress_update_signal = pyqtSignal(int, int)

    def __init__(self, uploader, parent=None):
        super(UploadTask, self).__init__(parent)
        assert (uploader is not None and isinstance(uploader, DerivaUpload))
        self.uploader = uploader
        self.task = None

    def start(self):
        # Schedule the prepared task on the async executor.
        async_execute(self.task)

    def cancel(self):
        self.task.cancel()

    def set_status(self, success, status, detail, result):
        self.status_update_signal.emit(success, status, detail, result)

    def result_callback(self, success, result):
        # Bug fix: the original referenced an undefined name `status`,
        # raising NameError whenever this default callback fired; report
        # the result itself. Subclasses override this with richer text.
        self.set_status(success, str(result), "", result)

    def progress_callback(self, current, maximum):
        # Returning False tells the worker to stop once the task is canceled.
        if self.task.canceled:
            return False
        self.progress_update_signal.emit(current, maximum)
        return True
class SessionQueryTask(UploadTask):
    """Asynchronously queries the catalog's authentication session."""

    def __init__(self, parent=None):
        super(SessionQueryTask, self).__init__(parent)

    def result_callback(self, success, result):
        # On success forward the decoded JSON session; on failure forward
        # the formatted exception text and no payload.
        if success:
            self.set_status(success, "Session query success", "", result.json())
        else:
            self.set_status(success, "Session query failure", format_exception(result), None)

    def query(self):
        self.task = Task(self.uploader.catalog.get_authn_session, [], self.result_callback)
        self.start()
class ConfigUpdateTask(UploadTask):
def __init__(self, parent=None):
super(ConfigUpdateTask, self).__init__(parent)
def result_callback(self, success, result):
self.set_status(success,
"Configuration update success" if success else "Configuration update failure",
"" if success else format_exception(result),
result if success else None)
def update_config(self):
self.task = Task(self.uploader.getUpdatedConfig, [], self.result_callback)
self.start()
class ScanDirectoryTask(UploadTask):
def __init__(self, parent=None):
super(ScanDirectoryTask, self).__init__(parent)
def result_callback(self, success, result):
self.set_status(success,
"Directory scan success" if success else "Directory scan failure.",
"" if success else format_exception(result),
None)
def scan(self, path):
self.task = Task(self.uploader.scanDirectory, [path], self.result_callback)
self.start()
class UploadFilesTask(UploadTask):
def __init__(self, parent=None):
super(UploadFilesTask, self).__init__(parent)
def result_callback(self, success, result):
self.set_status(success,
"File upload success" if success else "File upload failure",
"" if success else format_exception(result),
None)
def upload(self, status_callback=None, file_callback=None):
self.task = Task(self.uploader.uploadFiles, [status_callback, file_callback], self.result_callback)
self.start() | 0.677474 | 0.266975 |
import json
import io
import sqlalchemy as sa
from sqlalchemy.inspection import inspect
import pandas as pd
from datetime import datetime
from collections import OrderedDict
from pyramid.traversal import find_root
from pyramid.response import Response
from zope.interface import implementer
from . import Base
from ..core import get_redis_con
from .base_view import IRestCommonView, IRestCollectionView, IRestItemView
from .configuration_model.frontmodules import FrontModules
from ..utils.decorator import timing
localRedis = get_redis_con()
class Resource(dict):
children = []
def __init__(self, ref, parent):
self.__name__ = ref
self.__parent__ = parent
self.__root__ = find_root(self)
self.add_children()
def __getitem__(self, item):
next_resource = self.get(item, None)
if next_resource is not None:
return next_resource(item, self)
else:
raise KeyError
def __repr__(self):
# use standard object representation (not dict's)
return object.__repr__(self)
def add_child(self, ref, klass):
self[ref] = klass
def add_children(self):
for ref, klass in self.children:
self.add_child(ref, klass)
@implementer(IRestCommonView)
class CustomResource(Resource):
__acl__ = []
def __init__(self, ref, parent):
Resource.__init__(self, ref, parent)
self.request = self.__root__.request
self.session = self.__root__.request.dbsession
def __getitem__(self, ref):
if ref.isdigit():
next_resource = self.get('{int}')
return next_resource(ref, self)
else:
return super().__getitem__(ref)
def retrieve(self):
raise NotImplementedError()
class AutocompleteResource(CustomResource):
def __init__(self, ref, parent):
CustomResource.__init__(self, ref, parent)
self.targetValue = None
self.attribute = None
def __getitem__(self, ref):
if self.attribute:
self.targetValue = ref
else:
self.attribute = ref
return self
def retrieve(self):
objName = self.__parent__.item.model.__tablename__
criteria = self.request.params['term']
prop = self.attribute
if self.integers(prop):
table = Base.metadata.tables[objName + 'DynPropValuesNow']
query = sa.select([table.c['ValueString'].label('label'),
table.c['ValueString'].label('value')]
).distinct(table.c['ValueString']
).where(table.c['FK_' + objName + 'DynProp'] == prop)
query = query.where(table.c['ValueString'].like('%' + criteria + '%')
).order_by(sa.asc(table.c['ValueString']))
else:
NameValReturn = prop
if self.targetValue:
NameValReturn = self.targetValue
table = Base.metadata.tables[objName]
query = sa.select([table.c[NameValReturn].label('value'),
table.c[prop].label('label')]
).distinct(table.c[prop])
query = query.where(table.c[prop].like(
'%' + criteria + '%')).order_by(sa.asc(table.c[prop]))
return [dict(row) for row in self.session.execute(query).fetchall()]
class DynamicValueResource(CustomResource):
model = None
def __init__(self, ref, parent):
CustomResource.__init__(self, ref, parent)
self.objectDB = self.session.query(self.model).get(ref)
def retrieve(self):
pass
def delete(self):
self.session.delete(self.objectDB)
class DynamicValuesResource(CustomResource):
def retrieve(self):
from ecoreleve_server.utils.parseValue import formatThesaurus
propertiesTable = Base.metadata.tables[self.__parent__.objectDB.TypeClass.PropertiesClass.__tablename__]
dynamicValuesTable = Base.metadata.tables[self.__parent__.objectDB.DynamicValuesClass.__tablename__]
FK_name = 'FK_' + self.__parent__.objectDB.__tablename__
FK_property_name = self.__parent__.objectDB.fk_table_DynProp_name
tableJoin = sa.join(dynamicValuesTable, propertiesTable,
dynamicValuesTable.c[FK_property_name] == propertiesTable.c['ID'])
query = sa.select([dynamicValuesTable, propertiesTable.c['Name']]
).select_from(tableJoin).where(
dynamicValuesTable.c[FK_name] == self.__parent__.objectDB.ID
).order_by(sa.desc(dynamicValuesTable.c['StartDate']))
result = self.session.execute(query).fetchall()
response = []
for row in result:
curRow = OrderedDict(row)
dictRow = {}
for key in curRow:
if curRow[key] is not None:
if key == 'ValueString' in key and curRow[key] is not None:
try:
thesauralValueObj = formatThesaurus(curRow[key])
dictRow['value'] = thesauralValueObj['displayValue']
except:
dictRow['value'] = curRow[key]
elif 'FK' not in key:
dictRow[key] = curRow[key]
dictRow['StartDate'] = curRow[
'StartDate'].strftime('%Y-%m-%d %H:%M:%S')
response.append(dictRow)
return response
def delete(self):
pass
@implementer(IRestItemView)
class DynamicObjectResource(CustomResource):
def __init__(self, ref, parent):
CustomResource.__init__(self, ref, parent)
if int(ref) != 0:
self.objectDB = self.session.query(self.model).get(ref)
else:
self.objectDB = None
self.__acl__ = self.__parent__.__acl__
@property
def model(self):
raise Exception('method has to be overriden')
def getData(self):
# self.objectDB.LoadNowValues()
return self.objectDB.values
def getDataWithForm(self):
try:
displayMode = self.request.params['DisplayMode']
except:
displayMode = 'display'
# form = self.objectDB.getForm(displayMode, objectType, moduleName)
return self.objectDB.getDataWithSchema(displayMode=displayMode)
def retrieve(self):
if 'FormName' in self.request.params:
if not self.objectDB:
return self.__parent__.getForm(objectType=self.request.params['ObjectType'])
else:
return self.getDataWithForm()
else:
return self.getData()
def update(self):
data = self.request.json_body
self.objectDB.beforeUpdate()
self.objectDB.values = data
self.objectDB.afterUpdate()
return 'updated'
def delete(self):
if not self.objectDB:
return None
self.objectDB.beforeDelete()
self.session.delete(self.objectDB)
self.objectDB.afterDelete()
return 'deleted'
@implementer(IRestCollectionView)
class DynamicObjectCollectionResource(CustomResource):
def __init__(self, ref, parent):
CustomResource.__init__(self, ref, parent)
self.objectDB = self.model()
if not hasattr(self.objectDB, 'session') or not self.objectDB.session:
self.objectDB.session = self.session
if 'typeObj' in self.request.params and self.request.params['typeObj'] is not None:
objType = self.request.params['typeObj']
self.objectDB.type_id = objType
self.typeObj = objType
else:
self.typeObj = None
@property
def model(self):
raise NotImplementedError()
@property
def moduleFormName(self):
raise NotImplementedError('moduleFormName is needed to get Form generation from in-database configuration (ModuleForms table)')
@property
def moduleGridName(self):
raise NotImplementedError('moduleGridName is needed to get Grid & Filters generation from in-database configuration (ModuleGrids table)')
@property
def Collection(self):
raise NotImplementedError('Collection is needed to search with filters and get datas')
def getCollection(self, from_history=None, startDate=None):
return self.Collection(session=self.session, object_type=self.typeObj, from_history=from_history)
def insert(self):
data = {}
for items, value in self.request.json_body.items():
data[items] = value
self.handleDataBeforeInsert(data)
self.objectDB.values = data
self.session.add(self.objectDB)
self.session.flush()
return {'ID': self.objectDB.ID}
def insertMany(self):
pass
def handleDataBeforeInsert(self, data):
return data
def handleCriteria(self, criteria):
return criteria
def handleResult(self, result):
return result
def handleCount(self, count, callback, params):
return callback(**params)
def retrieve(self):
return self.search()
def traduct_from_thesaurus(self, item, dataConfigWithThesaurus):
from ..utils.parseValue import formatThesaurus
key, value = item
configThesaurus = list(filter(lambda obj: key == obj.Name, dataConfigWithThesaurus))
if configThesaurus and value:
newVal = formatThesaurus(value, nodeID=configThesaurus[0].Options)['displayValue']
else:
newVal = value
return (key, newVal)
def collection_traduct_from_thesaurus(self, data):
traduced_data = []
dataConfigWithThesaurus = list(
filter(lambda obj: 'AutocompTreeEditor' == obj.FilterType, self.getConf(self.moduleGridName).ModuleGrids))
# listWithThes = list(map(lambda x: x.Name, listWithThes))
# change thesaural term into laguage user
for row in data:
row = dict(map(lambda i: self.traduct_from_thesaurus(i, dataConfigWithThesaurus), row.items()))
traduced_data.append(row)
return traduced_data
def formatParams(self, params, paging):
history = False
startDate = None
searchInfo = {}
searchInfo['criteria'] = []
if not bool(params):
params = self.request.params.mixed()
if 'criteria' in params:
params['criteria'] = json.loads(params['criteria'])
if params['criteria'] != {}:
searchInfo['criteria'] = [obj for obj in params[
'criteria'] if obj['Value'] != str(-1)]
else:
searchInfo['criteria'] = []
if 'history' in params and params['history'] == '1':
history = True
if 'startDate' in params and params['startDate'] != '':
startDate = datetime.strptime(params['startDate'],
'%Y-%m-%dT%H:%M:%S.%fZ')
if paging:
self.pagingSearch(searchInfo, params)
searchInfo = self.handleCriteria(searchInfo)
return searchInfo, history, startDate
def count_(self, listObj=None):
moduleFront = self.getConf(self.moduleGridName)
params, history, startDate = self.formatParams({}, paging=False)
from_history = 'all' if history else startDate
collection = self.getCollection(from_history=from_history)
count = collection._count(filters=params.get('criteria', []))
return count
@timing
def search(self, paging=True, params={}, noCount=False):
params, history, startDate = self.formatParams(params, paging)
if int(params.get('offset', 0)) > 0:
if not params.get('order_by', []):
params['order_by'] = [inspect(self.model).primary_key[0].name+':asc']
conf_grid = self.getGrid()
cols = list(map(lambda x: x['field'],conf_grid))
from_history = 'all' if history else startDate
self.collection = self.getCollection(from_history=from_history)
if not noCount:
countResult = self.collection._count(filters=params.get('criteria', []))
result = [{'total_entries': countResult}]
dataResult = self.handleCount(countResult,
self.collection.search,
{
'selectable':cols,
'filters':params.get('criteria', []),
'offset':params.get('offset'),
'limit':params.get('per_page'),
'order_by':params.get('order_by')
}
)
if dataResult:
dataResult = self.collection_traduct_from_thesaurus(dataResult)
result.append(dataResult)
else:
result = self.collection.search(selectable=cols,
filters=params.get('criteria', []),
offset=params.get('offset'),
limit=params.get('per_page'),
order_by=params.get('order_by'))
result = self.collection_traduct_from_thesaurus(result)
return self.handleResult(result)
def pagingSearch(self, searchInfo, params):
listKeys = ['offset','per_page','order_by']
for key in listKeys:
if key in params:
searchInfo[key] = json.loads(params[key])
else :
searchInfo[key] = None
return searchInfo
def create(self):
data = self.request.json_body
if not isinstance(data, list):
return self.insert()
else:
return self.insertMany()
def getConf(self, moduleName=None):
if not moduleName:
moduleName = self.objectDB.moduleFormName
return self.session.query(FrontModules
).filter(FrontModules.Name == moduleName
).first()
@timing
def getForm(self, objectType=None, moduleName=None, mode='edit'):
if 'ObjectType' in self.request.params:
objectType = self.request.params['ObjectType']
if objectType:
self.objectDB.type_id = objectType
if not moduleName:
moduleName = self.moduleFormName
form = self.getConfigJSON(moduleName + mode, objectType)
# form = None
if not form:
form = self.objectDB.getForm(mode, objectType, moduleName)
self.setConfigJSON(moduleName + mode, objectType, form)
return form
@timing
def getGrid(self, type_=None, moduleName=None):
if not moduleName:
moduleName = self.moduleGridName
if not type_:
type_ = self.typeObj
gridCols = self.getConfigJSON(moduleName, type_)
# gridCols = None
if not gridCols:
gridCols = self.objectDB.getGrid(
type_=type_, moduleName=moduleName)
self.setConfigJSON(moduleName, type_, gridCols)
return gridCols
@timing
def getFilter(self, type_=None, moduleName=None):
moduleName = self.request.params.get('FilterName', None)
if not moduleName:
moduleName = self.objectDB.moduleGridName
if not type_:
type_ = self.typeObj
filters = self.getConfigJSON(moduleName+'Filter', type_)
# filters = None
if not filters:
filtersList = self.objectDB.getFilters(
type_=type_, moduleName=moduleName)
filters = {}
for i in range(len(filtersList)):
filters[str(i)] = filtersList[i]
self.setConfigJSON(moduleName + 'Filter', type_, filters)
return filters
def getConfigJSON(self, moduleName, typeObj):
configJson = None
if localRedis is not None:
try:
config_from_redis = localRedis.get(moduleName+'_'+str(typeObj))
configJson = json.loads(config_from_redis.decode())
except:
pass
return configJson
def setConfigJSON(self, moduleName, typeObj, configObject):
# use Redis ? save json configuration for Forms, Grids and Filters
if localRedis is not None:
localRedis.set(moduleName+'_' + str(typeObj), json.dumps(configObject), ex=3600*12)
def getType(self):
table = self.objectDB.TypeClass.__table__
query = sa.select([table.c['ID'].label('val'),
table.c['Name'].label('label')])
response = [OrderedDict(row)
for row in self.session.execute(query).fetchall()]
return response
def export(self):
# dataResult = self.search(paging=False, noCount=True)
params, history, startDate = self.formatParams({}, False)
collection = self.getCollection()
dataResult = collection.search(filters=params.get('criteria'))
df = pd.DataFrame.from_records(dataResult,
columns=dataResult[0].keys(),
coerce_float=True)
fout = io.BytesIO()
writer = pd.ExcelWriter(fout)
df.to_excel(writer, sheet_name='Sheet1')
writer.save()
file = fout.getvalue()
dt = datetime.now().strftime('%d-%m-%Y')
return Response(
file,
content_disposition="attachment; filename=" + self.__name__ + "_export_" + dt + ".xlsx",
content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet') | Back/ecoreleve_server/core/base_resource.py | import json
import io
import sqlalchemy as sa
from sqlalchemy.inspection import inspect
import pandas as pd
from datetime import datetime
from collections import OrderedDict
from pyramid.traversal import find_root
from pyramid.response import Response
from zope.interface import implementer
from . import Base
from ..core import get_redis_con
from .base_view import IRestCommonView, IRestCollectionView, IRestItemView
from .configuration_model.frontmodules import FrontModules
from ..utils.decorator import timing
localRedis = get_redis_con()
class Resource(dict):
children = []
def __init__(self, ref, parent):
self.__name__ = ref
self.__parent__ = parent
self.__root__ = find_root(self)
self.add_children()
def __getitem__(self, item):
next_resource = self.get(item, None)
if next_resource is not None:
return next_resource(item, self)
else:
raise KeyError
def __repr__(self):
# use standard object representation (not dict's)
return object.__repr__(self)
def add_child(self, ref, klass):
self[ref] = klass
def add_children(self):
for ref, klass in self.children:
self.add_child(ref, klass)
@implementer(IRestCommonView)
class CustomResource(Resource):
__acl__ = []
def __init__(self, ref, parent):
Resource.__init__(self, ref, parent)
self.request = self.__root__.request
self.session = self.__root__.request.dbsession
def __getitem__(self, ref):
if ref.isdigit():
next_resource = self.get('{int}')
return next_resource(ref, self)
else:
return super().__getitem__(ref)
def retrieve(self):
raise NotImplementedError()
class AutocompleteResource(CustomResource):
def __init__(self, ref, parent):
CustomResource.__init__(self, ref, parent)
self.targetValue = None
self.attribute = None
def __getitem__(self, ref):
if self.attribute:
self.targetValue = ref
else:
self.attribute = ref
return self
def retrieve(self):
objName = self.__parent__.item.model.__tablename__
criteria = self.request.params['term']
prop = self.attribute
if self.integers(prop):
table = Base.metadata.tables[objName + 'DynPropValuesNow']
query = sa.select([table.c['ValueString'].label('label'),
table.c['ValueString'].label('value')]
).distinct(table.c['ValueString']
).where(table.c['FK_' + objName + 'DynProp'] == prop)
query = query.where(table.c['ValueString'].like('%' + criteria + '%')
).order_by(sa.asc(table.c['ValueString']))
else:
NameValReturn = prop
if self.targetValue:
NameValReturn = self.targetValue
table = Base.metadata.tables[objName]
query = sa.select([table.c[NameValReturn].label('value'),
table.c[prop].label('label')]
).distinct(table.c[prop])
query = query.where(table.c[prop].like(
'%' + criteria + '%')).order_by(sa.asc(table.c[prop]))
return [dict(row) for row in self.session.execute(query).fetchall()]
class DynamicValueResource(CustomResource):
model = None
def __init__(self, ref, parent):
CustomResource.__init__(self, ref, parent)
self.objectDB = self.session.query(self.model).get(ref)
def retrieve(self):
pass
def delete(self):
self.session.delete(self.objectDB)
class DynamicValuesResource(CustomResource):
def retrieve(self):
from ecoreleve_server.utils.parseValue import formatThesaurus
propertiesTable = Base.metadata.tables[self.__parent__.objectDB.TypeClass.PropertiesClass.__tablename__]
dynamicValuesTable = Base.metadata.tables[self.__parent__.objectDB.DynamicValuesClass.__tablename__]
FK_name = 'FK_' + self.__parent__.objectDB.__tablename__
FK_property_name = self.__parent__.objectDB.fk_table_DynProp_name
tableJoin = sa.join(dynamicValuesTable, propertiesTable,
dynamicValuesTable.c[FK_property_name] == propertiesTable.c['ID'])
query = sa.select([dynamicValuesTable, propertiesTable.c['Name']]
).select_from(tableJoin).where(
dynamicValuesTable.c[FK_name] == self.__parent__.objectDB.ID
).order_by(sa.desc(dynamicValuesTable.c['StartDate']))
result = self.session.execute(query).fetchall()
response = []
for row in result:
curRow = OrderedDict(row)
dictRow = {}
for key in curRow:
if curRow[key] is not None:
if key == 'ValueString' in key and curRow[key] is not None:
try:
thesauralValueObj = formatThesaurus(curRow[key])
dictRow['value'] = thesauralValueObj['displayValue']
except:
dictRow['value'] = curRow[key]
elif 'FK' not in key:
dictRow[key] = curRow[key]
dictRow['StartDate'] = curRow[
'StartDate'].strftime('%Y-%m-%d %H:%M:%S')
response.append(dictRow)
return response
def delete(self):
pass
@implementer(IRestItemView)
class DynamicObjectResource(CustomResource):
def __init__(self, ref, parent):
CustomResource.__init__(self, ref, parent)
if int(ref) != 0:
self.objectDB = self.session.query(self.model).get(ref)
else:
self.objectDB = None
self.__acl__ = self.__parent__.__acl__
@property
def model(self):
raise Exception('method has to be overriden')
def getData(self):
# self.objectDB.LoadNowValues()
return self.objectDB.values
def getDataWithForm(self):
try:
displayMode = self.request.params['DisplayMode']
except:
displayMode = 'display'
# form = self.objectDB.getForm(displayMode, objectType, moduleName)
return self.objectDB.getDataWithSchema(displayMode=displayMode)
def retrieve(self):
if 'FormName' in self.request.params:
if not self.objectDB:
return self.__parent__.getForm(objectType=self.request.params['ObjectType'])
else:
return self.getDataWithForm()
else:
return self.getData()
def update(self):
data = self.request.json_body
self.objectDB.beforeUpdate()
self.objectDB.values = data
self.objectDB.afterUpdate()
return 'updated'
def delete(self):
if not self.objectDB:
return None
self.objectDB.beforeDelete()
self.session.delete(self.objectDB)
self.objectDB.afterDelete()
return 'deleted'
@implementer(IRestCollectionView)
class DynamicObjectCollectionResource(CustomResource):
def __init__(self, ref, parent):
CustomResource.__init__(self, ref, parent)
self.objectDB = self.model()
if not hasattr(self.objectDB, 'session') or not self.objectDB.session:
self.objectDB.session = self.session
if 'typeObj' in self.request.params and self.request.params['typeObj'] is not None:
objType = self.request.params['typeObj']
self.objectDB.type_id = objType
self.typeObj = objType
else:
self.typeObj = None
@property
def model(self):
raise NotImplementedError()
@property
def moduleFormName(self):
raise NotImplementedError('moduleFormName is needed to get Form generation from in-database configuration (ModuleForms table)')
@property
def moduleGridName(self):
raise NotImplementedError('moduleGridName is needed to get Grid & Filters generation from in-database configuration (ModuleGrids table)')
@property
def Collection(self):
raise NotImplementedError('Collection is needed to search with filters and get datas')
def getCollection(self, from_history=None, startDate=None):
return self.Collection(session=self.session, object_type=self.typeObj, from_history=from_history)
def insert(self):
data = {}
for items, value in self.request.json_body.items():
data[items] = value
self.handleDataBeforeInsert(data)
self.objectDB.values = data
self.session.add(self.objectDB)
self.session.flush()
return {'ID': self.objectDB.ID}
def insertMany(self):
pass
def handleDataBeforeInsert(self, data):
return data
def handleCriteria(self, criteria):
return criteria
def handleResult(self, result):
return result
def handleCount(self, count, callback, params):
return callback(**params)
def retrieve(self):
return self.search()
def traduct_from_thesaurus(self, item, dataConfigWithThesaurus):
from ..utils.parseValue import formatThesaurus
key, value = item
configThesaurus = list(filter(lambda obj: key == obj.Name, dataConfigWithThesaurus))
if configThesaurus and value:
newVal = formatThesaurus(value, nodeID=configThesaurus[0].Options)['displayValue']
else:
newVal = value
return (key, newVal)
def collection_traduct_from_thesaurus(self, data):
traduced_data = []
dataConfigWithThesaurus = list(
filter(lambda obj: 'AutocompTreeEditor' == obj.FilterType, self.getConf(self.moduleGridName).ModuleGrids))
# listWithThes = list(map(lambda x: x.Name, listWithThes))
# change thesaural term into laguage user
for row in data:
row = dict(map(lambda i: self.traduct_from_thesaurus(i, dataConfigWithThesaurus), row.items()))
traduced_data.append(row)
return traduced_data
def formatParams(self, params, paging):
history = False
startDate = None
searchInfo = {}
searchInfo['criteria'] = []
if not bool(params):
params = self.request.params.mixed()
if 'criteria' in params:
params['criteria'] = json.loads(params['criteria'])
if params['criteria'] != {}:
searchInfo['criteria'] = [obj for obj in params[
'criteria'] if obj['Value'] != str(-1)]
else:
searchInfo['criteria'] = []
if 'history' in params and params['history'] == '1':
history = True
if 'startDate' in params and params['startDate'] != '':
startDate = datetime.strptime(params['startDate'],
'%Y-%m-%dT%H:%M:%S.%fZ')
if paging:
self.pagingSearch(searchInfo, params)
searchInfo = self.handleCriteria(searchInfo)
return searchInfo, history, startDate
def count_(self, listObj=None):
moduleFront = self.getConf(self.moduleGridName)
params, history, startDate = self.formatParams({}, paging=False)
from_history = 'all' if history else startDate
collection = self.getCollection(from_history=from_history)
count = collection._count(filters=params.get('criteria', []))
return count
@timing
def search(self, paging=True, params={}, noCount=False):
params, history, startDate = self.formatParams(params, paging)
if int(params.get('offset', 0)) > 0:
if not params.get('order_by', []):
params['order_by'] = [inspect(self.model).primary_key[0].name+':asc']
conf_grid = self.getGrid()
cols = list(map(lambda x: x['field'],conf_grid))
from_history = 'all' if history else startDate
self.collection = self.getCollection(from_history=from_history)
if not noCount:
countResult = self.collection._count(filters=params.get('criteria', []))
result = [{'total_entries': countResult}]
dataResult = self.handleCount(countResult,
self.collection.search,
{
'selectable':cols,
'filters':params.get('criteria', []),
'offset':params.get('offset'),
'limit':params.get('per_page'),
'order_by':params.get('order_by')
}
)
if dataResult:
dataResult = self.collection_traduct_from_thesaurus(dataResult)
result.append(dataResult)
else:
result = self.collection.search(selectable=cols,
filters=params.get('criteria', []),
offset=params.get('offset'),
limit=params.get('per_page'),
order_by=params.get('order_by'))
result = self.collection_traduct_from_thesaurus(result)
return self.handleResult(result)
def pagingSearch(self, searchInfo, params):
listKeys = ['offset','per_page','order_by']
for key in listKeys:
if key in params:
searchInfo[key] = json.loads(params[key])
else :
searchInfo[key] = None
return searchInfo
def create(self):
data = self.request.json_body
if not isinstance(data, list):
return self.insert()
else:
return self.insertMany()
def getConf(self, moduleName=None):
if not moduleName:
moduleName = self.objectDB.moduleFormName
return self.session.query(FrontModules
).filter(FrontModules.Name == moduleName
).first()
@timing
def getForm(self, objectType=None, moduleName=None, mode='edit'):
if 'ObjectType' in self.request.params:
objectType = self.request.params['ObjectType']
if objectType:
self.objectDB.type_id = objectType
if not moduleName:
moduleName = self.moduleFormName
form = self.getConfigJSON(moduleName + mode, objectType)
# form = None
if not form:
form = self.objectDB.getForm(mode, objectType, moduleName)
self.setConfigJSON(moduleName + mode, objectType, form)
return form
@timing
def getGrid(self, type_=None, moduleName=None):
if not moduleName:
moduleName = self.moduleGridName
if not type_:
type_ = self.typeObj
gridCols = self.getConfigJSON(moduleName, type_)
# gridCols = None
if not gridCols:
gridCols = self.objectDB.getGrid(
type_=type_, moduleName=moduleName)
self.setConfigJSON(moduleName, type_, gridCols)
return gridCols
@timing
def getFilter(self, type_=None, moduleName=None):
moduleName = self.request.params.get('FilterName', None)
if not moduleName:
moduleName = self.objectDB.moduleGridName
if not type_:
type_ = self.typeObj
filters = self.getConfigJSON(moduleName+'Filter', type_)
# filters = None
if not filters:
filtersList = self.objectDB.getFilters(
type_=type_, moduleName=moduleName)
filters = {}
for i in range(len(filtersList)):
filters[str(i)] = filtersList[i]
self.setConfigJSON(moduleName + 'Filter', type_, filters)
return filters
def getConfigJSON(self, moduleName, typeObj):
configJson = None
if localRedis is not None:
try:
config_from_redis = localRedis.get(moduleName+'_'+str(typeObj))
configJson = json.loads(config_from_redis.decode())
except:
pass
return configJson
def setConfigJSON(self, moduleName, typeObj, configObject):
# use Redis ? save json configuration for Forms, Grids and Filters
if localRedis is not None:
localRedis.set(moduleName+'_' + str(typeObj), json.dumps(configObject), ex=3600*12)
def getType(self):
table = self.objectDB.TypeClass.__table__
query = sa.select([table.c['ID'].label('val'),
table.c['Name'].label('label')])
response = [OrderedDict(row)
for row in self.session.execute(query).fetchall()]
return response
def export(self):
# dataResult = self.search(paging=False, noCount=True)
params, history, startDate = self.formatParams({}, False)
collection = self.getCollection()
dataResult = collection.search(filters=params.get('criteria'))
df = pd.DataFrame.from_records(dataResult,
columns=dataResult[0].keys(),
coerce_float=True)
fout = io.BytesIO()
writer = pd.ExcelWriter(fout)
df.to_excel(writer, sheet_name='Sheet1')
writer.save()
file = fout.getvalue()
dt = datetime.now().strftime('%d-%m-%Y')
return Response(
file,
content_disposition="attachment; filename=" + self.__name__ + "_export_" + dt + ".xlsx",
content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet') | 0.564098 | 0.097864 |
import random
import inspect
import urlparse
import traceback
from collections import Iterable
from pkgutil import iter_modules
import gevent
from gevent import Greenlet
from crawler.http import Request, Response
from crawler.queue import Empty
class Spider(Greenlet):
name = None
allowed_domains = []
start_urls = []
def __init__(self, crawler=None):
Greenlet.__init__(self)
self.crawler = crawler
self.redis = crawler.redis
self.session = crawler.session
self.spider_queue = self.crawler.spider_queue
self.schedule_queue = self.crawler.schedule_queue
self.running = False
self.log = crawler.log
@classmethod
def from_crawler(cls, crawler):
obj = cls(crawler)
return obj
def fetch(self):
while not self.crawler.event.is_set():
try:
request = self.spider_queue.get()
except Empty:
gevent.sleep(random.random())
continue
if not self._domain_allow(request.url):
self.log.warn("Url out of domain: {}".format(request.url))
continue
self.crawler.running = True
try:
resp = self.session.request(request.method, request.url,
data=request.data,
headers=request.headers,
allow_redirects=True,
verify=request.verify,
timeout=30)
response = Response(url=resp.url, content=resp.text,
request=request, code=resp.status_code,
headers=resp.headers, meta=request.meta,
obj=resp)
meta_refresh = response.xpath("//meta[@http-equiv='refresh']/@content").re(".*?url=(.*)$")
if meta_refresh:
self.log.debug("(meta refresh) %s" % meta_refresh[0])
resp = self.session.get(meta_refresh[0], allow_redirects=True)
response = Response(url=resp.url, content=resp.text,
request=request, code=resp.status_code,
headers=resp.headers, meta=request.meta,
obj=resp)
# registry fingeprint for request url.
self.crawler.fp.do_fingerprint(request)
reqs = self._extra(response)
if reqs:
if isinstance(reqs, Iterable):
for req in reqs:
if isinstance(req, Request):
self.schedule_queue.put(req)
else:
if isinstance(reqs, Request):
self.schedule_queue.put(reqs)
self.log.debug("(%d) %s %s" % (response.code, response.request.method, response.url))
except Exception as e:
self.log.error("Url fetch request error: %s" % request.url)
traceback.print_exc()
self.crawler.running = False
gevent.sleep(random.random())
def _extra(self, response):
callback = response.request.callback
if callback:
if callable(callback):
return callback(response)
elif type(callback) in (str, unicode):
cb = self.__getattribute__(callback)
return cb(response)
else:
return self.parse(response)
def _domain_allow(self, url):
if self.allowed_domains:
return urlparse.urlparse(url).netloc in self.allowed_domains
else:
return True
def parse(self, response):
urls = response.xpath("//a/@href").fetch()
for url in urls:
if not url.startswith("http:"):
url = self.abs_url(response, url)
yield Request(url)
def abs_url(self, response, url):
if isinstance(response, str):
prefix = response
else:
prefix = response.url
return urlparse.urljoin(prefix, url)
def _run(self):
self.fetch()
class SpiderManager(object):
def __init__(self):
self.spider_module = 'spiders'
self._spiders = {}
for module in self.walk_modules(self.spider_module):
self._filter_spiders(module)
def _filter_spiders(self, module):
for spcls in self.iter_spider_classes(module):
self._spiders[spcls.name] = spcls
def iter_spider_classes(self, module):
    """Yield the concrete Spider subclasses defined directly in *module*.

    A class qualifies when it subclasses Spider, was defined in *module*
    itself (not merely imported into it), and carries a non-empty
    ``name`` attribute.
    """
    # .values() (instead of the Python-2-only .itervalues()) keeps this
    # working under both Python 2 and 3 with identical behavior.
    for obj in vars(module).values():
        if (inspect.isclass(obj)
                and issubclass(obj, Spider)
                and obj.__module__ == module.__name__
                and getattr(obj, 'name', None)):
            yield obj
def walk_modules(self, path, load=False):
    """Import *path* and, when it is a package, every module under it.

    Returns the list of imported module objects, the package itself
    first.  The *load* flag is accepted but unused.
    """
    mods = []
    # fromlist=[''] forces __import__ to return the leaf module rather
    # than the top-level package.
    mod = __import__(path, {}, {}, [''])
    mods.append(mod)
    if hasattr(mod, '__path__'):  # only packages define __path__
        for _, subpath, ispkg in iter_modules(mod.__path__):
            fullpath = path + '.' + subpath
            if ispkg:
                # Recurse into nested packages.
                mods += self.walk_modules(fullpath)
            else:
                submod = __import__(fullpath, {}, {}, [''])
                mods.append(submod)
    return mods
def create(self, spider_name):
    """Return the spider class registered under *spider_name*.

    Raises KeyError with a descriptive message for unknown names.
    """
    if spider_name not in self._spiders:
        raise KeyError("Spider not found: %s" % spider_name)
    return self._spiders[spider_name]
def get_list(self):
return self._spiders.keys() | crawler/spider.py | import random
import inspect
import urlparse
import traceback
from collections import Iterable
from pkgutil import iter_modules
import gevent
from gevent import Greenlet
from crawler.http import Request, Response
from crawler.queue import Empty
class Spider(Greenlet):
    """Greenlet-based spider: pulls Requests from a queue, fetches them,
    and schedules any follow-up Requests produced by callbacks."""
    # Registry key used by SpiderManager; subclasses must set it.
    name = None
    # Netloc whitelist; empty means every domain is allowed.
    allowed_domains = []
    # Seed URLs (consumed elsewhere by the crawler).
    start_urls = []
    def __init__(self, crawler=None):
        Greenlet.__init__(self)
        self.crawler = crawler
        # Shared crawler resources.
        self.redis = crawler.redis
        self.session = crawler.session
        self.spider_queue = self.crawler.spider_queue
        self.schedule_queue = self.crawler.schedule_queue
        self.running = False
        self.log = crawler.log
    @classmethod
    def from_crawler(cls, crawler):
        # Alternate constructor used by the framework.
        obj = cls(crawler)
        return obj
    def fetch(self):
        """Main loop: fetch queued requests until the crawler's stop event is set."""
        while not self.crawler.event.is_set():
            try:
                request = self.spider_queue.get()
            except Empty:
                # Nothing queued; back off briefly before polling again.
                gevent.sleep(random.random())
                continue
            if not self._domain_allow(request.url):
                self.log.warn("Url out of domain: {}".format(request.url))
                continue
            self.crawler.running = True
            try:
                resp = self.session.request(request.method, request.url,
                                            data=request.data,
                                            headers=request.headers,
                                            allow_redirects=True,
                                            verify=request.verify,
                                            timeout=30)
                response = Response(url=resp.url, content=resp.text,
                                    request=request, code=resp.status_code,
                                    headers=resp.headers, meta=request.meta,
                                    obj=resp)
                # Follow HTML <meta http-equiv="refresh"> redirects manually,
                # since requests only follows HTTP-level redirects.
                meta_refresh = response.xpath("//meta[@http-equiv='refresh']/@content").re(".*?url=(.*)$")
                if meta_refresh:
                    self.log.debug("(meta refresh) %s" % meta_refresh[0])
                    resp = self.session.get(meta_refresh[0], allow_redirects=True)
                    response = Response(url=resp.url, content=resp.text,
                                        request=request, code=resp.status_code,
                                        headers=resp.headers, meta=request.meta,
                                        obj=resp)
                # registry fingeprint for request url.
                self.crawler.fp.do_fingerprint(request)
                # Run the request's callback (or parse) and schedule any
                # Request(s) it produced.
                reqs = self._extra(response)
                if reqs:
                    if isinstance(reqs, Iterable):
                        for req in reqs:
                            if isinstance(req, Request):
                                self.schedule_queue.put(req)
                    else:
                        if isinstance(reqs, Request):
                            self.schedule_queue.put(reqs)
                self.log.debug("(%d) %s %s" % (response.code, response.request.method, response.url))
            except Exception as e:
                # Log and keep the loop alive on any fetch/parse failure.
                self.log.error("Url fetch request error: %s" % request.url)
                traceback.print_exc()
            self.crawler.running = False
            gevent.sleep(random.random())
    def _extra(self, response):
        """Dispatch *response* to its callback (callable or method name),
        falling back to self.parse when no callback is set."""
        callback = response.request.callback
        if callback:
            if callable(callback):
                return callback(response)
            elif type(callback) in (str, unicode):
                cb = self.__getattribute__(callback)
                return cb(response)
        else:
            return self.parse(response)
    def _domain_allow(self, url):
        # Empty whitelist allows everything.
        if self.allowed_domains:
            return urlparse.urlparse(url).netloc in self.allowed_domains
        else:
            return True
    def parse(self, response):
        """Default callback: yield a Request for every <a href> on the page."""
        urls = response.xpath("//a/@href").fetch()
        for url in urls:
            if not url.startswith("http:"):
                # Resolve relative links against the page URL.
                url = self.abs_url(response, url)
            yield Request(url)
    def abs_url(self, response, url):
        # *response* may also be a base-URL string.
        if isinstance(response, str):
            prefix = response
        else:
            prefix = response.url
        return urlparse.urljoin(prefix, url)
    def _run(self):
        # Greenlet entry point.
        self.fetch()
class SpiderManager(object):
def __init__(self):
self.spider_module = 'spiders'
self._spiders = {}
for module in self.walk_modules(self.spider_module):
self._filter_spiders(module)
def _filter_spiders(self, module):
for spcls in self.iter_spider_classes(module):
self._spiders[spcls.name] = spcls
def iter_spider_classes(self, module):
for obj in vars(module).itervalues():
if inspect.isclass(obj) and \
issubclass(obj, Spider) and \
obj.__module__ == module.__name__ and \
getattr(obj, 'name', None):
yield obj
def walk_modules(self, path, load=False):
mods = []
mod = __import__(path, {}, {}, [''])
mods.append(mod)
if hasattr(mod, '__path__'):
for _, subpath, ispkg in iter_modules(mod.__path__):
fullpath = path + '.' + subpath
if ispkg:
mods += self.walk_modules(fullpath)
else:
submod = __import__(fullpath, {}, {}, [''])
mods.append(submod)
return mods
def create(self, spider_name):
try:
spcls = self._spiders[spider_name]
except KeyError:
raise KeyError("Spider not found: %s" % spider_name)
return spcls
def get_list(self):
return self._spiders.keys() | 0.255437 | 0.053651 |
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn import ModeKeys
from rgat.datasets import rdf
def sp2tfsp(x):
    """Convert a scipy sparse matrix *x* into a tf.SparseTensor."""
    coo = x.tocoo()
    # Stack the (row, col) pairs into an (nnz, 2) integer index array.
    # np.vstack + transpose replaces the deprecated np.mat / np.matrix
    # API and produces the same indices.
    indices = np.vstack((coo.row, coo.col)).transpose()
    return tf.SparseTensor(indices, coo.data, coo.shape)
def get_input_fn(
        mode,
        dataset_name,
        validation=True,
        name='data'):
    """Build the input function from RDF dataset.

    Args:
        mode (str): The current modality, one of 'train', 'eval', 'infer'.
        dataset_name (str): Specifies type of the RDF Dataset
        name (str): The name of the data set for variable name scoping. Defaults
            to 'data'.
        validation (bool): Whether to do validation. Defaults to `True`.

    Returns:
        callable: A zero-argument ``input_fn`` that, when called, returns
        tuple(dict, dict) -- the feature and label dictionaries for the
        chosen mode.
    """
    ModeKeys.validate(mode)
    data_dict = rdf.get_dataset(dataset_name)
    # Convert to SparseTensors
    support = {k: sp2tfsp(v) for (k, v) in data_dict['support'].items()}
    features = sp2tfsp(data_dict['features'])
    y_train, y_val, y_test, idx_train, idx_val, idx_test = get_splits(
        y=data_dict['labels'],
        train_idx=data_dict['train_idx'],
        test_idx=data_dict['test_idx'],
        validation=validation)
    # Pick the label matrix and node-index mask matching the mode.
    if mode == ModeKeys.TRAIN:
        y, y_ind = y_train, idx_train
    elif mode == ModeKeys.EVAL:
        y, y_ind = y_val, idx_val
    else:
        y, y_ind = y_test, idx_test
    # Convert y to an integer representation
    y = np.argmax(y, axis=1)
    def input_fn():
        with tf.name_scope(name):
            # A single-element dataset repeated forever: every step sees
            # the whole graph.
            dataset = tf.data.Dataset.from_tensors(
                {'labels': y,
                 'support': support,
                 'mask': y_ind})
            dataset = dataset.repeat()
            iterator = dataset.make_one_shot_iterator()
            next_elements = iterator.get_next()
            next_features = {
                'features': features, 'support': next_elements['support']}
            next_labels = {k: next_elements[k] for k in ['labels', 'mask']}
            return next_features, next_labels
    return input_fn
def get_splits(y, train_idx, test_idx, validation):
if validation:
tf.logging.info("Training on 80% of training set, evaluating on 20% of "
"training set. Test set is the test set, do not use "
"it.")
idx_train = train_idx[int(len(train_idx) / 5):]
idx_val = train_idx[:int(len(train_idx) / 5)]
idx_test = test_idx
else:
tf.logging.info("Training on training set, evaluating on "
"training set. Test set is the test set, use at your "
"peril.")
idx_train = train_idx
idx_val = train_idx # NB not not validation
idx_test = test_idx
tf.logging.info("Train set size: {}".format(len(idx_train)))
tf.logging.info("Validation set size: {}".format(len(idx_val)))
tf.logging.info("Test set size: {}".format(len(idx_test)))
y_train = np.zeros(y.shape)
y_val = np.zeros(y.shape)
y_test = np.zeros(y.shape)
y_train[idx_train] = np.array(y[idx_train].todense())
y_val[idx_val] = np.array(y[idx_val].todense())
y_test[idx_test] = np.array(y[idx_test].todense())
return y_train, y_val, y_test, idx_train, idx_val, idx_test | examples/rdf/inputs.py | import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn import ModeKeys
from rgat.datasets import rdf
def sp2tfsp(x):
coo = x.tocoo()
indices = np.mat([coo.row, coo.col]).transpose()
return tf.SparseTensor(indices, coo.data, coo.shape)
def get_input_fn(
mode,
dataset_name,
validation=True,
name='data'):
"""Build the input function from RDF dataset.
Args:
mode (str): The current modality, one of 'train', 'eval', 'infer'.
dataset_name (str): Specifies type of the RDF Dataset
name (str): The name of the data set for variable name scoping. Defaults
to 'data'.
validation (bool): Whether to do validation. Defaults to `True`.
Returns:
tuple(dict, dict) The dictionaries corresponding to the values for x and
y provided by the generator.
"""
ModeKeys.validate(mode)
data_dict = rdf.get_dataset(dataset_name)
# Convert to SparseTensors
support = {k: sp2tfsp(v) for (k, v) in data_dict['support'].items()}
features = sp2tfsp(data_dict['features'])
y_train, y_val, y_test, idx_train, idx_val, idx_test = get_splits(
y=data_dict['labels'],
train_idx=data_dict['train_idx'],
test_idx=data_dict['test_idx'],
validation=validation)
if mode == ModeKeys.TRAIN:
y, y_ind = y_train, idx_train
elif mode == ModeKeys.EVAL:
y, y_ind = y_val, idx_val
else:
y, y_ind = y_test, idx_test
# Convert y to an integer representation
y = np.argmax(y, axis=1)
def input_fn():
with tf.name_scope(name):
dataset = tf.data.Dataset.from_tensors(
{'labels': y,
'support': support,
'mask': y_ind})
dataset = dataset.repeat()
iterator = dataset.make_one_shot_iterator()
next_elements = iterator.get_next()
next_features = {
'features': features, 'support': next_elements['support']}
next_labels = {k: next_elements[k] for k in ['labels', 'mask']}
return next_features, next_labels
return input_fn
def get_splits(y, train_idx, test_idx, validation):
if validation:
tf.logging.info("Training on 80% of training set, evaluating on 20% of "
"training set. Test set is the test set, do not use "
"it.")
idx_train = train_idx[int(len(train_idx) / 5):]
idx_val = train_idx[:int(len(train_idx) / 5)]
idx_test = test_idx
else:
tf.logging.info("Training on training set, evaluating on "
"training set. Test set is the test set, use at your "
"peril.")
idx_train = train_idx
idx_val = train_idx # NB not not validation
idx_test = test_idx
tf.logging.info("Train set size: {}".format(len(idx_train)))
tf.logging.info("Validation set size: {}".format(len(idx_val)))
tf.logging.info("Test set size: {}".format(len(idx_test)))
y_train = np.zeros(y.shape)
y_val = np.zeros(y.shape)
y_test = np.zeros(y.shape)
y_train[idx_train] = np.array(y[idx_train].todense())
y_val[idx_val] = np.array(y[idx_val].todense())
y_test[idx_test] = np.array(y[idx_test].todense())
return y_train, y_val, y_test, idx_train, idx_val, idx_test | 0.876951 | 0.489931 |
import os, itertools
from data_read.imarisfiles import ImarisFiles
from config import system
import argparse
import numpy as np
import urllib
import zipfile
import logging
import pandas as pd
import glob
logging.getLogger(__name__)
def shift_list(seq, n):
    """Rotate *seq* left by *n* positions (*n* may be negative or > len)."""
    offset = n % len(seq)
    if offset == 0:
        return seq
    return seq[offset:] + seq[:offset]
def str2bool(v):
    """Parse a yes/no style string into a bool (argparse type helper)."""
    val = v.lower()
    if val in ('yes', 'true', 't', 'y', '1'):
        return True
    if val in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
def input_dataset(dataset):
    """Split *dataset* into a (train, val, test) triple, padding with None.

    Accepts a sequence of one, two or three datasets.  Raises ValueError
    for any other length (the original crashed with UnboundLocalError).
    """
    ds_train, ds_val, ds_test = None, None, None
    if len(dataset) == 1:
        # Unpack the single element.  The original returned the whole
        # one-element sequence here, inconsistent with the 2/3-element
        # branches which unpack.
        ds_train, = dataset
    elif len(dataset) == 2:
        ds_train, ds_val = dataset
    elif len(dataset) == 3:
        ds_train, ds_val, ds_test = dataset
    else:
        raise ValueError(
            'dataset must contain 1-3 elements, got %d' % len(dataset))
    return ds_train, ds_val, ds_test
def check_channels(ifile, channels):
    """Return True when the Imaris file *ifile* provides all *channels*.

    Channel names are compared case-insensitively; ground-truth spot
    names (system.spots_GT) count as always available.  Files that are
    not .ims are not checked and pass by default.
    """
    ext = os.path.splitext(ifile)[1]
    if ext == '.ims':
        imFile = ImarisFiles(ifile)
        lchannels = set([x.lower() for x in channels])
        fchannels = set([x.lower() for x in imFile.channelNames])
        lspots = set([x.lower() for x in system.spots_GT])
        # All requested channels must appear among file channels + spots.
        return len(fchannels.union(lspots).intersection(lchannels)) >= len(channels)
    else:
        # BUG FIX: the original built a Warning instance and discarded it,
        # emitting nothing; warnings.warn actually surfaces the message.
        import warnings
        warnings.warn('extension not recognized, channels are not checked')
        return True
def input_filenames(filenames, fext=None, do_recursive=False):
    """Expand *filenames* (paths and/or directories) into a flat file list.

    Directories are expanded to the contained files whose extension is in
    *fext* (default ['.ims']); with *do_recursive* the walk descends into
    subdirectories.  Plain file paths are kept as-is.
    """
    if fext is None:
        fext = ['.ims']
    if not isinstance(filenames, list):
        filenames = [filenames]
    l_trainpath = []
    for ifile in filenames:
        # BUG FIX: the original tested os.path.isdir(filenames) -- the
        # whole list -- so directories were never expanded.
        if os.path.isdir(ifile):
            if do_recursive:
                # (inner loop variable renamed: the original shadowed the
                # *filenames* parameter inside the comprehension)
                trainpath_aux = [os.path.join(dp, f)
                                 for dp, dn, fnames in os.walk(ifile)
                                 for f in fnames
                                 if os.path.splitext(f)[1] in fext]
            else:
                trainpath_aux = [os.path.join(ifile, x)
                                 for x in os.listdir(ifile)
                                 if os.path.splitext(x)[1] in fext]
        else:
            trainpath_aux = [ifile]
        for x in trainpath_aux:
            l_trainpath.append(x)
    # BUG FIX: the original fell off the end and returned None.
    return l_trainpath
def input_files_format(in_file, channels=None, do_recursive=False, fext=None):
    """Normalize *in_file* into a flat list of file paths.

    Directories are expanded to contained files whose extension is in
    *fext* (default ['.ims']); *do_recursive* walks subdirectories.  When
    *channels* is given, files missing any requested channel are dropped.
    Returns None unchanged when *in_file* is None.
    """
    if fext is None:
        fext = ['.ims']
    elif not isinstance(fext, list):
        fext = [fext]
    if in_file is None:
        return in_file
    elif not isinstance(in_file, list):
        in_file = [in_file]
    l_trainpath = []
    for entry in in_file:
        if not os.path.isdir(entry):
            # Plain file path: keep as-is.
            l_trainpath.append(entry)
            continue
        if do_recursive:
            expanded = [os.path.join(root, fname)
                        for root, _dirs, fnames in os.walk(entry)
                        for fname in fnames
                        if os.path.splitext(fname)[1] in fext]
        else:
            expanded = [os.path.join(entry, fname)
                        for fname in os.listdir(entry)
                        if os.path.splitext(fname)[1] in fext]
        l_trainpath.extend(expanded)
    if channels is not None:
        l_trainpath = [p for p in l_trainpath if check_channels(p, channels)]
    return l_trainpath
def download_url_zip(data_url, download_dir, authentify=None):
    """Download a zip from *data_url* into *download_dir*, extract it
    alongside, then delete the archive.

    *authentify*, when given, is a dict with keys 'root_url', 'username'
    and 'password' used for HTTP basic auth.
    NOTE(review): this calls urllib.request.* but the module only does
    ``import urllib``; on Python 3 this works only if urllib.request was
    imported somewhere else first -- confirm.
    """
    # Login if needed
    if authentify is not None:
        password_mgr = urllib.request.HTTPPasswordMgrWithDefaultRealm()
        password_mgr.add_password(None, authentify["root_url"], authentify["username"], authentify["password"])
        handler = urllib.request.HTTPBasicAuthHandler(password_mgr)
        opener = urllib.request.build_opener(handler)
        opener.open(authentify["root_url"])
        # Make the authenticated opener the process-wide default.
        urllib.request.install_opener(opener)
    logging.info("Downloading: {:s}".format(data_url))
    # Download file
    fname = data_url.split('/')[-1]
    # Note: download_dir is rebound to the full target file path here.
    download_dir = os.path.join(download_dir, fname)
    fdir, _ = urllib.request.urlretrieve(data_url, download_dir)
    # Unzip file
    with zipfile.ZipFile(fdir, 'r') as zip_ref:
        # Extract next to the downloaded archive.
        zip_ref.extractall(os.path.split(zip_ref.filename)[0])
    # Delete zip
    os.remove(fdir)
def invert_listdict(orig_dict):
    """Invert a {key: [values]} mapping into {value: key}.

    When a value appears under several keys, the last key (in dict
    iteration order) wins.
    """
    return {value: key
            for key, values in orig_dict.items()
            for value in values}
def aggregate_metrics(save_dir, fname='metrics.csv', read_dir=None):
    """Collect per-fold metrics CSVs into one combined CSV.

    Scans numeric subdirectories (one per CV fold) of *read_dir* (or
    *save_dir*) for metrics files and writes the combined table into
    *save_dir*.  Returns True when at least one fold had metrics or an
    explicit 'notrain.txt' marker.
    """
    cmetrics = pd.DataFrame()
    has_metrics = False
    if read_dir:
        # If reading boundmax
        dir_aux = read_dir
        fname_aux = 'metrics.csv'
        # Derive the channel-combination suffix from the directory name,
        # shifting 1-based channel digits to 0-based (e.g. 'ch12_l2' -> '01').
        chcomb = "".join([str(int(x) - 1) for x in
                          os.path.split(read_dir)[1].replace("ch", "").replace("_l2", "").replace("_l4", "")])
        save_metrics = os.path.join(
            save_dir, 'metrics_ch' + chcomb + '.csv')
    else:
        dir_aux = save_dir
        fname_aux = fname
        save_metrics = os.path.join(save_dir, fname)
    for dir in os.listdir(dir_aux):
        metrics_file = os.path.join(dir_aux, dir, fname_aux)
        # Only numeric directory names are treated as CV folds.
        if os.path.isdir(os.path.join(dir_aux, dir)) and dir.isdigit():
            if os.path.isfile(metrics_file):
                has_metrics = True
                pmetrics = pd.read_csv(metrics_file,
                                       sep=',',
                                       header=0,
                                       index_col=0
                                       ).transpose()
                # pmetrics = pd.read_csv(os.path.join(save_dir, dir, 'metrics.csv'))
                cmetrics['model_cv' + dir] = pmetrics['model']
            elif 'notrain.txt' in os.listdir(os.path.join(dir_aux, dir)):
                # Marker meaning the fold intentionally skipped training.
                has_metrics = True
    cmetrics.to_csv(save_metrics)
    return has_metrics
def aggregate_metrics_chdel(save_dir):
    """Combine per-fold metrics.csv files under *save_dir* into one CSV.

    Like aggregate_metrics without the read_dir/boundmax handling or the
    'notrain.txt' marker.  Returns True when any fold had metrics.
    """
    cmetrics = pd.DataFrame()
    has_metrics = False
    for dir in os.listdir(save_dir):
        metrics_file = os.path.join(save_dir, dir, 'metrics.csv')
        # Only numeric fold directories that actually contain metrics.
        if os.path.isdir(os.path.join(save_dir, dir)) and dir.isdigit() and os.path.isfile(metrics_file):
            has_metrics = True
            pmetrics = pd.read_csv(metrics_file,
                                   sep=',',
                                   header=0,
                                   index_col=0
                                   ).transpose()
            # pmetrics = pd.read_csv(os.path.join(save_dir, dir, 'metrics.csv'))
            cmetrics['model_cv' + dir] = pmetrics['model']
    cmetrics.to_csv(os.path.join(save_dir, 'metrics.csv'))
    return has_metrics
def aggregate_metrics_sample(save_dir, chdel=False):
    """Combine per-sample metrics CSVs from fold '0' into one table.

    Looks for files matching metrics_sample*.csv in <save_dir>/0 and,
    when any exist, writes metrics_samples.csv into *save_dir*.  Returns
    True when at least one per-sample file was found.  *chdel* is unused.
    """
    cmetrics = pd.DataFrame()
    has_metrics = False
    fdir = os.path.join(save_dir, '0')
    for metrics_file in glob.glob(os.path.join(fdir, 'metrics_sample*.csv')):
        has_metrics = True
        pmetrics = pd.read_csv(metrics_file,
                               sep=',',
                               header=0,
                               index_col=0
                               ).transpose()
        # pmetrics = pd.read_csv(os.path.join(save_dir, dir, 'metrics.csv'))
        # Column name is the sample id, e.g. 'sample3' from 'metrics_sample3.csv'.
        sname = os.path.splitext(os.path.split(metrics_file)[1])[0].replace("metrics_", "")
        cmetrics[sname] = pmetrics['model']
    if has_metrics:
        cmetrics.to_csv(os.path.join(save_dir, 'metrics_samples.csv'))
    return has_metrics
def get_weights(class_counts, log_weight=True):
    """Compute inverse-frequency class weights.

    weight_i = total / (n_classes * count_i); with *log_weight* the
    weights are smoothed as log(e + weight).
    """
    counts = np.array(class_counts)
    weights = counts.sum() / (len(counts) * counts)
    return np.log(np.e + weights) if log_weight else weights
def sort_markers(lmarkers='12345', length_first=True):
    """Enumerate marker combinations of *lmarkers*, shortest first.

    Returns every combination of 1 up to min(len(lmarkers), 5) markers,
    grouped by increasing length (all singles, then all pairs, ...),
    with the characters of each combination sorted.

    The original hand-unrolled loops only handled combination lengths
    1-5, which is preserved here.  Its ``length_first=False`` branch was
    dead code that crashed with NameError (it referenced undefined loop
    variables and never bound ``l``), so that mode now raises
    NotImplementedError explicitly.
    """
    if not length_first:
        # The original else-branch was broken and crashed with NameError
        # whenever taken; fail loudly instead of accidentally.
        raise NotImplementedError(
            "length_first=False was never functional in the original "
            "implementation")
    max_len = min(len(lmarkers), 5)  # original stopped at 5-marker combos
    combos = []
    for size in range(1, max_len + 1):
        for combo in itertools.combinations(lmarkers, size):
            # Sort characters within each combination, as the original
            # final normalization pass did.
            combos.append("".join(sorted(combo)))
    return combos
def rename_channels(names_old, lmarkers):
    """Re-sort channel-combination names by marker priority.

    NOTE(review): the chr(int(...)) round-trips map digits to low
    control characters as collision-free placeholders while renaming;
    this assumes lmarkers are digit-like labels -- confirm at call sites.
    """
    # Map each marker label to its 1-based priority rank.
    mrename = {str(k): str(v) for k, v in zip(lmarkers, range(1, len(lmarkers) + 1))}
    # Two-pass replace via placeholder chars so renames don't cascade.
    for k, v in mrename.items():
        names_old = [x.replace(k, chr(int(v))) for x in names_old]
    for v in mrename.values():
        names_old = [x.replace(chr(int(v)), str(v)) for x in names_old]
    # Normalize: strip 'ch' prefix, sort digits, tag with a leading 'm'.
    names_aux = ['m' + "".join(sorted(x.replace('ch', ''))) for x in names_old]
    names_new_sorted = []
    # For each marker rank, pull out the combos containing it, longest
    # combinations first (key sorts by -len, then lexicographically).
    for n in range(1, len(lmarkers) + 1):
        names_new_sorted += sorted([x for x in names_aux if str(n) in x], key=lambda x: (-len(x), x), reverse=False)
        names_aux = [x for x in names_aux if str(n) not in x]
    names_sorted = [x.replace("m", "") for x in names_new_sorted]
    # Map ranks back to the original marker labels (again via placeholders).
    for k, v in mrename.items():
        names_sorted = [x.replace(v, chr(int(k))) for x in names_sorted]
    for k in mrename.keys():
        names_sorted = [x.replace(chr(int(k)), k) for x in names_sorted]
    names_sorted = ["".join(sorted(x.replace('ch', ''))) for x in names_sorted]
    return names_sorted
def marker_combinations(nmarkers):
return list(set([tuple(set(x)) for x in itertools.product(np.arange(nmarkers), repeat=nmarkers)])) | MiNTiF_Utils/utils/common_utils.py | import os, itertools
from data_read.imarisfiles import ImarisFiles
from config import system
import argparse
import numpy as np
import urllib
import zipfile
import logging
import pandas as pd
import glob
logging.getLogger(__name__)
def shift_list(seq, n):
n = n % len(seq)
return seq if n == 0 else seq[n:] + seq[:n]
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def input_dataset(dataset):
if len(dataset) == 1:
ds_train, ds_val, ds_test = dataset, None, None
if len(dataset) == 2:
ds_train, ds_val = dataset
ds_test = None
if len(dataset) == 3:
ds_train, ds_val, ds_test = dataset
return ds_train, ds_val, ds_test
def check_channels(ifile, channels):
ext = os.path.splitext(ifile)[1]
if ext == '.ims':
imFile = ImarisFiles(ifile)
lchannels = set([x.lower() for x in channels])
fchannels = set([x.lower() for x in imFile.channelNames])
lspots = set([x.lower() for x in system.spots_GT])
return len(fchannels.union(lspots).intersection(lchannels)) >= len(channels)
else:
Warning('extension not recognized, channels are not checked')
return True
def input_filenames(filenames, fext=None, do_recursive=False):
if fext is None:
fext = ['.ims']
if not isinstance(filenames, list):
filenames = [filenames]
l_trainpath = []
for ifile in filenames:
if os.path.isdir(filenames):
if do_recursive:
trainpath_aux = [os.path.join(dp, f) for dp, dn, filenames in os.walk(ifile) for f in filenames if
os.path.splitext(f)[1] in fext]
else:
trainpath_aux = [os.path.join(ifile, x) for x in os.listdir(ifile) if
os.path.splitext(x)[1] in fext]
else:
trainpath_aux = [ifile]
for x in trainpath_aux:
l_trainpath.append(x)
def input_files_format(in_file, channels=None, do_recursive=False, fext=None):
if fext is None:
fext = ['.ims']
elif not isinstance(fext, list):
fext = [fext]
if in_file is None:
return in_file
elif not isinstance(in_file, list):
in_file = [in_file]
l_trainpath = []
for ifile in in_file:
if os.path.isdir(ifile):
if do_recursive:
trainpath_aux = [os.path.join(dp, f) for dp, dn, filenames in os.walk(ifile) for f in filenames if
os.path.splitext(f)[1] in fext]
else:
trainpath_aux = [os.path.join(ifile, x) for x in os.listdir(ifile) if
os.path.splitext(x)[1] in fext]
else:
trainpath_aux = [ifile]
for x in trainpath_aux:
l_trainpath.append(x)
if not channels is None:
l_trainpath = [x for x in l_trainpath if check_channels(x, channels)]
return l_trainpath
def download_url_zip(data_url, download_dir, authentify=None):
# Login if needed
if authentify is not None:
password_mgr = urllib.request.HTTPPasswordMgrWithDefaultRealm()
password_mgr.add_password(None, authentify["root_url"], authentify["username"], authentify["password"])
handler = urllib.request.HTTPBasicAuthHandler(password_mgr)
opener = urllib.request.build_opener(handler)
opener.open(authentify["root_url"])
urllib.request.install_opener(opener)
logging.info("Downloading: {:s}".format(data_url))
# Download file
fname = data_url.split('/')[-1]
download_dir = os.path.join(download_dir, fname)
fdir, _ = urllib.request.urlretrieve(data_url, download_dir)
# Unzip file
with zipfile.ZipFile(fdir, 'r') as zip_ref:
zip_ref.extractall(os.path.split(zip_ref.filename)[0])
# Delete zip
os.remove(fdir)
def invert_listdict(orig_dict):
inv_dict = {}
for id, vals in orig_dict.items():
for v in vals:
inv_dict[v] = id
return inv_dict
def aggregate_metrics(save_dir, fname='metrics.csv', read_dir=None):
cmetrics = pd.DataFrame()
has_metrics = False
if read_dir:
# If reading boundmax
dir_aux = read_dir
fname_aux = 'metrics.csv'
chcomb = "".join([str(int(x) - 1) for x in
os.path.split(read_dir)[1].replace("ch", "").replace("_l2", "").replace("_l4", "")])
save_metrics = os.path.join(
save_dir, 'metrics_ch' + chcomb + '.csv')
else:
dir_aux = save_dir
fname_aux = fname
save_metrics = os.path.join(save_dir, fname)
for dir in os.listdir(dir_aux):
metrics_file = os.path.join(dir_aux, dir, fname_aux)
if os.path.isdir(os.path.join(dir_aux, dir)) and dir.isdigit():
if os.path.isfile(metrics_file):
has_metrics = True
pmetrics = pd.read_csv(metrics_file,
sep=',',
header=0,
index_col=0
).transpose()
# pmetrics = pd.read_csv(os.path.join(save_dir, dir, 'metrics.csv'))
cmetrics['model_cv' + dir] = pmetrics['model']
elif 'notrain.txt' in os.listdir(os.path.join(dir_aux, dir)):
has_metrics = True
cmetrics.to_csv(save_metrics)
return has_metrics
def aggregate_metrics_chdel(save_dir):
cmetrics = pd.DataFrame()
has_metrics = False
for dir in os.listdir(save_dir):
metrics_file = os.path.join(save_dir, dir, 'metrics.csv')
if os.path.isdir(os.path.join(save_dir, dir)) and dir.isdigit() and os.path.isfile(metrics_file):
has_metrics = True
pmetrics = pd.read_csv(metrics_file,
sep=',',
header=0,
index_col=0
).transpose()
# pmetrics = pd.read_csv(os.path.join(save_dir, dir, 'metrics.csv'))
cmetrics['model_cv' + dir] = pmetrics['model']
cmetrics.to_csv(os.path.join(save_dir, 'metrics.csv'))
return has_metrics
def aggregate_metrics_sample(save_dir, chdel=False):
cmetrics = pd.DataFrame()
has_metrics = False
fdir = os.path.join(save_dir, '0')
for metrics_file in glob.glob(os.path.join(fdir, 'metrics_sample*.csv')):
has_metrics = True
pmetrics = pd.read_csv(metrics_file,
sep=',',
header=0,
index_col=0
).transpose()
# pmetrics = pd.read_csv(os.path.join(save_dir, dir, 'metrics.csv'))
sname = os.path.splitext(os.path.split(metrics_file)[1])[0].replace("metrics_", "")
cmetrics[sname] = pmetrics['model']
if has_metrics:
cmetrics.to_csv(os.path.join(save_dir, 'metrics_samples.csv'))
return has_metrics
def get_weights(class_counts, log_weight=True):
class_counts = np.array(class_counts)
class_weight = sum(class_counts) / (len(class_counts) * class_counts)
if log_weight:
return np.log(np.e + class_weight)
else:
return class_weight
def sort_markers(lmarkers='12345', length_first=True):
if length_first:
l = []
nt = len(lmarkers)
for n1 in range(nt):
l += [lmarkers[n1]]
for n1 in range(nt):
for n2 in range(n1 + 1, nt):
l += ["".join([lmarkers[x] for x in (n1, n2)])]
for n1 in range(nt):
for n2 in range(n1 + 1, nt):
for n3 in range(n2 + 1, nt):
l += ["".join([lmarkers[x] for x in (n1, n2, n3)])]
for n1 in range(nt):
for n2 in range(n1 + 1, nt):
for n3 in range(n2 + 1, nt):
for n4 in range(n3 + 1, nt):
l += ["".join([lmarkers[x] for x in (n1, n2, n3, n4)])]
for n1 in range(nt):
for n2 in range(n1 + 1, nt):
for n3 in range(n2 + 1, nt):
for n4 in range(n3 + 1, nt):
for n5 in range(n4 + 1, nt):
l += ["".join([lmarkers[x] for x in (n1, n2, n3, n4, n5)])]
l = ["".join(sorted([y for y in x])) for x in l]
else:
if length_first:
l = []
nt = len(lmarkers)
for n1 in range(nt):
for n2 in range(n1 + 1, nt):
for n3 in range(n2 + 1, nt):
for n4 in range(n3 + 1, nt):
for n5 in range(n4 + 1, nt):
l += ["".join([lmarkers[x] for x in (n1, n2, n3, n4, n5)])]
l += ["".join([lmarkers[x] for x in (n1, n2, n3, n4)])]
l += ["".join([lmarkers[x] for x in (n1, n2, n3, n4)])]
for n4 in range(n3 + 1, nt):
l += ["".join([lmarkers[x] for x in (n1, n2, n4)])]
for n3 in range(n2 + 1, nt):
l += ["".join([lmarkers[x] for x in (n1, n3, n4)])]
for n1 in range(nt):
for n2 in range(n1 + 1, nt):
for n3 in range(n2 + 1, nt):
for n4 in range(n3 + 1, nt):
l += ["".join([lmarkers[x] for x in (n1, n2, n3, n4, n5)])]
for n1 in range(nt):
for n2 in range(n1 + 1, nt):
for n3 in range(n2 + 1, nt):
l += ["".join([lmarkers[x] for x in (n1, n2, n3, n4, n5)])]
for n1 in range(nt):
for n2 in range(n1 + 1, nt):
l += ["".join([lmarkers[x] for x in (n1, n2, n3, n4, n5)])]
for n1 in range(nt):
l += ["".join([lmarkers[x] for x in (n1, n2, n3, n4, n5)])]
l = ["".join(sorted([y for y in x])) for x in l]
return l
def rename_channels(names_old, lmarkers):
mrename = {str(k): str(v) for k, v in zip(lmarkers, range(1, len(lmarkers) + 1))}
for k, v in mrename.items():
names_old = [x.replace(k, chr(int(v))) for x in names_old]
for v in mrename.values():
names_old = [x.replace(chr(int(v)), str(v)) for x in names_old]
names_aux = ['m' + "".join(sorted(x.replace('ch', ''))) for x in names_old]
names_new_sorted = []
for n in range(1, len(lmarkers) + 1):
names_new_sorted += sorted([x for x in names_aux if str(n) in x], key=lambda x: (-len(x), x), reverse=False)
names_aux = [x for x in names_aux if str(n) not in x]
names_sorted = [x.replace("m", "") for x in names_new_sorted]
for k, v in mrename.items():
names_sorted = [x.replace(v, chr(int(k))) for x in names_sorted]
for k in mrename.keys():
names_sorted = [x.replace(chr(int(k)), k) for x in names_sorted]
names_sorted = ["".join(sorted(x.replace('ch', ''))) for x in names_sorted]
return names_sorted
def marker_combinations(nmarkers):
return list(set([tuple(set(x)) for x in itertools.product(np.arange(nmarkers), repeat=nmarkers)])) | 0.318273 | 0.175009 |
from django import forms
from ummeli.opportunities.models import *
from ummeli.vlive.forms import PMLModelForm, PMLForm
class StatusUpdateForm(PMLForm):
    """Single-field form for posting a status update."""
    # 160 characters matches the SMS length limit.
    title = forms.CharField(label='Status', required=True, max_length=160)
class JobEditForm(PMLModelForm):
    """Model form for creating/editing a Job posting."""
    # All provinces are selectable; empty_label=None removes the blank choice.
    province = forms.ModelChoiceField(empty_label=None, queryset=Province.objects.all(), label='Province', required=True)
    # Choice value 0 is the "Please choose" placeholder, so min_value=1
    # forces the user to pick a real category.
    category = forms.IntegerField(widget=forms.Select(choices=CATEGORY_CHOICES),
                                  required=True,
                                  min_value=1,
                                  error_messages={'min_value': 'Please choose a category.'})
    title = forms.CharField(label='title', required=True)
    description = forms.CharField(label='Description',
                                  required=True,
                                  help_text='Please provide as much information about the job as possible including contact details.',
                                  widget=forms.Textarea)

    class Meta:
        model = Job
        fields = ('province', 'category', 'title', 'description')
class OpportunityEditForm(PMLForm):
BURSARY = 1
TRAINING = 2
VOLUNTEERING = 3
INTERNSHIP = 4
OPPORTUNITY_CHOICES = [(0, 'Please choose'),
(BURSARY, 'Bursary'),
(TRAINING, 'Training'),
(VOLUNTEERING, 'Volunteering'),
(INTERNSHIP, 'Internship')]
opportunity_type = forms.IntegerField(widget=forms.Select(choices=OPPORTUNITY_CHOICES),
required=True,
min_value=1,
error_messages={'min_value': 'Please choose an opportunity type.'})
province = forms.ChoiceField(choices=[(p.pk, p.get_province_display()) for p in Province.objects.all()], label='Province', required=True)
title = forms.CharField(label='title', required=True)
description = forms.CharField(label='Description',
required=True,
help_text='Please provide as much information about the opportunity as possible including contact details.',
widget=forms.Textarea)
def get_model(self):
if self.is_valid():
opportunity_type = self.cleaned_data['opportunity_type']
if opportunity_type == self.BURSARY:
return Bursary
if opportunity_type == self.TRAINING:
return Training
if opportunity_type == self.VOLUNTEERING:
return Volunteer
if opportunity_type == self.INTERNSHIP:
return Internship
return None | ummeli/vlive/community/forms.py | from django import forms
from ummeli.opportunities.models import *
from ummeli.vlive.forms import PMLModelForm, PMLForm
class StatusUpdateForm(PMLForm):
title = forms.CharField(label='Status', required=True, max_length=160)
class JobEditForm(PMLModelForm):
province = forms.ModelChoiceField(empty_label=None, queryset=Province.objects.all(), label='Province', required=True)
category = forms.IntegerField(widget=forms.Select(choices=CATEGORY_CHOICES),
required=True,
min_value=1,
error_messages={'min_value': 'Please choose a category.'})
title = forms.CharField(label='title', required=True)
description = forms.CharField(label='Description',
required=True,
help_text='Please provide as much information about the job as possible including contact details.',
widget=forms.Textarea)
class Meta:
model = Job
fields = ('province', 'category', 'title', 'description')
class OpportunityEditForm(PMLForm):
BURSARY = 1
TRAINING = 2
VOLUNTEERING = 3
INTERNSHIP = 4
OPPORTUNITY_CHOICES = [(0, 'Please choose'),
(BURSARY, 'Bursary'),
(TRAINING, 'Training'),
(VOLUNTEERING, 'Volunteering'),
(INTERNSHIP, 'Internship')]
opportunity_type = forms.IntegerField(widget=forms.Select(choices=OPPORTUNITY_CHOICES),
required=True,
min_value=1,
error_messages={'min_value': 'Please choose an opportunity type.'})
province = forms.ChoiceField(choices=[(p.pk, p.get_province_display()) for p in Province.objects.all()], label='Province', required=True)
title = forms.CharField(label='title', required=True)
description = forms.CharField(label='Description',
required=True,
help_text='Please provide as much information about the opportunity as possible including contact details.',
widget=forms.Textarea)
def get_model(self):
if self.is_valid():
opportunity_type = self.cleaned_data['opportunity_type']
if opportunity_type == self.BURSARY:
return Bursary
if opportunity_type == self.TRAINING:
return Training
if opportunity_type == self.VOLUNTEERING:
return Volunteer
if opportunity_type == self.INTERNSHIP:
return Internship
return None | 0.485112 | 0.090816 |
from alembic import op, context
import sqlalchemy as sa
from rucio.db.sqla.constants import DIDType
from rucio.db.sqla.types import GUID
# revision identifiers, used by Alembic.
revision = '3ad36e2268b0'
down_revision = '42db2617c364'
def upgrade():
    """Apply: widen collection_replicas and add the updated_col_rep table."""
    dialect = context.get_context().dialect.name
    non_sqlite = dialect != 'sqlite'
    # Column additions are skipped on SQLite (limited ALTER TABLE support,
    # presumably — matches the guard used in downgrade()).
    if non_sqlite:
        op.add_column('collection_replicas', sa.Column('available_replicas_cnt', sa.BigInteger()))
        op.add_column('collection_replicas', sa.Column('available_bytes', sa.BigInteger()))
    # The table itself is created on every dialect.
    op.create_table('updated_col_rep',
                    sa.Column('id', GUID()),
                    sa.Column('scope', sa.String(25)),
                    sa.Column('name', sa.String(255)),
                    sa.Column('did_type', DIDType.db_type(name='UPDATED_COL_REP_TYPE_CHK')),
                    sa.Column('rse_id', GUID()),
                    sa.Column('updated_at', sa.DateTime),
                    sa.Column('created_at', sa.DateTime))
    # Keys, constraints and the lookup index: everywhere except SQLite.
    if non_sqlite:
        op.create_primary_key('UPDATED_COL_REP_PK', 'updated_col_rep', ['id'])
        op.create_check_constraint('UPDATED_COL_REP_SCOPE_NN', 'updated_col_rep', 'scope IS NOT NULL')
        op.create_check_constraint('UPDATED_COL_REP_NAME_NN', 'updated_col_rep', 'name IS NOT NULL')
        op.create_index('UPDATED_COL_REP_SNR_IDX', 'updated_col_rep', ['scope', 'name', 'rse_id'])
def downgrade():
    """Revert: drop the added columns and the updated_col_rep table."""
    # Column drops are skipped on SQLite, mirroring upgrade().
    if context.get_context().dialect.name != 'sqlite':
        op.drop_column('collection_replicas', 'available_replicas_cnt')
        op.drop_column('collection_replicas', 'available_bytes')
    # Named-constraint drops run only on PostgreSQL — presumably the other
    # dialects drop them implicitly with the table; confirm for Oracle/MySQL.
    if context.get_context().dialect.name == 'postgresql':
        op.drop_constraint('UPDATED_COL_REP_PK', 'updated_col_rep', type_='primary')
        op.drop_constraint('UPDATED_COL_REP_SCOPE_NN', 'updated_col_rep')
        op.drop_constraint('UPDATED_COL_REP_NAME_NN', 'updated_col_rep')
        op.drop_constraint('UPDATED_COL_REP_TYPE_CHK', 'updated_col_rep')
    # NOTE(review): the index is created only on non-sqlite in upgrade(),
    # but dropped unconditionally here — verify this works on SQLite.
    op.drop_index('UPDATED_COL_REP_SNR_IDX', 'updated_col_rep')
    op.drop_table('updated_col_rep') | lib/rucio/db/sqla/migrate_repo/versions/3ad36e2268b0_create_collection_replicas_updates_table.py | from alembic import op, context  # NOTE(review): trailing "| path | import" tokens are dataset-dump residue fused onto this line, not code
import sqlalchemy as sa
from rucio.db.sqla.constants import DIDType
from rucio.db.sqla.types import GUID
# revision identifiers, used by Alembic.
revision = '3ad36e2268b0'
down_revision = '42db2617c364'
def upgrade():
    """Apply: widen collection_replicas and add the updated_col_rep table."""
    dialect = context.get_context().dialect.name
    non_sqlite = dialect != 'sqlite'
    # Column additions are skipped on SQLite (limited ALTER TABLE support,
    # presumably — matches the guard used in downgrade()).
    if non_sqlite:
        op.add_column('collection_replicas', sa.Column('available_replicas_cnt', sa.BigInteger()))
        op.add_column('collection_replicas', sa.Column('available_bytes', sa.BigInteger()))
    # The table itself is created on every dialect.
    op.create_table('updated_col_rep',
                    sa.Column('id', GUID()),
                    sa.Column('scope', sa.String(25)),
                    sa.Column('name', sa.String(255)),
                    sa.Column('did_type', DIDType.db_type(name='UPDATED_COL_REP_TYPE_CHK')),
                    sa.Column('rse_id', GUID()),
                    sa.Column('updated_at', sa.DateTime),
                    sa.Column('created_at', sa.DateTime))
    # Keys, constraints and the lookup index: everywhere except SQLite.
    if non_sqlite:
        op.create_primary_key('UPDATED_COL_REP_PK', 'updated_col_rep', ['id'])
        op.create_check_constraint('UPDATED_COL_REP_SCOPE_NN', 'updated_col_rep', 'scope IS NOT NULL')
        op.create_check_constraint('UPDATED_COL_REP_NAME_NN', 'updated_col_rep', 'name IS NOT NULL')
        op.create_index('UPDATED_COL_REP_SNR_IDX', 'updated_col_rep', ['scope', 'name', 'rse_id'])
def downgrade():
    """Revert: drop the added columns and the updated_col_rep table."""
    # Column drops are skipped on SQLite, mirroring upgrade().
    if context.get_context().dialect.name != 'sqlite':
        op.drop_column('collection_replicas', 'available_replicas_cnt')
        op.drop_column('collection_replicas', 'available_bytes')
    # Named-constraint drops run only on PostgreSQL — presumably the other
    # dialects drop them implicitly with the table; confirm for Oracle/MySQL.
    if context.get_context().dialect.name == 'postgresql':
        op.drop_constraint('UPDATED_COL_REP_PK', 'updated_col_rep', type_='primary')
        op.drop_constraint('UPDATED_COL_REP_SCOPE_NN', 'updated_col_rep')
        op.drop_constraint('UPDATED_COL_REP_NAME_NN', 'updated_col_rep')
        op.drop_constraint('UPDATED_COL_REP_TYPE_CHK', 'updated_col_rep')
    # NOTE(review): the index is created only on non-sqlite in upgrade(),
    # but dropped unconditionally here — verify this works on SQLite.
    op.drop_index('UPDATED_COL_REP_SNR_IDX', 'updated_col_rep')
    op.drop_table('updated_col_rep') | 0.303525 | 0.076788  # NOTE(review): trailing "| 0.30… | 0.07…" tokens are dataset-dump residue fused onto this line, not code
import torch
import torch.nn as nn
from ove.utils.modeling import Sequential
from ..networks import (
FilterInterpolationModule, DepthFlowProjectionModule,
MultipleBasicBlock, S2DF, PWCDCNet, HourGlass
)
from ..utils import Stack
class DAIN_slowmotion(nn.Module):
    """Depth-Aware frame INterpolation (DAIN) network, slow-motion mode.

    Given two consecutive frames ``I0`` and ``I1``, :meth:`forward` writes
    ``I0`` plus ``sf - 1`` interpolated in-between frames into a
    caller-supplied ``target`` buffer, cropped by ``padding``.
    Requires CUDA (filter module is moved to GPU in ``__init__``).
    """
    def __init__(
        self,
        size,
        batch_size=1,
        sf=2,
        rectify=False,
        padding=None,
        useAnimationMethod=0
    ):
        # size: forwarded to PWCDCNet (flow estimator) as positional args.
        # sf: slow-motion factor; sf - 1 intermediate frames are produced.
        # padding: crop indices (x0, x1, y0, y1) applied as
        #   [:, :, padding[2]:padding[3], padding[0]:padding[1]].
        # useAnimationMethod: 0 = estimate depth with the depth network;
        #   non-zero skips the depth net (1 additionally flattens depth so
        #   the depth weighting is effectively disabled).
        super().__init__()
        self.rectify = rectify
        self.padding = padding
        self.batch_size = batch_size
        self.useAnimationMethod = useAnimationMethod
        # Fractional times of the intermediate frames, e.g. sf=4 -> [0.25, 0.5, 0.75].
        self.time_offsets = [kk / sf for kk in range(1, sf)]
        # Filter-estimation U-Net: shared trunk plus two sibling output heads.
        self.initScaleNets_filter, self.initScaleNets_filter1, self.initScaleNets_filter2 = self.get_MonoNet5()
        self.ctxNet = S2DF() if rectify else None
        self.rectifyNet = MultipleBasicBlock() if rectify else None
        # Runs before flownets/depthNet are assigned, so those two keep
        # their own initialisation (presumably pretrained weights).
        self._initialize_weights()
        self.flownets = PWCDCNet(*size)
        # NOTE(review): this stores the HourGlass *class*, not an instance;
        # forward() then calls self.depthNet(cat0), i.e. HourGlass(cat0).
        # Only correct if HourGlass is callable that way — confirm upstream.
        self.depthNet = HourGlass
        self.filterModule = FilterInterpolationModule().cuda()
        self.depth_flow_projection = DepthFlowProjectionModule()
    def _initialize_weights(self):
        # Xavier for convs, constant for batch-norm, small Gaussian for linear.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.xavier_uniform_(m.weight.data)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
    def forward(self, I0, I1, target, count):
        """Write I0 and the interpolated frames into ``target`` starting at
        index ``count``; return the updated count.

        I0/I1 are padded input frames (assumed N,C,H,W CUDA tensors —
        TODO confirm); ``target`` is a preallocated output buffer.
        """
        # First emitted frame is I0 itself, cropped back to the unpadded size.
        target[[count]] = I0[:, :, self.padding[2]:self.padding[3], self.padding[0]:self.padding[1]]
        count += 1
        cat0 = torch.cat((I0, I1), dim=0)  # batch-stacked: depth-net input
        cat1 = torch.cat((I0, I1), dim=1)  # channel-stacked: filter-net input
        # NOTE(review): wrapping work in the *current* stream is a no-op;
        # looks like leftover scaffolding from a two-stream (s1/s2) version.
        with torch.cuda.stream(torch.cuda.current_stream()):
            if self.useAnimationMethod:
                # Animation mode: one colour channel of I1 stands in for depth.
                temp = I1[:, 1:2, :, :]
            else:
                temp = self.depthNet(cat0)
            # Split the batch-stacked result back into per-frame depth maps.
            log_depth = [temp[:self.batch_size], temp[self.batch_size:]]
            if self.useAnimationMethod == 1:
                log_depth = [(d * 0) for d in log_depth]  # flat (zero) depth
            # Interpolation-kernel estimation: shared trunk, two heads.
            temp = self.forward_singlePath(self.initScaleNets_filter, cat1)
            cur_filter_output = [
                self.forward_singlePath(self.initScaleNets_filter1, temp),
                self.forward_singlePath(self.initScaleNets_filter2, temp)
            ]
            if self.useAnimationMethod == 1:
                # Large constant inverse depth -> depth weighting disabled.
                depth_inv = [(d * 0) + 1e-6 + 10000 for d in log_depth]
            else:
                depth_inv = [1e-6 + 1 / torch.exp(d) for d in log_depth]
        with torch.cuda.stream(torch.cuda.current_stream()):
            # Bidirectional optical flow, pre-scaled per time offset.
            cur_offset_outputs = [
                self.forward_flownets(I0, I1, inv=False),
                self.forward_flownets(I1, I0, inv=True)
            ]
        torch.cuda.synchronize()  # barrier left over from the two-stream (s1/s2) variant
        # Depth-aware projection of the flows to the intermediate times.
        cur_offset_outputs = [
            self.FlowProject(cur_offset_outputs[0], depth_inv[0]),
            self.FlowProject(cur_offset_outputs[1], depth_inv[1])
        ]
        # One interpolated frame per time offset.
        for temp_0, temp_1, timeoffset in zip(cur_offset_outputs[0], cur_offset_outputs[1], self.time_offsets):
            cur_offset_output = [temp_0, temp_1]
            cur_output_temp, ref0, ref2 = self.FilterInterpolate(
                I0, I1,
                cur_offset_output, cur_filter_output,
                timeoffset
            )
            # Crop the blended frame back to the unpadded size.
            cur_output_temp = cur_output_temp[:, :, self.padding[2]:self.padding[3], self.padding[0]: self.padding[1]]
            if self.rectify:
                # Context features (+ detached depth) for the rectifier.
                cur_ctx_output = [
                    torch.cat((self.ctxNet(I0), log_depth[0].detach()), dim=1),
                    torch.cat((self.ctxNet(I1), log_depth[1].detach()), dim=1)
                ]
                ctx0, ctx2 = self.FilterInterpolate_ctx(
                    cur_ctx_output[0], cur_ctx_output[1],
                    cur_offset_output, cur_filter_output, timeoffset
                )
                ref0 = ref0[:, :, self.padding[2]:self.padding[3], self.padding[0]: self.padding[1]]
                ref2 = ref2[:, :, self.padding[2]:self.padding[3], self.padding[0]: self.padding[1]]
                ctx0 = ctx0[:, :, self.padding[2]:self.padding[3], self.padding[0]: self.padding[1]]
                ctx2 = ctx2[:, :, self.padding[2]:self.padding[3], self.padding[0]: self.padding[1]]
                # Rectifier input: blend, both warps, flows, kernels, contexts.
                rectify_input = torch.cat((
                    cur_output_temp, ref0, ref2,
                    temp_0[:, :, self.padding[2]:self.padding[3], self.padding[0]: self.padding[1]],
                    temp_1[:, :, self.padding[2]:self.padding[3], self.padding[0]: self.padding[1]],
                    cur_filter_output[0][:, :, self.padding[2]:self.padding[3], self.padding[0]: self.padding[1]],
                    cur_filter_output[1][:, :, self.padding[2]:self.padding[3], self.padding[0]: self.padding[1]],
                    ctx0, ctx2
                ), dim=1)
                # Residual refinement of the blended frame.
                cur_output_temp = self.rectifyNet(rectify_input) + cur_output_temp
            target[[count]] = cur_output_temp
            count += 1
        return count
    def forward_flownets(self, im1, im2, inv):
        """Estimate im1->im2 flow and return one scaled flow per time offset
        (reversed when ``inv`` so offsets still index forward in time)."""
        temp = self.flownets(im1, im2)
        # 20.0 matches the PWC-Net output scaling — presumably; confirm
        # against the flow network's training convention.
        temps = [20.0 * temp * time_offset for time_offset in self.time_offsets]
        if inv:
            temps = temps[::-1]
        # Flow is predicted at 1/4 resolution; upsample back to full size.
        temps = [nn.Upsample(scale_factor=4, mode='bilinear', align_corners=True)(temp) for temp in temps]
        return temps
    def forward_singlePath(self, model, input):
        """Run an encoder-decoder with additive skip connections: features
        are pushed before each pooling layer and popped (added) at each
        Upsample layer."""
        stack = Stack()
        k = 0  # index of the current layer; the first layer consumes `input`
        temp = []
        for layers in model:
            if k == 0:
                temp = layers(input)
            else:
                if isinstance(layers, (nn.AvgPool2d, nn.MaxPool2d)):
                    stack.push(temp)  # save skip feature before downsampling
                temp = layers(temp)
                if isinstance(layers, nn.Upsample):
                    temp += stack.pop()  # merge matching skip feature
            k += 1
        return temp
    def get_MonoNet5(self):
        """Build the filter-estimation network: a 5-level U-Net-style trunk
        (6 -> 16 channels out) plus two sibling 16-channel heads."""
        model = Sequential(
            *self.conv_relu(6, 16, (3, 3), (1, 1)),
            *self.conv_relu_maxpool(16, 32, (3, 3), (1, 1), (2, 2)),
            *self.conv_relu_maxpool(32, 64, (3, 3), (1, 1), (2, 2)),
            *self.conv_relu_maxpool(64, 128, (3, 3), (1, 1), (2, 2)),
            *self.conv_relu_maxpool(128, 256, (3, 3), (1, 1), (2, 2)),
            *self.conv_relu_maxpool(256, 512, (3, 3), (1, 1), (2, 2)),
            *self.conv_relu(512, 512, (3, 3), (1, 1)),
            *self.conv_relu_unpool(512, 256, (3, 3), (1, 1), 2),
            *self.conv_relu_unpool(256, 128, (3, 3), (1, 1), 2),
            *self.conv_relu_unpool(128, 64, (3, 3), (1, 1), 2),
            *self.conv_relu_unpool(64, 32, (3, 3), (1, 1), 2),
            *self.conv_relu_unpool(32, 16, (3, 3), (1, 1), 2)
        )
        branch1 = self.conv_relu_conv(16, 16, (3, 3), (1, 1))
        branch2 = self.conv_relu_conv(16, 16, (3, 3), (1, 1))
        return model, branch1, branch2
    def FlowProject(self, inputs, depth=None):
        # Depth-aware flow projection applied to each per-offset flow.
        return [self.depth_flow_projection(x, depth) for x in inputs]
    def FilterInterpolate_ctx(self, ctx0, ctx2, offset, filter, timeoffset):
        # Warp context features; detached so no gradient flows back through
        # the offset/filter estimators from the rectifier branch.
        ctx0_offset = self.filterModule(ctx0, offset[0].detach(), filter[0].detach())
        ctx2_offset = self.filterModule(ctx2, offset[1].detach(), filter[1].detach())
        return ctx0_offset, ctx2_offset
    def FilterInterpolate(self, ref0, ref2, offset, filter, time_offset):
        # Warp both reference frames, then blend linearly in time.
        ref0_offset = self.filterModule(ref0, offset[0], filter[0])
        ref2_offset = self.filterModule(ref2, offset[1], filter[1])
        return ref0_offset * (1.0 - time_offset) + ref2_offset * time_offset, ref0_offset, ref2_offset
    @staticmethod
    def conv_relu_conv(input_filter, output_filter, kernel_size, padding):
        # conv -> ReLU -> conv head (no final activation).
        layers = Sequential(
            nn.Conv2d(input_filter, input_filter, kernel_size, 1, padding),
            nn.ReLU(inplace=True),
            nn.Conv2d(input_filter, output_filter, kernel_size, 1, padding),
        )
        return layers
    @staticmethod
    def conv_relu(input_filter, output_filter, kernel_size, padding):
        # Basic conv -> ReLU unit (returned as a list for Sequential splicing).
        layers = [
            nn.Conv2d(input_filter, output_filter, kernel_size, 1, padding),
            nn.ReLU(inplace=True)
        ]
        return layers
    @staticmethod
    def conv_relu_maxpool(input_filter, output_filter, kernel_size,
                          padding, kernel_size_pooling):
        # Encoder unit: conv -> ReLU -> max-pool downsampling.
        layers = [
            nn.Conv2d(input_filter, output_filter, kernel_size, 1, padding),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size_pooling)
        ]
        return layers
    @staticmethod
    def conv_relu_unpool(input_filter, output_filter, kernel_size, padding, unpooling_factor):
        # Decoder unit: bilinear upsample -> conv -> ReLU.
        layers = [
            nn.Upsample(scale_factor=unpooling_factor, mode='bilinear', align_corners=True),
            nn.Conv2d(input_filter, output_filter, kernel_size, 1, padding),
            nn.ReLU(inplace=True),
        ]
        return layers | ove/algorithm/dain/models/DAIN_slowmotion.py | import torch  # NOTE(review): trailing "| path | import torch" tokens are dataset-dump residue fused onto this line, not code
import torch.nn as nn
from ove.utils.modeling import Sequential
from ..networks import (
FilterInterpolationModule, DepthFlowProjectionModule,
MultipleBasicBlock, S2DF, PWCDCNet, HourGlass
)
from ..utils import Stack
class DAIN_slowmotion(nn.Module):
    """Depth-Aware frame INterpolation (DAIN) network, slow-motion mode.

    Duplicate copy of the class above (this file is a dataset dump whose
    ``parsed_code`` column repeats the ``code`` column). Given frames
    ``I0``/``I1``, :meth:`forward` writes ``I0`` plus ``sf - 1``
    interpolated frames into a caller-supplied ``target`` buffer.
    """
    def __init__(
        self,
        size,
        batch_size=1,
        sf=2,
        rectify=False,
        padding=None,
        useAnimationMethod=0
    ):
        # size: forwarded to PWCDCNet; padding: crop indices (x0, x1, y0, y1);
        # useAnimationMethod: 0 = real depth net, non-zero = colour-channel
        # stand-in (1 additionally flattens depth).
        super().__init__()
        self.rectify = rectify
        self.padding = padding
        self.batch_size = batch_size
        self.useAnimationMethod = useAnimationMethod
        # Fractional times of the intermediate frames, e.g. sf=4 -> [0.25, 0.5, 0.75].
        self.time_offsets = [kk / sf for kk in range(1, sf)]
        self.initScaleNets_filter, self.initScaleNets_filter1, self.initScaleNets_filter2 = self.get_MonoNet5()
        self.ctxNet = S2DF() if rectify else None
        self.rectifyNet = MultipleBasicBlock() if rectify else None
        # Runs before flownets/depthNet are assigned, so those keep their own init.
        self._initialize_weights()
        self.flownets = PWCDCNet(*size)
        # NOTE(review): stores the HourGlass *class*; forward() calls
        # self.depthNet(cat0) == HourGlass(cat0) — confirm this is intended.
        self.depthNet = HourGlass
        self.filterModule = FilterInterpolationModule().cuda()
        self.depth_flow_projection = DepthFlowProjectionModule()
    def _initialize_weights(self):
        # Xavier for convs, constant for batch-norm, small Gaussian for linear.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.xavier_uniform_(m.weight.data)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
    def forward(self, I0, I1, target, count):
        """Write I0 and the interpolated frames into ``target`` starting at
        index ``count``; return the updated count."""
        # First emitted frame is I0 itself, cropped to the unpadded size.
        target[[count]] = I0[:, :, self.padding[2]:self.padding[3], self.padding[0]:self.padding[1]]
        count += 1
        cat0 = torch.cat((I0, I1), dim=0)  # batch-stacked: depth-net input
        cat1 = torch.cat((I0, I1), dim=1)  # channel-stacked: filter-net input
        # NOTE(review): current-stream wrapper is a no-op (two-stream leftover).
        with torch.cuda.stream(torch.cuda.current_stream()):
            if self.useAnimationMethod:
                temp = I1[:, 1:2, :, :]  # colour channel stands in for depth
            else:
                temp = self.depthNet(cat0)
            # Split the batch-stacked result into per-frame depth maps.
            log_depth = [temp[:self.batch_size], temp[self.batch_size:]]
            if self.useAnimationMethod == 1:
                log_depth = [(d * 0) for d in log_depth]  # flat (zero) depth
            # Interpolation-kernel estimation: shared trunk, two heads.
            temp = self.forward_singlePath(self.initScaleNets_filter, cat1)
            cur_filter_output = [
                self.forward_singlePath(self.initScaleNets_filter1, temp),
                self.forward_singlePath(self.initScaleNets_filter2, temp)
            ]
            if self.useAnimationMethod == 1:
                # Large constant inverse depth -> depth weighting disabled.
                depth_inv = [(d * 0) + 1e-6 + 10000 for d in log_depth]
            else:
                depth_inv = [1e-6 + 1 / torch.exp(d) for d in log_depth]
        with torch.cuda.stream(torch.cuda.current_stream()):
            # Bidirectional optical flow, pre-scaled per time offset.
            cur_offset_outputs = [
                self.forward_flownets(I0, I1, inv=False),
                self.forward_flownets(I1, I0, inv=True)
            ]
        torch.cuda.synchronize()  # barrier left over from the two-stream (s1/s2) variant
        # Depth-aware projection of the flows to the intermediate times.
        cur_offset_outputs = [
            self.FlowProject(cur_offset_outputs[0], depth_inv[0]),
            self.FlowProject(cur_offset_outputs[1], depth_inv[1])
        ]
        # One interpolated frame per time offset.
        for temp_0, temp_1, timeoffset in zip(cur_offset_outputs[0], cur_offset_outputs[1], self.time_offsets):
            cur_offset_output = [temp_0, temp_1]
            cur_output_temp, ref0, ref2 = self.FilterInterpolate(
                I0, I1,
                cur_offset_output, cur_filter_output,
                timeoffset
            )
            # Crop the blended frame back to the unpadded size.
            cur_output_temp = cur_output_temp[:, :, self.padding[2]:self.padding[3], self.padding[0]: self.padding[1]]
            if self.rectify:
                # Context features (+ detached depth) for the rectifier.
                cur_ctx_output = [
                    torch.cat((self.ctxNet(I0), log_depth[0].detach()), dim=1),
                    torch.cat((self.ctxNet(I1), log_depth[1].detach()), dim=1)
                ]
                ctx0, ctx2 = self.FilterInterpolate_ctx(
                    cur_ctx_output[0], cur_ctx_output[1],
                    cur_offset_output, cur_filter_output, timeoffset
                )
                ref0 = ref0[:, :, self.padding[2]:self.padding[3], self.padding[0]: self.padding[1]]
                ref2 = ref2[:, :, self.padding[2]:self.padding[3], self.padding[0]: self.padding[1]]
                ctx0 = ctx0[:, :, self.padding[2]:self.padding[3], self.padding[0]: self.padding[1]]
                ctx2 = ctx2[:, :, self.padding[2]:self.padding[3], self.padding[0]: self.padding[1]]
                # Rectifier input: blend, both warps, flows, kernels, contexts.
                rectify_input = torch.cat((
                    cur_output_temp, ref0, ref2,
                    temp_0[:, :, self.padding[2]:self.padding[3], self.padding[0]: self.padding[1]],
                    temp_1[:, :, self.padding[2]:self.padding[3], self.padding[0]: self.padding[1]],
                    cur_filter_output[0][:, :, self.padding[2]:self.padding[3], self.padding[0]: self.padding[1]],
                    cur_filter_output[1][:, :, self.padding[2]:self.padding[3], self.padding[0]: self.padding[1]],
                    ctx0, ctx2
                ), dim=1)
                # Residual refinement of the blended frame.
                cur_output_temp = self.rectifyNet(rectify_input) + cur_output_temp
            target[[count]] = cur_output_temp
            count += 1
        return count
    def forward_flownets(self, im1, im2, inv):
        """Estimate im1->im2 flow; return one scaled flow per time offset
        (reversed when ``inv`` so offsets still index forward in time)."""
        temp = self.flownets(im1, im2)
        # 20.0 presumably matches the PWC-Net output scaling — confirm.
        temps = [20.0 * temp * time_offset for time_offset in self.time_offsets]
        if inv:
            temps = temps[::-1]
        # Flow is predicted at 1/4 resolution; upsample to full size.
        temps = [nn.Upsample(scale_factor=4, mode='bilinear', align_corners=True)(temp) for temp in temps]
        return temps
    def forward_singlePath(self, model, input):
        """Encoder-decoder with additive skips: push before pooling,
        pop (add) at each Upsample."""
        stack = Stack()
        k = 0  # layer index; the first layer consumes `input`
        temp = []
        for layers in model:
            if k == 0:
                temp = layers(input)
            else:
                if isinstance(layers, (nn.AvgPool2d, nn.MaxPool2d)):
                    stack.push(temp)  # save skip feature before downsampling
                temp = layers(temp)
                if isinstance(layers, nn.Upsample):
                    temp += stack.pop()  # merge matching skip feature
            k += 1
        return temp
    def get_MonoNet5(self):
        """Build the filter network: 5-level U-Net-style trunk plus two
        sibling 16-channel heads."""
        model = Sequential(
            *self.conv_relu(6, 16, (3, 3), (1, 1)),
            *self.conv_relu_maxpool(16, 32, (3, 3), (1, 1), (2, 2)),
            *self.conv_relu_maxpool(32, 64, (3, 3), (1, 1), (2, 2)),
            *self.conv_relu_maxpool(64, 128, (3, 3), (1, 1), (2, 2)),
            *self.conv_relu_maxpool(128, 256, (3, 3), (1, 1), (2, 2)),
            *self.conv_relu_maxpool(256, 512, (3, 3), (1, 1), (2, 2)),
            *self.conv_relu(512, 512, (3, 3), (1, 1)),
            *self.conv_relu_unpool(512, 256, (3, 3), (1, 1), 2),
            *self.conv_relu_unpool(256, 128, (3, 3), (1, 1), 2),
            *self.conv_relu_unpool(128, 64, (3, 3), (1, 1), 2),
            *self.conv_relu_unpool(64, 32, (3, 3), (1, 1), 2),
            *self.conv_relu_unpool(32, 16, (3, 3), (1, 1), 2)
        )
        branch1 = self.conv_relu_conv(16, 16, (3, 3), (1, 1))
        branch2 = self.conv_relu_conv(16, 16, (3, 3), (1, 1))
        return model, branch1, branch2
    def FlowProject(self, inputs, depth=None):
        # Depth-aware flow projection applied to each per-offset flow.
        return [self.depth_flow_projection(x, depth) for x in inputs]
    def FilterInterpolate_ctx(self, ctx0, ctx2, offset, filter, timeoffset):
        # Warp context features; detached so no gradient reaches the
        # offset/filter estimators from the rectifier branch.
        ctx0_offset = self.filterModule(ctx0, offset[0].detach(), filter[0].detach())
        ctx2_offset = self.filterModule(ctx2, offset[1].detach(), filter[1].detach())
        return ctx0_offset, ctx2_offset
    def FilterInterpolate(self, ref0, ref2, offset, filter, time_offset):
        # Warp both reference frames, then blend linearly in time.
        ref0_offset = self.filterModule(ref0, offset[0], filter[0])
        ref2_offset = self.filterModule(ref2, offset[1], filter[1])
        return ref0_offset * (1.0 - time_offset) + ref2_offset * time_offset, ref0_offset, ref2_offset
    @staticmethod
    def conv_relu_conv(input_filter, output_filter, kernel_size, padding):
        # conv -> ReLU -> conv head (no final activation).
        layers = Sequential(
            nn.Conv2d(input_filter, input_filter, kernel_size, 1, padding),
            nn.ReLU(inplace=True),
            nn.Conv2d(input_filter, output_filter, kernel_size, 1, padding),
        )
        return layers
    @staticmethod
    def conv_relu(input_filter, output_filter, kernel_size, padding):
        # Basic conv -> ReLU unit (list form for Sequential splicing).
        layers = [
            nn.Conv2d(input_filter, output_filter, kernel_size, 1, padding),
            nn.ReLU(inplace=True)
        ]
        return layers
    @staticmethod
    def conv_relu_maxpool(input_filter, output_filter, kernel_size,
                          padding, kernel_size_pooling):
        # Encoder unit: conv -> ReLU -> max-pool downsampling.
        layers = [
            nn.Conv2d(input_filter, output_filter, kernel_size, 1, padding),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size_pooling)
        ]
        return layers
    @staticmethod
    def conv_relu_unpool(input_filter, output_filter, kernel_size, padding, unpooling_factor):
        # Decoder unit: bilinear upsample -> conv -> ReLU.
        layers = [
            nn.Upsample(scale_factor=unpooling_factor, mode='bilinear', align_corners=True),
            nn.Conv2d(input_filter, output_filter, kernel_size, 1, padding),
            nn.ReLU(inplace=True),
        ]
        return layers | 0.813127 | 0.329823  # NOTE(review): trailing "| 0.81… | 0.32…" tokens are dataset-dump residue fused onto this line, not code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import r2_score
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn import metrics
from sklearn import preprocessing
from sklearn.model_selection import GridSearchCV
# --- Load & exploratory analysis ---------------------------------------------
# NOTE(review): bare df.head()/df.shape/... expressions only display output
# in a notebook; as a plain script their results are discarded — this file
# appears to be notebook-exported code.
df= pd.read_csv('avocado.csv') #read avocado.csv
df.head() #first 5 rows
df.shape #(rows, columns)
df.info() #column dtypes and non-null counts
df.describe() #summary statistics of numeric columns
df.isnull().sum() #null-value count per column
# Parse Date and derive month/day features for the models.
df['Date']=pd.to_datetime(df['Date'])
df['month'] = df['Date'].apply(lambda x:x.month) #extract month from Date
df['day'] = df['Date'].apply(lambda x:x.day) #extract day from Date
# Rename the 'conventional' class to 'non-organic'.
df['type'] = df['type'].replace(['conventional'], 'non-organic')
df.drop('Unnamed: 0', inplace = True, axis = 1) #drop the CSV's stale index column
df.head() #first 5 rows of modified dataframe
#class counts: organic vs non-organic
ax = df['type'].value_counts().plot(kind = 'bar', figsize=(7,5), title="Counts of Organic vs. Non- Organic")
ax.set_xlabel("Types of avocado")
ax.set_ylabel("Counts")
#price distribution per type (outliers included)
sns.boxplot(x = 'type', y = 'AveragePrice', data = df).set(title = "Prices of Organic and Non-Organic including outliers")
plt.show()
#price distribution per type, split by year
sns.boxplot(x = 'year', y = 'AveragePrice', hue = 'type', data = df).set(title="Average prices of Organic and Non-Organic over years including outliers ")
plt.show()
#histograms of every numeric column, per class
grouped = df.groupby('type') #group organic and non-organic rows
grouped.get_group('organic').hist(figsize = (20,20), grid = True, layout = (4,4), bins = 30)
grouped.get_group('non-organic').hist(figsize = (20,20), grid = True, layout = (4,4), bins = 30)
# Feature table: drop region/Date (not used as predictors), label-encode type.
final_df = df.drop(['region', 'Date'], axis = 1)
label_encoder = preprocessing.LabelEncoder()
final_df['type']= label_encoder.fit_transform(df['type']) #0 = non-organic, 1 = organic (alphabetical encoding)
X = final_df.drop(['type'], axis = 1, inplace = False) #classifier features
y = final_df['type'] #classification target
# --- Type classification: model selection ------------------------------------
# Candidate classifiers with default hyper-parameters.
clf = [SVC(), KNeighborsClassifier(), RandomForestClassifier()] #compare accuracy of different classifiers
score = 0
# NOTE(review): range(10,11) yields only the value 10, so this "search"
# over random states runs exactly once — widen the range or drop the loop.
for r_state in range(10,11):
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = r_state)
    for c in clf:
        c.fit(X_train,y_train)
        y_pred=c.predict(X_test)
        accuracy=accuracy_score(y_test,y_pred)
        # Keep the best (random state, classifier) pair seen so far.
        if accuracy>score:
            score=accuracy
            final_state=r_state
            final_classifier=c
print("Maximum accuracy score corresponding to random state ",final_state , "is" ,score, "and classifier is ", final_classifier)
#best performance by RandomForestClassifier
# Grid search over a small RandomForest hyper-parameter grid (3-fold CV).
n_estimators = [100, 300]
max_depth = [5, 8]
min_samples_split = [2, 5]
min_samples_leaf = [1, 2]
hyperF = dict(n_estimators = n_estimators, max_depth = max_depth,
              min_samples_split = min_samples_split,
              min_samples_leaf = min_samples_leaf)
gridF = GridSearchCV(final_classifier, hyperF, cv = 3, verbose = 1,
                     n_jobs = 3)
bestF = gridF.fit(X_train, y_train)
print(bestF.best_params_)
# Refit with the chosen hyper-parameters.
# NOTE(review): these values are hard-coded rather than taken from
# bestF.best_params_ / bestF.best_estimator_, so they silently go stale
# if the data or the grid changes.
clf = RandomForestClassifier(n_estimators=300, random_state=10, max_depth=8, min_samples_leaf=1, min_samples_split=5)
clf.fit(X_train, y_train)
y_pred=clf.predict(X_test)
print(accuracy_score(y_test, y_pred))
# Single ad-hoc prediction from a hand-written feature row.
# NOTE(review): "input" shadows the builtin of the same name.
input= (1.33, 64236.62, 1036.74, 54454.85, 48.16, 8696.87, 8603.62, 93.25, 0.0, 2015, 12, 27)
input_arr = np.asarray(input)
reshape = input_arr.reshape(1,-1) #single sample -> shape (1, n_features)
prediction = clf.predict(reshape)
print(prediction)
if (prediction[0] == 0):
    print('The type is non-organic')
else:
    print('The type is organic')
#extracting features from data
X = final_df.drop(['AveragePrice'], axis = 1, inplace = False) #dropping AveragePrice column to define it as target
y = final_df['AveragePrice']
#split data into train and test datasets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 10)
#price prediction using LinearRegression
lr = LinearRegression()
lr.fit(X_train, y_train)
pred_lr = lr.predict(X_test)
print('MAE :', metrics.mean_absolute_error(y_test, pred_lr))
print('MSE :', metrics.mean_squared_error(y_test, pred_lr))
print('RMSE :', np.sqrt(metrics.mean_squared_error(y_test, pred_lr)))
print('R2 :', r2_score(y_test, pred_lr)) #coefficient of determination (proportion of variability)
plt.scatter(x = y_test, y = pred_lr)
#price prediction using using Random Forest
rf = RandomForestRegressor()
rf.fit(X_train, y_train)
pred_rf = rf.predict(X_test)
print('MAE :', metrics.mean_absolute_error(y_test, pred_rf))
print('MSE :', metrics.mean_squared_error(y_test, pred_rf))
print('RMSE :', np.sqrt(metrics.mean_squared_error(y_test, pred_rf)))
print('R2 :', r2_score(y_test, pred_rf))
plt.scatter(x = y_test, y = pred_rf)
#price prediction using using Decision Tree
dt = DecisionTreeRegressor()
dt.fit(X_train, y_train)
pred_dt = dt.predict(X_test)
print('MAE :', metrics.mean_absolute_error(y_test, pred_dt))
print('MSE :', metrics.mean_squared_error(y_test, pred_dt))
print('RMSE :', np.sqrt(metrics.mean_squared_error(y_test, pred_dt)))
print('R2 :', r2_score(y_test, pred_dt))
plt.scatter(x = y_test, y = pred_dt)
price = pd.DataFrame({'Y-Test' : y_test , 'Pred' : pred_rf}, columns = ['Y-Test', 'Pred'])
sns.lmplot(x = 'Y-Test', y = 'Pred', data = price, palette = 'rainbow')
#plotting region by AveragePrice bar graph
price_ranking=df.groupby('region')[['AveragePrice']].mean().sort_values(by="AveragePrice", ascending=True)
plt.figure(figsize=(20,10))
plt.xticks(rotation=70)
ax = sns.barplot(x=price_ranking.index, y="AveragePrice", data=price_ranking)
ax.set_xlabel('region')
ax.set_ylabel("Average Price")
plt.title('Average Price of Avocado by region')
plt.savefig('price_ranking') | Avocado_price_prediction.py | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import r2_score
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn import metrics
from sklearn import preprocessing
from sklearn.model_selection import GridSearchCV
# --- Load & exploratory analysis (duplicate copy of the script above; this
# file is a dataset dump whose parsed_code column repeats the code column) ---
# NOTE(review): bare df.head()/df.shape/... expressions only display output
# in a notebook; as a plain script their results are discarded.
df= pd.read_csv('avocado.csv') #read avocado.csv
df.head() #first 5 rows
df.shape #(rows, columns)
df.info() #column dtypes and non-null counts
df.describe() #summary statistics of numeric columns
df.isnull().sum() #null-value count per column
# Parse Date and derive month/day features for the models.
df['Date']=pd.to_datetime(df['Date'])
df['month'] = df['Date'].apply(lambda x:x.month) #extract month from Date
df['day'] = df['Date'].apply(lambda x:x.day) #extract day from Date
# Rename the 'conventional' class to 'non-organic'.
df['type'] = df['type'].replace(['conventional'], 'non-organic')
df.drop('Unnamed: 0', inplace = True, axis = 1) #drop the CSV's stale index column
df.head() #first 5 rows of modified dataframe
#class counts: organic vs non-organic
ax = df['type'].value_counts().plot(kind = 'bar', figsize=(7,5), title="Counts of Organic vs. Non- Organic")
ax.set_xlabel("Types of avocado")
ax.set_ylabel("Counts")
#price distribution per type (outliers included)
sns.boxplot(x = 'type', y = 'AveragePrice', data = df).set(title = "Prices of Organic and Non-Organic including outliers")
plt.show()
#price distribution per type, split by year
sns.boxplot(x = 'year', y = 'AveragePrice', hue = 'type', data = df).set(title="Average prices of Organic and Non-Organic over years including outliers ")
plt.show()
#histograms of every numeric column, per class
grouped = df.groupby('type') #group organic and non-organic rows
grouped.get_group('organic').hist(figsize = (20,20), grid = True, layout = (4,4), bins = 30)
grouped.get_group('non-organic').hist(figsize = (20,20), grid = True, layout = (4,4), bins = 30)
# Feature table: drop region/Date (not used as predictors), label-encode type.
final_df = df.drop(['region', 'Date'], axis = 1)
label_encoder = preprocessing.LabelEncoder()
final_df['type']= label_encoder.fit_transform(df['type']) #0 = non-organic, 1 = organic (alphabetical encoding)
X = final_df.drop(['type'], axis = 1, inplace = False) #classifier features
y = final_df['type'] #classification target
# --- Type classification: model selection ------------------------------------
# Candidate classifiers with default hyper-parameters.
clf = [SVC(), KNeighborsClassifier(), RandomForestClassifier()] #compare accuracy of different classifiers
score = 0
# NOTE(review): range(10,11) yields only the value 10, so this "search"
# over random states runs exactly once — widen the range or drop the loop.
for r_state in range(10,11):
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = r_state)
    for c in clf:
        c.fit(X_train,y_train)
        y_pred=c.predict(X_test)
        accuracy=accuracy_score(y_test,y_pred)
        # Keep the best (random state, classifier) pair seen so far.
        if accuracy>score:
            score=accuracy
            final_state=r_state
            final_classifier=c
print("Maximum accuracy score corresponding to random state ",final_state , "is" ,score, "and classifier is ", final_classifier)
#best performance by RandomForestClassifier
# Grid search over a small RandomForest hyper-parameter grid (3-fold CV).
n_estimators = [100, 300]
max_depth = [5, 8]
min_samples_split = [2, 5]
min_samples_leaf = [1, 2]
hyperF = dict(n_estimators = n_estimators, max_depth = max_depth,
              min_samples_split = min_samples_split,
              min_samples_leaf = min_samples_leaf)
gridF = GridSearchCV(final_classifier, hyperF, cv = 3, verbose = 1,
                     n_jobs = 3)
bestF = gridF.fit(X_train, y_train)
print(bestF.best_params_)
# Refit with the chosen hyper-parameters.
# NOTE(review): these values are hard-coded rather than taken from
# bestF.best_params_ / bestF.best_estimator_, so they silently go stale
# if the data or the grid changes.
clf = RandomForestClassifier(n_estimators=300, random_state=10, max_depth=8, min_samples_leaf=1, min_samples_split=5)
clf.fit(X_train, y_train)
y_pred=clf.predict(X_test)
print(accuracy_score(y_test, y_pred))
# Single ad-hoc prediction from a hand-written feature row.
# NOTE(review): "input" shadows the builtin of the same name.
input= (1.33, 64236.62, 1036.74, 54454.85, 48.16, 8696.87, 8603.62, 93.25, 0.0, 2015, 12, 27)
input_arr = np.asarray(input)
reshape = input_arr.reshape(1,-1) #single sample -> shape (1, n_features)
prediction = clf.predict(reshape)
print(prediction)
if (prediction[0] == 0):
    print('The type is non-organic')
else:
    print('The type is organic')
#extracting features from data
X = final_df.drop(['AveragePrice'], axis = 1, inplace = False) #dropping AveragePrice column to define it as target
y = final_df['AveragePrice']
#split data into train and test datasets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 10)
#price prediction using LinearRegression
lr = LinearRegression()
lr.fit(X_train, y_train)
pred_lr = lr.predict(X_test)
print('MAE :', metrics.mean_absolute_error(y_test, pred_lr))
print('MSE :', metrics.mean_squared_error(y_test, pred_lr))
print('RMSE :', np.sqrt(metrics.mean_squared_error(y_test, pred_lr)))
print('R2 :', r2_score(y_test, pred_lr)) #coefficient of determination (proportion of variability)
plt.scatter(x = y_test, y = pred_lr)
#price prediction using using Random Forest
rf = RandomForestRegressor()
rf.fit(X_train, y_train)
pred_rf = rf.predict(X_test)
print('MAE :', metrics.mean_absolute_error(y_test, pred_rf))
print('MSE :', metrics.mean_squared_error(y_test, pred_rf))
print('RMSE :', np.sqrt(metrics.mean_squared_error(y_test, pred_rf)))
print('R2 :', r2_score(y_test, pred_rf))
plt.scatter(x = y_test, y = pred_rf)
#price prediction using using Decision Tree
dt = DecisionTreeRegressor()
dt.fit(X_train, y_train)
pred_dt = dt.predict(X_test)
print('MAE :', metrics.mean_absolute_error(y_test, pred_dt))
print('MSE :', metrics.mean_squared_error(y_test, pred_dt))
print('RMSE :', np.sqrt(metrics.mean_squared_error(y_test, pred_dt)))
print('R2 :', r2_score(y_test, pred_dt))
plt.scatter(x = y_test, y = pred_dt)
price = pd.DataFrame({'Y-Test' : y_test , 'Pred' : pred_rf}, columns = ['Y-Test', 'Pred'])
sns.lmplot(x = 'Y-Test', y = 'Pred', data = price, palette = 'rainbow')
#plotting region by AveragePrice bar graph
price_ranking=df.groupby('region')[['AveragePrice']].mean().sort_values(by="AveragePrice", ascending=True)
plt.figure(figsize=(20,10))
plt.xticks(rotation=70)
ax = sns.barplot(x=price_ranking.index, y="AveragePrice", data=price_ranking)
ax.set_xlabel('region')
ax.set_ylabel("Average Price")
plt.title('Average Price of Avocado by region')
plt.savefig('price_ranking') | 0.557966 | 0.412116 |