input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
in range(N): # updata of P
mu_ti = mu[int(self.points_hawkes[i]/delta_t_mu)]
intensity_total = 0
for j in range(i):
tji = self.points_hawkes[i] - self.points_hawkes[j]
if tji >= self.T_phi: continue
intensity_total += phi[int(tji/delta_t_phi)]
intensity_total += mu_ti
P[i][i] = mu_ti/intensity_total
for j in range(i):
tji = self.points_hawkes[i] - self.points_hawkes[j]
if tji >= self.T_phi: P[i][j] = 0
else: P[i][j] = phi[int(tji/delta_t_phi)]/intensity_total
X[i][:(i+1)] = multinomial(n=1,p=P[i][:(i+1)]).rvs()
assert np.sum(X) == N
# loglikelihood
logl_train = self.loglikelihood_discrete_phi_mu(self.points_hawkes, mu, phi, self.T_phi, self.T)
logl_test = self.loglikelihood_discrete_phi_mu(self.points_hawkes_test, mu, phi, self.T_phi, self.T_test)
# record
mu_list.append(mu)
phi_list.append(phi)
lamda_mu_list.append(lamda_mu)
lamda_phi_list.append(lamda_phi)
logl_train_list.append(logl_train)
logl_test_list.append(logl_test)
return mu_list,phi_list,lamda_mu_list,lamda_phi_list,logl_train_list,logl_test_list
#########################################################################################################
'EM Algorithm'
@staticmethod
def gq_points_weights(a,b,Q):
r"""
Generate the Gaussian quadrature nodes and weights for the integral :math:`\int_a^b f(t) dt`
:type a: float
:param a: the lower end of the integral
:type b: float
:param b: the upper end of the integral
:type Q: int
:param Q: the number of Gaussian quadrature nodes (weights)
:rtype: 1D numpy array, 1D numpy array
:return: Gaussian quadrature nodes and the corresponding weights
"""
p,w = legendre.leggauss(Q)
c = np.array([0] * Q + [1])
p_new = (a + b + (b - a) * p) / 2
w_new = (b - a) / (legendre.legval(p, legendre.legder(c))**2*(1-p**2))
return p_new,w_new
def ini_P(self):
    r"""
    Initialize the probabilistic branching matrix.

    Each row i of P is a probability distribution over the possible causes of
    event i: P[i][i] is the probability that event i is a background event and
    P[i][j] (j < i) the probability that it was triggered by event j.  Only
    events j within the support window T_phi of event i can be triggers.

    :rtype: numpy array
    :return: probabilistic branching matrix, the flattened P_ij, interval of timestamps :\tau, the number of P_ij!=0 in each row
    """
    N = len(self.points_hawkes)
    P = np.zeros((N,N))
    Pij_flat = []       # off-diagonal entries of P, row by row (diagonal excluded)
    tau = []            # per-row inter-event intervals t_i - t_j for the kept pairs
    num_Pij_row = []    # number of non-zero off-diagonal entries per row
    for i in range(N): # initial value of P
        # Scan forward for the FIRST event j whose distance to event i is
        # within the support T_phi; all later events j..i-1 are then also
        # within the window, so the row is filled in one shot and we break.
        for j in range(i+1):
            tij = self.points_hawkes[i] - self.points_hawkes[j]
            if tij >= self.T_phi: continue
            else:
                # Random row summing to 1 over entries j..i (includes diagonal i).
                P[i][j:i+1] = np.random.dirichlet([1]*(i-j+1))
                Pij_flat += list(P[i][j:i])   # off-diagonal part only
                tau.append(list(self.points_hawkes[i] - np.array(self.points_hawkes[j:i])))
                num_Pij_row.append(i-j)
                break
    return P, Pij_flat, tau, num_Pij_row
def a_predict(self, x_M, y_M, theta0, theta1, K_MM_inv, x_pred):
    r"""
    Posterior mean of y at ``x_pred`` given observations (x_M, y_M) under
    Gaussian process regression with an RBF kernel.

    :type x_M: numpy array
    :param x_M: input x
    :type y_M: numpy array
    :param y_M: input y
    :type theta0:
    :param theta0: kernel amplitude hyperparameter
    :type theta1:
    :param theta1: kernel length-scale hyperparameter
    :type K_MM_inv: 2D numpy array
    :param K_MM_inv: the inverse kernel matrix of x_M
    :type x_pred: numpy array
    :param x_pred: the predictive points
    :rtype: numpy array
    :return: mean of y_pred
    """
    # Cross-covariance between prediction points and the observed inputs
    # (noise variance 0: predictions, not noisy observations).
    cross_cov = self.rbf_kernel(theta0, theta1, 0, x_pred, x_M)
    # Standard GP mean: k(x*, X) K(X, X)^{-1} y.
    projection = np.dot(cross_cov, K_MM_inv)
    return np.dot(projection, y_M)
def EM(self, num_gq_mu, num_gq_phi, num_pre_mu, num_pre_phi, num_iter):
    r"""
    EM algorithm which is used to estimate the MAP of lamda_ub_mu, lamda_ub_phi, g_s_mu and g_s_phi.

    :type num_gq_mu: int
    :param num_gq_mu: the number of Gaussian quadrature nodes on [0,T]
    :type num_gq_phi: int
    :param num_gq_phi: the number of Gaussian quadrature nodes on [0,T_phi]
    :type num_pre_mu: int
    :param num_pre_mu: the number of prediction points on [0,T]
    :type num_pre_phi: int
    :param num_pre_phi: the number of prediction points on [0,T_phi]
    :type num_iter: int
    :param num_iter: the number of EM iterations
    :rtype: numpy array
    :return: the MAP estimates of \mu(t), \phi(\tau), lamda_ub_mu, lamda_ub_phi, the training and
    test log-likelihood along EM iterations.
    """
    N = len(self.points_hawkes)
    # Branching structure: P[i][i] = prob. event i is background,
    # P[i][j] = prob. event i was triggered by event j (within window T_phi).
    P, Pij_flat, tau_phi_md, num_Pij_row = self.ini_P()
    tau_phi = sum(tau_phi_md,[])  # flatten the per-row lists of inter-event intervals
    N_phi = len(tau_phi)
    M_mu = len(self.ind_p_mu)     # number of inducing points for mu
    M_phi = len(self.ind_p_phi)   # number of inducing points for phi
    # Kernel matrices on the inducing points (with noise jitter) and their
    # inverses, plus cross-kernels events->inducing points, used for GP
    # regression of the latent functions g_mu and g_phi.
    K_MM_mu = self.rbf_kernel(self.theta0_mu, self.theta1_mu, self.noise_var_mu, self.ind_p_mu, self.ind_p_mu)
    K_MM_mu_inv = np.linalg.inv(K_MM_mu)
    K_MM_phi = self.rbf_kernel(self.theta0_phi, self.theta1_phi, self.noise_var_phi, self.ind_p_phi, self.ind_p_phi)
    K_MM_phi_inv = np.linalg.inv(K_MM_phi)
    K_NM_mu = self.rbf_kernel(self.theta0_mu, self.theta1_mu, 0, np.array(self.points_hawkes), self.ind_p_mu)
    K_NM_phi = self.rbf_kernel(self.theta0_phi, self.theta1_phi, 0, np.array(tau_phi), self.ind_p_phi)
    # initial gm_mu and lamda_mu, gm_phi and lamda_phi
    gm_mu = np.random.uniform(-1, 1, size = M_mu)
    gm_phi = np.random.uniform(-1, 1, size = M_phi)
    # Initial intensity upper bounds: twice the rough rate implied by the
    # initial branching matrix (background / triggered counts per unit time).
    lamda_mu = sum(np.diag(P))*2/self.T
    lamda_phi = sum(Pij_flat)*2/N/self.T_phi
    # gaussian quadreture points and weights
    p_gq_mu, w_gq_mu = self.gq_points_weights(0,self.T,num_gq_mu)
    p_gq_phi, w_gq_phi = self.gq_points_weights(0,self.T_phi,num_gq_phi)
    K_gqM_mu = self.rbf_kernel(self.theta0_mu, self.theta1_mu, 0, p_gq_mu, self.ind_p_mu)
    K_gqM_phi = self.rbf_kernel(self.theta0_phi, self.theta1_phi, 0, p_gq_phi, self.ind_p_phi)
    # Per-iteration history of the estimates and log-likelihoods.
    mu_list=[]
    phi_list=[]
    lamda_mu_list=[]
    lamda_phi_list=[]
    logl_train_list=[]
    logl_test_list=[]
    for iteration in range(num_iter):
        # update distribution of w_ii and w_ij
        # a(t) is the GP-regressed latent function at the event times;
        # E_w = tanh(a/2)/(2a) — presumably the Polya-Gamma PG(1, a) mean
        # used in the sigmoidal augmentation (cf. the PG comment in MF).
        a_ii = self.a_predict(self.ind_p_mu, gm_mu, self.theta0_mu, self.theta1_mu, K_MM_mu_inv, np.array(self.points_hawkes))
        E_w_ii = 1/2/a_ii*np.tanh(a_ii/2)
        a_ij = self.a_predict(self.ind_p_phi, gm_phi, self.theta0_phi, self.theta1_phi, K_MM_phi_inv, np.array(tau_phi))
        E_w_ij = 1/2/a_ij*np.tanh(a_ij/2)
        # update lamda_mu and lamda_phi
        # Gaussian-quadrature approximation of the integral of the thinned
        # intensity lamda * sigmoid(-a) over [0, T] (resp. [0, T_phi]).
        a_gq_mu = self.a_predict(self.ind_p_mu, gm_mu, self.theta0_mu, self.theta1_mu, K_MM_mu_inv, p_gq_mu)
        int_intensity = np.sum(w_gq_mu*lamda_mu*expit(-a_gq_mu))
        lamda_mu = (np.sum(np.diag(P))+int_intensity)/self.T
        a_gq_phi = self.a_predict(self.ind_p_phi, gm_phi, self.theta0_phi, self.theta1_phi, K_MM_phi_inv, p_gq_phi)
        int_intensity = np.sum(w_gq_phi*lamda_phi*expit(-a_gq_phi))
        lamda_phi = (np.sum(Pij_flat) + N*int_intensity)/N/self.T_phi
        # update gm_mu and gm_phi
        # int_A_* / int_B_* accumulate the quadratic and linear terms of the
        # Gaussian update for the inducing values: a data part (sum over
        # events, weighted by branching probabilities) plus a quadrature part
        # (integral over the thinned intensity).
        int_A_mu=np.zeros((M_mu,M_mu))
        for i in range(N):
            int_A_mu+=P[i][i]*E_w_ii[i]*np.outer(K_NM_mu[i],K_NM_mu[i])
        for i in range(num_gq_mu):
            int_A_mu+=w_gq_mu[i]*(lamda_mu/2/a_gq_mu[i]*np.tanh(a_gq_mu[i]/2)*expit(-a_gq_mu[i])*np.outer(K_gqM_mu[i],K_gqM_mu[i]))
        int_B_mu=np.zeros(M_mu)
        for i in range(N):
            int_B_mu+=0.5*P[i][i]*K_NM_mu[i]
        for i in range(num_gq_mu):
            int_B_mu+=-w_gq_mu[i]/2*(lamda_mu*expit(-a_gq_mu[i])*K_gqM_mu[i])
        # gm = (K^-1 A K^-1 + K^-1)^-1 K^-1 B : MAP of the inducing values.
        gm_mu=np.dot(np.dot(np.linalg.inv(np.dot(np.dot(K_MM_mu_inv,int_A_mu),K_MM_mu_inv)+K_MM_mu_inv),K_MM_mu_inv),int_B_mu)
        int_A_phi=np.zeros((M_phi,M_phi))
        for i in range(N_phi):
            int_A_phi+=Pij_flat[i]*E_w_ij[i]*np.outer(K_NM_phi[i],K_NM_phi[i])
        for i in range(num_gq_phi):
            int_A_phi+=w_gq_phi[i]*N*lamda_phi/2/a_gq_phi[i]*np.tanh(a_gq_phi[i]/2)*expit(-a_gq_phi[i])*np.outer(K_gqM_phi[i],K_gqM_phi[i])
        int_B_phi=np.zeros(M_phi)
        for i in range(N_phi):
            int_B_phi+=0.5*Pij_flat[i]*K_NM_phi[i]
        for i in range(num_gq_phi):
            int_B_phi+=-w_gq_phi[i]/2*N*lamda_phi*expit(-a_gq_phi[i])*K_gqM_phi[i]
        gm_phi=np.dot(np.dot(np.linalg.inv(np.dot(np.dot(K_MM_phi_inv,int_A_phi),K_MM_phi_inv)+K_MM_phi_inv),K_MM_phi_inv),int_B_phi)
        # update P
        # Each row i of P is re-normalized from the current background
        # intensity mu(t_i) and triggering intensities phi(t_i - t_j).
        Pij_flat=[]
        for i in range(1,N): # start from the second row, because the first row of P is always 1
            mu_ti=lamda_mu*expit(a_ii[i])
            # a_ij is flat; num_Pij_row gives the slice belonging to row i.
            phi_ti=lamda_phi*expit(a_ij[sum(num_Pij_row[:i]):sum(num_Pij_row[:i+1])])
            intensity_total=mu_ti+np.sum(phi_ti)
            P[i][i]=mu_ti/intensity_total
            P_i_j=phi_ti/intensity_total
            P[i][i-len(phi_ti):i]=P_i_j
            Pij_flat+=list(P_i_j)
        # compute g_{\mu}(t) and g_{\phi}(\tau) on finer grid and the corresponding train/test loglikelihood
        g_mu_em = self.a_predict(self.ind_p_mu, gm_mu, self.theta0_mu, self.theta1_mu, K_MM_mu_inv, np.linspace(0, self.T, num_pre_mu))
        g_phi_em = self.a_predict(self.ind_p_phi, gm_phi, self.theta0_phi, self.theta1_phi, K_MM_phi_inv, np.linspace(0, self.T_phi, num_pre_phi))
        # Intensities are the upper bound times a sigmoid squashing of the GP.
        mu_em = lamda_mu*expit(g_mu_em)
        phi_em = lamda_phi*expit(g_phi_em)
        logl_train = self.loglikelihood_discrete_phi_mu(self.points_hawkes, mu_em, phi_em, self.T_phi, self.T)
        logl_test = self.loglikelihood_discrete_phi_mu(self.points_hawkes_test, mu_em, phi_em, self.T_phi, self.T_test)
        # record
        mu_list.append(mu_em)
        phi_list.append(phi_em)
        lamda_mu_list.append(lamda_mu)
        lamda_phi_list.append(lamda_phi)
        logl_train_list.append(logl_train)
        logl_test_list.append(logl_test)
    return mu_list,phi_list,lamda_mu_list,lamda_phi_list,logl_train_list,logl_test_list
#########################################################################################################
'Mean-Field Variational Inference'
def a_c_predict(self, x_M, y_M_mean, y_M_cov, theta0, theta1, noise_var, K_MM_inv, x_pred):
    r"""
    GP-regress y onto ``x_pred`` from a Gaussian belief over the inducing
    values, returning the predictive mean, sqrt of the second moment
    E[y_pred^2], and the predictive covariance.

    :type x_M: numpy array
    :param x_M: input x
    :type y_M_mean: numpy array
    :param y_M_mean: input y mean
    :type y_M_cov: 2D numpy array
    :param y_M_cov: input y covariance
    :type theta0:
    :param theta0: kernel amplitude hyperparameter
    :type theta1:
    :param theta1: kernel length-scale hyperparameter
    :type noise_var:
    :param noise_var: observation noise variance added by the kernel
    :type K_MM_inv: 2D numpy array
    :param K_MM_inv: the inverse of kernel matrix of x_M
    :type x_pred: numpy array
    :param x_pred: the predictive points
    :rtype: numpy arrays
    :return: mean, sqrt(E[y_pred^2]) and covariance of y_pred
    """
    # Cross-covariance between prediction points and inducing inputs.
    k_pred_M = self.rbf_kernel(theta0, theta1, noise_var, x_pred, x_M)
    # Projection k(x*, X) K^{-1}, shared by mean and covariance.
    proj = np.dot(k_pred_M, K_MM_inv)
    mean = np.dot(proj, y_M_mean)
    # Prior covariance at the prediction points, reduced by the explained
    # part and inflated by the uncertainty carried over from y_M_cov.
    prior_cov = self.rbf_kernel(theta0, theta1, noise_var, x_pred, x_pred)
    cov = prior_cov - np.dot(proj, k_pred_M.T) + np.dot(np.dot(proj, y_M_cov), proj.T)
    # sqrt(Var + mean^2) = sqrt(E[y^2]) elementwise on the diagonal.
    root_second_moment = np.sqrt(np.diag(cov) + mean**2)
    return mean, root_second_moment, cov
def MF(self, num_gq_mu, num_gq_phi, num_pre_mu, num_pre_phi, num_iter):
r"""
Mean-field variational inference algorithm which is used to estimate the posterior of lamda_ub_mu, lamda_ub_phi, g_s_mu and g_s_phi.
:type num_gq_mu: int
:param num_gq_mu: the number of Gaussian quadrature nodes on [0,T]
:type num_gq_phi: int
:param num_gq_phi: the number of Gaussian quadrature nodes on [0,T_phi]
:type num_pre_mu: int
:param num_pre_mu: the number of prediction points on [0,T]
:type num_pre_phi: int
:param num_pre_phi: the number of prediction points on [0,T_phi]
:type num_iter: int
:param num_iter: the number of MF iterations
:rtype: numpy array
:return: the mean and covariance of g_{\mu}(t), g_{\phi}(\tau), the location parameter of \lambda_{\mu}, \lambda_{\phi}, and the training and
test log-likelihood along MF iterations.
"""
N = len(self.points_hawkes)
P, Pij_flat, tau_phi_md, num_Pij_row = self.ini_P()
tau_phi = sum(tau_phi_md,[])
N_phi = len(tau_phi)
M_mu = len(self.ind_p_mu)
M_phi = len(self.ind_p_phi)
K_MM_mu = self.rbf_kernel(self.theta0_mu, self.theta1_mu, self.noise_var_mu, self.ind_p_mu, self.ind_p_mu)
K_MM_mu_inv = np.linalg.inv(K_MM_mu)
K_MM_phi = self.rbf_kernel(self.theta0_phi, self.theta1_phi,self.noise_var_phi, self.ind_p_phi, self.ind_p_phi)
K_MM_phi_inv = np.linalg.inv(K_MM_phi)
K_NM_mu = self.rbf_kernel(self.theta0_mu, self.theta1_mu, 0, np.array(self.points_hawkes), self.ind_p_mu)
K_NM_phi = self.rbf_kernel(self.theta0_phi, self.theta1_phi, 0, np.array(tau_phi), self.ind_p_phi)
# initial q_gm_mu q_gm_phi and q_lamda_mu q_lamda_phi
# q_lamda_mu is a gamma distribution gamma(alpha=sum(P_ii)+E(|pi|),scale=1/T)
alpha_mu = sum(np.diag(P))*2
# q_lamda_phi is a gamma distribution gamma(alpha=sum(P_ij)+N*E(|pi_n|),scale=1/N/T_phi)
alpha_phi = sum(Pij_flat)*12
# q_gm_mu is a gaussian distribution N(mean_gm_mu,cov_gm_mu)
mean_gm_mu = np.random.uniform(-1, 1, size = M_mu)
cov_gm_mu = K_MM_mu
mean_gm_phi = np.random.uniform(-1, 1, size = M_phi)
cov_gm_phi = K_MM_phi
# gaussian quadreture points and weights
p_gq_mu, w_gq_mu = self.gq_points_weights(0, self.T, num_gq_mu)
p_gq_phi, w_gq_phi = self.gq_points_weights(0, self.T_phi, num_gq_phi)
K_gqM_mu = self.rbf_kernel(self.theta0_mu, self.theta1_mu, 0, p_gq_mu, self.ind_p_mu)
K_gqM_phi = self.rbf_kernel(self.theta0_phi, self.theta1_phi, 0, p_gq_phi, self.ind_p_phi)
g_mu_mean_list=[]
g_mu_cov_list=[]
g_phi_mean_list=[]
g_phi_cov_list=[]
alpha_mu_list=[]
alpha_phi_list=[]
logl_train_list=[]
logl_test_list=[]
for iteration in range(num_iter):
# update parameters of density of q_wij: PG(wij|1,cij)
a_ii, c_ii, _ = self.a_c_predict(self.ind_p_mu, mean_gm_mu, cov_gm_mu, self.theta0_mu, self.theta1_mu, self.noise_var_mu, K_MM_mu_inv, np.array(self.points_hawkes))
E_w_ii = 1/2/c_ii*np.tanh(c_ii/2)
a_ij, c_ij, _ = self.a_c_predict(self.ind_p_phi, mean_gm_phi, cov_gm_phi, self.theta0_phi, self.theta1_phi, self.noise_var_phi, K_MM_phi_inv, np.array(tau_phi))
E_w_ij = 1/2/c_ij*np.tanh(c_ij/2)
# update parameters of q_pi_mu intensity=exp(E(log lamda_mu))sigmoid(-c(t))exp((c(t)-a(t))/2)*P_pg(wii|1,c(t))
# update parameters | |
from datetime import date, timedelta
import itertools
import os
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser
from django.conf import settings
from django.core import mail
from django.core.exceptions import ValidationError
from django.test import TestCase, override_settings
from django.urls import reverse
from opentech.apply.funds.models import ApplicationSubmission
from opentech.apply.funds.blocks import EmailBlock, FullNameBlock
from opentech.apply.funds.workflow import Request
from opentech.apply.review.tests.factories import ReviewFactory, ReviewOpinionFactory
from opentech.apply.review.options import NO, MAYBE
from opentech.apply.utils.testing import make_request
from opentech.apply.users.tests.factories import StaffFactory
from .factories import (
ApplicationSubmissionFactory,
AssignedReviewersFactory,
CustomFormFieldsFactory,
FundTypeFactory,
InvitedToProposalFactory,
LabFactory,
RequestForPartnersFactory,
RoundFactory,
TodayRoundFactory,
)
def days_from_today(days):
    """Return today's date shifted by *days* (negative values count back)."""
    offset = timedelta(days=days)
    return date.today() + offset
class TestFundModel(TestCase):
    """Workflow resolution and open-round lookup on a fund page."""

    def setUp(self):
        self.fund = FundTypeFactory(parent=None)

    def test_can_access_workflow_class(self):
        self.assertEqual(self.fund.workflow_name, 'single')
        self.assertEqual(self.fund.workflow, Request)

    def test_no_open_rounds(self):
        self.assertIsNone(self.fund.open_round)

    def test_open_ended_round(self):
        # No end date means the round stays open indefinitely.
        live_round = RoundFactory(start_date=date.today(), end_date=None, parent=self.fund)
        self.assertEqual(self.fund.open_round, live_round)

    def test_normal_round(self):
        live_round = RoundFactory(parent=self.fund, now=True)
        self.assertEqual(self.fund.open_round, live_round)

    def test_closed_round(self):
        # A round that ended yesterday is no longer open.
        RoundFactory(
            start_date=days_from_today(-7),
            end_date=days_from_today(-1),
            parent=self.fund,
        )
        self.assertIsNone(self.fund.open_round)

    def test_round_not_open(self):
        # A round starting tomorrow is not open yet.
        RoundFactory(start_date=days_from_today(1), parent=self.fund)
        self.assertIsNone(self.fund.open_round)

    def test_multiple_open_rounds(self):
        current = RoundFactory(parent=self.fund, now=True)
        follow_on_start = current.end_date + timedelta(days=1)
        RoundFactory(start_date=follow_on_start, end_date=None, parent=self.fund)
        # The currently-running round wins over the future one.
        self.assertEqual(self.fund.open_round, current)

    def test_can_not_be_open_with_draft_round(self):
        draft_round = RoundFactory(parent=self.fund)
        draft_round.live = False
        draft_round.save()
        self.assertIsNone(self.fund.open_round)

    def test_no_round_exists(self):
        self.assertIsNone(self.fund.next_deadline())
class TestRoundModelDates(TestCase):
    """Date validation on rounds: ordering and overlap rules."""

    def setUp(self):
        self.fund = FundTypeFactory(parent=None)

    def make_round(self, **kwargs):
        # Caller-supplied kwargs override the default parent.
        return RoundFactory(**{'parent': self.fund, **kwargs})

    def test_normal_start_end_doesnt_error(self):
        self.make_round()

    def test_end_before_start(self):
        with self.assertRaises(ValidationError):
            self.make_round(end_date=date.today() - timedelta(days=1))

    def test_end_overlaps(self):
        base_round = self.make_round()
        clash_end = base_round.end_date - timedelta(1)
        early_start = base_round.start_date - timedelta(1)
        with self.assertRaises(ValidationError):
            self.make_round(start_date=early_start, end_date=clash_end)

    def test_start_overlaps(self):
        base_round = self.make_round()
        clash_start = base_round.start_date + timedelta(1)
        late_end = base_round.end_date + timedelta(1)
        with self.assertRaises(ValidationError):
            self.make_round(start_date=clash_start, end_date=late_end)

    def test_inside_overlaps(self):
        # Entirely contained within an existing round.
        base_round = self.make_round()
        inner_start = base_round.start_date + timedelta(1)
        inner_end = base_round.end_date - timedelta(1)
        with self.assertRaises(ValidationError):
            self.make_round(start_date=inner_start, end_date=inner_end)

    def test_other_fund_not_impacting(self):
        self.make_round()
        other_fund = FundTypeFactory(parent=None)
        # Will share the same start and end dates
        self.make_round(parent=other_fund)

    def test_can_create_without_end_date(self):
        self.make_round(end_date=None)

    def test_can_not_create_with_other_open_end_date(self):
        open_ended = self.make_round(end_date=None)
        with self.assertRaises(ValidationError):
            self.make_round(start_date=open_ended.start_date + timedelta(1), end_date=None)

    def test_can_not_overlap_with_normal_round(self):
        base_round = self.make_round()
        with self.assertRaises(ValidationError):
            self.make_round(start_date=base_round.end_date - timedelta(1), end_date=None)

    def test_can_not_overlap_clean(self):
        base_round = self.make_round()
        clash_start = base_round.end_date - timedelta(1)
        unsaved = RoundFactory.build(start_date=clash_start, end_date=None)
        # we add on the parent page which gets included from a pre_create_hook
        unsaved.parent_page = {unsaved.__class__: {unsaved.title: self.fund}}
        with self.assertRaises(ValidationError):
            unsaved.clean()
class TestRoundModelWorkflowAndForms(TestCase):
    """Workflow name and forms are copied from the fund onto newly created rounds."""

    def setUp(self):
        self.fund = FundTypeFactory(parent=None)
        # Must create lead, adding child complains about "built" user with no id
        lead = RoundFactory.lead.get_factory()(**RoundFactory.lead.defaults)
        # Build (not save) the round so we can attach the parent manually.
        self.round = RoundFactory.build(lead=lead, parent=None)
        # Assign parent_page like the init does
        self.round.parent_page = {self.round.__class__: {self.round.title: self.fund}}
        self.fund.add_child(instance=self.round)

    def test_workflow_is_copied_to_new_rounds(self):
        self.round.save()
        self.assertEqual(self.round.workflow_name, self.fund.workflow_name)

    def test_forms_are_copied_to_new_rounds(self):
        self.round.save()
        # zip_longest so a count mismatch surfaces as a None != form failure.
        for round_form, fund_form in itertools.zip_longest(self.round.forms.all(), self.fund.forms.all()):
            self.assertEqual(round_form, fund_form)

    def test_can_change_round_form_not_fund(self):
        self.round.save()
        # We are no longer creating a round
        del self.round.parent_page
        form = self.round.forms.first().form
        # Not ideal, would prefer better way to create the stream values
        new_field = CustomFormFieldsFactory.generate(None, {})
        form.form_fields = new_field
        form.save()
        # Editing the round's copy must not have touched the fund's forms.
        for round_form, fund_form in itertools.zip_longest(self.round.forms.all(), self.fund.forms.all()):
            self.assertNotEqual(round_form, fund_form)
@override_settings(ROOT_URLCONF='opentech.apply.urls')
class TestFormSubmission(TestCase):
    """End-to-end application form submission against round and lab pages:
    user creation/association, validation errors, and notification emails."""

    def setUp(self):
        self.User = get_user_model()
        # NOTE(review): these look like scrubbed placeholder fixture values —
        # confirm against the original repository.
        self.email = '<EMAIL>'
        self.name = '<NAME>'
        fund = FundTypeFactory()
        self.site = fund.get_site()
        self.round_page = RoundFactory(parent=fund, now=True)
        self.lab_page = LabFactory(lead=self.round_page.lead)

    # NOTE(review): user=AnonymousUser() is a mutable-looking default evaluated
    # once at definition time; AnonymousUser appears stateless here, but verify.
    def submit_form(self, page=None, email=None, name=None, user=AnonymousUser(), ignore_errors=False):
        """POST a generated form response to *page* and return the response."""
        page = page or self.round_page
        fields = page.forms.first().fields
        data = CustomFormFieldsFactory.form_response(fields)
        # Add our own data
        for field in page.forms.first().fields:
            if isinstance(field.block, EmailBlock):
                data[field.id] = self.email if email is None else email
            if isinstance(field.block, FullNameBlock):
                data[field.id] = self.name if name is None else name
        request = make_request(user, data, method='post', site=self.site)
        # Rounds live under a fund page, labs directly under the site root;
        # route the request to whichever page actually serves the form.
        if page.get_parent().id != self.site.root_page.id:
            # Its a fund
            response = page.get_parent().serve(request)
        else:
            response = page.serve(request)
        if not ignore_errors:
            # Check the data we submit is correct
            self.assertNotContains(response, 'errors')
        return response

    def test_workflow_and_status_assigned(self):
        self.submit_form()
        submission = ApplicationSubmission.objects.first()
        first_phase = list(self.round_page.workflow.keys())[0]
        self.assertEqual(submission.workflow, self.round_page.workflow)
        self.assertEqual(submission.status, first_phase)

    def test_workflow_and_status_assigned_lab(self):
        self.submit_form(page=self.lab_page)
        submission = ApplicationSubmission.objects.first()
        first_phase = list(self.lab_page.workflow.keys())[0]
        self.assertEqual(submission.workflow, self.lab_page.workflow)
        self.assertEqual(submission.status, first_phase)

    def test_can_submit_if_new(self):
        self.submit_form()
        # Lead + applicant
        self.assertEqual(self.User.objects.count(), 2)
        new_user = self.User.objects.get(email=self.email)
        self.assertEqual(new_user.get_full_name(), self.name)
        self.assertEqual(ApplicationSubmission.objects.count(), 1)
        self.assertEqual(ApplicationSubmission.objects.first().user, new_user)

    def test_doesnt_mess_with_name(self):
        # Name containing quoting/markup must be stored verbatim.
        full_name = "I have; <a> wei'rd name"
        self.submit_form(name=full_name)
        submission = ApplicationSubmission.objects.first()
        self.assertEqual(submission.user.full_name, full_name)

    def test_associated_if_not_new(self):
        self.submit_form()
        self.submit_form()
        # Lead + applicant
        self.assertEqual(self.User.objects.count(), 2)
        user = self.User.objects.get(email=self.email)
        self.assertEqual(ApplicationSubmission.objects.count(), 2)
        self.assertEqual(ApplicationSubmission.objects.first().user, user)

    def test_associated_if_another_user_exists(self):
        email = '<EMAIL>'
        self.submit_form()
        # Someone else submits a form
        self.submit_form(email=email)
        # Lead + 2 x applicant
        self.assertEqual(self.User.objects.count(), 3)
        first_user, second_user = self.User.objects.get(email=self.email), self.User.objects.get(email=email)
        self.assertEqual(ApplicationSubmission.objects.count(), 2)
        self.assertEqual(ApplicationSubmission.objects.first().user, first_user)
        self.assertEqual(ApplicationSubmission.objects.last().user, second_user)

    def test_associated_if_logged_in(self):
        user, _ = self.User.objects.get_or_create(email=self.email, defaults={'full_name': self.name})
        # Lead + Applicant
        self.assertEqual(self.User.objects.count(), 2)
        self.submit_form(email=self.email, name=self.name, user=user)
        # Lead + Applicant
        self.assertEqual(self.User.objects.count(), 2)
        self.assertEqual(ApplicationSubmission.objects.count(), 1)
        self.assertEqual(ApplicationSubmission.objects.first().user, user)

    # This will need to be updated when we hide user information contextually
    def test_errors_if_blank_user_data_even_if_logged_in(self):
        user, _ = self.User.objects.get_or_create(email=self.email, defaults={'full_name': self.name})
        # Lead + applicant
        self.assertEqual(self.User.objects.count(), 2)
        response = self.submit_form(email='', name='', user=user, ignore_errors=True)
        self.assertContains(response, 'This field is required')
        # Lead + applicant
        self.assertEqual(self.User.objects.count(), 2)
        self.assertEqual(ApplicationSubmission.objects.count(), 0)

    def test_valid_email(self):
        email = 'not_a_valid_email@'
        response = self.submit_form(email=email, ignore_errors=True)
        self.assertContains(response, 'Enter a valid email address')

    @override_settings(SEND_MESSAGES=True)
    def test_email_sent_to_user_on_submission_fund(self):
        self.submit_form()
        # "Thank you for your submission" and "Account Creation"
        self.assertEqual(len(mail.outbox), 2)
        self.assertEqual(mail.outbox[0].to[0], self.email)

    @override_settings(SEND_MESSAGES=True)
    def test_email_sent_to_user_on_submission_lab(self):
        self.submit_form(page=self.lab_page)
        # "Thank you for your submission" and "Account Creation"
        self.assertEqual(len(mail.outbox), 2)
        self.assertEqual(mail.outbox[0].to[0], self.email)
class TestApplicationSubmission(TestCase):
    """Model-level behaviour of ApplicationSubmission: ordering, search-data
    extraction, file handling, and the revision lifecycle."""

    def make_submission(self, **kwargs):
        """Create a submission via the factory, forwarding any overrides."""
        return ApplicationSubmissionFactory(**kwargs)

    def refresh(self, instance):
        """Re-fetch *instance* from the DB, discarding in-memory changes."""
        return instance.__class__.objects.get(id=instance.id)

    def test_can_get_required_block_names(self):
        email = '<EMAIL>'
        submission = self.make_submission(user__email=email)
        self.assertEqual(submission.email, email)

    def test_can_get_ordered_qs(self):
        # Emails are created sequentially
        submission_a = self.make_submission()
        submission_b = self.make_submission(round=submission_a.round)
        submissions = [submission_a, submission_b]
        self.assertEqual(
            list(ApplicationSubmission.objects.order_by('id')),
            submissions,
        )

    def test_can_get_reverse_ordered_qs(self):
        submission_a = self.make_submission()
        submission_b = self.make_submission(round=submission_a.round)
        submissions = [submission_b, submission_a]
        self.assertEqual(
            list(ApplicationSubmission.objects.order_by('-id')),
            submissions,
        )

    def test_richtext_in_char_is_removed_for_search(self):
        text = 'I am text'
        rich_text = f'<b>{text}</b>'
        submission = self.make_submission(form_data__char=rich_text)
        # Markup is stripped; the plain text remains searchable.
        self.assertNotIn(rich_text, submission.search_data)
        self.assertIn(text, submission.search_data)

    def test_richtext_is_removed_for_search(self):
        text = 'I am text'
        rich_text = f'<b>{text}</b>'
        submission = self.make_submission(form_data__rich_text=rich_text)
        self.assertNotIn(rich_text, submission.search_data)
        self.assertIn(text, submission.search_data)

    def test_choices_added_for_search(self):
        choices = ['blah', 'foo']
        submission = self.make_submission(form_fields__radios__choices=choices, form_data__radios=['blah'])
        self.assertIn('blah', submission.search_data)

    def test_number_not_in_search(self):
        value = 12345
        submission = self.make_submission(form_data__number=value)
        self.assertNotIn(str(value), submission.search_data)

    def test_file_gets_uploaded(self):
        filename = 'file_name.png'
        submission = self.make_submission(form_data__image__filename=filename)
        path = os.path.join(settings.MEDIA_ROOT, 'submission', str(submission.id))
        # Check we created the top level folder
        self.assertTrue(os.path.isdir(path))
        found_files = []
        for _, _, files in os.walk(path):
            found_files.extend(files)
        # Check we saved the file somewhere beneath it
        self.assertIn(filename, found_files)

    def test_correct_file_path_generated(self):
        submission = ApplicationSubmissionFactory()
        for file_id in submission.file_field_ids:
            def check_generated_file_path(file_to_test):
                file_path_generated = file_to_test.generate_filename()
                file_path_required = os.path.join('submission', str(submission.id), str(file_id), file_to_test.basename)
                self.assertEqual(file_path_generated, file_path_required)
            file_response = submission.data(file_id)
            # Multi-file fields return a list of stream files.
            if isinstance(file_response, list):
                for stream_file in file_response:
                    check_generated_file_path(stream_file)
            else:
                check_generated_file_path(file_response)

    def test_create_revision_on_create(self):
        submission = ApplicationSubmissionFactory()
        self.assertEqual(submission.revisions.count(), 1)
        self.assertDictEqual(submission.live_revision.form_data, submission.form_data)
        self.assertEqual(submission.live_revision.author, submission.user)

    def test_create_revision_on_data_change(self):
        submission = ApplicationSubmissionFactory()
        submission.form_data['title'] = 'My Awesome Title'
        new_data = submission.form_data
        submission.create_revision()
        submission = self.refresh(submission)
        self.assertEqual(submission.revisions.count(), 2)
        self.assertDictEqual(submission.live_revision.form_data, new_data)

    def test_dont_create_revision_on_data_same(self):
        submission = ApplicationSubmissionFactory()
        submission.create_revision()
        # Unchanged data must not spawn a second revision.
        self.assertEqual(submission.revisions.count(), 1)
        self.assertDictEqual(submission.live_revision.form_data, submission.form_data)

    def test_can_get_draft_data(self):
        submission = ApplicationSubmissionFactory()
        title = 'My new title'
        submission.form_data['title'] = title
        submission.create_revision(draft=True)
        self.assertEqual(submission.revisions.count(), 2)
        draft_submission = submission.from_draft()
        self.assertDictEqual(draft_submission.form_data, submission.form_data)
        self.assertEqual(draft_submission.title, title)
        # BUG FIX: was `self.assertTrue(draft_submission.is_draft, True)` —
        # the second argument of assertTrue is the failure *message*, not an
        # expected value, so the `True` was silently ignored.
        self.assertTrue(draft_submission.is_draft)
        with self.assertRaises(ValueError):
            draft_submission.save()
        submission = self.refresh(submission)
        # The live submission keeps its original title until the draft lands.
        self.assertNotEqual(submission.title, title)

    def test_draft_updated(self):
        submission = ApplicationSubmissionFactory()
        title = 'My new title'
        submission.form_data['title'] = title
        submission.create_revision(draft=True)
        self.assertEqual(submission.revisions.count(), 2)
        title = 'My even newer title'
        submission.form_data['title'] = title
        # A second draft replaces the first instead of adding a revision.
        submission.create_revision(draft=True)
        self.assertEqual(submission.revisions.count(), 2)

    def test_in_final_stage(self):
        submission = InvitedToProposalFactory().previous
        self.assertFalse(submission.in_final_stage)
        submission = InvitedToProposalFactory()
        self.assertTrue(submission.in_final_stage)
@override_settings(ROOT_URLCONF='opentech.apply.urls')
class TestSubmissionRenderMethods(TestCase):
    """Rendering of submission answers: which blocks appear, and private file URLs."""

    def test_named_blocks_not_included_in_answers(self):
        submission = ApplicationSubmissionFactory()
        answers = submission.output_answers()
        # Named blocks (e.g. title/email) are rendered elsewhere, not in answers.
        for name in submission.named_blocks:
            field = submission.field(name)
            self.assertNotIn(field.value['field_label'], answers)

    def test_normal_answers_included_in_answers(self):
        submission = ApplicationSubmissionFactory()
        answers = submission.output_answers()
        for field_name in submission.question_field_ids:
            if field_name not in submission.named_blocks:
                field = submission.field(field_name)
                self.assertIn(field.value['field_label'], answers)

    def test_paragraph_not_rendered_in_answers(self):
        # Static rich-text (markup-only) blocks carry no answer to render.
        rich_text_label = 'My rich text label!'
        submission = ApplicationSubmissionFactory(
            form_fields__text_markup__value=rich_text_label
        )
        answers = submission.output_answers()
        self.assertNotIn(rich_text_label, answers)

    def test_named_blocks_dont_break_if_no_response(self):
        submission = ApplicationSubmissionFactory()
        # the user didn't respond
        del submission.form_data['value']
        # value doesnt sneak into raw_data
        self.assertTrue('value' not in submission.raw_data)
        # value field_id gone
        field_id = submission.get_definitive_id('value')
        self.assertTrue(field_id not in submission.raw_data)
        # value attr is None
        self.assertIsNone(submission.value)

    def test_file_private_url_included(self):
        submission = ApplicationSubmissionFactory()
        answers = submission.output_answers()
        for file_id in submission.file_field_ids:
            # Each uploaded file must be linked through the private-media view.
            def file_url_in_answers(file_to_test):
                url = reverse(
                    'apply:submissions:serve_private_media', kwargs={
                        'pk': submission.pk,
                        'field_id': file_id,
                        'file_name': file_to_test.basename,
                    }
                )
                self.assertIn(url, answers)
            file_response = submission.data(file_id)
            # Multi-file fields return a list of stream files.
            if isinstance(file_response, list):
                for stream_file in file_response:
                    file_url_in_answers(stream_file)
            else:
                file_url_in_answers(file_response)
class TestRequestForPartners(TestCase):
    """Serving an RFP page with and without an active round."""

    def serve_page(self, rfp):
        # Build an anonymous GET request for the page's own site and serve it.
        request = make_request(site=rfp.get_site())
        return rfp.serve(request)

    def test_message_when_no_round(self):
        rfp = RequestForPartnersFactory()
        response = self.serve_page(rfp)
        self.assertContains(response, 'not accepting')
        self.assertNotContains(response, 'Submit')

    def test_form_when_round(self):
        rfp = RequestForPartnersFactory()
        TodayRoundFactory(parent=rfp)
        response = self.serve_page(rfp)
        self.assertNotContains(response, 'not accepting')
        self.assertContains(response, 'Submit')
class TestForTableQueryset(TestCase):
def test_assigned_but_not_reviewed(self):
staff = StaffFactory()
submission = ApplicationSubmissionFactory()
AssignedReviewersFactory(submission=submission, reviewer=staff)
qs = ApplicationSubmission.objects.for_table(user=staff)
submission = qs[0]
self.assertEqual(submission.opinion_disagree, None)
self.assertEqual(submission.review_count, 1)
self.assertEqual(submission.review_submitted_count, None)
self.assertEqual(submission.review_recommendation, None)
def test_review_outcome(self):
staff = StaffFactory()
submission = ApplicationSubmissionFactory()
ReviewFactory(submission=submission)
qs = ApplicationSubmission.objects.for_table(user=staff)
submission = qs[0]
self.assertEqual(submission.opinion_disagree, | |
Constraint(expr=m.x1357*m.x1357 - m.x4377*m.b3010 <= 0)
# Machine-generated Pyomo model fragment — do not edit by hand.
# Each constraint has the form  x_i**2 - x_j * b_k <= 0 : the squared
# continuous variable x_i is bounded by the product of an auxiliary
# variable x_j and a binary indicator b_k, so x_i**2 can only be
# positive when the indicator b_k is switched on.
# Constraints c4509..c4651 share indicator b3010; c4652..c4720 share b3011.
m.c4509 = Constraint(expr=m.x1358*m.x1358 - m.x4378*m.b3010 <= 0)
m.c4510 = Constraint(expr=m.x1359*m.x1359 - m.x4379*m.b3010 <= 0)
m.c4511 = Constraint(expr=m.x1360*m.x1360 - m.x4380*m.b3010 <= 0)
m.c4512 = Constraint(expr=m.x1361*m.x1361 - m.x4381*m.b3010 <= 0)
m.c4513 = Constraint(expr=m.x1362*m.x1362 - m.x4382*m.b3010 <= 0)
m.c4514 = Constraint(expr=m.x1363*m.x1363 - m.x4383*m.b3010 <= 0)
m.c4515 = Constraint(expr=m.x1364*m.x1364 - m.x4384*m.b3010 <= 0)
m.c4516 = Constraint(expr=m.x1365*m.x1365 - m.x4385*m.b3010 <= 0)
m.c4517 = Constraint(expr=m.x1366*m.x1366 - m.x4386*m.b3010 <= 0)
m.c4518 = Constraint(expr=m.x1367*m.x1367 - m.x4387*m.b3010 <= 0)
m.c4519 = Constraint(expr=m.x1368*m.x1368 - m.x4388*m.b3010 <= 0)
m.c4520 = Constraint(expr=m.x1369*m.x1369 - m.x4389*m.b3010 <= 0)
m.c4521 = Constraint(expr=m.x1370*m.x1370 - m.x4390*m.b3010 <= 0)
m.c4522 = Constraint(expr=m.x1371*m.x1371 - m.x4391*m.b3010 <= 0)
m.c4523 = Constraint(expr=m.x1372*m.x1372 - m.x4392*m.b3010 <= 0)
m.c4524 = Constraint(expr=m.x1373*m.x1373 - m.x4393*m.b3010 <= 0)
m.c4525 = Constraint(expr=m.x1374*m.x1374 - m.x4394*m.b3010 <= 0)
m.c4526 = Constraint(expr=m.x1375*m.x1375 - m.x4395*m.b3010 <= 0)
m.c4527 = Constraint(expr=m.x1376*m.x1376 - m.x4396*m.b3010 <= 0)
m.c4528 = Constraint(expr=m.x1377*m.x1377 - m.x4397*m.b3010 <= 0)
m.c4529 = Constraint(expr=m.x1378*m.x1378 - m.x4398*m.b3010 <= 0)
m.c4530 = Constraint(expr=m.x1379*m.x1379 - m.x4399*m.b3010 <= 0)
m.c4531 = Constraint(expr=m.x1380*m.x1380 - m.x4400*m.b3010 <= 0)
m.c4532 = Constraint(expr=m.x1381*m.x1381 - m.x4401*m.b3010 <= 0)
m.c4533 = Constraint(expr=m.x1382*m.x1382 - m.x4402*m.b3010 <= 0)
m.c4534 = Constraint(expr=m.x1383*m.x1383 - m.x4403*m.b3010 <= 0)
m.c4535 = Constraint(expr=m.x1384*m.x1384 - m.x4404*m.b3010 <= 0)
m.c4536 = Constraint(expr=m.x1385*m.x1385 - m.x4405*m.b3010 <= 0)
m.c4537 = Constraint(expr=m.x1386*m.x1386 - m.x4406*m.b3010 <= 0)
m.c4538 = Constraint(expr=m.x1387*m.x1387 - m.x4407*m.b3010 <= 0)
m.c4539 = Constraint(expr=m.x1388*m.x1388 - m.x4408*m.b3010 <= 0)
m.c4540 = Constraint(expr=m.x1389*m.x1389 - m.x4409*m.b3010 <= 0)
m.c4541 = Constraint(expr=m.x1390*m.x1390 - m.x4410*m.b3010 <= 0)
m.c4542 = Constraint(expr=m.x1391*m.x1391 - m.x4411*m.b3010 <= 0)
m.c4543 = Constraint(expr=m.x1392*m.x1392 - m.x4412*m.b3010 <= 0)
m.c4544 = Constraint(expr=m.x1393*m.x1393 - m.x4413*m.b3010 <= 0)
m.c4545 = Constraint(expr=m.x1394*m.x1394 - m.x4414*m.b3010 <= 0)
m.c4546 = Constraint(expr=m.x1395*m.x1395 - m.x4415*m.b3010 <= 0)
m.c4547 = Constraint(expr=m.x1396*m.x1396 - m.x4416*m.b3010 <= 0)
m.c4548 = Constraint(expr=m.x1397*m.x1397 - m.x4417*m.b3010 <= 0)
m.c4549 = Constraint(expr=m.x1398*m.x1398 - m.x4418*m.b3010 <= 0)
m.c4550 = Constraint(expr=m.x1399*m.x1399 - m.x4419*m.b3010 <= 0)
m.c4551 = Constraint(expr=m.x1400*m.x1400 - m.x4420*m.b3010 <= 0)
m.c4552 = Constraint(expr=m.x1401*m.x1401 - m.x4421*m.b3010 <= 0)
m.c4553 = Constraint(expr=m.x1402*m.x1402 - m.x4422*m.b3010 <= 0)
m.c4554 = Constraint(expr=m.x1403*m.x1403 - m.x4423*m.b3010 <= 0)
m.c4555 = Constraint(expr=m.x1404*m.x1404 - m.x4424*m.b3010 <= 0)
m.c4556 = Constraint(expr=m.x1405*m.x1405 - m.x4425*m.b3010 <= 0)
m.c4557 = Constraint(expr=m.x1406*m.x1406 - m.x4426*m.b3010 <= 0)
m.c4558 = Constraint(expr=m.x1407*m.x1407 - m.x4427*m.b3010 <= 0)
m.c4559 = Constraint(expr=m.x1408*m.x1408 - m.x4428*m.b3010 <= 0)
m.c4560 = Constraint(expr=m.x1409*m.x1409 - m.x4429*m.b3010 <= 0)
m.c4561 = Constraint(expr=m.x1410*m.x1410 - m.x4430*m.b3010 <= 0)
m.c4562 = Constraint(expr=m.x1411*m.x1411 - m.x4431*m.b3010 <= 0)
m.c4563 = Constraint(expr=m.x1412*m.x1412 - m.x4432*m.b3010 <= 0)
m.c4564 = Constraint(expr=m.x1413*m.x1413 - m.x4433*m.b3010 <= 0)
m.c4565 = Constraint(expr=m.x1414*m.x1414 - m.x4434*m.b3010 <= 0)
m.c4566 = Constraint(expr=m.x1415*m.x1415 - m.x4435*m.b3010 <= 0)
m.c4567 = Constraint(expr=m.x1416*m.x1416 - m.x4436*m.b3010 <= 0)
m.c4568 = Constraint(expr=m.x1417*m.x1417 - m.x4437*m.b3010 <= 0)
m.c4569 = Constraint(expr=m.x1418*m.x1418 - m.x4438*m.b3010 <= 0)
m.c4570 = Constraint(expr=m.x1419*m.x1419 - m.x4439*m.b3010 <= 0)
m.c4571 = Constraint(expr=m.x1420*m.x1420 - m.x4440*m.b3010 <= 0)
m.c4572 = Constraint(expr=m.x1421*m.x1421 - m.x4441*m.b3010 <= 0)
m.c4573 = Constraint(expr=m.x1422*m.x1422 - m.x4442*m.b3010 <= 0)
m.c4574 = Constraint(expr=m.x1423*m.x1423 - m.x4443*m.b3010 <= 0)
m.c4575 = Constraint(expr=m.x1424*m.x1424 - m.x4444*m.b3010 <= 0)
m.c4576 = Constraint(expr=m.x1425*m.x1425 - m.x4445*m.b3010 <= 0)
m.c4577 = Constraint(expr=m.x1426*m.x1426 - m.x4446*m.b3010 <= 0)
m.c4578 = Constraint(expr=m.x1427*m.x1427 - m.x4447*m.b3010 <= 0)
m.c4579 = Constraint(expr=m.x1428*m.x1428 - m.x4448*m.b3010 <= 0)
m.c4580 = Constraint(expr=m.x1429*m.x1429 - m.x4449*m.b3010 <= 0)
m.c4581 = Constraint(expr=m.x1430*m.x1430 - m.x4450*m.b3010 <= 0)
m.c4582 = Constraint(expr=m.x1431*m.x1431 - m.x4451*m.b3010 <= 0)
m.c4583 = Constraint(expr=m.x1432*m.x1432 - m.x4452*m.b3010 <= 0)
m.c4584 = Constraint(expr=m.x1433*m.x1433 - m.x4453*m.b3010 <= 0)
m.c4585 = Constraint(expr=m.x1434*m.x1434 - m.x4454*m.b3010 <= 0)
m.c4586 = Constraint(expr=m.x1435*m.x1435 - m.x4455*m.b3010 <= 0)
m.c4587 = Constraint(expr=m.x1436*m.x1436 - m.x4456*m.b3010 <= 0)
m.c4588 = Constraint(expr=m.x1437*m.x1437 - m.x4457*m.b3010 <= 0)
m.c4589 = Constraint(expr=m.x1438*m.x1438 - m.x4458*m.b3010 <= 0)
m.c4590 = Constraint(expr=m.x1439*m.x1439 - m.x4459*m.b3010 <= 0)
m.c4591 = Constraint(expr=m.x1440*m.x1440 - m.x4460*m.b3010 <= 0)
m.c4592 = Constraint(expr=m.x1441*m.x1441 - m.x4461*m.b3010 <= 0)
m.c4593 = Constraint(expr=m.x1442*m.x1442 - m.x4462*m.b3010 <= 0)
m.c4594 = Constraint(expr=m.x1443*m.x1443 - m.x4463*m.b3010 <= 0)
m.c4595 = Constraint(expr=m.x1444*m.x1444 - m.x4464*m.b3010 <= 0)
m.c4596 = Constraint(expr=m.x1445*m.x1445 - m.x4465*m.b3010 <= 0)
m.c4597 = Constraint(expr=m.x1446*m.x1446 - m.x4466*m.b3010 <= 0)
m.c4598 = Constraint(expr=m.x1447*m.x1447 - m.x4467*m.b3010 <= 0)
m.c4599 = Constraint(expr=m.x1448*m.x1448 - m.x4468*m.b3010 <= 0)
m.c4600 = Constraint(expr=m.x1449*m.x1449 - m.x4469*m.b3010 <= 0)
m.c4601 = Constraint(expr=m.x1450*m.x1450 - m.x4470*m.b3010 <= 0)
m.c4602 = Constraint(expr=m.x1451*m.x1451 - m.x4471*m.b3010 <= 0)
m.c4603 = Constraint(expr=m.x1452*m.x1452 - m.x4472*m.b3010 <= 0)
m.c4604 = Constraint(expr=m.x1453*m.x1453 - m.x4473*m.b3010 <= 0)
m.c4605 = Constraint(expr=m.x1454*m.x1454 - m.x4474*m.b3010 <= 0)
m.c4606 = Constraint(expr=m.x1455*m.x1455 - m.x4475*m.b3010 <= 0)
m.c4607 = Constraint(expr=m.x1456*m.x1456 - m.x4476*m.b3010 <= 0)
m.c4608 = Constraint(expr=m.x1457*m.x1457 - m.x4477*m.b3010 <= 0)
m.c4609 = Constraint(expr=m.x1458*m.x1458 - m.x4478*m.b3010 <= 0)
m.c4610 = Constraint(expr=m.x1459*m.x1459 - m.x4479*m.b3010 <= 0)
m.c4611 = Constraint(expr=m.x1460*m.x1460 - m.x4480*m.b3010 <= 0)
m.c4612 = Constraint(expr=m.x1461*m.x1461 - m.x4481*m.b3010 <= 0)
m.c4613 = Constraint(expr=m.x1462*m.x1462 - m.x4482*m.b3010 <= 0)
m.c4614 = Constraint(expr=m.x1463*m.x1463 - m.x4483*m.b3010 <= 0)
m.c4615 = Constraint(expr=m.x1464*m.x1464 - m.x4484*m.b3010 <= 0)
m.c4616 = Constraint(expr=m.x1465*m.x1465 - m.x4485*m.b3010 <= 0)
m.c4617 = Constraint(expr=m.x1466*m.x1466 - m.x4486*m.b3010 <= 0)
m.c4618 = Constraint(expr=m.x1467*m.x1467 - m.x4487*m.b3010 <= 0)
m.c4619 = Constraint(expr=m.x1468*m.x1468 - m.x4488*m.b3010 <= 0)
m.c4620 = Constraint(expr=m.x1469*m.x1469 - m.x4489*m.b3010 <= 0)
m.c4621 = Constraint(expr=m.x1470*m.x1470 - m.x4490*m.b3010 <= 0)
m.c4622 = Constraint(expr=m.x1471*m.x1471 - m.x4491*m.b3010 <= 0)
m.c4623 = Constraint(expr=m.x1472*m.x1472 - m.x4492*m.b3010 <= 0)
m.c4624 = Constraint(expr=m.x1473*m.x1473 - m.x4493*m.b3010 <= 0)
m.c4625 = Constraint(expr=m.x1474*m.x1474 - m.x4494*m.b3010 <= 0)
m.c4626 = Constraint(expr=m.x1475*m.x1475 - m.x4495*m.b3010 <= 0)
m.c4627 = Constraint(expr=m.x1476*m.x1476 - m.x4496*m.b3010 <= 0)
m.c4628 = Constraint(expr=m.x1477*m.x1477 - m.x4497*m.b3010 <= 0)
m.c4629 = Constraint(expr=m.x1478*m.x1478 - m.x4498*m.b3010 <= 0)
m.c4630 = Constraint(expr=m.x1479*m.x1479 - m.x4499*m.b3010 <= 0)
m.c4631 = Constraint(expr=m.x1480*m.x1480 - m.x4500*m.b3010 <= 0)
m.c4632 = Constraint(expr=m.x1481*m.x1481 - m.x4501*m.b3010 <= 0)
m.c4633 = Constraint(expr=m.x1482*m.x1482 - m.x4502*m.b3010 <= 0)
m.c4634 = Constraint(expr=m.x1483*m.x1483 - m.x4503*m.b3010 <= 0)
m.c4635 = Constraint(expr=m.x1484*m.x1484 - m.x4504*m.b3010 <= 0)
m.c4636 = Constraint(expr=m.x1485*m.x1485 - m.x4505*m.b3010 <= 0)
m.c4637 = Constraint(expr=m.x1486*m.x1486 - m.x4506*m.b3010 <= 0)
m.c4638 = Constraint(expr=m.x1487*m.x1487 - m.x4507*m.b3010 <= 0)
m.c4639 = Constraint(expr=m.x1488*m.x1488 - m.x4508*m.b3010 <= 0)
m.c4640 = Constraint(expr=m.x1489*m.x1489 - m.x4509*m.b3010 <= 0)
m.c4641 = Constraint(expr=m.x1490*m.x1490 - m.x4510*m.b3010 <= 0)
m.c4642 = Constraint(expr=m.x1491*m.x1491 - m.x4511*m.b3010 <= 0)
m.c4643 = Constraint(expr=m.x1492*m.x1492 - m.x4512*m.b3010 <= 0)
m.c4644 = Constraint(expr=m.x1493*m.x1493 - m.x4513*m.b3010 <= 0)
m.c4645 = Constraint(expr=m.x1494*m.x1494 - m.x4514*m.b3010 <= 0)
m.c4646 = Constraint(expr=m.x1495*m.x1495 - m.x4515*m.b3010 <= 0)
m.c4647 = Constraint(expr=m.x1496*m.x1496 - m.x4516*m.b3010 <= 0)
m.c4648 = Constraint(expr=m.x1497*m.x1497 - m.x4517*m.b3010 <= 0)
m.c4649 = Constraint(expr=m.x1498*m.x1498 - m.x4518*m.b3010 <= 0)
m.c4650 = Constraint(expr=m.x1499*m.x1499 - m.x4519*m.b3010 <= 0)
m.c4651 = Constraint(expr=m.x1500*m.x1500 - m.x4520*m.b3010 <= 0)
# indicator switches to b3011 for the next group of variables
m.c4652 = Constraint(expr=m.x1501*m.x1501 - m.x4521*m.b3011 <= 0)
m.c4653 = Constraint(expr=m.x1502*m.x1502 - m.x4522*m.b3011 <= 0)
m.c4654 = Constraint(expr=m.x1503*m.x1503 - m.x4523*m.b3011 <= 0)
m.c4655 = Constraint(expr=m.x1504*m.x1504 - m.x4524*m.b3011 <= 0)
m.c4656 = Constraint(expr=m.x1505*m.x1505 - m.x4525*m.b3011 <= 0)
m.c4657 = Constraint(expr=m.x1506*m.x1506 - m.x4526*m.b3011 <= 0)
m.c4658 = Constraint(expr=m.x1507*m.x1507 - m.x4527*m.b3011 <= 0)
m.c4659 = Constraint(expr=m.x1508*m.x1508 - m.x4528*m.b3011 <= 0)
m.c4660 = Constraint(expr=m.x1509*m.x1509 - m.x4529*m.b3011 <= 0)
m.c4661 = Constraint(expr=m.x1510*m.x1510 - m.x4530*m.b3011 <= 0)
m.c4662 = Constraint(expr=m.x1511*m.x1511 - m.x4531*m.b3011 <= 0)
m.c4663 = Constraint(expr=m.x1512*m.x1512 - m.x4532*m.b3011 <= 0)
m.c4664 = Constraint(expr=m.x1513*m.x1513 - m.x4533*m.b3011 <= 0)
m.c4665 = Constraint(expr=m.x1514*m.x1514 - m.x4534*m.b3011 <= 0)
m.c4666 = Constraint(expr=m.x1515*m.x1515 - m.x4535*m.b3011 <= 0)
m.c4667 = Constraint(expr=m.x1516*m.x1516 - m.x4536*m.b3011 <= 0)
m.c4668 = Constraint(expr=m.x1517*m.x1517 - m.x4537*m.b3011 <= 0)
m.c4669 = Constraint(expr=m.x1518*m.x1518 - m.x4538*m.b3011 <= 0)
m.c4670 = Constraint(expr=m.x1519*m.x1519 - m.x4539*m.b3011 <= 0)
m.c4671 = Constraint(expr=m.x1520*m.x1520 - m.x4540*m.b3011 <= 0)
m.c4672 = Constraint(expr=m.x1521*m.x1521 - m.x4541*m.b3011 <= 0)
m.c4673 = Constraint(expr=m.x1522*m.x1522 - m.x4542*m.b3011 <= 0)
m.c4674 = Constraint(expr=m.x1523*m.x1523 - m.x4543*m.b3011 <= 0)
m.c4675 = Constraint(expr=m.x1524*m.x1524 - m.x4544*m.b3011 <= 0)
m.c4676 = Constraint(expr=m.x1525*m.x1525 - m.x4545*m.b3011 <= 0)
m.c4677 = Constraint(expr=m.x1526*m.x1526 - m.x4546*m.b3011 <= 0)
m.c4678 = Constraint(expr=m.x1527*m.x1527 - m.x4547*m.b3011 <= 0)
m.c4679 = Constraint(expr=m.x1528*m.x1528 - m.x4548*m.b3011 <= 0)
m.c4680 = Constraint(expr=m.x1529*m.x1529 - m.x4549*m.b3011 <= 0)
m.c4681 = Constraint(expr=m.x1530*m.x1530 - m.x4550*m.b3011 <= 0)
m.c4682 = Constraint(expr=m.x1531*m.x1531 - m.x4551*m.b3011 <= 0)
m.c4683 = Constraint(expr=m.x1532*m.x1532 - m.x4552*m.b3011 <= 0)
m.c4684 = Constraint(expr=m.x1533*m.x1533 - m.x4553*m.b3011 <= 0)
m.c4685 = Constraint(expr=m.x1534*m.x1534 - m.x4554*m.b3011 <= 0)
m.c4686 = Constraint(expr=m.x1535*m.x1535 - m.x4555*m.b3011 <= 0)
m.c4687 = Constraint(expr=m.x1536*m.x1536 - m.x4556*m.b3011 <= 0)
m.c4688 = Constraint(expr=m.x1537*m.x1537 - m.x4557*m.b3011 <= 0)
m.c4689 = Constraint(expr=m.x1538*m.x1538 - m.x4558*m.b3011 <= 0)
m.c4690 = Constraint(expr=m.x1539*m.x1539 - m.x4559*m.b3011 <= 0)
m.c4691 = Constraint(expr=m.x1540*m.x1540 - m.x4560*m.b3011 <= 0)
m.c4692 = Constraint(expr=m.x1541*m.x1541 - m.x4561*m.b3011 <= 0)
m.c4693 = Constraint(expr=m.x1542*m.x1542 - m.x4562*m.b3011 <= 0)
m.c4694 = Constraint(expr=m.x1543*m.x1543 - m.x4563*m.b3011 <= 0)
m.c4695 = Constraint(expr=m.x1544*m.x1544 - m.x4564*m.b3011 <= 0)
m.c4696 = Constraint(expr=m.x1545*m.x1545 - m.x4565*m.b3011 <= 0)
m.c4697 = Constraint(expr=m.x1546*m.x1546 - m.x4566*m.b3011 <= 0)
m.c4698 = Constraint(expr=m.x1547*m.x1547 - m.x4567*m.b3011 <= 0)
m.c4699 = Constraint(expr=m.x1548*m.x1548 - m.x4568*m.b3011 <= 0)
m.c4700 = Constraint(expr=m.x1549*m.x1549 - m.x4569*m.b3011 <= 0)
m.c4701 = Constraint(expr=m.x1550*m.x1550 - m.x4570*m.b3011 <= 0)
m.c4702 = Constraint(expr=m.x1551*m.x1551 - m.x4571*m.b3011 <= 0)
m.c4703 = Constraint(expr=m.x1552*m.x1552 - m.x4572*m.b3011 <= 0)
m.c4704 = Constraint(expr=m.x1553*m.x1553 - m.x4573*m.b3011 <= 0)
m.c4705 = Constraint(expr=m.x1554*m.x1554 - m.x4574*m.b3011 <= 0)
m.c4706 = Constraint(expr=m.x1555*m.x1555 - m.x4575*m.b3011 <= 0)
m.c4707 = Constraint(expr=m.x1556*m.x1556 - m.x4576*m.b3011 <= 0)
m.c4708 = Constraint(expr=m.x1557*m.x1557 - m.x4577*m.b3011 <= 0)
m.c4709 = Constraint(expr=m.x1558*m.x1558 - m.x4578*m.b3011 <= 0)
m.c4710 = Constraint(expr=m.x1559*m.x1559 - m.x4579*m.b3011 <= 0)
m.c4711 = Constraint(expr=m.x1560*m.x1560 - m.x4580*m.b3011 <= 0)
m.c4712 = Constraint(expr=m.x1561*m.x1561 - m.x4581*m.b3011 <= 0)
m.c4713 = Constraint(expr=m.x1562*m.x1562 - m.x4582*m.b3011 <= 0)
m.c4714 = Constraint(expr=m.x1563*m.x1563 - m.x4583*m.b3011 <= 0)
m.c4715 = Constraint(expr=m.x1564*m.x1564 - m.x4584*m.b3011 <= 0)
m.c4716 = Constraint(expr=m.x1565*m.x1565 - m.x4585*m.b3011 <= 0)
m.c4717 = Constraint(expr=m.x1566*m.x1566 - m.x4586*m.b3011 <= 0)
m.c4718 = Constraint(expr=m.x1567*m.x1567 - m.x4587*m.b3011 <= 0)
m.c4719 = Constraint(expr=m.x1568*m.x1568 - m.x4588*m.b3011 <= 0)
m.c4720 = Constraint(expr=m.x1569*m.x1569 - m.x4589*m.b3011 <= 0)
m.c4721 = Constraint(expr=m.x1570*m.x1570 - | |
# source: visnav/algo/orig/tools.py (gh_stars: 0)
import math
import time
import numpy as np
import numba as nb
import quaternion # adds to numpy # noqa # pylint: disable=unused-import
import sys
import scipy
from astropy.coordinates import SkyCoord
from scipy.interpolate import RectBivariateSpline
from scipy.interpolate import NearestNDInterpolator
# from scipy.spatial.ckdtree import cKDTree
from visnav.settings import *
class PositioningException(Exception):
    """Domain-specific error raised by the position-estimation algorithms in this module's callers."""
    pass
class Stopwatch:
    """A start/stop timer that accumulates elapsed time and works as a context manager.

    The clock source is injectable (defaults to time.perf_counter), which makes
    the class testable with a fake clock.
    From https://www.safaribooksonline.com/library/view/python-cookbook-3rd/9781449357337/ch13s13.html
    """

    def __init__(self, elapsed=0.0, func=time.perf_counter):
        self._elapsed = elapsed   # total accumulated by completed start/stop cycles
        self._func = func         # clock source
        self._start = None        # clock value at start(), None while stopped

    @property
    def elapsed(self):
        """Accumulated time, including the currently running interval (if any)."""
        running_part = (self._func() - self._start) if self.running else 0
        return self._elapsed + running_part

    def start(self):
        """Begin timing; raises RuntimeError if already running."""
        if self.running:
            raise RuntimeError('Already started')
        self._start = self._func()

    def stop(self):
        """Stop timing and fold the interval into the total; raises RuntimeError if not running."""
        if not self.running:
            raise RuntimeError('Not started')
        self._elapsed += self._func() - self._start
        self._start = None

    def reset(self):
        """Discard accumulated time (does not stop a running watch)."""
        self._elapsed = 0.0

    @property
    def running(self):
        return self._start is not None

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, *args):
        self.stop()
def sphere_angle_radius(loc, r):
    """Angular radius (rad) of a sphere of radius r as seen from each position in loc (n, 3)."""
    dist = np.linalg.norm(loc, axis=1)
    return np.arcsin(r / dist)
def dist_across_and_along_vect(A, b):
    """ A: array of vectors, b: axis vector """
    # rotate the frame so that b is mapped onto the x-axis, then read off components
    lat, lon, _ = cartesian2spherical(*b)
    rot_q = ypr_to_q(lat, lon, 0).conj()
    rot_mx = quaternion.as_rotation_matrix(rot_q)
    A_rot = rot_mx.dot(A.T).T
    d = A_rot[:, 0:1]                                           # distance along b
    r = np.linalg.norm(A_rot[:, 1:3], axis=1).reshape((-1, 1))  # distance across b
    return r, d
def point_vector_dist(A, B, dist_along_v=False):
    """ A: point, B: vector """
    b_len2 = (B ** 2).sum(-1).reshape((-1, 1))     # squared length of each b
    a_dot_b = (A * B).sum(-1).reshape((-1, 1))     # a·b (projection scaled by |b|)
    proj = (a_dot_b / b_len2) * B                  # A projected onto B
    perp = A - proj                                # perpendicular component
    dist = np.sqrt((perp ** 2).sum(-1)).reshape((-1, 1))
    if dist_along_v:
        return dist, a_dot_b / np.sqrt(b_len2)
    return dist
def sc_asteroid_max_shift_error(A, B):
    """
    Calculate max error between two set of vertices when projected to camera,
    A = estimated vertex positions
    B = true vertex positions
    Error is a vector perpendicular to B, i.e. A - A||
    """
    return np.max(point_vector_dist(A, B))
@nb.njit(nb.f8[:](nb.f8[:], nb.f8[:]))
def cross3d(left, right):
    """Cross product of two float64 3-vectors (numba-compiled)."""
    # for short vectors cross product is faster in pure python than with numpy.cross
    x = ((left[1] * right[2]) - (left[2] * right[1]))
    y = ((left[2] * right[0]) - (left[0] * right[2]))
    z = ((left[0] * right[1]) - (left[1] * right[0]))
    return np.array((x, y, z))
def normalize_v(v):
    """Return v scaled to unit length; the zero vector is returned unchanged."""
    length = np.linalg.norm(v)
    if length == 0:
        return v
    return v / length
@nb.njit(nb.types.f8[:](nb.types.f8[:]))
def normalize_v_f8(v):
    """Unit-length copy of a float64 vector (numba-compiled); zero vector returned as-is."""
    norm = np.linalg.norm(v)
    return v / norm if norm != 0 else v
def generate_field_fft(shape, sd=(0.33, 0.33, 0.34), len_sc=(0.5, 0.5 / 4, 0.5 / 16)):
    """Generate a random 2D field by filtering complex white noise in frequency space.

    A sum of Gaussian kernels (one per (sd, len_sc) pair) weights the noise
    spectrum; the real part of the inverse FFT is returned.

    :param shape: 2-tuple, output field dimensions
    :param sd: scalar or sequence of per-scale standard deviations
    :param len_sc: scalar or sequence of per-scale length scales (same length as sd)
    :return: 2D numpy array of the generated field
    """
    from visnav.algo.image import ImageProc
    sds = sd if getattr(sd, '__len__', False) else [sd]
    len_scs = len_sc if getattr(len_sc, '__len__', False) else [len_sc]
    assert len(shape) == 2, 'only 2d shapes are valid'
    assert len(sds) == len(len_scs), 'len(sd) differs from len(len_sc)'
    n = np.prod(shape)
    kernel = np.sum(
        np.stack([1 / len_sc * sd * n * ImageProc.gkern2d(shape, 1 / len_sc) for sd, len_sc in zip(sds, len_scs)],
                 axis=2), axis=2)
    # BUGFIX: np.complex was deprecated in NumPy 1.20 and removed in 1.24;
    # the plain complex literal 1j is equivalent
    f_img = np.random.normal(0, 1, shape) + 1j * np.random.normal(0, 1, shape)
    f_img = np.real(np.fft.ifft2(np.fft.fftshift(kernel * f_img)))
    return f_img
@nb.njit(nb.types.f8[:](nb.types.f8[:], nb.types.f8[:], nb.types.f8[:]))
def _surf_normal(x1, x2, x3):
    """Unit normal of the triangle (x1, x2, x3); numba-compiled core used by surf_normal."""
    # a, b, c = np.array(x1, dtype=np.float64), np.array(x2, dtype=np.float64), np.array(x3, dtype=np.float64)
    return normalize_v_f8(cross3d(x2-x1, x3-x1))
def surf_normal(x1, x2, x3):
    """Unit normal of the triangle given by three vertices (any array-likes)."""
    # coerce to float64 arrays so the numba-compiled core accepts them
    verts = [np.array(v, dtype=np.float64) for v in (x1, x2, x3)]
    return _surf_normal(*verts)
def vector_projection(a, b):
    """Component of vector a parallel to vector b."""
    scale = a.dot(b) / b.dot(b)
    return scale * b
def vector_rejection(a, b):
    """Component of vector a perpendicular to vector b."""
    return a - vector_projection(a, b)
def angle_between_v(v1, v2):
    """Return the angle in radians between vectors v1 and v2.

    Notice: only returns angles between 0 and 180 deg.
    Raises a generic Exception (chained from TypeError) on non-numeric input.
    """
    try:
        v1 = np.reshape(v1, (1, -1))
        v2 = np.reshape(v2, (-1, 1))
        n1 = v1 / np.linalg.norm(v1)
        n2 = v2 / np.linalg.norm(v2)
        # n1.dot(n2) is a 1x1 matrix; extract the scalar explicitly because
        # math.acos on an ndarray relies on deprecated implicit conversion
        # (DeprecationWarning since NumPy 1.25)
        cos_angle = float(n1.dot(n2)[0, 0])
    except TypeError as e:
        raise Exception('Bad vectors:\n\tv1: %s\n\tv2: %s' % (v1, v2)) from e
    return math.acos(np.clip(cos_angle, -1, 1))
def angle_between_v_mx(a, B, normalize=True):
    """Angles (rad) between vector a and each row of matrix B; returns an (n, 1) array."""
    if normalize:
        Bn = B / np.linalg.norm(B, axis=1).reshape((-1, 1))
        an = normalize_v(a).reshape((-1, 1))
    else:
        Bn, an = B, a
    return np.arccos(np.clip(Bn.dot(an), -1.0, 1.0))
def angle_between_mx(A, B):
    # alias for angle_between_rows: row-wise angles between matrices A and B
    return angle_between_rows(A, B)
def angle_between_rows(A, B, normalize=True):
    """Angles (rad) between rows of A (n, 3) and rows of B (m, 3).

    When n == m the rows are paired one-to-one (result shape (n,));
    otherwise all pairwise angles are computed (result shape (m, n)).
    """
    assert A.shape[1] == 3 and B.shape[1] == 3, 'matrices need to be of shape (n, 3) and (m, 3)'
    if A.shape[0] == B.shape[0]:
        # row-by-row dot products, from
        # https://stackoverflow.com/questions/50772176/calculate-the-angle-between-the-rows-of-two-matrices-in-numpy/50772253
        cos_angles = np.einsum('ij,ij->i', A, B)
        if normalize:
            norms2 = np.einsum('ij,ij->i', A, A) * np.einsum('ij,ij->i', B, B)
            cos_angles /= np.sqrt(norms2)
    else:
        if normalize:
            A = A / np.linalg.norm(A, axis=1).reshape((-1, 1))
            B = B / np.linalg.norm(B, axis=1).reshape((-1, 1))
        cos_angles = B.dot(A.T)
    return np.arccos(np.clip(cos_angles, -1.0, 1.0))
def rand_q(angle):
    """Quaternion rotating by `angle` radians about a random (gaussian-sampled) axis."""
    axis = normalize_v(np.random.normal(size=3))
    return angleaxis_to_q(np.hstack((angle, axis)))
def angle_between_q(q1, q2):
    """Magnitude (rad) of the rotation taking quaternion q1 to q2."""
    # from https://chrischoy.github.io/research/measuring-rotation/
    diff = q1.conj() * q2
    angle = 2 * math.acos(diff.normalized().w)
    return abs(wrap_rads(angle))
def angle_between_q_arr(q1, q2):
    """Element-wise rotation angles (rad) between two arrays of quaternions."""
    diff = quaternion.as_float_array(q1.conj() * q2)
    diff = diff / np.linalg.norm(diff, axis=1).reshape((-1, 1))
    # component 0 is the scalar part w of each difference quaternion
    return np.abs(wrap_rads(2 * np.arccos(diff[:, 0])))
def angle_between_ypr(ypr1, ypr2):
    """Rotation angle (rad) between two yaw-pitch-roll orientations."""
    return angle_between_q(ypr_to_q(*ypr1), ypr_to_q(*ypr2))
def distance_mx(A, B):
    """Pairwise euclidean distances: D[i, j] = ||A[i] - B[j]||."""
    assert A.shape[1] == B.shape[1], 'matrices must have same amount of columns'
    # broadcasting replaces the explicit np.repeat construction
    diffs = A[:, None, :] - B[None, :, :]
    return np.linalg.norm(diffs, axis=2)
def q_to_unitbase(q):
    """Rotate the standard basis by q; returns a (3, 3) array whose rows are the rotated i, j, k."""
    basis = quaternion.as_quat_array([[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1.]])
    rotated = q * basis * q.conj()
    return quaternion.as_float_array(rotated)[:, 1:]
def equatorial_to_ecliptic(ra, dec):
    """ translate from equatorial ra & dec to ecliptic ones """
    coord = SkyCoord(ra, dec, unit='deg', frame='icrs', obstime='J2000')
    ecliptic = coord.transform_to('barycentrictrueecliptic')
    return ecliptic.lat.value, ecliptic.lon.value
def q_to_angleaxis(q, compact=False):
    """Quaternion to angle-axis; compact form returns the unit axis scaled by the angle."""
    theta = 2.0 * math.acos(np.clip(q.w, -1, 1))
    axis = normalize_v(np.array([q.x, q.y, q.z]))
    if compact:
        return theta * axis
    return np.array((theta,) + tuple(axis))
def angleaxis_to_q(rv):
    """ first angle, then axis """
    if len(rv) == 4:
        # explicit (angle, x, y, z) form
        theta = rv[0]
        axis = normalize_v(np.array(rv[1:]))
    elif len(rv) == 3:
        # compact form: axis scaled by the angle
        theta = math.sqrt(sum(x ** 2 for x in rv))
        axis = np.array(rv) / (1 if theta == 0 else theta)
    else:
        raise Exception('Invalid angle-axis vector: %s' % (rv,))
    half = theta / 2
    return np.quaternion(math.cos(half), *(axis * math.sin(half))).normalized()
def ypr_to_q(lat, lon, roll):
    # Tait-Bryan angles, aka yaw-pitch-roll, nautical angles, cardan angles
    # intrinsic euler rotations z-y'-x'', pitch=-lat, yaw=lon
    q_yaw = np.quaternion(math.cos(lon / 2), 0, 0, math.sin(lon / 2))
    q_pitch = np.quaternion(math.cos(-lat / 2), 0, math.sin(-lat / 2), 0)
    q_roll = np.quaternion(math.cos(roll / 2), math.sin(roll / 2), 0, 0)
    return q_yaw * q_pitch * q_roll
def eul_to_q(angles, order='xyz', reverse=False):
    """Compose a quaternion from euler angles applied about the axes named in `order`."""
    assert len(angles) == len(order), 'len(angles) != len(order)'
    axis_idx = {'x': 0, 'y': 1, 'z': 2}
    q = quaternion.one
    for angle, axis in zip(angles, order):
        vec = [0, 0, 0]
        vec[axis_idx[axis]] = math.sin(angle / 2)
        dq = np.quaternion(math.cos(angle / 2), *vec)
        # reverse=True composes rotations in the opposite order
        q = dq * q if reverse else q * dq
    return q
def q_to_ypr(q):
    """Quaternion to (lat/pitch, lon/yaw, roll) Tait-Bryan angles."""
    # from https://math.stackexchange.com/questions/687964/getting-euler-tait-bryan-angles-from-quaternion-representation
    w, x, y, z = quaternion.as_float_array(q)
    roll = np.arctan2(y * z + w * x, .5 - x ** 2 - y ** 2)
    lat = -np.arcsin(np.clip(-2 * (x * z - w * y), -1, 1))
    lon = np.arctan2(x * y + w * z, .5 - y ** 2 - z ** 2)
    return lat, lon, roll
def mean_q(qs, ws=None):
    """
    returns a (weighted) mean of a set of quaternions
    idea is to rotate a bit in the direction of new quaternion from the sum of previous rotations
    NOTE: not tested properly, might not return same mean quaternion if order of input changed
    """
    wtot = 0
    qtot = quaternion.one
    for q, w in zip(qs, np.ones((len(qs),)) if ws is None else ws):
        # angle-axis of the rotation from the running mean to this sample
        ddaa = q_to_angleaxis(qtot.conj() * q)
        # scale the step by this sample's share of the weight accumulated so far
        ddaa[0] = wrap_rads(ddaa[0]) * w / (w + wtot)
        qtot = angleaxis_to_q(ddaa) * qtot
        wtot += w
    return qtot
def q_times_v(q, v):
    """Rotate 3-vector v by quaternion q."""
    rotated = q * np.quaternion(0, *v) * q.conj()
    return np.array([rotated.x, rotated.y, rotated.z])
def q_times_mx(q, mx):
    """Rotate each row of the (n, 3) matrix mx by quaternion q."""
    rotated = quaternion.as_float_array(q * mx2qmx(mx) * q.conj())
    # drop the scalar parts, keep the vector components
    return rotated[:, 1:]
def mx2qmx(mx):
    """Embed an (n, 3) matrix as an array of pure quaternions (zero scalar part)."""
    padded = np.zeros((mx.shape[0], 4))
    padded[:, 1:] = mx
    return quaternion.as_quat_array(padded)
def wrap_rads(a):
    """Wrap angle(s) in radians to the interval [-pi, pi)."""
    # math.tau == 2 * math.pi exactly
    return (a + math.pi) % math.tau - math.pi
def wrap_degs(a):
    """Wrap angle(s) in degrees to the interval [-180, 180)."""
    shifted = a + 180
    return shifted % 360 - 180
def eccentric_anomaly(eccentricity, mean_anomaly, tol=1e-6):
# from http://www.jgiesen.de/kepler/kepler.html
E = mean_anomaly if eccentricity < 0.8 else math.pi
F = E - eccentricity * math.sin(mean_anomaly) - mean_anomaly;
for i in range(30):
if abs(F) | |
["is_null"],
},
"url": {
"defaultLookup": "equals",
"defaultValue": "",
"lookups": {
"contains": {"prettyName": "contains", "type": "url"},
"ends_with": {"prettyName": "ends with", "type": "url"},
"equals": {"prettyName": "equals", "type": "url"},
"is_null": {"prettyName": "is null", "type": "isnull"},
"not_contains": {"prettyName": "not contains", "type": "url"},
"not_ends_with": {"prettyName": "not ends with", "type": "url"},
"not_equals": {"prettyName": "not equals", "type": "url"},
"not_regex": {"prettyName": "not regex", "type": "regex"},
"not_starts_with": {"prettyName": "not starts with", "type": "url"},
"regex": {"prettyName": "regex", "type": "regex"},
"starts_with": {"prettyName": "starts with", "type": "url"},
},
"sortedLookups": [
"equals",
"contains",
"starts_with",
"ends_with",
"regex",
"not_equals",
"not_contains",
"not_starts_with",
"not_ends_with",
"not_regex",
"is_null",
],
},
"uuid": {
"defaultLookup": "equals",
"defaultValue": None,
"lookups": {
"equals": {"prettyName": "equals", "type": "uuid"},
"is_null": {"prettyName": "is null", "type": "isnull"},
"not_equals": {"prettyName": "not equals", "type": "uuid"},
},
"sortedLookups": ["equals", "not_equals", "is_null"],
},
},
}
snapshots["test_query_html_no_perms config"] = {
"allModelFields": {
"boolean": {
"defaultFilters": [],
"fields": {
"average": {
"actions": [],
"canPivot": False,
"choices": [],
"concrete": True,
"defaultSort": None,
"model": None,
"prettyName": "average",
"toMany": False,
"type": "number",
},
"is_null": {
"actions": [],
"canPivot": True,
"choices": [],
"concrete": True,
"defaultSort": None,
"model": None,
"prettyName": "is null",
"toMany": False,
"type": "isnull",
},
"sum": {
"actions": [],
"canPivot": False,
"choices": [],
"concrete": True,
"defaultSort": None,
"model": None,
"prettyName": "sum",
"toMany": False,
"type": "number",
},
},
"sortedFields": ["average", "is_null", "sum"],
},
"date": {
"defaultFilters": [],
"fields": {
"count": {
"actions": [],
"canPivot": False,
"choices": [],
"concrete": True,
"defaultSort": None,
"model": None,
"prettyName": "count",
"toMany": False,
"type": "number",
},
"day": {
"actions": [],
"canPivot": True,
"choices": [],
"concrete": True,
"defaultSort": "asc",
"model": None,
"prettyName": "day",
"toMany": False,
"type": "number",
},
"is_null": {
"actions": [],
"canPivot": True,
"choices": [],
"concrete": True,
"defaultSort": None,
"model": None,
"prettyName": "is null",
"toMany": False,
"type": "isnull",
},
"iso_week": {
"actions": [],
"canPivot": True,
"choices": [],
"concrete": True,
"defaultSort": "asc",
"model": None,
"prettyName": "iso week",
"toMany": False,
"type": "number",
},
"iso_year": {
"actions": [],
"canPivot": True,
"choices": [],
"concrete": True,
"defaultSort": "asc",
"model": None,
"prettyName": "iso year",
"toMany": False,
"type": "number",
},
"max": {
"actions": [],
"canPivot": False,
"choices": [],
"concrete": True,
"defaultSort": None,
"model": None,
"prettyName": "max",
"toMany": False,
"type": "date",
},
"min": {
"actions": [],
"canPivot": False,
"choices": [],
"concrete": True,
"defaultSort": None,
"model": None,
"prettyName": "min",
"toMany": False,
"type": "date",
},
"month": {
"actions": [],
"canPivot": True,
"choices": [
"January",
"February",
"March",
"April",
"May",
"June",
"July",
"August",
"September",
"October",
"November",
"December",
],
"concrete": True,
"defaultSort": "asc",
"model": None,
"prettyName": "month",
"toMany": False,
"type": "numberchoice",
},
"month_start": {
"actions": [],
"canPivot": True,
"choices": [],
"concrete": True,
"defaultSort": "asc",
"model": None,
"prettyName": "month start",
"toMany": False,
"type": "date",
},
"quarter": {
"actions": [],
"canPivot": True,
"choices": [],
"concrete": True,
"defaultSort": "asc",
"model": None,
"prettyName": "quarter",
"toMany": False,
"type": "number",
},
"week_day": {
"actions": [],
"canPivot": True,
"choices": [
"Sunday",
"Monday",
"Tuesday",
"Wednesday",
"Thursday",
"Friday",
"Saturday",
],
"concrete": True,
"defaultSort": "asc",
"model": None,
"prettyName": "week day",
"toMany": False,
"type": "numberchoice",
},
"week_start": {
"actions": [],
"canPivot": True,
"choices": [],
"concrete": True,
"defaultSort": "asc",
"model": None,
"prettyName": "week start",
"toMany": False,
"type": "date",
},
"year": {
"actions": [],
"canPivot": True,
"choices": [],
"concrete": True,
"defaultSort": "asc",
"model": None,
"prettyName": "year",
"toMany": False,
"type": "number",
},
},
"sortedFields": [
"count",
"day",
"is_null",
"iso_week",
"iso_year",
"max",
"min",
"month",
"month_start",
"quarter",
"week_day",
"week_start",
"year",
],
},
"datetime": {
"defaultFilters": [],
"fields": {
"count": {
"actions": [],
"canPivot": False,
"choices": [],
"concrete": True,
"defaultSort": None,
"model": None,
"prettyName": "count",
"toMany": False,
"type": "number",
},
"date": {
"actions": [],
"canPivot": True,
"choices": [],
"concrete": True,
"defaultSort": "asc",
"model": None,
"prettyName": "date",
"toMany": False,
"type": "date",
},
"day": {
"actions": [],
"canPivot": True,
"choices": [],
"concrete": True,
"defaultSort": "asc",
"model": None,
"prettyName": "day",
"toMany": False,
"type": "number",
},
"hour": {
"actions": [],
"canPivot": True,
"choices": [],
"concrete": True,
"defaultSort": "asc",
"model": None,
"prettyName": "hour",
"toMany": False,
"type": "number",
},
"is_null": {
"actions": [],
"canPivot": True,
"choices": [],
"concrete": True,
"defaultSort": None,
"model": None,
"prettyName": "is null",
"toMany": False,
"type": "isnull",
},
"iso_week": {
"actions": [],
"canPivot": True,
"choices": [],
"concrete": True,
"defaultSort": "asc",
"model": None,
"prettyName": "iso week",
"toMany": False,
"type": "number",
},
"iso_year": {
"actions": [],
"canPivot": True,
"choices": [],
"concrete": True,
"defaultSort": "asc",
"model": None,
"prettyName": "iso year",
"toMany": False,
"type": "number",
},
"max": {
"actions": [],
"canPivot": False,
"choices": [],
"concrete": True,
"defaultSort": None,
"model": None,
"prettyName": "max",
"toMany": False,
"type": "datetime",
},
"min": {
"actions": [],
"canPivot": False,
"choices": [],
"concrete": True,
"defaultSort": None,
"model": None,
"prettyName": "min",
"toMany": False,
"type": "datetime",
},
"minute": {
"actions": [],
"canPivot": True,
"choices": [],
"concrete": True,
"defaultSort": "asc",
"model": None,
"prettyName": "minute",
"toMany": False,
"type": "number",
},
"month": {
"actions": [],
"canPivot": True,
"choices": [
"January",
"February",
"March",
"April",
"May",
"June",
"July",
"August",
"September",
"October",
"November",
"December",
],
"concrete": True,
"defaultSort": "asc",
"model": None,
"prettyName": "month",
"toMany": False,
"type": "numberchoice",
},
"month_start": {
"actions": [],
"canPivot": True,
"choices": [],
"concrete": True,
"defaultSort": "asc",
"model": None,
"prettyName": "month start",
"toMany": False,
"type": "date",
},
"quarter": {
"actions": [],
"canPivot": True,
"choices": [],
"concrete": True,
"defaultSort": "asc",
"model": None,
"prettyName": "quarter",
"toMany": False,
"type": "number",
},
"second": {
"actions": [],
"canPivot": True,
"choices": [],
"concrete": True,
"defaultSort": "asc",
"model": None,
"prettyName": "second",
"toMany": False,
"type": "number",
},
"week_day": {
"actions": [],
"canPivot": True,
"choices": [
"Sunday",
"Monday",
"Tuesday",
"Wednesday",
"Thursday",
"Friday",
"Saturday",
],
"concrete": True,
"defaultSort": "asc",
"model": None,
"prettyName": "week day",
"toMany": False,
"type": "numberchoice",
},
"week_start": {
"actions": [],
"canPivot": True,
"choices": [],
"concrete": True,
"defaultSort": "asc",
"model": None,
"prettyName": "week start",
"toMany": False,
"type": "date",
},
"year": {
"actions": [],
"canPivot": True,
"choices": [],
"concrete": True,
"defaultSort": "asc",
"model": None,
"prettyName": "year",
"toMany": False,
"type": "number",
},
},
"sortedFields": [
"count",
"date",
"day",
"hour",
"is_null",
"iso_week",
"iso_year",
"max",
"min",
"minute",
"month",
"month_start",
"quarter",
"second",
"week_day",
"week_start",
"year",
],
},
"duration": {
"defaultFilters": [],
"fields": {
"average": {
"actions": [],
"canPivot": False,
"choices": [],
"concrete": True,
"defaultSort": None,
"model": None,
"prettyName": "average",
"toMany": False,
"type": "duration",
},
"count": {
"actions": [],
"canPivot": False,
"choices": [],
"concrete": True,
"defaultSort": None,
"model": None,
"prettyName": "count",
"toMany": False,
"type": "number",
},
"is_null": {
"actions": [],
"canPivot": True,
"choices": [],
"concrete": True,
"defaultSort": None,
"model": None,
"prettyName": "is null",
"toMany": False,
"type": "isnull",
},
"max": {
"actions": [],
"canPivot": False,
"choices": [],
"concrete": True,
"defaultSort": None,
"model": None,
"prettyName": "max",
"toMany": False,
"type": "duration",
},
"min": {
"actions": [],
"canPivot": False,
"choices": [],
"concrete": True,
"defaultSort": None,
"model": None,
"prettyName": "min",
"toMany": False,
"type": "duration",
},
"sum": {
"actions": [],
"canPivot": False,
"choices": [],
"concrete": True,
"defaultSort": None,
"model": None,
"prettyName": "sum",
"toMany": False,
"type": "duration",
},
},
"sortedFields": ["average", "count", "is_null", "max", "min", "sum"],
},
"html": {
"defaultFilters": [],
"fields": {
"count": {
"actions": [],
"canPivot": False,
"choices": [],
"concrete": True,
"defaultSort": None,
"model": None,
"prettyName": "count",
"toMany": False,
"type": "number",
},
"is_null": {
"actions": [],
"canPivot": True,
"choices": [],
"concrete": True,
"defaultSort": None,
"model": None,
"prettyName": "is null",
"toMany": False,
"type": "isnull",
},
},
"sortedFields": ["count", "is_null"],
},
"isnull": {
"defaultFilters": [],
"fields": {
"count": {
"actions": [],
"canPivot": False,
"choices": [],
"concrete": True,
"defaultSort": None,
"model": None,
"prettyName": "count",
"toMany": False,
"type": "number",
},
"is_null": {
"actions": [],
"canPivot": True,
"choices": [],
"concrete": True,
"defaultSort": None,
"model": None,
"prettyName": "is null",
"toMany": False,
"type": "isnull",
},
},
"sortedFields": ["count", "is_null"],
},
"json": {
"defaultFilters": [],
"fields": {
"count": {
"actions": [],
"canPivot": False,
"choices": [],
"concrete": True,
"defaultSort": None,
"model": None,
"prettyName": "count",
"toMany": False,
"type": "number",
},
"is_null": {
"actions": [],
"canPivot": True,
"choices": [],
"concrete": True,
"defaultSort": None,
"model": None,
"prettyName": "is null",
"toMany": False,
"type": "isnull",
},
},
"sortedFields": ["count", "is_null"],
},
"jsonfield": {
"defaultFilters": [],
"fields": {
"count": {
"actions": [],
"canPivot": False,
"choices": [],
"concrete": True,
"defaultSort": None,
"model": None,
"prettyName": "count",
"toMany": False,
"type": "number",
},
"is_null": {
"actions": [],
"canPivot": True,
"choices": [],
"concrete": | |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 13 13:50:26 2016
@author: aholaj
"""
###### read ice
#################################
### imports and global values ###
#################################
import numpy as np
import sys
import os
import matplotlib.pyplot as plt
from itertools import cycle
import matplotlib.colors as colors
import seaborn as sns
#################################
### subroutines ###
#################################
#################################
### time steps to strings ###
#################################
def tstepH(time):
    """Convert a timestep index to hours, returned as a string.

    Assumes one timestep equals 360 seconds -- TODO confirm against model setup.
    """
    seconds = time * 360
    return str(seconds / 3600.)
#################################
### set values >= 10e-10 ###
#################################
def nollaa(data):
    """Clamp every element of *data* to be at least 10e-10, in place.

    Returns the same (mutated) sequence for convenience.
    """
    floor = 10e-10
    for index, value in enumerate(data):
        if value < floor:
            data[index] = floor
    return data
#################################
### read variable from data ###
#################################
def read_Data(filename, var):
    """Read the full contents of variable *var* from a netCDF file."""
    from netCDF4 import Dataset
    ncfile = Dataset(filename, mode='r')
    values = ncfile.variables[var][:]
    ncfile.close()
    return values
def read_NamelistValue( filename = 'NAMELIST', var = 'Tspinup' ):
    """
    Read a numeric value from a Fortran-style NAMELIST file.

    Each line is split on '='; tokens alternate name / value. All whitespace
    is stripped and anything after '!' in a value token is discarded as a
    comment. The search stops at the first line whose (last) name matches
    *var* and its value is returned as float.

    Raises TypeError (from float(None)) when *var* does not occur in the
    file -- this matches the historical behavior of the function.
    """
    import re
    value = None
    # 'with' guarantees the handle is closed even if parsing raises
    # (the old version leaked the handle on an exception).
    with open( filename ) as f:
        for line in f:
            parts = line.split('=')
            nimi = ''
            arvo = ''
            for k, chunk in enumerate(parts):
                stripped = re.sub(r"\s+", "", chunk, flags=re.UNICODE)
                if k % 2 == 0:
                    nimi = stripped
                else:
                    # drop a trailing '!' comment from the value token
                    arvo = stripped.split('!', 1)[0]
            if nimi == var:
                value = arvo
                break
    return float(value)
######################################################
### count min and max values of 5 dimensional data ###
### time x y z bin ###
######################################################
def laske_minimi_maksimi_bini(nimi,data):
    """
    Print the minimum and maximum of 5-D data (time, bin, x, y, z) together
    with the indices where they occur.

    Vectorized with numpy argmin/argmax instead of five nested Python loops
    (the old implementation was O(n^5) in interpreted code).
    NOTE: when the extreme value occurs more than once, the reported indices
    are the first occurrence in C (row-major) order, which may differ from
    the scan order of the old nested loops; the extreme values themselves
    are identical.
    """
    print(' ')
    print(nimi)
    data = np.asarray(data)
    # first occurrence of max/min in flat C order, mapped back to 5-D indices
    maxikoord = [int(c) for c in np.unravel_index(np.argmax(data), data.shape)]
    minikoord = [int(c) for c in np.unravel_index(np.argmin(data), data.shape)]
    maxi = data[tuple(maxikoord)]
    mini = data[tuple(minikoord)]
    print('maxi ' + str(maxi))
    print('mini ' + str(mini))
    print('maxikoord' + str(maxikoord))
    print('minikoord' + str(minikoord))
    print(' ')
######################################################
### count min and max values of 4 dimensional data ###
### time x y z ###
######################################################
def laske_minimi_maksimi(nimi,data):
    """
    Print the minimum and maximum of 4-D data (time, x, y, z) together with
    the indices where they occur.

    Vectorized with numpy argmin/argmax instead of four nested Python loops.
    NOTE: on ties the reported indices are the first occurrence in C
    (row-major) order, which may differ from the old nested-loop scan order;
    the extreme values themselves are identical.
    """
    print(' ')
    print(nimi)
    data = np.asarray(data)
    # first occurrence of max/min in flat C order, mapped back to 4-D indices
    maxikoord = [int(c) for c in np.unravel_index(np.argmax(data), data.shape)]
    minikoord = [int(c) for c in np.unravel_index(np.argmin(data), data.shape)]
    maxi = data[tuple(maxikoord)]
    mini = data[tuple(minikoord)]
    print('maxi ' + str(maxi))
    print('mini ' + str(mini))
    print('maxikoord' + str(maxikoord))
    print('minikoord' + str(minikoord))
    print(' ')
##########################################################
### print filename of data file ###
### ###
##########################################################
def print_filename(fname):
    """Print the basename of the given data-file path."""
    tail = os.path.split(fname)[1]
    print(' ')
    print('file: ' + tail)
##########################################################
### print shape of data file ###
### ###
##########################################################
def print_shape( var, data ):
    """Print the variable name and the shape of its data array."""
    dims = np.asarray(np.shape(data))
    print(' ')
    print(f'variable: {var}')
    print(f'shape var1: {dims}')
##########################################################
### return area of the domain ###
### ###
##########################################################
def area( xm_data, ym_data ):
    """
    Return the horizontal area of the domain.

    The extent along each axis is twice the last coordinate value; this
    covers the single-element case too (its only element is the last one),
    so the old dim==1 special case was redundant. The old
    int(np.asarray(np.shape(...))) conversion (fragile, deprecated for
    arrays) is gone entirely.

    Assumes xm_data / ym_data are 1-D monotonic coordinate arrays whose
    last entry is half the domain extent -- TODO confirm with callers.
    """
    x_size = xm_data[-1]*2
    y_size = ym_data[-1]*2
    return x_size*y_size
##########################################################
### calculate vertical path of a variable according ###
### to the air density (g/m^2) ###
### ###
### input variable: mix ratio ( kg / kg ) ###
### 4 dimensions: time, x, y z ###
##########################################################
def laske_path_aikasarjaXYZ( mixRatData, dn0_data, korkeus, aika = None, muunnosKerroin = 1000., onlyCloudy = False, tulostus = False, piirra = False, uusikuva = True, nimi = 'path aikasarja', xlabel = 'aika [s]', tightXAxis=False, label = None, LEGEND = True):
    """
    Compute the time series of the density-weighted vertical path [g/m^2]
    of a mixing-ratio variable, averaged horizontally over the domain.

    mixRatData     : 4-D array (time, x, y, z) of mixing ratios [kg/kg]
    dn0_data       : basic-state air-density profile -- units assumed kg/m^3, TODO confirm
    korkeus        : layer-height weights matching dn0_data
    aika           : 1-D array of times, used for printing and plotting
    muunnosKerroin : unit conversion factor (default 1000.: kg/kg -> g/kg)
    onlyCloudy     : if True, average only over columns with positive path
    tulostus / piirra / uusikuva : print / plot / open-new-figure switches

    Returns (fig, ax, aika, timeSeries).
    NOTE(review): fig/ax are stashed as attributes on the function object
    itself; if the function has never been called with both piirra and
    uusikuva True, the final return raises AttributeError -- confirm that
    callers always draw at least once before relying on the return value.
    """
    #fig.laske_path_aikasarjaXYZ = None
    #ax.laske_path_aikasarjaXYZ = None
    mixRatData = mixRatData*muunnosKerroin # kg/kg -> g/kg
    timeDim = np.shape( mixRatData )[0]
    xDim = np.shape( mixRatData )[1]
    yDim = np.shape( mixRatData )[2]
    # density-weighted layer heights: dot product with a column gives its path
    dn0Kork = dn0_data * korkeus
    onlyCloudyTXY = np.zeros( ( timeDim, xDim, yDim ) ) #
    onesTXY = np.ones( ( timeDim, xDim, yDim ) )
    timeSeriesTXY = np.zeros( ( timeDim, xDim, yDim ) )
    timeSeries = np.zeros( timeDim )
    #[a[i] < b[i] for i in range(5)]
    for t in range(timeDim):
        for i in range(xDim):
            for j in range(yDim):
                # vertical integral of column (t, i, j)
                timeSeriesTXY[t, i, j] = np.dot( mixRatData[ t, i, j, : ], dn0Kork )
                if ( onlyCloudy and timeSeriesTXY[t, i, j] > 0.0 ):
                    onlyCloudyTXY[t, i, j] = 1.0
    # horizontal sums over x and y
    timeSeries = np.sum( np.sum( timeSeriesTXY, axis = 1), axis = 1 )
    onlyCloudyT = np.sum( np.sum(onlyCloudyTXY , axis = 1), axis = 1 )
    if onlyCloudy:
        # average over cloudy columns only; 0.0 where no column qualifies
        timeSeries = np.where( onlyCloudyT > 0.0, timeSeries / onlyCloudyT , 0.0 )
    else:
        # average over all columns
        timeSeries = timeSeries / np.sum( np.sum( onesTXY, axis = 1) , axis = 1 )
    if tulostus:
        print('dimensiot aikasarjaXYZ'+ str(np.shape(aika))+ ' timeseries '+ str(np.shape(timeSeries)))
        print(' ')
        print(nimi)
        for t in range(timeDim):
            print('ajanhetki: ' + str(aika[t]) + ' ' + ' arvo : ' + str(timeSeries[t]))
        print(' ')
    ## drawing ##
    uusikuva = ( piirra and uusikuva )
    if uusikuva:
        laske_path_aikasarjaXYZ.fig, laske_path_aikasarjaXYZ.ax = plot_alustus()
    plottaa( aika, timeSeries, nimi, xlabel , 'path [g/m^2]', tightXAxis = tightXAxis, label = label, LEGEND = LEGEND ) if piirra else False
    # if uusikuva:
    return laske_path_aikasarjaXYZ.fig, laske_path_aikasarjaXYZ.ax, aika, timeSeries
    #else:
    #return None, None
##########################################################
### calculate vertical path of a variable according ###
### to the air density (g/m^2) ###
### ###
### input variable: mix ratio ( kg / kg ) ###
### 2 dimensions: time, z ###
##########################################################
def laske_path_aikasarjaZ( mixRatData, dn0_data, korkeus, aika, tulostus = False, piirra = False, uusikuva = True, nimi = 'path aikasarja', label = None ):
    """
    Compute the time series of the density-weighted vertical path [g/m^2]
    of a horizontally averaged (time, z) mixing-ratio variable.

    mixRatData : 2-D array (time, z) of mixing ratios [kg/kg]
    dn0_data   : basic-state air-density profile -- units assumed kg/m^3, TODO confirm
    korkeus    : layer-height weights matching dn0_data
    aika       : 1-D array of times for printing/plotting

    When aika and the computed series differ in length by exactly one, a
    leading zero is inserted into the shorter one; a larger mismatch aborts
    the program via sys.exit. Returns None (prints/plots only).
    """
    print(' ')
    mixRatData = mixRatData * 1000.0 # kg/kg -> g/kg # ( timeDim, zdim )
    timeDim = np.shape( mixRatData )[0]
    # density-weighted layer heights: dot product gives the path per timestep
    dn0Kork = dn0_data * korkeus
    timeSeries = np.zeros( timeDim )
    timeSeries = np.dot(mixRatData, dn0Kork)
    print('timeDim '+ str(timeDim))
    # reconcile off-by-one length differences between aika and timeSeries
    if ( np.shape( aika )[0] - np.shape( timeSeries )[0] == 1 ):
        timeSeries = np.insert( timeSeries, 0, 0 )
    elif ( np.shape( timeSeries )[0] - np.shape( aika )[0] == 1):
        aika = np.insert( aika, 0, 0 )
    elif ( np.abs( np.shape( timeSeries )[0] - np.shape( aika )[0] ) > 1):
        sys.exit( "something went really wrong with dimensions in laske_path_aikasarjaZ()" )
    if tulostus:
        print(' ')
        print(nimi)
        for t in range(timeDim):
            print('ajanhetki: ' + str(aika[t]) + ' ' + ' arvo : ' + str(timeSeries[t]))
        print(' ')
        print('dimensiot aikasarjaZ'+ str(np.shape(aika))+ ' timeseries '+ str(np.shape(timeSeries)))
    ## drawing ##
    uusikuva = ( piirra and uusikuva )
    plot_alustus() if uusikuva else False
    plottaa( aika, timeSeries, nimi, 'aika [s]', 'path [g/m^2]', label = label) if piirra else False
#######################################################################
### calculate mean diameter in a bin ###
### according to the reference data ###
### ###
### input variable: mix ratio ( kg / kg ) ###
### 5 dimensions: time, x, y z, bin ###
#######################################################################
def laske_MeanDiameterInBin( RadiusBinData, bini, refNdata, aika, tulostus = False, piirra =False, uusikuva = True, nimi = 'Mean diameter in bin ', label = None ):
    """
    Compute the domain-mean diameter [um] time series for one size bin.

    RadiusBinData : 5-D array (time, bin, x, y, z) of radii -- units assumed
                    meters (converted with *1e6), TODO confirm
    bini          : 0-based bin index (printed 1-based)
    refNdata      : reference number concentration, same (time, x, y, z)
                    shape as one bin slice; cells with refNdata > 1e-10
                    count as "cloudy"
    aika          : 1-D array of times for printing/plotting

    NOTE(review): the numerator sums diameters over ALL grid cells while
    the denominator counts only cloudy cells -- if non-cloudy cells can
    carry a nonzero radius this skews the mean; confirm intent.
    Returns None (prints/plots only).
    """
    biniNimi = str(bini+1)
    nimi = nimi + biniNimi
    timeDim = np.shape( RadiusBinData )[0]
    binDim = np.shape( RadiusBinData )[1]
    xDim = np.shape( RadiusBinData )[2]
    yDim = np.shape( RadiusBinData )[3]
    zDim = np.shape( RadiusBinData )[4]
    # binDim and nCol are currently unused
    nCol = xDim * yDim
    onlyCloudyTXYZ = np.zeros( ( timeDim, xDim, yDim, zDim ) ) #
    timeSeriesTXYZ = np.zeros( ( timeDim, xDim, yDim, zDim ) )
    timeSeries = np.zeros( timeDim )
    timeSeriesTXYZ = 2.0 * RadiusBinData[ :, bini, :, : , : ]*1e6 # select only one bin and change to diameter in um
    # 1.0 marks cells considered cloudy according to the reference data
    onlyCloudyTXYZ = np.where( refNdata > 1e-10, 1.0, 0.0)
    timeSeries = np.sum( np.sum( np.sum( timeSeriesTXYZ, axis = 1), axis = 1), axis = 1 )
    onlyCloudyT = np.sum( np.sum( np.sum( onlyCloudyTXYZ, axis = 1), axis = 1), axis = 1 )
    # mean over cloudy cells; 0.0 for timesteps with no cloudy cell
    timeSeries = np.where( onlyCloudyT > 0.0, timeSeries / onlyCloudyT , 0.0 )
    if tulostus:
        print(' ')
        print('bini: ' + biniNimi)
        print(nimi)
        for t in range(timeDim):
            print('ajanhetki: ' + str(aika[t]) + ' ' + ' arvo : ' + str(timeSeries[t]))
        print(' ')
    ## drawing ##
    uusikuva = ( piirra and uusikuva )
    plot_alustus() if uusikuva else False
    plottaa( aika, timeSeries, nimi, 'aika [s]', 'diameter [um]', label = label ) if piirra else False
#######################################################################
### calculate column mean PSD divided into bins ###
### at | |
from fill import *
from ontology import *
from unitor import *
from shuffle import *
from syllepsis import *
from tricathom import *
# Generating data: a functor H and primitive 0-cells a..e used throughout.
_H = Functor("H")
_a = ConstPrim0("a")
_b = ConstPrim0("b")
_c = ConstPrim0("c")
_d = ConstPrim0("d")
_e = ConstPrim0("e")
# H applied to each generating 0-cell.
_Ha = app(_H, _a)
_Hb = app(_H, _b)
_Hc = app(_H, _c)
_Hd = app(_H, _d)
_He = app(_H, _e)
# TODO: Choose to use _H1x instead of _Hx whenever possible, since it's easier to reduce than expand?
# Identity-1-cell forms of the same expressions.
_H1a = app(_H, ensureEqMol1(_a))
_H1b = app(_H, ensureEqMol1(_b))
_H1c = app(_H, ensureEqMol1(_c))
_H1d = app(_H, ensureEqMol1(_d))
_H1e = app(_H, ensureEqMol1(_e))
# chi1: 1-cell family H(a) @ H(b) -> H(a @ b) (per the source/target ASTs below).
chi1Source = minimalASTFromMol0(ensureMol0(
    comp0(app(_H,_a), app(_H, _b))), [_a, _b], [_H])
chi1Target = minimalASTFromMol0(ensureMol0(
    app(_H, comp0(_a, _b))), [_a, _b], [_H])
chi1 = PrimitiveFamily("chi1", 1, 1, [0, 0], chi1Source, chi1Target)
# Derived higher-dimensional structure attached to chi1.
chi1Dim2 = tritrans2CellPrimFamily(chi1)
chi1Dim3 = tritrans3CellPrimFamily(chi1, chi1Dim2)
chi1Pi = tritransPiPrimFamily(chi1, chi1Dim2)
chi1M = tritransMPrimFamily(chi1, chi1Dim2)
# We use the same prim for the unit in both categories; this shouldn't cause any problems, since these we can tell which is meant by which category it should fall in.
# iota1: unit comparison cell, unit -> H(unit).
iota1Source = ConstNode(unit)
iota1Target = FunctorNode(0, ConstNode(unit))
iota1 = PrimitiveFamily("iota1", 1, 1, [], iota1Source, iota1Target)
# omega2: associativity-style 2-cell relating the two ways of composing chi1
# across three 0-cells (right-grouping to left-grouping).
omega2Source = minimalASTFromEqMol1(ensureEqMol1(
    comp1(
        comp0(_Ha, chi1.fprimf([_H], _b, _c)),
        chi1.fprimf([_H], _a, comp0(_b, _c)))), [_a, _b, _c], [_H])
omega2Target = minimalASTFromEqMol1(ensureEqMol1(
    comp1(
        comp0(chi1.fprimf([_H], _a, _b), _Hc),
        chi1.fprimf([_H], comp0(_a, _b), _c))), [_a, _b, _c], [_H])
omega2 = PrimitiveFamily("omega2", 2, 1, [0, 0, 0], omega2Source, omega2Target)
omega2Dim3 = trimod3CellPrimFamily(omega2, {chi1: chi1Dim2})
# gamma2: left-unit 2-cell built from iota1, chi1 and the left unitor.
gamma2Source = minimalASTFromEqMol1(ensureEqMol1(
    comp1s(
        comp0(iota1.fprimf([_H]), app(_H, _a)),
        chi1.fprimf([_H], unit, _a),
        app(_H, unitor10.fprim(_a))
    )), [_H], [_a])
gamma2Target = minimalASTFromEqMol1(ensureEqMol1(
    unitor10.fprim(app(_H, _a))), [_a], [_H])
gamma2 = PrimitiveFamily("gamma2", 2, 1, [0], gamma2Source, gamma2Target)
# delta2: right-unit 2-cell, the mirror of gamma2 using the adjoint unitor.
delta2Source = minimalASTFromEqMol1(ensureEqMol1(
    comp1s(
        unitor11.adj.fprim(app(_H, _a)),
        comp0(app(_H, _a), iota1.fprimf([_H])),
        chi1.fprimf([_H], _a, unit)
    )), [_a], [_H])
delta2Target = minimalASTFromEqMol1(ensureEqMol1(
    app(_H, unitor11.adj.fprim(_a))), [_a], [_H])
delta2 = PrimitiveFamily("delta2", 2, 1, [0], delta2Source, delta2Target)
# We switch a and b form other notes.
# We also switch source and target, since it's u2.adj that seems to come up in diagrams otherwise.
# u2: 2-cell relating chi1 with the shuffle (braiding-like) 1-cell.
u2Source = minimalASTFromEqMol1(ensureEqMol1(
    comp1(
        shuffle1.fprim(app(_H, _a), app(_H, _b)),
        chi1.fprimf([_H], _b, _a))), [_a, _b], [_H])
u2Target = minimalASTFromEqMol1(ensureEqMol1(
    comp1(
        chi1.fprimf([_H], _a, _b),
        app(_H, shuffle1.fprim(_a, _b)))), [_a, _b], [_H])
u2 = PrimitiveFamily("u2", 2, 1, [0, 0], u2Source, u2Target)
u2Dim3 = trimod3CellPrimFamily(u2, {chi1: chi1Dim2, shuffle1: shuffle1Dim2})
# omega3: 3-cell filling the pentagon of chi1/omega2 cells over four 0-cells.
# The commented-out search code below was used to discover the 1- and 2-paths.
#omega3Source0 = ensureMol0(comp0s(app(_H, _a), app(_H, _b), app(_H, _c), app(_H, _d)))
#omega3Target0 = ensureMol0(app(_H, comp0s(_a, _b, _c, _d)))
#omega3Paths1 = findPaths1(omega3Source0, omega3Target0, [chi1])
#searchForPathPairs2(omega3Paths1, [omega2])
#[((H(a) @ H(b)) @ chi1{0}(c, d)) . (H(a) @ chi1{0}(b, (c @ d))) . (chi1{0}(a, (b @ c @ d)))]
# --
#[(chi1{H}(a, b) @ (H(c) @ H(d))) . (chi1{H}((a @ b), c) @ H(d)) . (chi1{0}((a @ b @ c), d))]
# 1-dimensional source: fully right-grouped application of chi1.
omega3Source1 = ensureEqMol1(
    comp1s(
        comp0s(_H1a, _H1b, chi1.fprimf([_H], _c, _d)),
        comp0(_H1a, chi1.fprimf([_H], _b, comp0(_c, _d))),
        chi1.fprimf([_H], _a, comp0s(_b, _c, _d))))
# 1-dimensional target: fully left-grouped application of chi1.
omega3Target1 = ensureEqMol1(
    comp1s(
        comp0s(chi1.fprimf([_H], _a, _b), _H1c, _H1d),
        comp0(chi1.fprimf([_H], comp0(_a, _b), _c), _H1d),
        chi1.fprimf([_H], comp0s(_a, _b, _c), _d)))
#omega3Paths2 = findPaths2(omega3Source1, omega3Target1, [omega2])
# NOTE(review): leftover debug print; runs (and searches) at import time.
print(len(cellsAway2(omega3Source1, [omega2])))
# [[((H(a) @ H(b)) @ chi1{H}(c, d)) . omega2{0}(a, b, (c @ d))] &
# [(chi1{H}(a, b) @ (H(c) @ H(d))) . omega2{0}((a @ b), c, d)]]
# --
# [[(H(a) @ omega2{0}(b, c, d)) . (chi1{H}(a, (b @ c @ d)))] &
# [(H(a) @ chi1{H}(b, c) @ H(d)) . omega2{0}(a, (b @ c), d)] &
# [(omega2{0}(a, b, c) @ H(d)) . (chi1{H}((a @ b @ c), d))]]
# 2-dimensional source: the two-step omega2 path found by the search above.
omega3SourceAST = minimalASTFromEqAEMol2(ensureEqAEMol2(
    comp2(
        comp1(
            comp0s(app(_H, _a), app(_H, _b), chi1.fprimf([_H], _c, _d)),
            omega2.fprimf([_H], _a, _b, comp0(_c, _d))),
        comp1(
            comp0s(chi1.fprimf([_H], _a, _b), app(_H, _c), app(_H, _d)),
            omega2.fprimf([_H], comp0(_a, _b), _c, _d))
    )
), [_a, _b, _c, _d], [_H])
# 2-dimensional target: the three-step omega2 path on the other side.
omega3TargetAST = minimalASTFromEqAEMol2(ensureEqAEMol2(
    comp2s(
        comp1(
            comp0(app(_H, _a), omega2.fprimf([_H], _b, _c, _d)),
            chi1.fprimf([_H], _a, comp0s(_b, _c, _d))),
        comp1(
            comp0s(app(_H, _a), chi1.fprimf([_H], _b, _c), app(_H, _d)),
            omega2.fprimf([_H], _a, comp0(_b, _c), _d)),
        comp1(
            comp0(omega2.fprimf([_H], _a, _b, _c), app(_H, _d)),
            chi1.fprimf([_H], comp0s(_a, _b, _c), _d))
    )
), [_a, _b, _c, _d], [_H])
omega3 = PrimitiveFamily("omega3", 3, 1, [0, 0, 0, 0], omega3SourceAST, omega3TargetAST)
# omega4: the five-variable (permutahedron-level) coherence datum built on
# top of chi1/omega2/omega3; the commented search calls record how each
# dimension's paths were found.
omega4Source0 = ensureMol0(comp0s(app(_H, _a), app(_H, _b), app(_H, _c), app(_H, _d), app(_H, _e)))
omega4Target0 = ensureMol0(app(_H, comp0s(_a, _b, _c, _d, _e)))
#omega4Paths1 = findPaths1(omega4Source0, omega4Target0, [chi1])
#searchForPathPairs2(omega4Paths1, [omega2])
# We pick the choice that lines up with the pattern one dimension down; grouping left versus grouping right.
# This is also the choice with the most 2-dimensional paths; in general, the ability to find properly oriented 3-paths depends on choice of both 1st and 2nd dimension?
# TODO: Do we need to revisit 1-paths for permutahedron?
# [((H(a) @ H(b) @ H(c)) @ chi1{0}(d, e)) . ((H(a) @ H(b)) @ chi1{0}(c, (d @ e))) . (H(a) @ chi1{0}(b, (c @ d @ e))) . (chi1{0}(a, (b @ c @ d @ e)))]
# --
#[(chi1{H}(a, b) @ (H(c) @ H(d) @ H(e))) . (chi1{H}((a @ b), c) @ (H(d) @ H(e))) . (chi1{H}((a @ b @ c), d) @ H(e)) . (chi1{0}((a @ b @ c @ d), e))]
# 1-dimensional source: fully right-grouped chi1 chain over five 0-cells.
omega4Source1 = ensureEqMol1(
    comp1s(
        comp0s(app(_H, _a), app(_H, _b), app(_H, _c), chi1.fprimf([_H], _d, _e)),
        comp0s(app(_H, _a), app(_H, _b), chi1.fprimf([_H], _c, comp0(_d, _e))),
        comp0s(app(_H, _a), chi1.fprimf([_H], _b, comp0s(_c, _d, _e))),
        chi1.fprimf([_H], _a, comp0s(_b, _c, _d, _e))
    )
)
# 1-dimensional target: fully left-grouped chi1 chain.
omega4Target1 = ensureEqMol1(
    comp1s(
        comp0s(chi1.fprimf([_H], _a, _b), app(_H, _c), app(_H, _d), app(_H, _e)),
        comp0s(chi1.fprimf([_H], comp0(_a, _b), _c), app(_H, _d), app(_H, _e)),
        comp0s(chi1.fprimf([_H], comp0s(_a, _b, _c), _d), app(_H, _e)),
        chi1.fprimf([_H], comp0s(_a, _b, _c, _d), _e)
    )
)
#omega4Paths2 = findPaths2(omega4Source1, omega4Target1, [omega2])
#searchForPathPairs3(omega4Paths2, omega4Paths2, [omega3])
# SUCCESS!!!!!!
# [[((H(a) @ H(b) @ H(c)) @ chi1{H}(d, e)) . ((H(a) @ H(b)) @ chi1{H}(c, (d @ e))) . omega2{0}(a, b, (c @ d @ e))] &
# [((H(a) @ H(b) @ H(c)) @ chi1{H}(d, e)) . (chi1{H}(a, b) @ (H(c) @ H((d @ e)))) . omega2{0}((a @ b), c, (d @ e))] &
# [(chi1{H}(a, b) @ (H(c) @ H(d) @ H(e))) . (chi1{H}((a @ b), c) @ (H(d) @ H(e))) . omega2{0}((a @ b @ c), d, e)]]
# 2-dimensional source: the three-step omega2 path transcribed from the log above.
omega4Source2 = ensureEqAEMol2(
    comp2s(
        comp1s(
            comp0s(app(_H, _a), app(_H, _b), app(_H, _c), chi1.fprimf([_H], _d, _e)),
            comp0s(app(_H, _a), app(_H, _b), chi1.fprimf([_H], _c, comp0(_d, _e))),
            omega2.fprimf([_H], _a, _b, comp0s(_c, _d, _e))),
        comp1s(
            comp0s(app(_H, _a), app(_H, _b), app(_H, _c), chi1.fprimf([_H], _d, _e)),
            comp0s(chi1.fprimf([_H], _a, _b), app(_H, _c), app(_H, comp0(_d, _e))),
            omega2.fprimf([_H], comp0(_a, _b), _c, comp0(_d, _e))),
        comp1s(
            comp0s(chi1.fprimf([_H], _a, _b), app(_H, _c), app(_H, _d), app(_H, _e)),
            comp0s(chi1.fprimf([_H], comp0(_a, _b), _c), app(_H, _d), app(_H, _e)),
            omega2.fprimf([_H], comp0s(_a, _b, _c), _d, _e))
    )
)
# --
# [[((H(a) @ H(b)) @ omega2{0}(c, d, e)) . (H(a) @ chi1{H}(b, (c @ d @ e))) . (chi1{H}(a, (b @ c @ d @ e)))] &
# [((H(a) @ H(b)) @ chi1{H}(c, d) @ H(e)) . (H(a) @ omega2{0}(b, (c @ d), e)) . (chi1{H}(a, (b @ c @ d @ e)))] &
# [(H(a) @ omega2{0}(b, c, d) @ H(e)) . (H(a) @ chi1{0}((b @ c @ d), e)) . (chi1{H}(a, (b @ c @ d @ e)))] &
# [(H(a) @ chi1{H}(b, c) @ (H(d) @ H(e))) . (H(a) @ chi1{0}((b @ c), d) @ H(e)) . omega2{0}(a, (b @ c @ d), e)] &
# [(H(a) @ chi1{H}(b, c) @ (H(d) @ H(e))) . (omega2{0}(a, (b @ c), d) @ H(e)) . (chi1{H}((a @ b @ c @ d), e))] &
# @ [(omega2{0}(a, b, c) @ (H(d) @ H(e))) . (chi1{H}((a @ b @ c), d) @ H(e)) . (chi1{H}((a @ b @ c @ d), e))]]
# 2-dimensional target: the six-step omega2 path on the other side.
omega4Target2 = ensureEqAEMol2(
    comp2s(
        comp1s(
            comp0s(_Ha, _Hb, omega2.fprimf([_H], _c, _d, _e)),
            comp0(_Ha, chi1.fprimf([_H], _b, comp0s(_c, _d, _e))),
            chi1.fprimf([_H], _a, comp0s(_b, _c, _d, _e))),
        comp1s(
            comp0s(_Ha, _Hb, chi1.fprimf([_H], _c, _d), _He),
            comp0(_Ha, omega2.fprimf([_H], _b, comp0(_c, _d), _e)),
            chi1.fprimf([_H], _a, comp0s(_b, _c, _d, _e))),
        comp1s(
            comp0s(_Ha, omega2.fprimf([_H], _b, _c, _d), _He),
            comp0(_Ha, chi1.fprimf([_H], comp0s(_b, _c, _d), _e)),
            chi1.fprimf([_H], _a, comp0s(_b, _c, _d, _e))),
        comp1s(
            comp0s(_Ha, chi1.fprimf([_H], _b, _c), _Hd, _He),
            comp0s(_Ha, chi1.fprimf([_H], comp0(_b, _c), _d), _He),
            omega2.fprimf([_H], _a, comp0s(_b, _c, _d), _e)),
        comp1s(
            comp0s(_Ha, chi1.fprimf([_H], _b, _c), _Hd, _He),
            comp0(omega2.fprimf([_H], _a, comp0(_b, _c), _d), _He),
            chi1.fprimf([_H], comp0s(_a, _b, _c, _d), _e)),
        comp1s(
            comp0s(omega2.fprimf([_H], _a, _b, _c), _Hd, _He),
            comp0(chi1.fprimf([_H], comp0s(_a, _b, _c), _d), _He),
            chi1.fprimf([_H], comp0s(_a, _b, _c, _d), _e))
    )
)
# NOTE: This has two components, giving an axiom.
# NOTE(review): this search also runs at import time; may be slow.
omega4Paths3 = findPaths3(omega4Source2, omega4Target2, [omega3])
# tensorShuf30: boundary 0-cells for a cyclic-permutation (a,b,c) -> (b,c,a)
# path search combining shuffle1 and chi1.
tensorShuf30Source0 = ensureMol0(comp0s(_Ha, _Hb, _Hc))
tensorShuf30Target0 = ensureMol0(app(_H, comp0s(_b, _c, _a)))
def _explorePaths1():
    # Interactive/exploratory search; kept out of import-time execution
    # (the call below is commented out) because of the path explosion.
    tensorShuf30Paths1 = findPaths1(tensorShuf30Source0, tensorShuf30Target0, [shuffle1, chi1])
    exploreFromToAdjCells2(tensorShuf30Paths1, tensorShuf30Paths1, [], [omega2, u2, shuffle20, shuffle1Dim2], [omega2, u2])
#_explorePaths1()
# Too many paths; we try limiting number of shuffle1s to <= 2. Also limit to performing all chi1s, then all
[ALERT] url {url}\n\nUrl {url} registered an alarming change of {round(change,4)} at {datestr}"
# set up email server config
port = 465
context = ssl.create_default_context()
try:
with smtplib.SMTP_SSL("smtp.gmail.com", port, context=context) as server:
server.login(sender, psswd)
server.sendmail(sender, reciever, message)
except:
if logfile:
print(f"[ERROR] {datestr} Could not send email to {permaConfig.mail_to_notify} from {permaConfig.sender_mail} of anomaly with change {round(change,4)} \
with url {url}", file=logfile)
def help(self):
    """
    Print the top-level usage message listing every available action.
    """
    print("Usage:\n AOCW.py ACTION [ACTION OPTIONS]")
    actions = AOCW.ACTIONS
    print(f" Where ACTION = {actions.INIT} | {actions.RUN} | {actions.SET} | {actions.SHOW}")
    for entry in actions.info:
        print(f" {entry['action']} : {entry['help']}")
    print("Try --help option with any action to get more details")
def _aocw_flag_value(argv, short_flag, long_flag):
    """
    Return the token following the first occurrence of short_flag or
    long_flag in argv, or None when neither flag is present.
    Raises ValueError when the flag is the last token (no value after it).
    """
    for i, arg in enumerate(argv):
        if arg == short_flag or arg == long_flag:
            if i == len(argv) - 1:
                raise ValueError(f"flag {arg} is missing its value")
            return argv[i + 1]
    return None
def init(self, argv : List[str]):
    """
    Init the config for first time.
    Optional positional arguments:
    1) -f --file file : File with the list of urls to check [REQUIRED]
    2) -r --critical-rate : a float number
    3) -a --active true | false : if the program should start running or not
    4) -n --n-days int : how many days before to this to take in consideration
    5) -e --email-to-notify str : the email address to send notifications to

    On any problem this sets self.state to the matching AOCW.ERRORS code and
    returns early; on success a PermaConfig is created and saved.
    (Flag parsing is factored into _aocw_flag_value instead of five copies
    of the same scan loop.)
    """
    home = os.environ['HOME']
    aocw_dir : str = os.path.join(home, ".aocw")
    # -h/--help, or no extra arguments at all, prints help and succeeds
    if "-h" in argv or "--help" in argv or len(argv) == 2:
        AOCW.init_help()
        self.state = AOCW.ERRORS.OK
        return
    flags = AOCW.INIT_FLAGS
    # -f/--file (REQUIRED): file with the list of urls to check
    try:
        list_file = _aocw_flag_value(argv, flags.INIT_FILE_FLAG, flags.INIT_FILE_FLAG_LONG)
    except ValueError:
        self.state = AOCW.ERRORS.INVALID_ARGUMENTS
        return
    if list_file is None:
        self.state = AOCW.ERRORS.MISSING_REQUIRED_ARGUMENT
        return
    try:
        list_file = os.path.abspath(list_file)
    except Exception:
        self.state = AOCW.ERRORS.INVALID_ARGUMENTS
        return
    # -r/--critical-rate (optional): positive float
    initial_rate = AOCW.DEFAULTS.CRITICAL_RATE
    try:
        raw_rate = _aocw_flag_value(argv, flags.INIT_RATE_FLAG, flags.INIT_RATE_FLAG_LONG)
        if raw_rate is not None:
            initial_rate = float(raw_rate)
            if initial_rate <= 0:
                raise ValueError("critical rate must be positive")
    except ValueError:
        self.state = AOCW.ERRORS.INVALID_ARGUMENTS
        return
    # -n/--n-days (optional): positive int, the look-back window in days
    n_days_before = AOCW.DEFAULTS.N_DAYS_BEFORE
    try:
        raw_days = _aocw_flag_value(argv, flags.INIT_N_DAYS_FLAG, flags.INIT_N_DAYS_FLAG_LONG)
        if raw_days is not None:
            n_days_before = int(raw_days)
            if n_days_before <= 0:
                raise ValueError("n-days must be positive")
    except ValueError:
        self.state = AOCW.ERRORS.INVALID_ARGUMENTS
        return
    # -a/--active (optional): literal 'true' or 'false' (case-insensitive)
    active : bool = AOCW.DEFAULTS.ACTIVE
    try:
        raw_active = _aocw_flag_value(argv, flags.INIT_ACTIVE_FLAG, flags.INIT_ACTIVE_FLAG_LONG)
    except ValueError:
        self.state = AOCW.ERRORS.INVALID_ARGUMENTS
        return
    if raw_active is not None:
        raw_active = raw_active.lower()
        if raw_active == 'true':
            active = True
        elif raw_active == 'false':
            active = False
        else:
            self.state = AOCW.ERRORS.INVALID_ARGUMENTS
            return
    # -e/--email-to-notify (REQUIRED): address that receives alerts
    try:
        email = _aocw_flag_value(argv, flags.INIT_EMAIL_FLAG, flags.INIT_EMAIL_FLAG_LONG)
    except ValueError:
        self.state = AOCW.ERRORS.INVALID_ARGUMENTS
        return
    if email is None:
        self.state = AOCW.ERRORS.MISSING_REQUIRED_ARGUMENT
        return
    # Create data dir if not exists
    if not os.path.exists(aocw_dir):
        try:
            print(f"Creating local storage dir: {aocw_dir}")
            os.mkdir(aocw_dir)
        except OSError:
            self.state = AOCW.ERRORS.COULD_NOT_CREATE_DIR
            return
    # Create (truncate) the log file
    log_file = os.path.join(aocw_dir, AOCW.DEFAULTS.LOG_FILE)
    try:
        print(f"Creating log file in: {log_file}")
        with open(log_file, 'w'):
            pass
    except OSError:
        self.state = AOCW.ERRORS.COULD_NOT_CREATE_LOG
        return
    # Ask the user before overriding an existing config file
    config_file = os.path.join(aocw_dir, self.DEFAULTS.CONFIG_FILE)
    if os.path.exists(config_file):
        should_override : str = input("AOCW file already exist. Override? [y/n]: ").lower()
        while should_override != 'y' and should_override != 'n':
            print("Urecognized input")
            should_override = input("AOCW file already exist. Override? [y/n]: ").lower()
        if should_override == 'n':
            print("No changes made.")
            self.state = AOCW.ERRORS.OK
            return
    # Create (truncate) the config file
    try:
        with open(config_file, 'w'):
            pass
    except OSError:
        self.state = AOCW.ERRORS.COULD_NOT_ACCESS_CONFIG_FILE
        return
    YELLOW = "\u001b[33m"
    ENDC = '\033[0m'
    # Interactively collect the gmail account used to send notifications
    confirm = False
    print("We need a gmail email address that will send notifications in case of some anomaly.")
    print(f"{YELLOW}WARNING: We highly recommend you to create a dummy email account for this purpose since we can ensure{ENDC}")
    print(f"{YELLOW}your security for now.{ENDC}")
    sender_mail = ''
    while not confirm:
        sender_mail : str = input("Sender mail: ")
        print(f"Your sender account will be: {sender_mail}")
        yn = input("Are you agree?[y/n]: ").lower()
        while yn != 'y' and yn != 'n':
            print("unrecognized input")
            yn = input("Are you agree?[y/n]: ").lower()
        if yn == 'y':
            confirm = True
    # Ask for the password twice until both entries match
    sender_pswd = ''
    confirm = False
    while not confirm:
        sender_pswd = getpass.getpass(f"Password for {sender_mail}: ")
        confirmation_pswd = getpass.getpass("Confirm password: ")
        confirm = sender_pswd == confirmation_pswd
        if not confirm:
            print("Password not matching, retry")
    # Build and persist the configuration
    try:
        permaConfig : PermaConfig = PermaConfig(log_file,
                                                list_file,
                                                initial_rate,
                                                active,
                                                n_days_before,
                                                email,
                                                sender_mail,
                                                sender_pswd)
    except Exception:
        self.state = AOCW.ERRORS.COULD_NOT_SETUP_CONFIG_FILE
        return
    permaConfig.save()
    if (permaConfig.state != PermaConfig.ERROR.OK):
        print("Error: ", permaConfig.state)
        self.state = AOCW.ERRORS.COULD_NOT_SETUP_CONFIG_FILE
        return
def init_help():
    """Print the detailed usage for the 'init' action."""
    print(" Initialize local files to store logic and data retrieved from ooni.")
    print(" Possible arguments to setup the installation:")
    for flag in AOCW.INIT_FLAGS.info:
        print(" {short}, {long}: {help}\n".format(short=flag['short'], long=flag['long'], help=flag['help']))
def run(self, argv : List[str]):
    """
    Run this program if it is set up to do so. Load urls from the
    specified file and evaluate them in the ooni api, store their data.

    For each url: fetch measurements for the configured look-back window,
    compute the average anomaly ratio, record the change versus the stored
    rate, and send an alert when the change exceeds the critical rate.
    Sets self.state on failure; on success the updated config is saved.

    NOTE(review): the bare 'except' around the main loop maps every error
    (including bugs inside the loop) to COULD_NOT_ACCESS_LIST_FILE, and
    log_file is not closed on that path -- consider try/finally.
    """
    # Shortcut function
    date2str = lambda d : d.strftime("%Y-%m-%d %H:%M:%S")
    # Load the config
    permaConfig : PermaConfig = PermaConfig()
    permaConfig.load()
    if permaConfig.state != PermaConfig.ERROR.OK:
        self.state = AOCW.ERRORS.COULD_NOT_ACCESS_CONFIG_FILE
        return
    # Check if the program is currently active
    if not permaConfig.active:
        return
    # Open the log file in append mode
    try:
        log_file = open(permaConfig.log_file, 'a')
    except:
        self.state = AOCW.ERRORS.COULD_NOT_ACCESS_LOG_FILE
        return
    print(f"[INFO] {date2str(datetime.now())} Running check:", file=log_file)
    # setup since date
    until : datetime = datetime.now()
    since : datetime = until - timedelta(days=permaConfig.n_days_before)
    # Open list file and measure every url
    try:
        with open(permaConfig.list_file, "r") as list_file:
            for line in list_file.readlines():
                # Some lines have an endline, trim it so they won't mess up the ooni input
                if line[-1] == '\n':
                    line = line[:len(line)-1]
                # Get data for this url
                print(f"Retrieving data for {line}...")
                days = aoc_bend.get_measurements_list_api(since, until, line)
                now : datetime = datetime.now()
                # a str return value signals an error message from the api layer
                if isinstance(days, str):
                    print(f"[ERROR] {date2str(now)} Could not retrieve data for url: {line}. Error: {days}", file=log_file)
                    continue
                # first time we see this url: create its entry
                if permaConfig.urls.get(line) is None:
                    permaConfig.add_entry(line)
                dayset = aoc_bend.MeasurementSet(days)
                ratio = dayset.get_avg_anomalies()
                # If there was no measurements (metrics == -1), update the check date and keep going
                if ratio == -1:
                    permaConfig.urls[line]['last_check'] = date2str(now)
                    continue
                # -1 stored rate means "never measured": treat change as 0
                last = permaConfig.urls[line]['current_rate']
                if last == -1:
                    change = 0
                else:
                    change = ratio - permaConfig.urls[line]['current_rate']
                permaConfig.urls[line]['previous_rate'] = permaConfig.urls[line]['current_rate']
                permaConfig.urls[line]['current_rate'] = ratio
                permaConfig.urls[line]['change'] = change
                permaConfig.urls[line]['last_check'] = date2str(now)
                permaConfig.urls[line]['last_update'] = date2str(now)
                # notify when the anomaly-rate jump crosses the threshold
                if change > permaConfig.critical_anomaly_rate:
                    self.alert(permaConfig, change, line, date2str(now))
                    print(f"[ALERT] {date2str(now)} Change of {round(change, 3)} for url: {line}", file=log_file)
    except:
        self.state = AOCW.ERRORS.COULD_NOT_ACCESS_LIST_FILE
        return
    permaConfig.save()
    log_file.close()
def show(self, argv : List[str]):
"""
Print the current config and the currently stored data
"""
# Load local data
permaConfig : PermaConfig = PermaConfig()
permaConfig.load()
if permaConfig.state != PermaConfig.ERROR.OK:
self.state = AOCW.ERRORS.COULD_NOT_ACCESS_CONFIG_FILE
return
# For string coloring:
RED = '\033[91m'
YELLOW = "\u001b[33m"
GREEN = "\u001b[32m"
BLUE = "\u001b[36m"
ENDC = '\033[0m'
critical_rate : float = permaConfig.critical_anomaly_rate
alert = lambda f : f"{RED if f > critical_rate else ''}{f}{ENDC if f > critical_rate else ''}"
# Build the table
header : List[str] = ["input", "last check", "last update", "previous rate", "current rate", "change"]
header = map(lambda s : f"{BLUE}{s}{ENDC}",header)
body = [
[
inpt, details["last_check"], details["last_update"],
f"{round(details['previous_rate'], 3)}",
f"{round(details['current_rate'], 3)}",
f"{alert(round(details['change'], 3)) if details['change'] is not None else 'No change to show'}",
]
for (inpt, details) in permaConfig.urls.items()
]
# If there's no rows to show:
if len(body) == 0:
body.append(
["-- No sites to show --"]
)
table = [header] + body
print(f"List file: {YELLOW}{permaConfig.list_file}{ENDC}")
print(f"Specified critical rate: {RED}{permaConfig.critical_anomaly_rate}{ENDC}")
print(f"Checking {GREEN}{permaConfig.n_days_before}{ENDC} days before the | |
#!/usr/bin/env python
'''
print_all.py - a script to print MRT format data using mrtparse.
Copyright (C) 2016 greenHippo, LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Authors:
<NAME> <<EMAIL>>
<NAME> <<EMAIL>>
<NAME> <<EMAIL>>
'''
import sys
from optparse import OptionParser
from datetime import *
from mrtparse import *
indt = 0
def prerror(m):
    """Report an MRT parse error, then hex-dump the offending buffer."""
    print('%s: %s' % (MRT_ERR_C[m.err], m.err_msg))
    # Header errors dump the whole record; otherwise skip the 12-byte MRT header.
    buf = m.buf if m.err == MRT_ERR_C['MRT Header Error'] else m.buf[12:]
    s = ''
    for i, byte in enumerate(buf):
        # Python 2 yields 1-char strings when indexing bytes; Python 3 yields ints.
        value = ord(byte) if isinstance(byte, str) else byte
        s += '%02x ' % value
        if (i + 1) % 16 == 0:
            # Flush a full 16-byte row.
            print(' %s' % s)
            s = ''
        elif (i + 1) % 8 == 0:
            # Extra gap between the two 8-byte halves of a row.
            s += ' '
    if len(s):
        # Flush any trailing partial row.
        print(' %s' % s)
def prline(line):
    """Print *line* prefixed by one space per unit of the global indent level."""
    global indt
    print('%s%s' % (' ' * indt, line))
def print_mrt(m):
    """Print the common MRT header fields of record *m*."""
    global indt
    indt = 0
    prline('MRT Header')
    indt += 1
    prline('Timestamp: %d(%s)' % (m.ts, datetime.fromtimestamp(m.ts)))
    prline('Type: %d(%s)' % (m.type, MRT_T[m.type]))
    prline('Subtype: %d(%s)' % (m.subtype, MRT_ST[m.type][m.subtype]))
    prline('Length: %d' % m.len)
    # Extended-timestamp record types carry an additional microsecond field.
    extended_types = (MRT_T['BGP4MP_ET'], MRT_T['ISIS_ET'], MRT_T['OSPFv3_ET'])
    if m.type in extended_types:
        prline('Microsecond Timestamp: %d' % m.micro_ts)
def print_td(m):
    """Print a TABLE_DUMP record: RIB fields followed by its BGP path attributes."""
    global indt
    indt = 0
    prline('%s' % MRT_T[m.type])
    indt += 1
    td = m.td
    prline('View Number: %d' % td.view)
    prline('Sequence Number: %d' % td.seq)
    prline('Prefix: %s' % td.prefix)
    prline('Prefix length: %d' % td.plen)
    prline('Status: %d' % td.status)
    prline('Originated Time: %d(%s)' % (td.org_time, datetime.fromtimestamp(td.org_time)))
    prline('Peer IP Address: %s' % td.peer_ip)
    prline('Peer AS: %s' % td.peer_as)
    prline('Attribute Length: %d' % td.attr_len)
    for attr in td.attr:
        print_bgp_attr(attr, 1)
def print_td_v2(m):
    """Print a TABLE_DUMP_V2 record: a peer index table, an address-specific
    RIB, or a generic RIB, depending on the record subtype."""
    global indt
    indt = 0
    prline('%s' % TD_V2_ST[m.subtype])
    indt += 1
    if m.subtype == TD_V2_ST['PEER_INDEX_TABLE']:
        # Peer index table: collector id, view name, then every known peer.
        prline('Collector: %s' % m.peer.collector)
        prline('View Name Length: %d' % m.peer.view_len)
        prline('View Name: %s' % m.peer.view)
        prline('Peer Count: %d' % m.peer.count)
        for entry in m.peer.entry:
            prline('Peer Type: 0x%02x' % entry.type)
            prline('Peer BGP ID: %s' % entry.bgp_id)
            prline('Peer IP Address: %s' % entry.ip)
            prline('Peer AS: %s' % entry.asn)
    elif ( m.subtype == TD_V2_ST['RIB_IPV4_UNICAST']
        or m.subtype == TD_V2_ST['RIB_IPV4_MULTICAST']
        or m.subtype == TD_V2_ST['RIB_IPV6_UNICAST']
        or m.subtype == TD_V2_ST['RIB_IPV6_MULTICAST']):
        # Address-specific RIB: the prefix, then one block per RIB entry.
        prline('Sequence Number: %d' % m.rib.seq)
        prline('Prefix Length: %d' % m.rib.plen)
        prline('Prefix: %s' % m.rib.prefix)
        prline('Entry Count: %d' % m.rib.count)
        for entry in m.rib.entry:
            # Reset the indent for each entry (print_bgp_attr also sets it).
            indt = 1
            prline('Peer Index: %d' % entry.peer_index)
            prline('Originated Time: %d(%s)' %
                (entry.org_time,
                datetime.fromtimestamp(entry.org_time)))
            prline('Attribute Length: %d' % entry.attr_len)
            for attr in entry.attr:
                print_bgp_attr(attr, 1)
    elif m.subtype == TD_V2_ST['RIB_GENERIC']:
        # Generic RIB: AFI/SAFI plus raw NLRI, then one block per entry.
        prline('Sequence Number: %d' % m.rib.seq)
        prline('AFI: %d(%s)' % (m.rib.afi, AFI_T[m.rib.afi]))
        prline('SAFI: %d(%s)' % (m.rib.safi, SAFI_T[m.rib.safi]))
        for nlri in m.rib.nlri:
            print_nlri(nlri, 'NLRI', m.rib.safi)
        prline('Entry Count: %d' % m.rib.count)
        for entry in m.rib.entry:
            indt = 1
            prline('Peer Index: %d' % entry.peer_index)
            prline('Originated Time: %d(%s)' %
                (entry.org_time,
                datetime.fromtimestamp(entry.org_time)))
            prline('Attribute Length: %d' % entry.attr_len)
            for attr in entry.attr:
                print_bgp_attr(attr, 1)
def print_bgp4mp(m):
    """Print a BGP4MP record: peering info, then a state change or a BGP message."""
    global indt
    indt = 0
    prline('%s' % BGP4MP_ST[m.subtype])
    indt += 1
    bgp = m.bgp
    prline('Peer AS Number: %s' % bgp.peer_as)
    prline('Local AS Number: %s' % bgp.local_as)
    prline('Interface Index: %d' % bgp.ifindex)
    prline('Address Family: %d(%s)' % (bgp.af, AFI_T[bgp.af]))
    prline('Peer IP Address: %s' % bgp.peer_ip)
    prline('Local IP Address: %s' % bgp.local_ip)
    state_subtypes = (BGP4MP_ST['BGP4MP_STATE_CHANGE'],
                      BGP4MP_ST['BGP4MP_STATE_CHANGE_AS4'])
    message_subtypes = (BGP4MP_ST['BGP4MP_MESSAGE'],
                        BGP4MP_ST['BGP4MP_MESSAGE_AS4'],
                        BGP4MP_ST['BGP4MP_MESSAGE_LOCAL'],
                        BGP4MP_ST['BGP4MP_MESSAGE_AS4_LOCAL'])
    if m.subtype in state_subtypes:
        # FSM transition: report the old and new BGP session states.
        prline('Old State: %d(%s)' % (bgp.old_state, BGP_FSM[bgp.old_state]))
        prline('New State: %d(%s)' % (bgp.new_state, BGP_FSM[bgp.new_state]))
    elif m.subtype in message_subtypes:
        # The record wraps an actual BGP protocol message.
        print_bgp_msg(bgp.msg, m.subtype)
def print_bgp_msg(msg, subtype):
    """Print a BGP protocol message (OPEN / UPDATE / NOTIFICATION /
    ROUTE-REFRESH) carried inside a BGP4MP record.

    The *subtype* parameter is accepted for API symmetry with the other
    printers; this function does not read it."""
    global indt
    indt = 0
    prline('BGP Message')
    indt += 1
    prline('Marker: -- ignored --')
    prline('Length: %d' % msg.len)
    prline('Type: %d(%s)' %
        (msg.type, BGP_MSG_T[msg.type]))
    if msg.type == BGP_MSG_T['OPEN']:
        # OPEN: session parameters plus optional capability parameters.
        prline('Version: %d' % msg.ver)
        prline('My AS: %d' % msg.my_as)
        prline('Hold Time: %d' % msg.holdtime)
        prline('BGP Identifier: %s' % msg.bgp_id)
        prline('Optional Parameter Length: %d' % msg.opt_len)
        for opt in msg.opt_params:
            print_bgp_opt_params(opt)
    elif msg.type == BGP_MSG_T['UPDATE']:
        # UPDATE: withdrawn routes, path attributes, then announced NLRI.
        prline('Withdrawn Routes Length: %d' % msg.wd_len)
        for withdrawn in msg.withdrawn:
            print_nlri(withdrawn, 'Withdrawn Routes')
        prline('Total Path Attribute Length: %d' % msg.attr_len)
        for attr in msg.attr:
            print_bgp_attr(attr, 1)
        # NLRI lines print one level below the message header.
        indt = 1
        for nlri in msg.nlri:
            print_nlri(nlri, 'NLRI')
    elif msg.type == BGP_MSG_T['NOTIFICATION']:
        prline('Error Code: %d(%s)' %
            (msg.err_code, BGP_ERR_C[msg.err_code]))
        prline('Error Subcode: %d(%s)' %
            (msg.err_subcode, BGP_ERR_SC[msg.err_code][msg.err_subcode]))
        prline('Data: %s' % msg.data)
    elif msg.type == BGP_MSG_T['ROUTE-REFRESH']:
        prline('AFI: %d(%s)' % (msg.afi, AFI_T[msg.afi]))
        prline('Reserved: %d' % (msg.rsvd))
        prline('SAFI: %d(%s)' % (msg.safi, SAFI_T[msg.safi]))
def print_bgp_opt_params(opt):
    """Print one BGP OPEN optional parameter, decoding the payload of the
    capability codes this script understands."""
    global indt
    indt = 1
    prline('Parameter Type/Length: %d/%d' % (opt.type, opt.len))
    indt += 1
    prline('%s' % BGP_OPT_PARAMS_T[opt.type])
    # Only Capabilities parameters carry further structure worth decoding.
    if opt.type != BGP_OPT_PARAMS_T['Capabilities']:
        return
    indt += 1
    prline('Capability Code: %d(%s)' %
        (opt.cap_type, BGP_CAP_C[opt.cap_type]))
    prline('Capability Length: %d' % opt.cap_len)
    if opt.cap_type == BGP_CAP_C['Multiprotocol Extensions for BGP-4']:
        prline('AFI: %d(%s)' %
            (opt.multi_ext['afi'], AFI_T[opt.multi_ext['afi']]))
        prline('Reserved: %d' % opt.multi_ext['rsvd'])
        prline('SAFI: %d(%s)' %
            (opt.multi_ext['safi'], SAFI_T[opt.multi_ext['safi']]))
    elif opt.cap_type == BGP_CAP_C['Route Refresh Capability for BGP-4']:
        # This capability has no body; its presence alone is the signal.
        pass
    elif opt.cap_type == BGP_CAP_C['Outbound Route Filtering Capability']:
        prline('AFI: %d(%s)' %
            (opt.orf['afi'], AFI_T[opt.orf['afi']]))
        prline('Reserved: %d' % opt.orf['rsvd'])
        prline('SAFI: %d(%s)' %
            (opt.orf['safi'], SAFI_T[opt.orf['safi']]))
        prline('Number: %d' % opt.orf['number'])
        for entry in opt.orf['entry']:
            prline('Type: %d' % entry['type'])
            prline('Send Receive: %d(%s)' %
                (entry['send_recv'], ORF_SEND_RECV[entry['send_recv']]))
    elif opt.cap_type == BGP_CAP_C['Graceful Restart Capability']:
        prline('Restart Flags: 0x%x' %
            opt.graceful_restart['flag'])
        prline('Restart Time in Seconds: %d' %
            opt.graceful_restart['sec'])
        # One (AFI, SAFI, flag) tuple per address family being preserved.
        for entry in opt.graceful_restart['entry']:
            prline('AFI: %d(%s)' %
                (entry['afi'], AFI_T[entry['afi']]))
            prline('SAFI: %d(%s)' %
                (entry['safi'], SAFI_T[entry['safi']]))
            prline('Flag: 0x%02x' % entry['flag'])
    elif opt.cap_type == BGP_CAP_C['Support for 4-octet AS number capability']:
        prline('AS Number: %s' % opt.support_as4)
    elif opt.cap_type == BGP_CAP_C['ADD-PATH Capability']:
        for entry in opt.add_path:
            prline('AFI: %d(%s)' %
                (entry['afi'], AFI_T[entry['afi']]))
            prline('SAFI: %d(%s)' %
                (entry['safi'], SAFI_T[entry['safi']]))
            prline('Send Receive: %d(%s)' %
                (entry['send_recv'],
                ADD_PATH_SEND_RECV[entry['send_recv']]))
def print_bgp_attr(attr, n):
    """Print one BGP path attribute at indent level *n*, decoding every
    attribute type this script knows; unknown types are hex-dumped."""
    global indt
    indt = n
    prline('Path Attribute Flags/Type/Length: 0x%02x/%d/%d' %
        (attr.flag, attr.type, attr.len))
    indt += 1
    line = '%s' % BGP_ATTR_T[attr.type]
    if attr.type == BGP_ATTR_T['ORIGIN']:
        prline(line + ': %d(%s)' % (attr.origin, ORIGIN_T[attr.origin]))
    elif attr.type == BGP_ATTR_T['AS_PATH']:
        prline(line)
        indt += 1
        for path_seg in attr.as_path:
            prline('Path Segment Type: %d(%s)' %
                (path_seg['type'], AS_PATH_SEG_T[path_seg['type']]))
            prline('Path Segment Length: %d' % path_seg['len'])
            prline('Path Segment Value: %s' % ' '.join(path_seg['val']))
    elif attr.type == BGP_ATTR_T['NEXT_HOP']:
        prline(line + ': %s' % attr.next_hop)
    elif attr.type == BGP_ATTR_T['MULTI_EXIT_DISC']:
        prline(line + ': %d' % attr.med)
    elif attr.type == BGP_ATTR_T['LOCAL_PREF']:
        prline(line + ': %d' % attr.local_pref)
    elif attr.type == BGP_ATTR_T['ATOMIC_AGGREGATE']:
        # Flag-only attribute: just the name, no value.
        prline(line)
    elif attr.type == BGP_ATTR_T['AGGREGATOR']:
        prline(line + ': %s %s' % (attr.aggr['asn'], attr.aggr['id']))
    elif attr.type == BGP_ATTR_T['COMMUNITY']:
        prline(line + ': %s' % ' '.join(attr.comm))
    elif attr.type == BGP_ATTR_T['ORIGINATOR_ID']:
        prline(line + ': %s' % attr.org_id)
    elif attr.type == BGP_ATTR_T['CLUSTER_LIST']:
        prline(line + ': %s' % ' '.join(attr.cl_list))
    elif attr.type == BGP_ATTR_T['MP_REACH_NLRI']:
        prline(line)
        indt += 1
        # AFI/SAFI/next-hop may be absent on partially parsed attributes,
        # hence the membership checks before each field.
        if 'afi' in attr.mp_reach:
            prline('AFI: %d(%s)' %
                (attr.mp_reach['afi'], AFI_T[attr.mp_reach['afi']]))
        if 'safi' in attr.mp_reach:
            prline('SAFI: %d(%s)' %
                (attr.mp_reach['safi'], SAFI_T[attr.mp_reach['safi']]))
            if ( attr.mp_reach['safi'] == SAFI_T['L3VPN_UNICAST']
                or attr.mp_reach['safi'] == SAFI_T['L3VPN_MULTICAST']):
                prline('Route Distinguisher: %s' % attr.mp_reach['rd'])
        prline('Length: %d' % attr.mp_reach['nlen'])
        if 'next_hop' not in attr.mp_reach:
            return
        next_hop = " ".join(attr.mp_reach['next_hop'])
        prline('Next-Hop: %s' % next_hop)
        if 'nlri' in attr.mp_reach:
            for nlri in attr.mp_reach['nlri']:
                print_nlri(nlri, 'NLRI', attr.mp_reach['safi'])
    elif attr.type == BGP_ATTR_T['MP_UNREACH_NLRI']:
        prline(line)
        indt += 1
        prline('AFI: %d(%s)' %
            (attr.mp_unreach['afi'], AFI_T[attr.mp_unreach['afi']]))
        prline('SAFI: %d(%s)' %
            (attr.mp_unreach['safi'], SAFI_T[attr.mp_unreach['safi']]))
        for withdrawn in attr.mp_unreach['withdrawn']:
            print_nlri(withdrawn, 'Withdrawn Routes', attr.mp_unreach['safi'])
    elif attr.type == BGP_ATTR_T['EXTENDED_COMMUNITIES']:
        ext_comm_list = []
        for ext_comm in attr.ext_comm:
            ext_comm_list.append('0x%016x' % ext_comm)
        prline(line + ': %s' % ' '.join(ext_comm_list))
    elif attr.type == BGP_ATTR_T['AS4_PATH']:
        prline(line)
        indt += 1
        for path_seg in attr.as4_path:
            prline('Path Segment Type: %d(%s)' %
                (path_seg['type'], AS_PATH_SEG_T[path_seg['type']]))
            prline('Path Segment Length: %d' % path_seg['len'])
            prline('Path Segment Value: %s' % ' '.join(path_seg['val']))
    elif attr.type == BGP_ATTR_T['AS4_AGGREGATOR']:
        prline(line + ': %s %s' % (attr.as4_aggr['asn'], attr.as4_aggr['id']))
    elif attr.type == BGP_ATTR_T['AIGP']:
        prline(line)
        indt += 1
        for aigp in attr.aigp:
            prline('Type: %d' % aigp['type'])
            prline('Length: %d' % aigp['len'])
            prline('Value: %d' % aigp['val'])
    elif attr.type == BGP_ATTR_T['ATTR_SET']:
        prline(line)
        indt += 1
        prline('Origin AS: %s' % attr.attr_set['origin_as'])
        # NOTE(review): the loop variable shadows the outer `attr` on purpose;
        # each nested attribute of the set is printed recursively at level 3.
        for attr in attr.attr_set['attr']:
            print_bgp_attr(attr, 3)
    else:
        # Unknown attribute type: dump the raw value as a hex string.
        line += ': 0x'
        for c in attr.val:
            if isinstance(c, str):
                c = ord(c)
            line += '%02x' % c
        prline(line)
def print_nlri(nlri, title, *args):
global indt
safi = args[0] if len(args) > 0 else 0
if ( safi == SAFI_T['L3VPN_UNICAST']
or | |
"""NLG is a Natural Language Generator used to produce a human-like response for Dialogue Acts
of the agent."""
__author__ = "<NAME>"
import random
from copy import deepcopy
from typing import List
from moviebot.dialogue_manager.dialogue_act import DialogueAct
from moviebot.dialogue_manager.dialogue_state import DialogueState
from moviebot.nlu.annotation.item_constraint import ItemConstraint
from moviebot.nlu.annotation.operator import Operator
from moviebot.intents.agent_intents import AgentIntents
from moviebot.intents.user_intents import UserIntents
from moviebot.nlu.annotation.slots import Slots
from moviebot.nlu.annotation.values import Values
class NLG:
"""NLG is a Natural Language Generator used to produce a human-like response for Dialogue
Acts of the agent."""
def __init__(self, args=None):
"""Initializes any necessary components.
:type self.dialogue_state: DialogueState
Args:
args: basic settings of NLG
"""
self.dialogue_state = None
self.previous_count = 0
self.agent_elicit_nlg = {
Slots.GENRES.value: [
"Do you have any specific genres in mind?",
"Which genres do you prefer?"
],
Slots.KEYWORDS.value: [
"Can you give me a few keywords?",
"What are you looking for in a movie? Some keywords "
"would be good."
],
Slots.DIRECTORS.value: [
"Any specific director you are looking for?",
"Is there any specific director in your mind?"
],
Slots.ACTORS.value: [
"Do you have any favourite actor these days?",
"Any hints regarding the cast? Can you give me a name of any "
"actor?"
],
Slots.YEAR.value: [
"Which timeline do you prefer? For example, 90s or 80s?",
"Do you have any preference of when the movie was produced? "
"For example, 1992 or 90s."
]
}
self.inform_key = {
Slots.TITLE.value: f'_{Slots.TITLE.value}_',
Slots.GENRES.value: f'_{Slots.GENRES.value}_',
Slots.PLOT.value: f'_{Slots.PLOT.value}_',
Slots.KEYWORDS.value: f'_{Slots.KEYWORDS.value}_',
Slots.DIRECTORS.value: f'_{Slots.DIRECTORS.value}_',
Slots.DURATION.value: f'_{Slots.DURATION.value}_',
Slots.ACTORS.value: f'_{Slots.ACTORS.value}_',
Slots.YEAR.value: f'_{Slots.YEAR.value}_',
Slots.MOVIE_LINK.value: f'_{Slots.MOVIE_LINK.value}_',
Slots.RATING.value: f'_{Slots.RATING.value}_'
}
self.agent_inform_nlg = {
Slots.TITLE.value: [
f'The title of the movie is "{self.inform_key[Slots.TITLE.value]}".',
f'Its name is "{self.inform_key[Slots.TITLE.value]}".'
],
Slots.GENRES.value: [
f'The genres it belongs to are {self.inform_key[Slots.GENRES.value]}.',
f'Its genres are {self.inform_key[Slots.GENRES.value]}.'
],
Slots.PLOT.value: [f'{self.inform_key[Slots.PLOT.value]}'],
Slots.KEYWORDS.value: [
f'The plot of the movie revolves around {self.inform_key[Slots.KEYWORDS.value]}.',
f'The movie plot is about {self.inform_key[Slots.KEYWORDS.value]}.'
],
Slots.DIRECTORS.value: [
f'The director of this movie is {self.inform_key[Slots.DIRECTORS.value]}.',
f'Its directed by {self.inform_key[Slots.DIRECTORS.value]}.'
],
Slots.DURATION.value: [
f'Its duration is {self.inform_key[Slots.DURATION.value]}.',
f'This movie is {self.inform_key[Slots.DURATION.value]} long.'
],
Slots.ACTORS.value: [
f'Some of the famous actors in this movie are '
f'{self.inform_key[Slots.ACTORS.value]}.',
f'Actors {self.inform_key[Slots.ACTORS.value]} have played prominent roles in '
f'this movie.'
],
Slots.YEAR.value: [
f'The movie was released in {self.inform_key[Slots.YEAR.value]}.',
f'It was released in the year {self.inform_key[Slots.YEAR.value]}.'
],
Slots.MOVIE_LINK.value: [
f'The link of the movie on IMDb is {self.inform_key[Slots.MOVIE_LINK.value]}',
f'You can find more about the movie at this link: '
f'{self.inform_key[Slots.MOVIE_LINK.value]}'
],
Slots.RATING.value: [
f'Its rating on IMDb is {self.inform_key[Slots.RATING.value]}.',
f'The rating of this movie on IMDb is {self.inform_key[Slots.RATING.value]}.'
]
}
self.slot_not_found_gen = {
Slots.GENRES.value: ["I could not find the genres you mentioned."],
Slots.KEYWORDS.value: [
"I couldn't find the keywords in your response."
],
Slots.DIRECTORS.value: [
"I could not find the the director name you specified."
],
Slots.ACTORS.value: [
"I couldn't find the The actor you mentioned."
],
Slots.YEAR.value: ["I couldn't find any timeline specification."]
}
self.slot_not_found = {
Slots.GENRES.value: ["I could not find the genres __replace__."],
Slots.KEYWORDS.value: ["I couldn't find the keywords __replace__."],
Slots.DIRECTORS.value: [
"I could not find the the director name __replace__."
],
Slots.ACTORS.value: ["I couldn't find the The actor __replace__."],
Slots.YEAR.value: ["I couldn't find any timeline specification."]
}
    def generate_output(self,
                        agent_dacts,
                        dialogue_state=None,
                        user_fname=None):
        """Selects an appropriate response based on the dialogue acts.

        Args:
            agent_dacts: a list of agent dialogue acts
            dialogue_state: the current dialogue state (Default value = None)
            user_fname: user's first name, used in the welcome message
                (Default value = None)

        Returns:
            a tuple of (natural-language response string, dict of user
            options); falls back to the string form of the dialogue acts
            when no template matched.
        """
        if dialogue_state:
            CIN = deepcopy(dialogue_state.frame_CIN)
            self.dialogue_state = dialogue_state
        utterance = []
        user_options = {}
        # First react to the user's last REVEAL acts: report any slot values
        # that could not be matched in the database/annotations.
        if dialogue_state and dialogue_state.last_user_dacts:
            for user_dact in dialogue_state.last_user_dacts:
                if user_dact.intent == UserIntents.REVEAL:
                    for param in user_dact.params:
                        if param.value == Values.NOT_FOUND:
                            # Short utterances are echoed back verbatim;
                            # longer ones get a generic "not found" message.
                            if len(dialogue_state.user_utterance.get_tokens()
                                  ) <= 3:
                                not_found_response = random.choice(
                                    self.slot_not_found[param.slot])
                                utterance.append(
                                    not_found_response.replace(
                                        '__replace__',
                                        dialogue_state.user_utterance.get_text(
                                        )))
                            else:
                                utterance.append(
                                    random.choice(
                                        self.slot_not_found_gen[param.slot]))
                        elif param.value != Values.DONT_CARE:
                            pass
        # Then render each agent dialogue act into a response fragment.
        for agent_dact in agent_dacts:
            if agent_dact.intent == AgentIntents.WELCOME:
                isBot = False
                new_user = False
                intent_response = ""
                for param in agent_dact.params:
                    if param.slot == 'new_user':
                        new_user = param.value
                    if param.slot == 'is_bot':
                        isBot = param.value
                if isBot:
                    if new_user:
                        intent_response += f'Hi {user_fname}. Welcome to IAI MovieBot. '
                    else:
                        intent_response += f'Hi {user_fname}. Welcome back. '
                welcome_message = [
                    'How may I help you?', 'How can I assist you today?',
                    'Shall we start?'
                ]
                # Only append the prompt when WELCOME is the sole act.
                if len(agent_dacts) == 1:
                    intent_response += f'{random.choice(welcome_message)}'
                utterance.append(intent_response)
            if agent_dact.intent == AgentIntents.RESTART:
                utterance.append(
                    random.choice(['Let\'s restart.',
                                   'We are starting again.']))
            elif agent_dact.intent == AgentIntents.ELICIT:
                intent_response = random.choice(
                    self.agent_elicit_nlg[agent_dact.params[0].slot])
                if agent_dact.params[0].value:
                    intent_response += f' For example, {agent_dact.params[0].value}.'
                utterance.append(intent_response)
            elif agent_dact.intent == AgentIntents.COUNT_RESULTS:
                for param in agent_dact.params:
                    if param.slot == 'count':
                        # Round to the nearest hundred for a softer phrasing.
                        round_value = int(round(param.value / 100.0)) * 100
                        clarify_response = self._clarify_CIN(CIN, agent_dact)
                        if len(clarify_response.split()) == 1:
                            narrow_space = [
                                'Can you guide me to narrow down the search space?',
                                'Please answer a few questions to help me find a good '
                                'movie.'
                            ]
                            intent_response = random.choice(narrow_space)
                        else:
                            count_message = [
                                f'There are almost {round_value} {clarify_response}.',
                                f'I have found almost {round_value} {clarify_response}.'
                            ]
                            narrow_space = [
                                'Can you guide me more to narrow down the search space?',
                                'Please answer a few more questions to help me find a '
                                'good movie.'
                            ]
                            intent_response = " ".join([
                                random.choice(count_message),
                                random.choice(narrow_space)
                            ])
                        # Only report a count that differs from the last one.
                        if round_value != self.previous_count:
                            utterance.append(intent_response)
                        self.previous_count = round_value
                if dialogue_state.agent_must_clarify:
                    user_options.update(
                        self._user_options_remove_preference(
                            deepcopy(dialogue_state.dual_params)))
            elif agent_dact.intent == AgentIntents.RECOMMEND:
                for param in agent_dact.params:
                    if param.slot == Slots.TITLE.value:
                        clarify_response = self._clarify_CIN(CIN, agent_dact)
                        # Pick "a"/"an" from the first letter of the summary.
                        clarify_response = 'an ' + clarify_response if clarify_response[
                            0] in ['a', 'e', 'i', 'o', 'u'
                                  ] else 'a ' + clarify_response
                        link = dialogue_state.item_in_focus[
                            Slots.MOVIE_LINK.value]
                        clarify_response = [
                            f'I would like to recommend you {clarify_response} '
                            f'named **"[{param.value}]({link})"**. Have '
                            f'you watched it?',
                            f'There is {clarify_response} named '
                            f'**"[{param.value}]({link})"**. '
                            f'Have you seen this one?'
                        ]
                        intent_response = random.choice(clarify_response)
                        # if dialogue_state.agent_repeats_offer:
                        #     intent_response = "(This has been recommended before but I am out " \
                        #                       "of options. Sorry)\n" + \
                        #                       intent_response
                        utterance.append(intent_response)
                        user_options.update(self._user_options_recommend())
                if dialogue_state.agent_must_clarify:
                    user_options.update(
                        self._user_options_remove_preference(
                            deepcopy(dialogue_state.dual_params)))
            elif agent_dact.intent == AgentIntents.NO_RESULTS:
                intent_response = random.choice([
                    f'Sorry, I don\'t have any '
                    f'{"other " if dialogue_state.items_in_context else ""}'
                    f'{self._clarify_CIN(CIN, agent_dact)}.',
                    f'Sorry, I couldn\'t find any '
                    f'{"other " if dialogue_state.items_in_context else ""}'
                    f'{self._clarify_CIN(CIN, agent_dact)}.'
                ])
                intent_response += ' Please select from the list of options to continue.'
                utterance.append(intent_response)
                if dialogue_state.agent_must_clarify:
                    user_options.update(
                        self._user_options_remove_preference(
                            deepcopy(dialogue_state.dual_params)))
                else:
                    user_options.update(
                        self._user_options_remove_preference_CIN(CIN))
            elif agent_dact.intent == AgentIntents.INFORM:
                for param in deepcopy(agent_dact.params):
                    if param.slot == Slots.MORE_INFO.value:
                        intent_response = random.choice([
                            f'What would you like to know '
                            f'about '
                            f'"{param.value}"?'
                        ])
                    elif param.slot == 'deny':
                        intent_response = random.choice([
                            f'Would you want to know more about '
                            f'"{param.value}"?'
                        ])
                    else:
                        # Fill the slot placeholder with the actual value,
                        # or with 'unknown' when no value is available.
                        intent_response = random.choice(
                            self.agent_inform_nlg[param.slot])
                        if param.value:
                            if param.slot == Slots.DURATION.value:
                                param.value = self._summarize_duration(
                                    param.value)
                            intent_response = intent_response.replace(
                                self.inform_key[param.slot], str(param.value))
                            # if param.slot == Slots.PLOT.value:
                            #     intent_response += 'You can see more '
                        else:
                            intent_response = intent_response.replace(
                                self.inform_key[param.slot], 'unknown')
                    user_options.update(
                        self._user_options_inquire(dialogue_state))
                    utterance.append(intent_response)
            elif agent_dact.intent == AgentIntents.CONTINUE_RECOMMENDATION:
                utterance.append('Please choose your next step:')
                user_options.update(self._user_options_continue(agent_dact))
            elif agent_dact.intent == AgentIntents.BYE:
                bye_message = [
                    'I hope you had a good experience. Bye.',
                    'Hope to see you soon. Bye.'
                ]
                utterance.append(random.choice(bye_message))
            elif agent_dact.intent == AgentIntents.CANT_HELP:
                cant_help_message = [
                    'Sorry I can\'t help you with that.',
                    'I believe I am stuck. I can\'t help you here.'
                ]
                utterance.append(random.choice(cant_help_message))
        # Fallback: no template matched, dump the raw dialogue acts.
        if len(utterance) == 0:
            return " ".join([str(dact) for dact in agent_dacts]), user_options
        return '\n\n'.join(utterance), user_options
def _summarize_duration(self, value):
"""
Args:
value:
"""
value = int(value)
hours = int(value / 60)
minutes = value - int(value / 60) * 60
if minutes > 60:
return random.choice([
f'{value} minutes',
f'{hours} {"hours" if hours > 1 else "hour"} and '
f'{minutes} {"minutes" if minutes > 1 else "minute"}'
])
else:
return f'{value} minutes'
def _summarize_title_year(self, value):
"""
Args:
value:
"""
negate = False
if value.startswith('.NOT.'):
negate = True
value = value.replace('.NOT.', '')
if value.strip().startswith('BETWEEN'):
years = [int(x) for x in value.split() if str.isdigit(x)]
difference = years[1] - years[0]
if difference == 10:
return str(years[0])[-2:] + 's'
elif difference == 100:
return str(years[0])[:2] + 'th century'
else:
return f'year {"not " if negate else " "}' + value
def _clarify_CIN(self, CIN, agent_dact):
"""Clarify the user CIN in the utterance
| |
super(ResNet, self).__init__()
self.stem = stem
self.num_classes = num_classes
current_stride = self.stem.stride
self._out_feature_strides = {"stem": current_stride}
self._out_feature_channels = {"stem": self.stem.out_channels}
self.stages_and_names = []
for i, blocks in enumerate(stages):
for block in blocks:
assert isinstance(block, ResNetBlockBase), block
curr_channels = block.out_channels
stage = nn.Sequential(*blocks)
name = "res" + str(i + 2)
self.add_module(name, stage)
self.stages_and_names.append((stage, name))
self._out_feature_strides[name] = current_stride = int(
current_stride * np.prod([k.stride for k in blocks])
)
self._out_feature_channels[name] = blocks[-1].out_channels
if num_classes is not None:
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.linear = nn.Linear(curr_channels, num_classes)
# Sec 5.1 in "Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour":
# "The 1000-way fully-connected layer is initialized by
# drawing weights from a zero-mean Gaussian with standard deviation of 0.01."
nn.init.normal_(self.linear.weight, std=0.01)
name = "linear"
if out_features is None:
out_features = [name]
self._out_features = out_features
assert len(self._out_features)
children = [x[0] for x in self.named_children()]
for out_feature in self._out_features:
assert out_feature in children, "Available children: {}".format(", ".join(children))
def forward(self, x):
outputs = {}
x = self.stem(x)
if "stem" in self._out_features:
outputs["stem"] = x
for stage, name in self.stages_and_names:
x = stage(x)
if name in self._out_features:
outputs[name] = x
if self.num_classes is not None:
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.linear(x)
if "linear" in self._out_features:
outputs["linear"] = x
return outputs
def output_shape(self):
return {
name: ShapeSpec(
channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]
)
for name in self._out_features
}
class CSPResNet(Backbone):
    """Cross Stage Partial ResNet backbone. Mirrors the plain ResNet above,
    except each stage is a single CSPStage module instead of a list of
    residual blocks."""
    def __init__(self, stem, stages, num_classes=None, out_features=None):
        """
        Args:
            stem (nn.Module): a stem module
            stages (list[CSPStage]): several (typically 4) stages,
                each contains multiple :class:`ResNetBlockBase`.
            num_classes (None or int): if None, will not perform classification.
            out_features (list[str]): name of the layers whose outputs should
                be returned in forward. Can be anything in "stem", "linear", or "res2" ...
                If None, will return the output of the last layer.
        """
        super(CSPResNet, self).__init__()
        self.stem = stem
        self.num_classes = num_classes
        current_stride = self.stem.stride
        # Per-feature stride/channel metadata, keyed by feature name.
        self._out_feature_strides = {"stem": current_stride}
        self._out_feature_channels = {"stem": self.stem.out_channels}
        self.stages_and_names = []
        for i, stage in enumerate(stages):
            curr_channels = stage.out_channels
            # Stages are conventionally named "res2", "res3", ...
            name = "res" + str(i + 2)
            self.add_module(name, stage)
            self.stages_and_names.append((stage, name))
            # Cumulative stride grows with each downsampling stage.
            self._out_feature_strides[name] = current_stride = int(
                current_stride * stage.stride)
            self._out_feature_channels[name] = stage.out_channels
        if num_classes is not None:
            self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
            self.linear = nn.Linear(curr_channels, num_classes)
            # Sec 5.1 in "Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour":
            # "The 1000-way fully-connected layer is initialized by
            # drawing weights from a zero-mean Gaussian with standard deviation of 0.01."
            nn.init.normal_(self.linear.weight, std=0.01)
            name = "linear"
        if out_features is None:
            # Default to returning only the last layer's output.
            out_features = [name]
        self._out_features = out_features
        assert len(self._out_features)
        children = [x[0] for x in self.named_children()]
        for out_feature in self._out_features:
            assert out_feature in children, "Available children: {}".format(", ".join(children))
    def forward(self, x):
        # Collect only the requested intermediate features.
        outputs = {}
        x = self.stem(x)
        if "stem" in self._out_features:
            outputs["stem"] = x
        for stage, name in self.stages_and_names:
            x = stage(x)
            if name in self._out_features:
                outputs[name] = x
        if self.num_classes is not None:
            # Classification head: pool -> flatten -> linear.
            x = self.avgpool(x)
            x = torch.flatten(x, 1)
            x = self.linear(x)
            if "linear" in self._out_features:
                outputs["linear"] = x
        return outputs
    def output_shape(self):
        # Channel/stride metadata for each requested feature.
        return {
            name: ShapeSpec(
                channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]
            )
            for name in self._out_features
        }
@BACKBONE_REGISTRY.register()
def build_resneth_backbone(cfg, input_shape):
    """
    Create a ResNet-h instance from config.

    Args:
        cfg: a detectron2-style config node (reads cfg.MODEL.RESNETH.*).
        input_shape: ShapeSpec of the input; only .channels is used.

    Returns:
        ResNet: a :class:`ResNet` instance.
    """
    # need registration of new blocks/stems?
    norm = cfg.MODEL.RESNETH.NORM
    stem = BasicStem(
        in_channels=input_shape.channels,
        out_channels=cfg.MODEL.RESNETH.STEM_OUT_CHANNELS,
        norm=norm,
    )
    freeze_at = cfg.MODEL.RESNETH.FREEZE_AT
    if freeze_at >= 1:
        # Freeze the stem and convert its BN layers to frozen batch norm.
        for p in stem.parameters():
            p.requires_grad = False
        stem = FrozenBatchNorm2d.convert_frozen_batchnorm(stem)
    # fmt: off
    out_features = cfg.MODEL.RESNETH.OUT_FEATURES
    depth = cfg.MODEL.RESNETH.DEPTH
    num_groups = cfg.MODEL.RESNETH.NUM_GROUPS
    # width_per_group = cfg.MODEL.RESNETH.WIDTH_PER_GROUP
    # Fixed per-stage channel plan for the "-h" (half-width) variant.
    bottleneck_channels = [32, 64, 128, 256]
    in_channels = [64, 128, 256, 512]
    out_channels = [128, 256, 512, 1024]
    stride_in_1x1 = cfg.MODEL.RESNETH.STRIDE_IN_1X1
    dilation_on_per_stage = cfg.MODEL.RESNETH.DILATION_ON_PER_STAGE
    deform_on_per_stage = cfg.MODEL.RESNETH.DEFORM_ON_PER_STAGE
    deform_modulated = cfg.MODEL.RESNETH.DEFORM_MODULATED
    deform_num_groups = cfg.MODEL.RESNETH.DEFORM_NUM_GROUPS
    # fmt: on
    # assert res5_dilation in {1, 2}, "res5_dilation cannot be {}.".format(res5_dilation)
    num_blocks_per_stage = {50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3]}[depth]
    stages = []
    # Avoid creating variables without gradients
    # It consumes extra memory and may cause allreduce to fail
    out_stage_idx = [{"res2": 2, "res3": 3, "res4": 4, "res5": 5}[f] for f in out_features]
    max_stage_idx = max(out_stage_idx)
    for idx, stage_idx in enumerate(range(2, max_stage_idx + 1)):
        # dilation = res5_dilation if stage_idx == 5 else 1
        # The first stage keeps stride 1; later stages downsample by 2.
        first_stride = 1 if idx == 0 else 2
        stage_kargs = {
            "num_blocks": num_blocks_per_stage[idx],
            "first_stride": first_stride,
            "in_channels": in_channels[idx],
            "bottleneck_channels": bottleneck_channels[idx],
            "out_channels": out_channels[idx],
            "num_groups": num_groups,
            "norm": norm,
            "stride_in_1x1": stride_in_1x1,
            "dilation": dilation_on_per_stage[idx],
        }
        # Swap in deformable-conv blocks for stages configured to use them.
        if deform_on_per_stage[idx]:
            stage_kargs["block_class"] = DeformBottleneckBlock
            stage_kargs["deform_modulated"] = deform_modulated
            stage_kargs["deform_num_groups"] = deform_num_groups
        else:
            stage_kargs["block_class"] = BottleneckBlock
        blocks = make_stage(**stage_kargs)
        if freeze_at >= stage_idx:
            # Freeze every block up to and including this stage.
            for block in blocks:
                block.freeze()
        stages.append(blocks)
    return ResNet(stem, stages, out_features=out_features)
@BACKBONE_REGISTRY.register()
def build_cspresneth_backbone(cfg, input_shape):
    """
    Create a CSP-ResNet-h instance from config.

    Returns:
        CSPResNet: a :class:`CSPResNet` instance.
    """
    norm = cfg.MODEL.RESNETH.NORM
    stem = BasicStem(
        in_channels=input_shape.channels,
        out_channels=cfg.MODEL.RESNETH.STEM_OUT_CHANNELS,
        norm=norm,
    )
    freeze_at = cfg.MODEL.RESNETH.FREEZE_AT
    # Freeze the stem (and convert its BN layers) when requested.
    if freeze_at >= 1:
        for param in stem.parameters():
            param.requires_grad = False
        stem = FrozenBatchNorm2d.convert_frozen_batchnorm(stem)
    # fmt: off
    out_features          = cfg.MODEL.RESNETH.OUT_FEATURES
    depth                 = cfg.MODEL.RESNETH.DEPTH
    num_groups            = cfg.MODEL.RESNETH.NUM_GROUPS
    stride_in_1x1         = cfg.MODEL.RESNETH.STRIDE_IN_1X1
    dilation_on_per_stage = cfg.MODEL.RESNETH.DILATION_ON_PER_STAGE
    deform_on_per_stage   = cfg.MODEL.RESNETH.DEFORM_ON_PER_STAGE
    deform_modulated      = cfg.MODEL.RESNETH.DEFORM_MODULATED
    deform_num_groups     = cfg.MODEL.RESNETH.DEFORM_NUM_GROUPS
    # fmt: on
    # Fixed per-stage channel schedule of the "h" variant (res2..res5).
    bottleneck_channels = [32, 64, 128, 256]
    in_channels = [64, 128, 256, 512]
    out_channels = [128, 256, 512, 1024]
    num_blocks_per_stage = {50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3]}[depth]
    # Build only the stages needed for the requested outputs. Unused stages
    # would create parameters without gradients, wasting memory and possibly
    # breaking allreduce.
    stage_name_to_idx = {"res2": 2, "res3": 3, "res4": 4, "res5": 5}
    max_stage_idx = max(stage_name_to_idx[f] for f in out_features)
    stages = []
    for idx, stage_idx in enumerate(range(2, max_stage_idx + 1)):
        stage_kargs = {
            "num_blocks": num_blocks_per_stage[idx],
            # The first stage keeps the stem's resolution; later ones halve it.
            "first_stride": 1 if idx == 0 else 2,
            "in_channels": in_channels[idx],
            "bottleneck_channels": bottleneck_channels[idx],
            "out_channels": out_channels[idx],
            "num_groups": num_groups,
            "norm": norm,
            "stride_in_1x1": stride_in_1x1,
            "dilation": dilation_on_per_stage[idx],
        }
        if deform_on_per_stage[idx]:
            stage_kargs["block_class"] = DeformBottleneckBlock
            stage_kargs["deform_modulated"] = deform_modulated
            stage_kargs["deform_num_groups"] = deform_num_groups
        else:
            stage_kargs["block_class"] = BottleneckBlock
        csp_stage = CSPStage(**stage_kargs)
        if freeze_at >= stage_idx:
            csp_stage.freeze()
        stages.append(csp_stage)
    return CSPResNet(stem, stages, out_features=out_features)
class LastLevelMaxPool(nn.Module):
    """
    Generates the extra P6 map used by the original FPN by downsampling
    the P5 output with a stride-2 max pool.
    """

    def __init__(self):
        super().__init__()
        # One extra pyramid level, derived from the "p5" FPN output.
        self.num_levels = 1
        self.in_feature = "p5"

    def forward(self, x):
        pooled = F.max_pool2d(x, kernel_size=1, stride=2, padding=0)
        return [pooled]
class LastLevelP6P7(nn.Module):
    """
    Generates the two extra RetinaNet pyramid levels, P6 and P7, from the
    C5 backbone feature.
    """

    def __init__(self, in_channels, out_channels, in_feature="res5"):
        super().__init__()
        self.num_levels = 2
        self.in_feature = in_feature
        # Two stride-2 3x3 convs: P6 from C5, then P7 from relu(P6).
        self.p6 = nn.Conv2d(in_channels, out_channels, 3, 2, 1)
        self.p7 = nn.Conv2d(out_channels, out_channels, 3, 2, 1)
        for conv in (self.p6, self.p7):
            weight_init.c2_xavier_fill(conv)

    def forward(self, c5):
        p6 = self.p6(c5)
        p7 = self.p7(F.relu_(p6))
        return [p6, p7]
class FPN_resneth(Backbone):
"""
This module implements Feature Pyramid Network.
It creates pyramid features built on top of some input feature maps.
"""
def __init__(
self, bottom_up, in_features, out_channels, norm="", top_block=None, fuse_type="sum"
):
"""
Args:
bottom_up (Backbone): module representing the bottom up subnetwork.
Must be a subclass of :class:`Backbone`. The multi-scale feature
maps generated by the bottom up network, and listed in `in_features`,
are used to generate FPN levels.
in_features (list[str]): names of the input feature maps coming
from the backbone to which FPN is attached. For example, if the
backbone produces ["res2", "res3", "res4"], any *contiguous* sublist
of these may be used; order must be from high to low resolution.
out_channels (int): number of channels in the output feature maps.
norm (str): the normalization to use.
top_block (nn.Module or None): if provided, an extra operation will
be performed on the output of the last (smallest resolution)
FPN output, and the result will extend the result list. The top_block
further downsamples the feature map. It must have an attribute
"num_levels", meaning the number of extra FPN levels added by
this block, and "in_feature", which is a string representing
its input feature (e.g., p5).
fuse_type (str): types for fusing the top down features and the lateral
ones. It | |
needed to
work.
"""
for line in self:
yield line
def __delitem__(self, idx):
    """Delete a line or slice of lines from the buffer.

    Equivalent to assigning ``None`` over the same index, which the
    remote buffer API treats as a deletion.
    """
    self[idx] = None
def __ne__(self, other):
    """Return the inverse of ``__eq__``, for Python 2 compatibility.

    Propagates ``NotImplemented`` instead of negating it, so Python can
    fall back to the reflected comparison on ``other``; negating it
    (``not NotImplemented`` is False) would silently report equality.
    """
    result = self.__eq__(other)
    if result is NotImplemented:
        return result
    return not result
def append(self, lines, index=-1):
    """Append a string, bytes, or list of lines to the buffer.

    A single ``str`` or ``bytes`` payload is split on newlines so each
    line becomes its own buffer line. The previous implementation split
    ``bytes`` with a ``str`` separator, raising TypeError.
    """
    if isinstance(lines, bytes):
        lines = lines.split(b"\n")
    elif isinstance(lines, str):
        lines = lines.split("\n")
    return self.request("nvim_buf_set_lines", index, index, True, lines)
def __iadd__(self, lines, index=-1):
    """In-place ``+=``: append ``lines`` and return the buffer itself.

    ``__iadd__`` must return the resulting object; the previous version
    returned None, so ``buf += lines`` rebound ``buf`` to None.
    """
    self.append(lines, index)
    return self
def mark(self, name):
    """Return the (row, col) tuple of the named mark."""
    position = self.request("nvim_buf_get_mark", name)
    return position
def range(self, start=None, end=None):
    """Return a `Range` object representing part of the Buffer.

    Omitted bounds default to the whole buffer.
    """
    first = 0 if start is None else start
    last = len(self) if end is None else end
    return Range(self, first, last)
def add_highlight(
    self, hl_group, line, col_start=0, col_end=-1, src_id=-1, async_=None, **kwargs
):
    """Add a highlight to the buffer.

    Defaults to an async request unless src_id is 0, in which case the
    server allocates the id and the reply is needed.
    """
    async_ = check_async(async_, kwargs, src_id != 0)
    args = (src_id, hl_group, line, col_start, col_end)
    return self.request("nvim_buf_add_highlight", *args, async_=async_)
def clear_highlight(self, src_id, line_start=0, line_end=-1, async_=None, **kwargs):
    """Clear highlights from the buffer (async by default)."""
    async_ = check_async(async_, kwargs, True)
    args = (src_id, line_start, line_end)
    self.request("nvim_buf_clear_highlight", *args, async_=async_)
def update_highlights(
    self, src_id, hls, clear_start=0, clear_end=-1, clear=False, async_=True
):
    """Add or update highlights in batch to avoid unnecessary redraws.

    `src_id` must have been allocated beforehand (e.g. via
    ``nvim.new_highlight_source()``).

    `hls` is a list of highlight items, each of the form
    ``("GroupName", linenr, col_start, col_end)`` or ``("GroupName",
    linenr)`` to highlight an entire line.

    Existing highlights are preserved by default. Pass a line range via
    `clear_start`/`clear_end` to replace highlights in that range, or
    ``clear=True`` as shorthand for clearing the whole buffer first.
    """
    if clear and clear_start is None:
        clear_start = 0
    lua = self._session._get_lua_private()
    lua.update_highlights(self, src_id, hls, clear_start, clear_end, async_=async_)
@property
def name(self):
    """The buffer's name."""
    return self.request("nvim_buf_get_name")

@name.setter
def name(self, value):
    """Rename the buffer; fires BufFilePre/BufFilePost autocommands."""
    return self.request("nvim_buf_set_name", value)
@property
def valid(self):
    """Whether the buffer still exists on the server."""
    return self.request("nvim_buf_is_valid")
@property
def number(self):
    """The buffer number (identical to the remote handle)."""
    return self.handle
class Range(object):
    """A view over a contiguous span of Buffer lines.

    Bounds are given 1-based (matching Vim) and stored 0-based; indexing
    into the range is relative to its start and clamped to its end.
    """

    def __init__(self, buffer=None, start=1, end=None):
        """Give all parameters defaults matching Vim's conventions.

        Parameters
        ----------
        start : int
            1-based first line of the range.
        end : int
            1-based last line; defaults to the buffer length.
        """
        if buffer is None:
            buffer = vim.current.buffer
        self._buffer = buffer
        self.start = start - 1
        # NOTE(review): the default end is NOT shifted by one, unlike the
        # explicit case — preserved as-is from the original.
        self.end = len(self._buffer) if end is None else end - 1

    def __len__(self):
        return self.end - self.start + 1

    def __getitem__(self, idx):
        if isinstance(idx, slice):
            lo = self._normalize_index(idx.start)
            hi = self._normalize_index(idx.stop)
            lo = self.start if lo is None else lo
            hi = self.end + 1 if hi is None else hi
            return self._buffer[lo:hi]
        return self._buffer[self._normalize_index(idx)]

    def __setitem__(self, idx, lines):
        if isinstance(idx, slice):
            lo = self._normalize_index(idx.start)
            hi = self._normalize_index(idx.stop)
            lo = self.start if lo is None else lo
            hi = self.end if hi is None else hi
            self._buffer[lo:hi + 1] = lines
        else:
            self._buffer[self._normalize_index(idx)] = lines

    def __iter__(self):
        for line_nr in range(self.start, self.end + 1):
            yield self._buffer[line_nr]

    def append(self, lines, i=None):
        at = self._normalize_index(i)
        if at is None:
            at = self.end + 1
        self._buffer.append(lines, at)

    def _normalize_index(self, index):
        """Map a range-relative index to an absolute, clamped one."""
        if index is None:
            return None
        # Any negative index maps to the last line of the range.
        absolute = self.end if index < 0 else index + self.start
        return min(absolute, self.end)
# API/common:
class RemoteApi(object):
    """Expose remote msgpack-rpc API methods as plain attribute calls.

    Attribute access yields a callable that issues ``request`` on the
    wrapped object with the prefixed method name. Bound to the Neovim
    class at `api`.
    """

    def __init__(self, obj, api_prefix="nvim_"):
        """Wrap `obj` (anything exposing a ``request`` method).

        Parameters
        ----------
        api_prefix : str
            Prepended to every attribute name before dispatch; defaults
            to ``"nvim_"`` to cut down on typing.
        """
        self._obj = obj
        self._api_prefix = api_prefix

    def __getattr__(self, name):
        """Return a partial that requests the prefixed method."""
        method = self._api_prefix + name
        return functools.partial(self._obj.request, method)

    def __repr__(self):
        """A slightly more useful repr than the default."""
        return f"<{self.__class__.__name__}:> - Bound to {self._obj}"
def transform_keyerror(exc):
    """Translate Nvim lookup errors into standard Python exceptions.

    Callers use this as ``raise transform_keyerror(exc)``, so this
    function RETURNS the exception to raise rather than raising it.
    The previous version raised from inside the helper and, for any
    unrecognized error, fell off the end returning None — making the
    caller execute ``raise None`` (a TypeError) and mask the original
    failure. Returned types match what callers previously observed:
    AttributeError for missing keys, KeyError for invalid options.
    """
    if isinstance(exc, NvimError):
        message = exc.args[0]
        if message.startswith("Key not found:"):
            return AttributeError(message)
        if message.startswith("Invalid option name:"):
            return KeyError(message)
    # Unknown error: hand it back unchanged so the caller re-raises it.
    return exc
class RemoteMap(MutableMapping):
    """Represents a string->object map stored in Nvim.

    Dict counterpart to the `RemoteSequence` class: a generic way of
    retrieving values from the various map-like data structures in Nvim.
    Used to provide a dict-like API to vim variables and options; every
    access is forwarded to remote API methods supplied at construction.

    NOTE(review): `MutableMapping` also requires `__iter__` and
    `__len__`, which are not defined here, so instantiating this class
    directly would raise TypeError — presumably subclasses provide them;
    verify against the rest of the module.
    """
    # Setter/deleter are optional; None marks the map read-only for that
    # operation (see __setitem__/__delitem__).
    _set = None
    _del = None
    def __init__(self, obj: Any, get_method, set_method=None, del_method=None):
        """Initialize a RemoteMap with session, getter/setter.

        `obj` must expose a ``request(method, ...)`` callable; the given
        method names are bound as partials for get/set/delete.
        """
        self._get = functools.partial(obj.request, get_method)
        if set_method:
            self._set = functools.partial(obj.request, set_method)
        if del_method:
            self._del = functools.partial(obj.request, del_method)
        self.obj = obj
    def __getitem__(self, key):
        """Return a map value by key.

        NvimError is translated via `transform_keyerror`; see that
        function for the exception types callers may observe.
        """
        try:
            return self._get(key)
        except NvimError as exc:
            raise transform_keyerror(exc)
    def __setitem__(self, key, value):
        """Set a map value by key (if a setter was provided)."""
        if not self._set:
            raise TypeError("This dict is read-only")
        self._set(key, value)
    def __delitem__(self, key):
        """Delete a map entry (if a deleter was provided)."""
        if not self._del:
            raise TypeError("This dict is read-only")
        try:
            return self._del(key)
        except NvimError as exc:
            raise transform_keyerror(exc)
    def __contains__(self, key):
        """Check if key is present in the map.

        Any failure (not just lookup errors) is reported as "absent".
        """
        try:
            self._get(key)
            return True
        except Exception:
            return False
    def get(self, key, default=None):
        """Return value for key if present, else a default value.

        NOTE(review): only KeyError is caught; a "Key not found" error
        surfaces as AttributeError from __getitem__ and would propagate
        here — confirm whether that is intended.
        """
        try:
            return self.__getitem__(key)
        except KeyError:
            return default
    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self.obj)
class RemoteSequence(UserList):
    """Represents a sequence of objects stored in Nvim.

    Wraps msgpack-rpc functions that return sequences (lines, buffers,
    windows, tabpages) with a list-like API: e.g. ``nvim.windows[0]``
    translates to ``session.request('nvim_list_wins')[0]``.

    Every operation fetches the whole remote sequence and manipulates it
    locally (indexing, iteration, counting, etc.).

    NOTE(review): ``UserList.__init__`` is never called, so inherited
    methods that rely on ``self.data`` (e.g. ``__repr__``) will fail —
    confirm whether the UserList base is still wanted.

    Attributes
    ----------
    `_fetch` : functools.partial
        Zero-argument callable returning the current remote sequence.
    """

    def __init__(self, session, method):
        """Bind `method` on `session` (anything exposing ``request``)."""
        self._fetch = functools.partial(session.request, method)

    def __len__(self):
        """Return the length of the remote sequence."""
        return len(self._fetch())

    def __getitem__(self, idx):
        """Return an item, or a sub-list for slices.

        Slices are applied directly to the fetched list, so ``step`` is
        honored too (the previous implementation silently ignored it).
        """
        return self._fetch()[idx]

    def __iter__(self):
        """Iterate over a snapshot of the sequence."""
        return iter(self._fetch())

    def __contains__(self, item):
        """Check if an item is present in the sequence."""
        return item in self._fetch()
def _walk(f, obj=None, *args, **kwargs):
    """Apply `f` to the flattened contents of `obj`.

    Returns None when `obj` is None; raises TypeError when `obj` is not
    iterable. (TODO: test)
    """
    if obj is None:
        return None
    if not hasattr(obj, "__iter__"):
        raise TypeError
    flattened = itertools.chain.from_iterable(obj)
    return f(flattened, *args, **kwargs)
# Default leaf transform for `walk`-style callbacks: the extra
# session/method/kind arguments are accepted only to match the expected
# callback signature, and are ignored.
def _identity(obj, session, method, kind):
    return obj
def walk(fn, obj, *args, **kwargs):
    """Recursively walk lists/tuples/dicts applying `fn` to the leaves.

    Lists and tuples are both rebuilt as lists; dict keys and values are
    walked independently. `*args`/`**kwargs` are forwarded to `fn` at
    every leaf — the previous version dropped keyword arguments below
    the first level of nesting.
    """
    if type(obj) in (list, tuple):
        return [walk(fn, item, *args, **kwargs) for item in obj]
    if type(obj) is dict:
        return {
            walk(fn, key, *args, **kwargs): walk(fn, value, *args, **kwargs)
            for key, value in obj.items()
        }
    return fn(obj, *args, **kwargs)
# msgpack_rpc.msgpack_stream:
class MsgpackStream(object):
"""Two-way msgpack stream that wraps a event loop byte stream.
| |
%s' % \
(options.relog_name, config.PhaseStr(config.filter_user_defn)))
util.PhaseBegin(options)
result = self.phases.RelogWholeName(self.replay_cmd, wp_log_dir, relog_wp_dir, options)
if not options.list:
msg.PrintMsgDate('Finished filtering whole program pinballs with user defined name: %s %s' % \
(options.relog_name, config.PhaseStr(config.filter_user_defn)))
util.CheckResult(result, options, 'Filtering WP pinballs with user defined name: %s %s' % \
(options.relog_name, config.PhaseStr(config.filter_user_defn)))
# No errors, commit to using the new pinballs.
#
wp_log_dir = relog_wp_dir
FinalizeWPDir(relog_wp_dir, 'Whole program pinball(s) filtered with user defined name: ' + options.relog_name)
# Relog with a focus thread.
#
if options.use_relog_focus or options.relog_focus:
relog_wp_dir = util.GetRelogPhaseDir(wp_log_dir, config.RELOG_FOCUS, options)
if options.relog_focus:
if not options.list:
msg.PrintMsgDate('Filtering whole program pinballs with a focus thread %s' % \
config.PhaseStr(config.filter_focus_thread))
# import pdb; pdb.set_trace()
util.PhaseBegin(options)
result = self.phases.RelogWholeFocus(self.replay_cmd, wp_log_dir, relog_wp_dir, options)
if not options.list:
msg.PrintMsgDate('Finished filtering whole program pinballs with a focus thread %s' % \
config.PhaseStr(config.filter_focus_thread))
util.CheckResult(result, options, 'Filtering WP pinballs with focus thread %s' % \
config.PhaseStr(config.filter_focus_thread))
# No errors, commit to using the new pinballs.
#
wp_log_dir = relog_wp_dir
FinalizeWPDir(relog_wp_dir, 'Whole program pinball(s) filtered with a focus thread')
# If pinballs were relogged with a focus thread, then in the
# remaining phases the focus thread must be 0. Relogging generates
# per thread whole program pinballs (which only have thread 0). To
# enforce this, we change the focus_thread in the config object to
# be 0.
#
config.focus_thread = 0
# Also need to set options.use_relog_focus = True because we are will be using
# WP pinballs which have been relogged with a focus thread.
#
options.use_relog_focus = True
# Relog to remove initialization instructions. Do this before removing cleanup or
# MPI spin instructions.
#
# import pdb; pdb.set_trace()
if options.use_relog_no_init or options.relog_no_init:
relog_wp_dir = util.GetRelogPhaseDir(wp_log_dir, config.RELOG_NO_INIT, options)
if options.relog_no_init:
if not options.list:
msg.PrintMsgDate('Filtering whole program pinballs to remove initialization instructions %s' % \
config.PhaseStr(config.filter_init))
util.PhaseBegin(options)
result = self.phases.RelogWholeRemoveInit(self.replay_cmd, wp_log_dir, relog_wp_dir, options)
if not options.list:
msg.PrintMsgDate('Finished filtering whole program pinballs to remove initialization instructions %s' % \
config.PhaseStr(config.filter_init))
util.CheckResult(result, options, 'Filtering WP pinballs to remove init instructions %s' % \
config.PhaseStr(config.filter_init))
# No errors, commit to using the new pinballs.
#
wp_log_dir = relog_wp_dir
FinalizeWPDir(relog_wp_dir, 'Whole program pinball(s) filtered to remove initialization instructions')
# Relog to remove cleanup instructions. Do this before removing MPI spin
# instructions.
#
# import pdb; pdb.set_trace()
if options.use_relog_no_cleanup or options.relog_no_cleanup:
relog_wp_dir = util.GetRelogPhaseDir(wp_log_dir, config.RELOG_NO_CLEANUP, options)
if options.relog_no_cleanup:
if not options.list:
msg.PrintMsgDate('Filtering whole program pinballs to remove cleanup instructions %s' % \
config.PhaseStr(config.filter_cleanup))
util.PhaseBegin(options)
result = self.phases.RelogWholeRemoveCleanup(self.replay_cmd, wp_log_dir, relog_wp_dir, options)
if not options.list:
msg.PrintMsgDate('Finished filtering whole program pinballs to remove cleanup instructions %s' % \
config.PhaseStr(config.filter_cleanup))
util.CheckResult(result, options, 'Filtering WP pinballs to remove cleanup instructions %s' % \
config.PhaseStr(config.filter_cleanup))
# No errors, commit to using the new pinballs.
#
wp_log_dir = relog_wp_dir
FinalizeWPDir(relog_wp_dir, 'Whole program pinball(s) filtered to remove cleanup instructions')
# Relog to exclude code (instructions) between two addresses. Do this
# before removing MPI spin instructions.
#
# import pdb; pdb.set_trace()
if options.use_relog_code_exclude != '' or options.relog_code_exclude != '':
relog_wp_dir = util.GetRelogPhaseDir(wp_log_dir, config.RELOG_CODE_EXCLUDE, options)
if options.relog_code_exclude != '':
if not options.list:
msg.PrintMsgDate('Filtering whole program pinballs with code exclusion %s' % \
config.PhaseStr(config.filter_code_exclude))
util.PhaseBegin(options)
result = self.phases.RelogWholeCodeExclude(self.replay_cmd, wp_log_dir, relog_wp_dir, options)
if not options.list:
msg.PrintMsgDate('Finished filtering whole program pinballs with code exclusion %s' % \
config.PhaseStr(config.filter_code_exclude))
util.CheckResult(result, options, 'Filtering WP pinballs with code exclusion %s' % \
config.PhaseStr(config.filter_code_exclude))
# No errors, commit to using the new pinballs.
#
wp_log_dir = relog_wp_dir
FinalizeWPDir(relog_wp_dir, 'Whole program pinball(s) filtered with code exclusion')
# Relog to remove OpenMP spin instructions.
#
# import pdb; pdb.set_trace()
if options.use_relog_no_omp_spin or options.relog_no_omp_spin:
relog_wp_dir = util.GetRelogPhaseDir(wp_log_dir, config.RELOG_NO_OMP_SPIN, options)
if options.relog_no_omp_spin:
if not options.list:
msg.PrintMsgDate('Filtering whole program pinballs to remove OpenMP spin instructions %s' % \
config.PhaseStr(config.filter_OMP_spin))
util.PhaseBegin(options)
result = self.phases.RelogWholeRemoveOMPSpin(self.replay_cmd, wp_log_dir, relog_wp_dir, options)
if not options.list:
msg.PrintMsgDate('Finished filtering whole program pinballs to remove OpenMP spin instructions %s' % \
config.PhaseStr(config.filter_OMP_spin))
util.CheckResult(result, options, 'Filtering WP pinballs to remove OpenMP spin instructions %s' % \
config.PhaseStr(config.filter_OMP_spin))
# No errors, commit to using the new pinballs.
#
wp_log_dir = relog_wp_dir
FinalizeWPDir(relog_wp_dir, 'Whole program pinball(s) filtered to remove OpenMP spin instructions')
# Relog to remove MPI spin instructions.
#
# import pdb; pdb.set_trace()
if options.use_relog_no_mpi_spin or options.relog_no_mpi_spin:
relog_wp_dir = util.GetRelogPhaseDir(wp_log_dir, config.RELOG_NO_MPI_SPIN, options)
if options.relog_no_mpi_spin:
if not options.list:
msg.PrintMsgDate('Filtering whole program pinballs to remove MPI spin instructions %s' % \
config.PhaseStr(config.filter_MPI_spin))
util.PhaseBegin(options)
result = self.phases.RelogWholeRemoveMPISpin(self.replay_cmd, wp_log_dir, relog_wp_dir, options)
if not options.list:
msg.PrintMsgDate('Finished filtering whole program pinballs to remove MPI spin instructions %s' % \
config.PhaseStr(config.filter_MPI_spin))
util.CheckResult(result, options, 'Filtering WP pinballs to remove MPI spin instructions %s' % \
config.PhaseStr(config.filter_MPI_spin))
# No errors, commit to using the new pinballs.
#
wp_log_dir = relog_wp_dir
FinalizeWPDir(relog_wp_dir, 'Whole program pinball(s) filtered to remove MPI spin instructions')
if not options.list:
msg.PrintMsgPlus('Using whole program pinballs in dir: ' + wp_log_dir)
if (cmd_options.UseRelogOptionsSet(options) or \
cmd_options.RelogOptionsSet(options)) and \
os.path.isdir(wp_log_dir):
msg.PrintMsg('')
util.PrintInstrCount(wp_log_dir, options)
#########################################################################
#
# These phases are run with the whole progam pinballs defined/generated
# in the previous phases.
#
#########################################################################
# Make sure any relogged whole program pinball directory exist. Exit with an
# error if it does not exist.
#
# import pdb; pdb.set_trace()
if not os.path.isdir(wp_log_dir) and not options.list and not options.debug and\
not options.delete and not options.delete_all and not options.delete_wp and \
not options.native_pure and not options.native_pin:
string ='ERROR: Can\'t proceed because the whole program pinball directory does not exist:\n' + \
' ' + wp_log_dir
msg.PrintMsg(string)
util.CheckResult(-1, options, 'Second check to see if WP pinballs exist') # Use -1 to force check to fail
# Do not run replay whole program pinballs as one of the default
# phases. The user must explicitly include this option. This is to
# save time during the tracing process.
#
if options.replay:
if not options.list:
msg.PrintMsgDate('Replaying all whole program pinballs %s' % \
config.PhaseStr(config.replay_whole))
util.PhaseBegin(options)
result = self.phases.Replay(self.replay_cmd, wp_log_dir, options)
if not options.list:
msg.PrintMsgDate('Finished replaying all whole program pinballs %s' % \
config.PhaseStr(config.replay_whole))
util.CheckResult(result, options, 'Replay of whole program pinballs %s' % \
config.PhaseStr(config.replay_whole))
# Generate basic block vectors.
#
if options.basic_block_vector or options.default_phases:
if not options.list:
msg.PrintMsgDate('Generating basic block vectors %s' % \
config.PhaseStr(config.gen_BBV))
util.PhaseBegin(options)
result = self.phases.BasicBlockVector(self.replay_cmd, wp_log_dir, options)
if result == 0:
result = util.WaitJobs(options)
if not options.list:
msg.PrintMsgDate('Finished basic block vector generation %s' % \
config.PhaseStr(config.gen_BBV))
util.CheckResult(result, options, 'Basic block vector generation %s' % \
config.PhaseStr(config.gen_BBV))
# Run Simpoints to genenerate representative regions.
#
if options.simpoint or options.default_phases:
if not options.list:
msg.PrintMsgDate('Running Simpoint on all processes %s' % \
config.PhaseStr(config.Simpoint))
# Setup dictionary of parameters for method RunAllDir()
#
param = {'options': options}
util.PhaseBegin(options)
result = util.RunAllDir(wp_log_dir, self.phases.RunSimPoint, True, param)
if not options.list:
msg.PrintMsgDate('Finished running Simpoint for all processes %s' % \
config.PhaseStr(config.Simpoint))
util.CheckResult(result, options, 'Simpoints generation %s' % \
config.PhaseStr(config.Simpoint))
# Relog to generate representative region pinballs.
#
if options.region_pinball or options.default_phases:
if not options.list:
msg.PrintMsgDate('Generating region pinballs %s' % \
config.PhaseStr(config.relog_regions))
util.PhaseBegin(options)
result = self.phases.GenAllRegionPinballs(wp_log_dir, self.replayer_cmd, options)
if result == 0:
if not options.list:
msg.PrintMsgPlus('Waiting on final concurrent region pinball generation')
result = util.WaitJobs(options)
if not options.list:
msg.PrintMsgDate('Finished generating region pinballs %s' % \
config.PhaseStr(config.relog_regions))
util.CheckResult(result, options, 'Region pinball generation %s' %\
config.PhaseStr(config.relog_regions))
# Do not run replay region pinballs as one of the default phases. The
# user must explicitly include this option. This is to save time
# during the tracing process.
#
if options.replay_region:
result = 0
if not options.list:
msg.PrintMsgDate('Replaying all region pinballs %s' % \
config.PhaseStr(config.replay_regions))
# import pdb; pdb.set_trace()
util.PhaseBegin(options)
for pp_dir in util.GetRegionPinballDir():
# Accumulate any errors which occur, but don't check for errors
# until all the pinballs have been replayed.
#
r = self.phases.Replay(self.replay_cmd, pp_dir, options)
result = result or r
if not options.list:
msg.PrintMsgDate('Finished replaying all region pinballs %s' % \
config.PhaseStr(config.replay_regions))
util.CheckResult(result, options, 'Replay of region pinballs %s' % \
config.PhaseStr(config.replay_regions))
result = 0 # Remove the return values from replaying region pinballs
# If there | |
if not (min_size < output_size[d] < max_size):
raise ValueError(
'invalid output_size "{}" (dim {} must be between {} and {})'
.format(output_size, d, min_size, max_size))
return output_size
def max_unpool1d(input, indices, kernel_size, stride=None, padding=0,
                 output_size=None):
    """Partial inverse of max_pool1d; see :class:`~torch.nn.MaxUnpool1d`.

    Implemented via the 2D kernel on a trailing singleton dimension.
    """
    size_1d = _unpool_output_size(
        input, _single(kernel_size), _single(stride), _single(padding),
        output_size,
    )
    unpool = _functions.thnn.MaxUnpool2d(size_1d + [1])
    return unpool(input.unsqueeze(3), indices.unsqueeze(3)).squeeze(3)
def max_unpool2d(input, indices, kernel_size, stride=None, padding=0,
                 output_size=None):
    """Partial inverse of max_pool2d; see :class:`~torch.nn.MaxUnpool2d`."""
    size_2d = _unpool_output_size(
        input, _pair(kernel_size), _pair(stride), _pair(padding), output_size
    )
    unpool = _functions.thnn.MaxUnpool2d(size_2d)
    return unpool(input, indices)
def max_unpool3d(input, indices, kernel_size, stride=None, padding=0,
                 output_size=None):
    """Partial inverse of max_pool3d; see :class:`~torch.nn.MaxUnpool3d`."""
    stride_3d = _triple(stride)
    padding_3d = _triple(padding)
    size_3d = _unpool_output_size(
        input, _triple(kernel_size), stride_3d, padding_3d, output_size
    )
    unpool = _functions.thnn.MaxUnpool3d(size_3d, stride_3d, padding_3d)
    return unpool(input, indices)
def lp_pool2d(input, norm_type, kernel_size, stride=None, ceil_mode=False):
    """Power-average pooling: (sum over window of x^p)^(1/p).

    avg_pool2d divides by the window area, so the result is multiplied
    back by ``kw * kh`` before taking the 1/p root.
    """
    kw, kh = utils._pair(kernel_size)
    pooled = avg_pool2d(input.pow(norm_type), kernel_size, stride, 0, ceil_mode)
    return pooled.mul(kw * kh).pow(1. / norm_type)
def adaptive_max_pool1d(input, output_size, return_indices=False):
    r"""Applies a 1D adaptive max pooling over an input signal composed of
    several input planes.

    See :class:`~torch.nn.AdaptiveMaxPool1d` for details and output shape.

    Args:
        output_size: the target output size (single integer)
        return_indices: whether to return pooling indices
    """
    pool = _functions.thnn.AdaptiveMaxPool1d(output_size, return_indices)
    return pool(input)
def adaptive_max_pool2d(input, output_size, return_indices=False):
    r"""Applies a 2D adaptive max pooling over an input signal composed of
    several input planes.

    See :class:`~torch.nn.AdaptiveMaxPool2d` for details and output shape.

    Args:
        output_size: the target output size (single integer or double-integer tuple)
        return_indices: whether to return pooling indices
    """
    pool = _functions.thnn.AdaptiveMaxPool2d(output_size, return_indices)
    return pool(input)
def adaptive_avg_pool1d(input, output_size):
    r"""Applies a 1D adaptive average pooling over an input signal composed of
    several input planes.

    See :class:`~torch.nn.AdaptiveAvgPool1d` for details and output shape.

    Args:
        output_size: the target output size (single integer)
    """
    pool = _functions.thnn.AdaptiveAvgPool1d(output_size)
    return pool(input)
def adaptive_avg_pool2d(input, output_size):
    r"""Applies a 2D adaptive average pooling over an input signal composed of
    several input planes.

    See :class:`~torch.nn.AdaptiveAvgPool2d` for details and output shape.

    Args:
        output_size: the target output size (single integer or double-integer tuple)
    """
    pool = _functions.thnn.AdaptiveAvgPool2d(output_size)
    return pool(input)
# Activation functions
def dropout(input, p=0.5, training=False, inplace=False):
    """Dropout; see :class:`~torch.nn.Dropout`. Note `training` defaults
    to False here."""
    drop = _functions.dropout.Dropout(p, training, inplace)
    return drop(input)
def threshold(input, threshold, value, inplace=False):
    """Thresholding; see :class:`~torch.nn.Threshold`."""
    fn = _functions.thnn.auto.Threshold(threshold, value, inplace)
    return fn(input)
def relu(input, inplace=False):
    """Rectified linear unit, implemented as ``Threshold(0, 0)``."""
    fn = _functions.thnn.auto.Threshold(0, 0, inplace)
    return fn(input)
def hardtanh(input, min_val=-1., max_val=1., inplace=False):
    """Clamp into [min_val, max_val]; see :class:`~torch.nn.Hardtanh`."""
    fn = _functions.thnn.auto.Hardtanh(min_val, max_val, inplace)
    return fn(input)
def relu6(input, inplace=False):
    """ReLU capped at 6, implemented as ``Hardtanh(0, 6)``."""
    fn = _functions.thnn.auto.Hardtanh(0, 6, inplace)
    return fn(input)
def elu(input, alpha=1., inplace=False):
    """Exponential linear unit; see :class:`~torch.nn.ELU`."""
    fn = _functions.thnn.auto.ELU(alpha, inplace)
    return fn(input)
def leaky_relu(input, negative_slope=1e-2, inplace=False):
    """Leaky ReLU; see :class:`~torch.nn.LeakyReLU`."""
    fn = _functions.thnn.auto.LeakyReLU(negative_slope, inplace)
    return fn(input)
def prelu(input, weight):
    """Parametric ReLU with learnable slope `weight`; see :class:`~torch.nn.PReLU`."""
    fn = _functions.thnn.PReLU()
    return fn(input, weight)
def rrelu(input, lower=1. / 8, upper=1. / 3, training=False, inplace=False):
    """Randomized leaky ReLU; see :class:`~torch.nn.RReLU`."""
    fn = _functions.thnn.RReLU(lower, upper, training, inplace)
    return fn(input)
def logsigmoid(input):
    """log(sigmoid(x)); see :class:`~torch.nn.LogSigmoid`."""
    fn = _functions.thnn.LogSigmoid()
    return fn(input)
def hardshrink(input, lambd=0.5):
    """Hard shrinkage; see :class:`~torch.nn.Hardshrink`."""
    fn = _functions.thnn.auto.Hardshrink(lambd)
    return fn(input)
def tanhshrink(input):
    """Elementwise ``x - tanh(x)``."""
    tanh_of_input = _autograd_functions.Tanh()(input)
    return input - tanh_of_input
def softsign(input):
    """Softsign activation; see :class:`~torch.nn.Softsign`."""
    fn = _functions.activation.Softsign()
    return fn(input)
def softplus(input, beta=1, threshold=20):
    """Softplus activation; see :class:`~torch.nn.Softplus`."""
    fn = _functions.thnn.auto.Softplus(beta, threshold)
    return fn(input)
def softmin(input):
    """Softmin; see :class:`~torch.nn.Softmin`."""
    fn = _functions.thnn.Softmin()
    return fn(input)
def softmax(input):
    """Softmax; see :class:`~torch.nn.Softmax`."""
    fn = _functions.thnn.auto.Softmax()
    return fn(input)
def softshrink(input, lambd=0.5):
    """Soft shrinkage; see :class:`~torch.nn.Softshrink`."""
    fn = _functions.thnn.auto.Softshrink(lambd)
    return fn(input)
def log_softmax(input):
    """log(softmax(x)); see :class:`~torch.nn.LogSoftmax`."""
    fn = _functions.thnn.LogSoftmax()
    return fn(input)
def tanh(input):
    """Elementwise hyperbolic tangent."""
    fn = _autograd_functions.Tanh()
    return fn(input)
def sigmoid(input):
    """Elementwise logistic sigmoid."""
    fn = _autograd_functions.Sigmoid()
    return fn(input)
# etc.
def linear(input, weight, bias=None):
    """Affine transform ``y = x W^T (+ b)``; bias is optional."""
    args = (input, weight) if bias is None else (input, weight, bias)
    return _functions.linear.Linear.apply(*args)
def bilinear(input1, input2, weight, bias=None):
    """Bilinear transform of two inputs; see :class:`~torch.nn.Bilinear`."""
    fn = _functions.linear.Bilinear()
    if bias is None:
        return fn(input1, input2, weight)
    return fn(input1, input2, weight, bias)
def batch_norm(input, running_mean, running_var, weight=None, bias=None,
               training=False, momentum=0.1, eps=1e-5):
    """Batch normalization over a mini-batch, dispatching to cuDNN when
    torch.backends.cudnn is enabled."""
    bn = torch._C._functions.BatchNorm(
        running_mean, running_var, training, momentum, eps,
        torch.backends.cudnn.enabled,
    )
    return bn(input, weight, bias)
# loss
def nll_loss(input, target, weight=None, size_average=True):
    r"""The negative log likelihood loss.
    See :class:`~torch.nn.NLLLoss` for details.
    Args:
        input: :math:`(N, C)` where `C = number of classes`, or
            `(N, C, H, W)` for the 2D loss variant
        target: :math:`(N)` where each value is `0 <= targets[i] <= C-1`
        weight (Variable, optional): a manual rescaling weight given to each
            class. If given, has to be a Variable of size "nclasses"
        size_average (bool, optional): if True (default) the losses are
            averaged over observations for each minibatch; if False they
            are summed for each minibatch.
    Example:
        >>> # input is of size nBatch x nClasses = 3 x 5
        >>> input = autograd.Variable(torch.randn(3, 5))
        >>> # each element in target has to have 0 <= value < nclasses
        >>> target = autograd.Variable(torch.LongTensor([1, 0, 4]))
        >>> output = F.nll_loss(F.log_softmax(input), target)
        >>> output.backward()
    """
    dim = input.dim()
    if dim not in (2, 4):
        raise ValueError('Expected 2 or 4 dimensions (got {})'.format(dim))
    # 2D input -> classification loss; 4D input -> spatial (2d) loss.
    loss_cls = _functions.thnn.NLLLoss if dim == 2 else _functions.thnn.NLLLoss2d
    return loss_cls(size_average, weight=weight)(input, target)
def kl_div(input, target, size_average=True):
    r"""The `Kullback-Leibler divergence`_ Loss.
    See :class:`~torch.nn.KLDivLoss` for details.
    Args:
        input: Variable of arbitrary shape
        target: Variable of the same shape as input
        size_average: if True, the output is divided by the number of
            elements in the input tensor
    """
    criterion = _functions.thnn.KLDivLoss(size_average)
    return criterion(input, target)
def cross_entropy(input, target, weight=None, size_average=True):
    r"""This criterion combines `log_softmax` and `nll_loss` in one single class.
    See :class:`torch.nn.CrossEntropyLoss` for details.
    Args:
        input: Variable :math:`(N, C)` where `C = number of classes`
        target: Variable :math:`(N)` where each value is `0 <= targets[i] <= C-1`
        weight (Tensor, optional): a manual rescaling weight given to each
            class. If given, has to be a Tensor of size "nclasses"
        size_average (bool, optional): if True (default) the losses are
            averaged over observations for each minibatch; if False they
            are summed for each minibatch.
    """
    log_probs = log_softmax(input)
    return nll_loss(log_probs, target, weight, size_average)
def binary_cross_entropy(input, target, weight=None, size_average=True):
    r"""Function that measures the Binary Cross Entropy
    between the target and the output.
    See :class:`~torch.nn.BCELoss` for details.
    Args:
        input: Variable of arbitrary shape
        target: Variable of the same shape as input
        weight (Variable, optional): a manual rescaling weight;
            if provided it's repeated to match the input tensor shape
        size_average (bool, optional): if True (default) the losses are
            averaged over observations for each minibatch; if False they
            are summed for each minibatch.
    """
    criterion = _functions.thnn.BCELoss(size_average, weight=weight)
    return criterion(input, target)
def smooth_l1_loss(input, target, size_average=True):
    """Smooth L1 loss criterion; see :class:`~torch.nn.SmoothL1Loss`."""
    criterion = _functions.thnn.SmoothL1Loss(size_average)
    return criterion(input, target)
def pixel_shuffle(input, upscale_factor):
    r"""Rearranges elements in a tensor of shape ``[*, C*r^2, H, W]`` to a
    tensor of shape ``[*, C, H*r, W*r]`` where ``r`` is the ``upscale_factor``.
    (The original docstring wrongly claimed the output is ``[C, H*r, W*r]``;
    the batch dimension is preserved and the channel count is divided by r^2.)
    See :class:`~torch.nn.PixelShuffle` for details.
    Args:
        input (Variable): 4D input tensor of shape ``[N, C*r^2, H, W]``
        upscale_factor (int): factor to increase spatial resolution by
    Examples:
        >>> ps = nn.PixelShuffle(3)
        >>> input = autograd.Variable(torch.Tensor(1, 9, 4, 4))
        >>> output = ps(input)
        >>> print(output.size())
        torch.Size([1, 1, 12, 12])
    """
    batch_size, channels, in_height, in_width = input.size()
    # Each output channel is assembled from upscale_factor^2 input channels.
    channels //= upscale_factor ** 2
    out_height = in_height * upscale_factor
    out_width = in_width * upscale_factor
    # Split the channel axis into (C, r, r) and interleave the two r axes
    # with the spatial axes so every r x r block lands beside its pixel.
    input_view = input.contiguous().view(
        batch_size, channels, upscale_factor, upscale_factor,
        in_height, in_width)
    shuffle_out = input_view.permute(0, 1, 4, 2, 5, 3).contiguous()
    return shuffle_out.view(batch_size, channels, out_height, out_width)
def upsample_nearest(input, size=None, scale_factor=None):
    """Upsamples the input using nearest-neighbour pixel values.
    Only spatial upsampling is currently supported (i.e. expected inputs
    are 4 dimensional).
    Args:
        input (Variable): input
        size (int or Tuple[int, int]): output spatial size.
        scale_factor (int): multiplier for spatial size. Has to be an integer.
    """
    op = _functions.thnn.UpsamplingNearest2d(size, scale_factor)
    return op(input)
def upsample_bilinear(input, size=None, scale_factor=None):
    """Upscales the input using bilinear interpolation.
    Only spatial upsampling is currently supported (i.e. expected inputs
    are 4 dimensional).
    Args:
        input (Variable): input
        size (int or Tuple[int, int]): output spatial size.
        scale_factor (int or Tuple[int, int]): multiplier for spatial size
    """
    op = _functions.thnn.UpsamplingBilinear2d(size, scale_factor)
    return op(input)
def _check_bilinear_2d_scale_factor(scale_factor):
    """Validate and normalize a bilinear-upsampling scale factor.

    Args:
        scale_factor: a positive int, or a pair of positive ints.
    Returns:
        The scale factor as a 2-tuple.
    Raises:
        ValueError: if the value is not a positive int or pair of them.
    """
    scale_factor = _pair(scale_factor)
    # Use an explicit check instead of `assert`: asserts are stripped under
    # `python -O`, which would silently skip this validation.
    valid = (len(scale_factor) == 2 and
             all(isinstance(s, Integral) and s >= 1 for s in scale_factor))
    if not valid:
        # The check requires s >= 1, so say "positive" (the original message
        # said "non-negative", which did not match the condition).
        raise ValueError('scale_factor must be a positive integer, '
                         'or a tuple of positive integers for bilinear upsamplings, but got: '
                         '{}'.format(scale_factor))
    return scale_factor
def pad(input, pad, mode='constant', value=0):
    """Pads tensor.
    Currently only 2D and 3D padding supported.
    For a 4D input tensor, pad is (pad_l, pad_r, pad_t, pad_b).
    For a 5D input tensor, pad is (pleft, pright, ptop, pbottom, pfront, pback).
    Args:
        input (Variable): 4D or 5D tensor
        pad (tuple): 4-elem or 6-elem tuple
        mode: 'constant', 'reflect' or 'replicate'
        value: fill value for 'constant' padding
    """
    ndim = input.dim()
    if ndim == 4:
        assert len(pad) == 4, '4D tensors expect 4 values for padding'
        if mode == 'constant':
            return ConstantPad2d(pad, value)(input)
        if mode == 'reflect':
            return _functions.thnn.ReflectionPad2d(*pad)(input)
        if mode == 'replicate':
            return _functions.thnn.ReplicationPad2d(*pad)(input)
    elif ndim == 5:
        assert len(pad) == 6, '5D tensors expect 6 values for padding'
        if mode == 'replicate':
            return _functions.thnn.ReplicationPad3d(*pad)(input)
        if mode in ('constant', 'reflect'):
            # 5D constant/reflect padding not implemented yet.
            raise NotImplementedError
    else:
        raise NotImplementedError("Only 4D and 5D padding is supported for now")
# distance
def pairwise_distance(x1, | |
# Repository: DominikMa/P5-Vehicle-Detection
"""Pipeline to find lanes on images or movies."""
import numpy as np
import cv2
import glob
from os import path
import pickle
from lane import Lane
class Lanefinder():
"""docstring for ."""
def __init__(self):
self.ym_per_pix = 30/720 # meters per pixel in y dimension
self.xm_per_pix = 3.7/880 # meters per pixel in x dimension
self.src_points = np.float32([[240, 690],
[1070, 690],
[577, 460],
[706, 460]])
self.dst_points = np.float32([[200, 720],
[1110, 720],
[200, 25],
[1110, 25]])
self.saved_camera_calibration_path = './camera_cal/ \
saved_camera_calibration.p'
self.camera_calibration = self.__do_camera_calibration()
self.line_left = Lane()
self.line_right = Lane()
    def __do_camera_calibration(self):
        """Calculate calibration parameters for all calibration images.

        Detects 9x6 chessboard corners in every './camera_cal/calibration*.jpg',
        runs cv2.calibrateCamera, and caches the result on disk with pickle.

        Returns a dict with keys 'ret', 'mtx', 'dist', 'rvecs', 'tvecs'.
        """
        # If calibration is saved just load it.
        # NOTE(review): the file object passed to pickle.load is never closed
        # explicitly; pickle here is only safe because the cache file is
        # produced by this class itself.
        if path.isfile(self.saved_camera_calibration_path):
            return pickle.load(open(self.saved_camera_calibration_path, "rb"))
        # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
        objp = np.zeros((6*9, 3), np.float32)
        objp[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2)
        objpoints = []  # 3d chessboard-frame points, one copy per image
        imgpoints = []  # 2d detected corner points in image space
        for image_file in glob.glob('./camera_cal/calibration*.jpg'):
            image = cv2.imread(image_file)
            image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            ret, corners = cv2.findChessboardCorners(image_gray, (9, 6), None)
            # If found, add object points, image points
            if ret is True:
                objpoints.append(objp)
                imgpoints.append(corners)
        # NOTE(review): `image` below is whatever file the loop read last —
        # this assumes at least one calibration image exists and that all
        # images share one size; TODO confirm.
        camera_calibration = cv2.calibrateCamera(objpoints, imgpoints,
                                                 image.shape[1::-1],
                                                 None, None)
        camera_calibration = {'ret': camera_calibration[0],
                              'mtx': camera_calibration[1],
                              'dist': camera_calibration[2],
                              'rvecs': camera_calibration[3],
                              'tvecs': camera_calibration[4]}
        pickle.dump(camera_calibration,
                    open(self.saved_camera_calibration_path, "wb"))
        return camera_calibration
def distortion_correction(self, image):
"""Use the calibration data to undistort the image."""
image = cv2.undistort(image,
self.camera_calibration['mtx'],
self.camera_calibration['dist'],
None,
self.camera_calibration['mtx'])
return image
def __perspective_transform(self, image):
"""Transform image to birds-eye view."""
M = cv2.getPerspectiveTransform(self.src_points, self.dst_points)
image = cv2.warpPerspective(image,
M,
image.shape[1::-1],
flags=cv2.INTER_LINEAR)
return image
def __perspective_transform_reverse(self, image):
"""Transfor image back from birds-eye view to normal view."""
M = cv2.getPerspectiveTransform(self.dst_points, self.src_points)
image = cv2.warpPerspective(image,
M,
image.shape[1::-1],
flags=cv2.INTER_LINEAR)
return image
def process_image(self, image_org):
"""Pipeline to find an draw lanes."""
image_org = cv2.cvtColor(image_org, cv2.COLOR_RGB2BGR)
# image_undist = self.distortion_correction(image_org)
image_undist = image_org
image_binary = self.__color_transform(image_undist)
binary_warped = self.__perspective_transform(image_binary)
self.line_left, self.line_right, color_warped = self.__fit_polynomial(binary_warped,
self.line_left,
self.line_right)
image = self.__draw_on_road(image_undist, binary_warped,
self.line_left, self.line_right)
# image_combined = self.__combine_images(image,
# image1=image_binary,
# image2=binary_warped,
# image3=color_warped)
image_combined = image
image_combined = cv2.cvtColor(image_combined.astype(np.uint8),
cv2.COLOR_BGR2RGB)
return image_combined
def __combine_images(self, image, image1=None, image2=None, image3=None):
"""Append optional images below original input image."""
small_shape = cv2.resize(image, (0, 0), fx=1/3, fy=1/3).shape
if image1 is None:
image1 = np.zeros(small_shape)
else:
image1 = cv2.resize(image1, small_shape[1::-1])
if len(image1.shape) < 3:
image1 = cv2.cvtColor(image1, cv2.COLOR_GRAY2RGB)*255
if image2 is None:
image2 = np.zeros(small_shape)
else:
image2 = cv2.resize(image2, small_shape[1::-1])
if len(image2.shape) < 3:
image2 = cv2.cvtColor(image2, cv2.COLOR_GRAY2RGB)*255
if image3 is None:
image3 = np.zeros(small_shape)
else:
image3 = cv2.resize(image3, small_shape[1::-1])
if len(image3.shape) < 3:
image3 = cv2.cvtColor(image3, cv2.COLOR_GRAY2RGB)*255
image_above = np.concatenate((image1, image2), axis=1)
image_below = np.concatenate((image_above, image3), axis=1)
image_below = image_below[:, :1280, :]
image = np.concatenate((image, image_below), axis=0)
return cv2.resize(image, (0, 0), fx=0.9, fy=0.9).astype(np.uint8)
def __color_transform(self, image):
"""Calculate binary image with lanes."""
lanes_hls = self.__get_lanes_hls(image)
lanes_hsv = self.__get_lanes_hsv(image)
binaryCom = lanes_hls + lanes_hsv
binaryCom[binaryCom < 1] = 0
binaryCom[binaryCom > 0] = 1
return binaryCom
def __get_lanes_hls(self, image):
"""Calculate binary image with lanes from hls color space."""
hls = cv2.cvtColor(image, cv2.COLOR_BGR2HLS)
H = hls[:, :, 0]
S = hls[:, :, 2]
binaryH = np.zeros_like(H)
binaryH[(H >= 16) & (H <= 24)] = 1
binaryS = np.zeros_like(H)
binaryS[(S > 170) & (S <= 255)] = 1
binarySH = np.zeros_like(H)
binarySH[(S > 70) & (S <= 255) & (H >= 12) & (H <= 28)] = 1
binaryCom = binaryH + binaryS + binarySH
binaryCom[binaryCom < 2] = 0
binaryCom[binaryCom > 1] = 1
sobel_hls = cv2.Sobel(binaryCom, cv2.CV_64F, 1, 0, ksize=9)
sobel_hls = np.absolute(sobel_hls)
sobel_hls = np.uint8(255*sobel_hls/np.max(sobel_hls))
binary_sobel_hls = np.zeros_like(sobel_hls)
binary_sobel_hls[(sobel_hls >= 20) & (sobel_hls <= 255)] = 1
return binary_sobel_hls
def __get_lanes_hsv(self, image):
"""Calculate binary image with lanes from hsv color space."""
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
mask_white = cv2.inRange(hsv,
np.array([[[0, 0, 210]]]),
np.array([[[255, 90, 255]]]))
mask_yellow = cv2.inRange(hsv,
np.array([[[19, 120, 150]]]),
np.array([[[30, 200, 255]]]))
mask = cv2.bitwise_or(mask_white, mask_yellow)
sobel_hsv = cv2.Sobel(mask, cv2.CV_64F, 1, 0, ksize=9)
sobel_hsv = np.absolute(sobel_hsv)
sobel_hsv = np.uint8(255*sobel_hsv/np.max(sobel_hsv))
binary_sobel_hsv = np.zeros_like(sobel_hsv)
binary_sobel_hsv[(sobel_hsv >= 20) & (sobel_hsv <= 255)] = 1
return binary_sobel_hsv
def __find_lane_pixels(self, binary_warped, nwindows=9,
margin=100, minpix=100):
"""Find lane pixels with sliding window."""
out_img = np.dstack((binary_warped, binary_warped, binary_warped))
# Take a histogram of the bottom half of the image
histogram = np.sum(binary_warped[np.int(binary_warped.shape[0]*2/3):,
:], axis=0)
# Find the peak of the left and right halves of the histogram
midpoint = np.int(histogram.shape[0]//2)
leftx_base = np.argmax(histogram[:midpoint])
rightx_base = np.argmax(histogram[midpoint:]) + midpoint
# Set height of windows - based on nwindows above and image shape
window_height = np.int(binary_warped.shape[0]//nwindows)
# Identify the x and y positions of all nonzero pixels in the image
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Current positions to be updated later for each window in nwindows
leftx_current = leftx_base
rightx_current = rightx_base
leftx_steps = [0]
rightx_steps = [0]
# Create empty lists to receive left and right lane pixel indices
left_lane_inds = []
right_lane_inds = []
# Step through the windows one by one
for window in range(nwindows):
# Identify window boundaries in x and y (and right and left)
win_y_low = binary_warped.shape[0] - (window+1)*window_height
win_y_high = binary_warped.shape[0] - window*window_height
win_xleft_low = leftx_current - margin
win_xleft_high = leftx_current + margin
win_xright_low = rightx_current - margin
win_xright_high = rightx_current + margin
cv2.rectangle(out_img, (win_xleft_low, win_y_low),
(win_xleft_high, win_y_high), (0, 255, 0), 2)
cv2.rectangle(out_img, (win_xright_low, win_y_low),
(win_xright_high, win_y_high), (0, 255, 0), 2)
good_left_inds = ((nonzeroy >= win_y_low) &
(nonzeroy < win_y_high) &
(nonzerox >= win_xleft_low) &
(nonzerox < win_xleft_high)).nonzero()[0]
good_right_inds = ((nonzeroy >= win_y_low) &
(nonzeroy < win_y_high) &
(nonzerox >= win_xright_low) &
(nonzerox < win_xright_high)).nonzero()[0]
# Append these indices to the lists
left_lane_inds.append(good_left_inds)
right_lane_inds.append(good_right_inds)
# If you found > minpix pixels, recenter next window
if len(good_left_inds) > minpix:
leftx_steps.append(np.int(np.mean(nonzerox[good_left_inds]))
- leftx_current)
leftx_current = np.int(np.mean(nonzerox[good_left_inds]))
else:
leftx_current = leftx_current + np.int(np.mean(leftx_steps))
leftx_steps.append(0)
if len(good_right_inds) > minpix:
rightx_steps.append(np.int(np.mean(nonzerox[good_right_inds]))
- rightx_current)
rightx_current = np.int(np.mean(nonzerox[good_right_inds]))
else:
rightx_current = rightx_current + np.int(np.mean(rightx_steps))
rightx_steps.append(0)
# Concatenate the arrays of indices
try:
left_lane_inds = np.concatenate(left_lane_inds)
right_lane_inds = np.concatenate(right_lane_inds)
except ValueError:
# Avoids an error if the above is not implemented fully
pass
# Extract left and right line pixel positions
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
return leftx, lefty, rightx, righty, out_img
def __find_lane_pixels_poly(self, binary_warped, line_left, line_right,
margin=100):
"""Find lane pixels with known previous lanes."""
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
left_fit = line_left.current_fit
right_fit = line_right.current_fit
left_lane_inds = ((nonzerox > (left_fit[0]*(nonzeroy**2) +
left_fit[1]*nonzeroy +
left_fit[2] - margin)) &
(nonzerox < (left_fit[0]*(nonzeroy**2) +
left_fit[1]*nonzeroy +
left_fit[2] + margin)))
right_lane_inds = ((nonzerox > (right_fit[0]*(nonzeroy**2) +
right_fit[1]*nonzeroy +
right_fit[2] - margin)) &
(nonzerox < (right_fit[0]*(nonzeroy**2) +
right_fit[1]*nonzeroy +
right_fit[2] + margin)))
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
return leftx, lefty, rightx, righty
def __fit_polynomial(self, binary_warped, line_left, line_right):
"""Find a fitting polynomial for the left and tight lane."""
warp_zero = np.zeros_like(binary_warped)
color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
if line_left.bad_Frames < 5 or line_right.bad_Frames < 5:
leftx, lefty, rightx, righty = self.__find_lane_pixels_poly(binary_warped,
line_left,
line_right)
try:
# Fit a second order polynomial to each
poly_fit_left = np.polyfit(lefty, leftx, 2)
poly_fit_right = np.polyfit(righty, rightx, 2)
# Generate x and y values
poly_fity = np.linspace(0, binary_warped.shape[0]-1,
binary_warped.shape[0])
poly_fitx_left = (poly_fit_left[0]*poly_fity**2 +
poly_fit_left[1]*poly_fity +
poly_fit_left[2])
poly_fitx_right = (poly_fit_right[0]*poly_fity**2 +
poly_fit_right[1]*poly_fity +
poly_fit_right[2])
line_left_chekcs = line_left.checks(poly_fit_left,
poly_fitx_left,
poly_fity)
line_right_checks = line_right.checks(poly_fit_right,
poly_fitx_right,
poly_fity)
if (line_left_chekcs and line_right_checks and
self.__checks(poly_fit_left, poly_fit_right,
poly_fitx_left, poly_fitx_right,
poly_fity)):
line_left.update(leftx, lefty, binary_warped.shape)
line_right.update(rightx, righty, binary_warped.shape)
else:
line_left.bad_Frame()
line_right.bad_Frame()
except TypeError:
# Avoids an error if `poly_fitx` still none or incorrect
print('The function failed to fit a line!')
line_left.bad_Frame()
line_right.bad_Frame()
if line_left.bad_Frames > 4 or line_right.bad_Frames > 4:
leftx, lefty, rightx, righty, out_img = self.__find_lane_pixels(binary_warped)
color_warp = out_img
if len(leftx) > 0 and len(lefty) > 0:
line_left.update(leftx, lefty, binary_warped.shape)
if len(rightx) > 0 and len(righty) > 0:
line_right.update(rightx, righty, binary_warped.shape)
# Recast the x and y points into usable format for cv2.fillPoly()
if line_left.detected:
poly_left_val = np.polyval(line_left.best_fit,
range(0, binary_warped.shape[0]))
points_left = np.column_stack((poly_left_val,
range(0, binary_warped.shape[0])))
color_warp[line_left.ally, line_left.allx] = [255, 0, 0]
cv2.polylines(color_warp, np.int32([points_left]), False,
[255, 255, 255], thickness=4)
if line_right.detected:
poly_right_val = np.polyval(line_right.best_fit,
range(0, binary_warped.shape[0]))
points_right = np.column_stack((poly_right_val,
range(0, | |
to marginalize over, you must minimize over the unwanted axes of sum_dev,
e.g. for fr: np.min(np.min(np.min(np.min(fit['sum_dev'],axis = 4),axis = 3),axis = 2),axis = 1)
'''
if error is None:
error = np.ones(len(x))
fs = np.linspace(ranges[0][0],ranges[1][0],n_grid_points)
Qrs = np.linspace(ranges[0][1],ranges[1][1],n_grid_points)
amps = np.linspace(ranges[0][2],ranges[1][2],n_grid_points)
phis = np.linspace(ranges[0][3],ranges[1][3],n_grid_points)
b0s = np.linspace(ranges[0][4],ranges[1][4],n_grid_points)
evaluated_ranges = np.vstack((fs,Qrs,amps,phis,b0s))
a,b,c,d,e = np.meshgrid(fs,Qrs,amps,phis,b0s,indexing = "ij") #always index ij
evaluated = linear_mag(x,a,b,c,d,e)
data_values = np.reshape(np.abs(z)**2,(abs(z).shape[0],1,1,1,1,1))
error = np.reshape(error,(abs(z).shape[0],1,1,1,1,1))
sum_dev = np.sum(((np.sqrt(evaluated)-np.sqrt(data_values))**2/error**2),axis = 0) # comparing in magnitude space rather than magnitude squared
min_index = np.where(sum_dev == np.min(sum_dev))
index1 = min_index[0][0]
index2 = min_index[1][0]
index3 = min_index[2][0]
index4 = min_index[3][0]
index5 = min_index[4][0]
fit_values = np.asarray((fs[index1],Qrs[index2],amps[index3],phis[index4],b0s[index5]))
fit_values_names = ('f0','Qr','amp','phi','b0')
fit_result = linear_mag(x,fs[index1],Qrs[index2],amps[index3],phis[index4],b0s[index5])
marginalized_1d = np.zeros((5,n_grid_points))
marginalized_1d[0,:] = np.min(np.min(np.min(np.min(sum_dev,axis = 4),axis = 3),axis = 2),axis = 1)
marginalized_1d[1,:] = np.min(np.min(np.min(np.min(sum_dev,axis = 4),axis = 3),axis = 2),axis = 0)
marginalized_1d[2,:] = np.min(np.min(np.min(np.min(sum_dev,axis = 4),axis = 3),axis = 1),axis = 0)
marginalized_1d[3,:] = np.min(np.min(np.min(np.min(sum_dev,axis = 4),axis = 2),axis = 1),axis = 0)
marginalized_1d[4,:] = np.min(np.min(np.min(np.min(sum_dev,axis = 3),axis = 2),axis = 1),axis = 0)
marginalized_2d = np.zeros((5,5,n_grid_points,n_grid_points))
#0 _
#1 x _
#2 x x _
#3 x x x _
#4 x x x x _
# 0 1 2 3 4
marginalized_2d[0,1,:] = marginalized_2d[1,0,:] = np.min(np.min(np.min(sum_dev,axis = 4),axis = 3),axis = 2)
marginalized_2d[2,0,:] = marginalized_2d[0,2,:] = np.min(np.min(np.min(sum_dev,axis = 4),axis = 3),axis = 1)
marginalized_2d[2,1,:] = marginalized_2d[1,2,:] = np.min(np.min(np.min(sum_dev,axis = 4),axis = 3),axis = 0)
marginalized_2d[3,0,:] = marginalized_2d[0,3,:] = np.min(np.min(np.min(sum_dev,axis = 4),axis = 2),axis = 1)
marginalized_2d[3,1,:] = marginalized_2d[1,3,:] = np.min(np.min(np.min(sum_dev,axis = 4),axis = 2),axis = 0)
marginalized_2d[3,2,:] = marginalized_2d[2,3,:] = np.min(np.min(np.min(sum_dev,axis = 4),axis = 1),axis = 0)
marginalized_2d[4,0,:] = marginalized_2d[0,4,:] = np.min(np.min(np.min(sum_dev,axis = 3),axis = 2),axis = 1)
marginalized_2d[4,1,:] = marginalized_2d[1,4,:] = np.min(np.min(np.min(sum_dev,axis = 3),axis = 2),axis = 0)
marginalized_2d[4,2,:] = marginalized_2d[2,4,:] = np.min(np.min(np.min(sum_dev,axis = 3),axis = 1),axis = 0)
marginalized_2d[4,3,:] = marginalized_2d[3,4,:] = np.min(np.min(np.min(sum_dev,axis = 2),axis = 1),axis = 0)
if plot:
levels = [2.3,4.61] #delta chi squared two parameters 68 90 % confidence
fig_fit = plt.figure(-1)
axs = fig_fit.subplots(5, 5)
for i in range(0,5): # y starting from top
for j in range(0,5): #x starting from left
if i > j:
#plt.subplot(5,5,i+1+5*j)
#axs[i, j].set_aspect('equal', 'box')
extent = [evaluated_ranges[j,0],evaluated_ranges[j,n_grid_points-1],evaluated_ranges[i,0],evaluated_ranges[i,n_grid_points-1]]
axs[i,j].imshow(marginalized_2d[i,j,:]-np.min(sum_dev),extent =extent,origin = 'lower', cmap = 'jet')
axs[i,j].contour(evaluated_ranges[j],evaluated_ranges[i],marginalized_2d[i,j,:]-np.min(sum_dev),levels = levels,colors = 'white')
axs[i,j].set_ylim(evaluated_ranges[i,0],evaluated_ranges[i,n_grid_points-1])
axs[i,j].set_xlim(evaluated_ranges[j,0],evaluated_ranges[j,n_grid_points-1])
axs[i,j].set_aspect((evaluated_ranges[j,0]-evaluated_ranges[j,n_grid_points-1])/(evaluated_ranges[i,0]-evaluated_ranges[i,n_grid_points-1]))
if j == 0:
axs[i, j].set_ylabel(fit_values_names[i])
if i == 4:
axs[i, j].set_xlabel("\n"+fit_values_names[j])
if i<4:
axs[i,j].get_xaxis().set_ticks([])
if j>0:
axs[i,j].get_yaxis().set_ticks([])
elif i < j:
fig_fit.delaxes(axs[i,j])
for i in range(0,5):
#axes.subplot(5,5,i+1+5*i)
axs[i,i].plot(evaluated_ranges[i,:],marginalized_1d[i,:]-np.min(sum_dev))
axs[i,i].plot(evaluated_ranges[i,:],np.ones(len(evaluated_ranges[i,:]))*1.,color = 'k')
axs[i,i].plot(evaluated_ranges[i,:],np.ones(len(evaluated_ranges[i,:]))*2.7,color = 'k')
axs[i,i].yaxis.set_label_position("right")
axs[i,i].yaxis.tick_right()
axs[i,i].xaxis.set_label_position("top")
axs[i,i].xaxis.tick_top()
axs[i,i].set_xlabel(fit_values_names[i])
#axs[0,0].set_ylabel(fit_values_names[0])
#axs[4,4].set_xlabel(fit_values_names[4])
axs[4,4].xaxis.set_label_position("bottom")
axs[4,4].xaxis.tick_bottom()
#make a dictionary to return
fit_dict = {'fit_values': fit_values,'fit_values_names':fit_values_names, 'sum_dev': sum_dev, 'fit_result': fit_result,'marginalized_2d':marginalized_2d,'marginalized_1d':marginalized_1d,'evaluated_ranges':evaluated_ranges}#, 'x0':x0, 'z':z}
return fit_dict
# function for fitting an iq sweep with the above equation
def fit_nonlinear_iq(x,z,**keywords):
    '''
    Fit a nonlinear IQ resonator sweep with scipy curve_fit.

    x : frequency points
    z : complex transmission data
    Keywords:
    # bounds ---- a 2d tuple of the low then the high values to bound the problem by
    # x0 --- initial guess for the fit; this can be very important because the
    #        least-squares space over all the parameters is complex
    # amp_norm --- do a normalization for variable amplitude; useful when the
    #              transfer function of the cryostat is not flat
    # tau --- forces tau to a specific value (tau is then removed from the fit)
    # tau_guess --- fixes the guess for tau without having to specify all of x0
    Returns a dict with 'fit', 'fit_result', 'x0_result', 'x0', 'z'.
    '''
    use_given_tau = 'tau' in keywords
    if use_given_tau:
        tau = keywords['tau']
    if ('bounds' in keywords):
        bounds = keywords['bounds']
    else:
        #define default bounds
        print("default bounds used")
        bounds = ([np.min(x),50,.01,-np.pi,0,-np.inf,-np.inf,0,np.min(x)],[np.max(x),200000,1,np.pi,5,np.inf,np.inf,1*10**-6,np.max(x)])
    if ('x0' in keywords):
        # BUG FIX: copy so the caller's guess is never mutated below
        # (fr_guess/tau_guess assignment and the tau deletion were in place).
        x0 = list(keywords['x0'])
    else:
        #define default initial guess
        print("default initial guess used")
        x0 = guess_x0_iq_nonlinear(x,z,verbose = True)
        print(x0)
    if ('fr_guess' in keywords):
        x0[0] = keywords['fr_guess']
    if ('tau_guess' in keywords):
        x0[7] = keywords['tau_guess']
    #Amplitude normalization?
    do_amp_norm = 0
    if ('amp_norm' in keywords):
        amp_norm = keywords['amp_norm']
        if amp_norm == True:
            do_amp_norm = 1
        elif amp_norm == False:
            do_amp_norm = 0
        else:
            print("please specify amp_norm as True or False")
    if do_amp_norm == 1:
        z = amplitude_normalization(x,z)
    # curve_fit works on real vectors: stack real and imaginary parts
    z_stacked = np.hstack((np.real(z),np.imag(z)))
    if use_given_tau:
        # BUG FIX: delete the tau entries from *copies* of the bound lists —
        # the original mutated the caller-supplied sequences in place.
        bounds = (list(bounds[0]), list(bounds[1]))
        del bounds[0][7]
        del bounds[1][7]
        del x0[7]
        fit = optimization.curve_fit(lambda x_lamb,a,b,c,d,e,f,g,h: nonlinear_iq_for_fitter(x_lamb,a,b,c,d,e,f,g,tau,h), x, z_stacked,x0,bounds = bounds)
        fit_result = nonlinear_iq(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],tau,fit[0][7])
        x0_result = nonlinear_iq(x,x0[0],x0[1],x0[2],x0[3],x0[4],x0[5],x0[6],tau,x0[7])
    else:
        fit = optimization.curve_fit(nonlinear_iq_for_fitter, x, z_stacked,x0,bounds = bounds)
        fit_result = nonlinear_iq(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],fit[0][7],fit[0][8])
        x0_result = nonlinear_iq(x,x0[0],x0[1],x0[2],x0[3],x0[4],x0[5],x0[6],x0[7],x0[8])
    #make a dictionary to return
    fit_dict = {'fit': fit, 'fit_result': fit_result, 'x0_result': x0_result, 'x0':x0, 'z':z}
    return fit_dict
def fit_nonlinear_iq_sep(fine_x,fine_z,gain_x,gain_z,**keywords):
    '''
    Same as fit_nonlinear_iq but takes the fine and gain scans separately.

    Keywords:
    # bounds ---- a 2d tuple of the low then the high values to bound the problem by
    # x0 --- initial guess for the fit; this can be very important because the
    #        least-squares space over all the parameters is complex
    # amp_norm --- do a normalization for variable amplitude; useful when the
    #              transfer function of the cryostat is not flat
    # fine_z_err / gain_z_err --- per-point complex errors; when BOTH are given
    #        the fit is weighted and a reduced chi-square is also returned
    '''
    if ('bounds' in keywords):
        bounds = keywords['bounds']
    else:
        #define default bounds
        print("default bounds used")
        bounds = ([np.min(fine_x),500.,.01,-np.pi,0,-np.inf,-np.inf,1*10**-9,np.min(fine_x)],[np.max(fine_x),1000000,1,np.pi,5,np.inf,np.inf,1*10**-6,np.max(fine_x)])
    if ('x0' in keywords):
        x0 = keywords['x0']
    else:
        #define default intial guess
        print("default initial guess used")
        #fr_guess = x[np.argmin(np.abs(z))]
        #x0 = [fr_guess,10000.,0.5,0,0,np.mean(np.real(z)),np.mean(np.imag(z)),3*10**-7,fr_guess]
        x0 = guess_x0_iq_nonlinear_sep(fine_x,fine_z,gain_x,gain_z)
        #print(x0)
    #Amplitude normalization?
    do_amp_norm = 0
    if ('amp_norm' in keywords):
        amp_norm = keywords['amp_norm']
        if amp_norm == True:
            do_amp_norm = 1
        elif amp_norm == False:
            do_amp_norm = 0
        else:
            print("please specify amp_norm as True or False")
    # NOTE(review): bitwise & works here because both operands are bools,
    # but `and` would be the idiomatic operator.
    if (('fine_z_err' in keywords) & ('gain_z_err' in keywords)):
        use_err = True
        fine_z_err = keywords['fine_z_err']
        gain_z_err = keywords['gain_z_err']
    else:
        use_err = False
    # Fit the concatenated fine + gain sweeps as a single data set.
    x = np.hstack((fine_x,gain_x))
    z = np.hstack((fine_z,gain_z))
    if use_err:
        z_err = np.hstack((fine_z_err,gain_z_err))
    if do_amp_norm == 1:
        z = amplitude_normalization(x,z)
    # curve_fit works on real vectors: stack real and imaginary parts.
    z_stacked = np.hstack((np.real(z),np.imag(z)))
    if use_err:
        z_err_stacked = np.hstack((np.real(z_err),np.imag(z_err)))
        fit = optimization.curve_fit(nonlinear_iq_for_fitter, x, z_stacked,x0,sigma = z_err_stacked,bounds = bounds)
    else:
        fit = optimization.curve_fit(nonlinear_iq_for_fitter, x, z_stacked,x0,bounds = bounds)
    fit_result = nonlinear_iq(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],fit[0][7],fit[0][8])
    x0_result = nonlinear_iq(x,x0[0],x0[1],x0[2],x0[3],x0[4],x0[5],x0[6],x0[7],x0[8])
    if use_err:
        #only do it for fine data
        #red_chi_sqr = np.sum(z_stacked-np.hstack((np.real(fit_result),np.imag(fit_result))))**2/z_err_stacked**2)/(len(z_stacked)-8.)
        # Reduced chi-square over the fine scan only: 2*len(fine_z) real data
        # points minus the 8 fitted degrees of freedom.
        red_chi_sqr = np.sum((np.hstack((np.real(fine_z),np.imag(fine_z)))-np.hstack((np.real(fit_result[0:len(fine_z)]),np.imag(fit_result[0:len(fine_z)]))))**2/np.hstack((np.real(fine_z_err),np.imag(fine_z_err)))**2)/(len(fine_z)*2.-8.)
    #make a dictionary to return
    if use_err:
        fit_dict = {'fit': fit, 'fit_result': fit_result, 'x0_result': x0_result, 'x0':x0, 'z':z,'fit_freqs':x,'red_chi_sqr':red_chi_sqr}
    else:
        fit_dict = {'fit': fit, 'fit_result': fit_result, 'x0_result': x0_result, 'x0':x0, 'z':z,'fit_freqs':x}
    return fit_dict
# same function but double fits so that it can get error and a proper covariance matrix out
def fit_nonlinear_iq_with_err(x,z,**keywords):
    '''
    Same as fit_nonlinear_iq, but fits twice: the first pass estimates a
    uniform noise level from the residuals, and the second pass refits with
    that error so curve_fit returns a meaningful covariance matrix.

    Keywords:
    # bounds ---- a 2d tuple of the low then the high values to bound the problem by
    # x0 --- initial guess for the fit; this can be very important because the
    #        least-squares space over all the parameters is complex
    # amp_norm --- do a normalization for variable amplitude; useful when the
    #              transfer function of the cryostat is not flat
    Returns a dict with 'fit', 'fit_result', 'x0_result', 'x0', 'z'.
    '''
    if ('bounds' in keywords):
        bounds = keywords['bounds']
    else:
        #define default bounds
        print("default bounds used")
        bounds = ([np.min(x),2000,.01,-np.pi,0,-5,-5,1*10**-9,np.min(x)],[np.max(x),200000,1,np.pi,5,5,5,1*10**-6,np.max(x)])
    if ('x0' in keywords):
        x0 = keywords['x0']
    else:
        #define default initial guess
        print("default initial guess used")
        x0 = guess_x0_iq_nonlinear(x,z)
    #Amplitude normalization?
    do_amp_norm = 0
    if ('amp_norm' in keywords):
        amp_norm = keywords['amp_norm']
        if amp_norm == True:
            do_amp_norm = 1
        elif amp_norm == False:
            do_amp_norm = 0
        else:
            print("please specify amp_norm as True or False")
    if do_amp_norm == 1:
        z = amplitude_normalization(x,z)
    # curve_fit works on real vectors: stack real and imaginary parts
    z_stacked = np.hstack((np.real(z),np.imag(z)))
    # First pass: unweighted fit, used only to estimate the residual variance.
    # (The original also computed fit_result/x0_result here and discarded
    # them; an unused fr_guess local was removed as well.)
    fit = optimization.curve_fit(nonlinear_iq_for_fitter, x, z_stacked,x0,bounds = bounds)
    fit_result_stacked = nonlinear_iq_for_fitter(x,*fit[0])
    # Uniform per-point error from the first-pass residuals.
    var = np.sum((z_stacked-fit_result_stacked)**2)/(z_stacked.shape[0] - 1)
    err = np.ones(z_stacked.shape[0])*np.sqrt(var)
    # Second pass: refit weighted by the estimated error.
    fit = optimization.curve_fit(nonlinear_iq_for_fitter, x, z_stacked,x0,err,bounds = bounds)
    fit_result = nonlinear_iq(x,*fit[0])
    x0_result = nonlinear_iq(x,*x0)
    #make a dictionary to return
    fit_dict = {'fit': fit, 'fit_result': fit_result, 'x0_result': x0_result, 'x0':x0, 'z':z}
    return fit_dict
# function for fitting an iq sweep with the above equation
def fit_nonlinear_mag(x,z,**keywords):
'''
# keywards are
# bounds ---- which is a 2d tuple of low the high values to bound the problem by
# x0 --- intial guess for the fit this can be very important becuase because least square space over all the parameter is comple
# | |
Constraint(expr= m.x2159 - m.b3015 <= 0)
# --- Machine-generated Pyomo model fragment (do not edit by hand) ---
# Big-M-free linking constraints of the form  x_i - b_j <= 0,  i.e.  x_i <= b_j:
# each continuous variable m.x2160..m.x2250 is capped by binary indicator
# m.b3015, and m.x2251..m.x2341 by m.b3016.
# NOTE(review): presumably each x_i is bounded in [0, 1] so that b_j acts as an
# on/off switch for its whole group -- confirm against the Var declarations.
m.c2161 = Constraint(expr= m.x2160 - m.b3015 <= 0)
m.c2162 = Constraint(expr= m.x2161 - m.b3015 <= 0)
m.c2163 = Constraint(expr= m.x2162 - m.b3015 <= 0)
m.c2164 = Constraint(expr= m.x2163 - m.b3015 <= 0)
m.c2165 = Constraint(expr= m.x2164 - m.b3015 <= 0)
m.c2166 = Constraint(expr= m.x2165 - m.b3015 <= 0)
m.c2167 = Constraint(expr= m.x2166 - m.b3015 <= 0)
m.c2168 = Constraint(expr= m.x2167 - m.b3015 <= 0)
m.c2169 = Constraint(expr= m.x2168 - m.b3015 <= 0)
m.c2170 = Constraint(expr= m.x2169 - m.b3015 <= 0)
m.c2171 = Constraint(expr= m.x2170 - m.b3015 <= 0)
m.c2172 = Constraint(expr= m.x2171 - m.b3015 <= 0)
m.c2173 = Constraint(expr= m.x2172 - m.b3015 <= 0)
m.c2174 = Constraint(expr= m.x2173 - m.b3015 <= 0)
m.c2175 = Constraint(expr= m.x2174 - m.b3015 <= 0)
m.c2176 = Constraint(expr= m.x2175 - m.b3015 <= 0)
m.c2177 = Constraint(expr= m.x2176 - m.b3015 <= 0)
m.c2178 = Constraint(expr= m.x2177 - m.b3015 <= 0)
m.c2179 = Constraint(expr= m.x2178 - m.b3015 <= 0)
m.c2180 = Constraint(expr= m.x2179 - m.b3015 <= 0)
m.c2181 = Constraint(expr= m.x2180 - m.b3015 <= 0)
m.c2182 = Constraint(expr= m.x2181 - m.b3015 <= 0)
m.c2183 = Constraint(expr= m.x2182 - m.b3015 <= 0)
m.c2184 = Constraint(expr= m.x2183 - m.b3015 <= 0)
m.c2185 = Constraint(expr= m.x2184 - m.b3015 <= 0)
m.c2186 = Constraint(expr= m.x2185 - m.b3015 <= 0)
m.c2187 = Constraint(expr= m.x2186 - m.b3015 <= 0)
m.c2188 = Constraint(expr= m.x2187 - m.b3015 <= 0)
m.c2189 = Constraint(expr= m.x2188 - m.b3015 <= 0)
m.c2190 = Constraint(expr= m.x2189 - m.b3015 <= 0)
m.c2191 = Constraint(expr= m.x2190 - m.b3015 <= 0)
m.c2192 = Constraint(expr= m.x2191 - m.b3015 <= 0)
m.c2193 = Constraint(expr= m.x2192 - m.b3015 <= 0)
m.c2194 = Constraint(expr= m.x2193 - m.b3015 <= 0)
m.c2195 = Constraint(expr= m.x2194 - m.b3015 <= 0)
m.c2196 = Constraint(expr= m.x2195 - m.b3015 <= 0)
m.c2197 = Constraint(expr= m.x2196 - m.b3015 <= 0)
m.c2198 = Constraint(expr= m.x2197 - m.b3015 <= 0)
m.c2199 = Constraint(expr= m.x2198 - m.b3015 <= 0)
m.c2200 = Constraint(expr= m.x2199 - m.b3015 <= 0)
m.c2201 = Constraint(expr= m.x2200 - m.b3015 <= 0)
m.c2202 = Constraint(expr= m.x2201 - m.b3015 <= 0)
m.c2203 = Constraint(expr= m.x2202 - m.b3015 <= 0)
m.c2204 = Constraint(expr= m.x2203 - m.b3015 <= 0)
m.c2205 = Constraint(expr= m.x2204 - m.b3015 <= 0)
m.c2206 = Constraint(expr= m.x2205 - m.b3015 <= 0)
m.c2207 = Constraint(expr= m.x2206 - m.b3015 <= 0)
m.c2208 = Constraint(expr= m.x2207 - m.b3015 <= 0)
m.c2209 = Constraint(expr= m.x2208 - m.b3015 <= 0)
m.c2210 = Constraint(expr= m.x2209 - m.b3015 <= 0)
m.c2211 = Constraint(expr= m.x2210 - m.b3015 <= 0)
m.c2212 = Constraint(expr= m.x2211 - m.b3015 <= 0)
m.c2213 = Constraint(expr= m.x2212 - m.b3015 <= 0)
m.c2214 = Constraint(expr= m.x2213 - m.b3015 <= 0)
m.c2215 = Constraint(expr= m.x2214 - m.b3015 <= 0)
m.c2216 = Constraint(expr= m.x2215 - m.b3015 <= 0)
m.c2217 = Constraint(expr= m.x2216 - m.b3015 <= 0)
m.c2218 = Constraint(expr= m.x2217 - m.b3015 <= 0)
m.c2219 = Constraint(expr= m.x2218 - m.b3015 <= 0)
m.c2220 = Constraint(expr= m.x2219 - m.b3015 <= 0)
m.c2221 = Constraint(expr= m.x2220 - m.b3015 <= 0)
m.c2222 = Constraint(expr= m.x2221 - m.b3015 <= 0)
m.c2223 = Constraint(expr= m.x2222 - m.b3015 <= 0)
m.c2224 = Constraint(expr= m.x2223 - m.b3015 <= 0)
m.c2225 = Constraint(expr= m.x2224 - m.b3015 <= 0)
m.c2226 = Constraint(expr= m.x2225 - m.b3015 <= 0)
m.c2227 = Constraint(expr= m.x2226 - m.b3015 <= 0)
m.c2228 = Constraint(expr= m.x2227 - m.b3015 <= 0)
m.c2229 = Constraint(expr= m.x2228 - m.b3015 <= 0)
m.c2230 = Constraint(expr= m.x2229 - m.b3015 <= 0)
m.c2231 = Constraint(expr= m.x2230 - m.b3015 <= 0)
m.c2232 = Constraint(expr= m.x2231 - m.b3015 <= 0)
m.c2233 = Constraint(expr= m.x2232 - m.b3015 <= 0)
m.c2234 = Constraint(expr= m.x2233 - m.b3015 <= 0)
m.c2235 = Constraint(expr= m.x2234 - m.b3015 <= 0)
m.c2236 = Constraint(expr= m.x2235 - m.b3015 <= 0)
m.c2237 = Constraint(expr= m.x2236 - m.b3015 <= 0)
m.c2238 = Constraint(expr= m.x2237 - m.b3015 <= 0)
m.c2239 = Constraint(expr= m.x2238 - m.b3015 <= 0)
m.c2240 = Constraint(expr= m.x2239 - m.b3015 <= 0)
m.c2241 = Constraint(expr= m.x2240 - m.b3015 <= 0)
m.c2242 = Constraint(expr= m.x2241 - m.b3015 <= 0)
m.c2243 = Constraint(expr= m.x2242 - m.b3015 <= 0)
m.c2244 = Constraint(expr= m.x2243 - m.b3015 <= 0)
m.c2245 = Constraint(expr= m.x2244 - m.b3015 <= 0)
m.c2246 = Constraint(expr= m.x2245 - m.b3015 <= 0)
m.c2247 = Constraint(expr= m.x2246 - m.b3015 <= 0)
m.c2248 = Constraint(expr= m.x2247 - m.b3015 <= 0)
m.c2249 = Constraint(expr= m.x2248 - m.b3015 <= 0)
m.c2250 = Constraint(expr= m.x2249 - m.b3015 <= 0)
m.c2251 = Constraint(expr= m.x2250 - m.b3015 <= 0)
# From here on the group indicator switches from m.b3015 to m.b3016.
m.c2252 = Constraint(expr= m.x2251 - m.b3016 <= 0)
m.c2253 = Constraint(expr= m.x2252 - m.b3016 <= 0)
m.c2254 = Constraint(expr= m.x2253 - m.b3016 <= 0)
m.c2255 = Constraint(expr= m.x2254 - m.b3016 <= 0)
m.c2256 = Constraint(expr= m.x2255 - m.b3016 <= 0)
m.c2257 = Constraint(expr= m.x2256 - m.b3016 <= 0)
m.c2258 = Constraint(expr= m.x2257 - m.b3016 <= 0)
m.c2259 = Constraint(expr= m.x2258 - m.b3016 <= 0)
m.c2260 = Constraint(expr= m.x2259 - m.b3016 <= 0)
m.c2261 = Constraint(expr= m.x2260 - m.b3016 <= 0)
m.c2262 = Constraint(expr= m.x2261 - m.b3016 <= 0)
m.c2263 = Constraint(expr= m.x2262 - m.b3016 <= 0)
m.c2264 = Constraint(expr= m.x2263 - m.b3016 <= 0)
m.c2265 = Constraint(expr= m.x2264 - m.b3016 <= 0)
m.c2266 = Constraint(expr= m.x2265 - m.b3016 <= 0)
m.c2267 = Constraint(expr= m.x2266 - m.b3016 <= 0)
m.c2268 = Constraint(expr= m.x2267 - m.b3016 <= 0)
m.c2269 = Constraint(expr= m.x2268 - m.b3016 <= 0)
m.c2270 = Constraint(expr= m.x2269 - m.b3016 <= 0)
m.c2271 = Constraint(expr= m.x2270 - m.b3016 <= 0)
m.c2272 = Constraint(expr= m.x2271 - m.b3016 <= 0)
m.c2273 = Constraint(expr= m.x2272 - m.b3016 <= 0)
m.c2274 = Constraint(expr= m.x2273 - m.b3016 <= 0)
m.c2275 = Constraint(expr= m.x2274 - m.b3016 <= 0)
m.c2276 = Constraint(expr= m.x2275 - m.b3016 <= 0)
m.c2277 = Constraint(expr= m.x2276 - m.b3016 <= 0)
m.c2278 = Constraint(expr= m.x2277 - m.b3016 <= 0)
m.c2279 = Constraint(expr= m.x2278 - m.b3016 <= 0)
m.c2280 = Constraint(expr= m.x2279 - m.b3016 <= 0)
m.c2281 = Constraint(expr= m.x2280 - m.b3016 <= 0)
m.c2282 = Constraint(expr= m.x2281 - m.b3016 <= 0)
m.c2283 = Constraint(expr= m.x2282 - m.b3016 <= 0)
m.c2284 = Constraint(expr= m.x2283 - m.b3016 <= 0)
m.c2285 = Constraint(expr= m.x2284 - m.b3016 <= 0)
m.c2286 = Constraint(expr= m.x2285 - m.b3016 <= 0)
m.c2287 = Constraint(expr= m.x2286 - m.b3016 <= 0)
m.c2288 = Constraint(expr= m.x2287 - m.b3016 <= 0)
m.c2289 = Constraint(expr= m.x2288 - m.b3016 <= 0)
m.c2290 = Constraint(expr= m.x2289 - m.b3016 <= 0)
m.c2291 = Constraint(expr= m.x2290 - m.b3016 <= 0)
m.c2292 = Constraint(expr= m.x2291 - m.b3016 <= 0)
m.c2293 = Constraint(expr= m.x2292 - m.b3016 <= 0)
m.c2294 = Constraint(expr= m.x2293 - m.b3016 <= 0)
m.c2295 = Constraint(expr= m.x2294 - m.b3016 <= 0)
m.c2296 = Constraint(expr= m.x2295 - m.b3016 <= 0)
m.c2297 = Constraint(expr= m.x2296 - m.b3016 <= 0)
m.c2298 = Constraint(expr= m.x2297 - m.b3016 <= 0)
m.c2299 = Constraint(expr= m.x2298 - m.b3016 <= 0)
m.c2300 = Constraint(expr= m.x2299 - m.b3016 <= 0)
m.c2301 = Constraint(expr= m.x2300 - m.b3016 <= 0)
m.c2302 = Constraint(expr= m.x2301 - m.b3016 <= 0)
m.c2303 = Constraint(expr= m.x2302 - m.b3016 <= 0)
m.c2304 = Constraint(expr= m.x2303 - m.b3016 <= 0)
m.c2305 = Constraint(expr= m.x2304 - m.b3016 <= 0)
m.c2306 = Constraint(expr= m.x2305 - m.b3016 <= 0)
m.c2307 = Constraint(expr= m.x2306 - m.b3016 <= 0)
m.c2308 = Constraint(expr= m.x2307 - m.b3016 <= 0)
m.c2309 = Constraint(expr= m.x2308 - m.b3016 <= 0)
m.c2310 = Constraint(expr= m.x2309 - m.b3016 <= 0)
m.c2311 = Constraint(expr= m.x2310 - m.b3016 <= 0)
m.c2312 = Constraint(expr= m.x2311 - m.b3016 <= 0)
m.c2313 = Constraint(expr= m.x2312 - m.b3016 <= 0)
m.c2314 = Constraint(expr= m.x2313 - m.b3016 <= 0)
m.c2315 = Constraint(expr= m.x2314 - m.b3016 <= 0)
m.c2316 = Constraint(expr= m.x2315 - m.b3016 <= 0)
m.c2317 = Constraint(expr= m.x2316 - m.b3016 <= 0)
m.c2318 = Constraint(expr= m.x2317 - m.b3016 <= 0)
m.c2319 = Constraint(expr= m.x2318 - m.b3016 <= 0)
m.c2320 = Constraint(expr= m.x2319 - m.b3016 <= 0)
m.c2321 = Constraint(expr= m.x2320 - m.b3016 <= 0)
m.c2322 = Constraint(expr= m.x2321 - m.b3016 <= 0)
m.c2323 = Constraint(expr= m.x2322 - m.b3016 <= 0)
m.c2324 = Constraint(expr= m.x2323 - m.b3016 <= 0)
m.c2325 = Constraint(expr= m.x2324 - m.b3016 <= 0)
m.c2326 = Constraint(expr= m.x2325 - m.b3016 <= 0)
m.c2327 = Constraint(expr= m.x2326 - m.b3016 <= 0)
m.c2328 = Constraint(expr= m.x2327 - m.b3016 <= 0)
m.c2329 = Constraint(expr= m.x2328 - m.b3016 <= 0)
m.c2330 = Constraint(expr= m.x2329 - m.b3016 <= 0)
m.c2331 = Constraint(expr= m.x2330 - m.b3016 <= 0)
m.c2332 = Constraint(expr= m.x2331 - m.b3016 <= 0)
m.c2333 = Constraint(expr= m.x2332 - m.b3016 <= 0)
m.c2334 = Constraint(expr= m.x2333 - m.b3016 <= 0)
m.c2335 = Constraint(expr= m.x2334 - m.b3016 <= 0)
m.c2336 = Constraint(expr= m.x2335 - m.b3016 <= 0)
m.c2337 = Constraint(expr= m.x2336 - m.b3016 <= 0)
m.c2338 = Constraint(expr= m.x2337 - m.b3016 <= 0)
m.c2339 = Constraint(expr= m.x2338 - m.b3016 <= 0)
m.c2340 = Constraint(expr= m.x2339 - m.b3016 <= 0)
m.c2341 = Constraint(expr= m.x2340 - m.b3016 <= 0)
m.c2342 = Constraint(expr= m.x2341 - m.b3016 <= 0)
m.c2343 | |
# Repository: stevenrbrandt/nrpytutorial
# finite_difference.py:
# As documented in the NRPy+ tutorial notebook:
# Tutorial-Finite_Difference_Derivatives.ipynb ,
# This module generates C kernels for numerically
# solving PDEs with finite differences.
#
# Depends primarily on: outputC.py and grid.py.
# Author: <NAME>
# zachetie **at** gmail **dot** com
from outputC import parse_outCparams_string, outC_function_dict, outC_function_prototype_dict, outC_NRPy_basic_defines_h_dict, outC_function_master_list # NRPy+: Core C code output module
import NRPy_param_funcs as par # NRPy+: parameter interface
import sympy as sp # SymPy: The Python computer algebra package upon which NRPy+ depends
import grid as gri # NRPy+: Functions having to do with numerical grids
import os, sys # Standard Python module for multiplatform OS-level functions
from finite_difference_helpers import extract_from_list_of_deriv_vars__base_gfs_and_deriv_ops_lists
from finite_difference_helpers import generate_list_of_deriv_vars_from_lhrh_sympyexpr_list
from finite_difference_helpers import read_gfs_from_memory, FDparams, construct_Ccode
# Step 1: Initialize free parameters for this module:
modulename = __name__
# Centered finite difference accuracy order
par.initialize_param(par.glb_param("int", modulename, "FD_CENTDERIVS_ORDER", 4))
# When True, FD derivative stencils are emitted as standalone C functions
# (see output_finite_difference_functions_h below) instead of inline code.
par.initialize_param(par.glb_param("bool", modulename, "enable_FD_functions", False))
# Offset added to the centered-derivative order for Kreiss-Oliger (dKOD)
# dissipation stencils -- NOTE(review): inferred from the parameter name;
# confirm against compute_fdcoeffs_fdstencl's usage.
par.initialize_param(par.glb_param("int", modulename, "FD_KO_ORDER__CENTDERIVS_PLUS", 2))
def FD_outputC(filename, sympyexpr_list, params="", upwindcontrolvec=""):
    """
    Generate C code that evaluates, via finite differences, every derivative
    appearing in sympyexpr_list, then evaluates the SymPy expressions
    themselves and writes the results back to main memory.

    :param filename: "stdout" to print the C code, "returnstring" to return it
                     as a Python string, or a file path to write/append to
                     (access mode taken from outCparams.outCfileaccess).
    :param sympyexpr_list: a single lhrh(lhs=var, rhs=expr) object or a list
                     of them; a bare object is wrapped into a one-element list.
    :param params: outputC parameter string, parsed by parse_outCparams_string().
    :param upwindcontrolvec: upwind control vector; "" disables upwinding.
    :return: the generated C code string when filename == "returnstring",
             otherwise None.
    :raises SystemExit: if a derivative references an unregistered
             gridfunction or uses an unsupported derivative operator.
    """
    outCparams = parse_outCparams_string(params)
    # Step 0.a: Allow a single SymPy expression to be passed in place of a
    #           list, so the rest of the routine can assume a list.
    if not isinstance(sympyexpr_list, list):
        sympyexpr_list = [sympyexpr_list]
    # Step 0.b: finite_difference.py takes control over outCparams.includebraces,
    # because outputC() is called twice: first for the reads from main memory
    # and the finite-difference stencil expressions, and second for the SymPy
    # expressions and writes to main memory. If outputC() added braces itself,
    # they would close between the two calls, yielding non-functioning C code.
    # Instead we wrap the *entire* output in braces here (see Step 5).
    if outCparams.includebraces == "True":
        indent = " "
    else:
        indent = ""
    # Step 0.c: FDparams named tuple stores parameters used in the finite-difference codegen
    FDparams.enable_SIMD = outCparams.enable_SIMD
    FDparams.PRECISION = par.parval_from_str("PRECISION")
    FDparams.FD_CD_order = par.parval_from_str("FD_CENTDERIVS_ORDER")
    FDparams.enable_FD_functions = par.parval_from_str("enable_FD_functions")
    FDparams.DIM = par.parval_from_str("DIM")
    FDparams.MemAllocStyle = par.parval_from_str("MemAllocStyle")
    FDparams.upwindcontrolvec = upwindcontrolvec
    FDparams.fullindent = indent + outCparams.preindent
    FDparams.outCparams = params
    # Step 1: From the list of SymPy expressions in the form
    #         [lhrh(lhs=var, rhs=expr), lhrh(...), ...],
    #         generate all derivative expressions to be processed next.
    list_of_deriv_vars = generate_list_of_deriv_vars_from_lhrh_sympyexpr_list(sympyexpr_list, FDparams)
    # Step 2a: Extract from list_of_deriv_vars a list of base gridfunctions
    #          and a list of derivative operators (pure string manipulation).
    # Example:
    # >>> extract_from_list_of_deriv_vars__base_gfs_and_deriv_ops_lists(["aDD_dD012","aDD_dKOD012","vetU_dKOD21","hDD_dDD0112"])
    # (['aDD01', 'aDD01', 'vetU2', 'hDD01'], ['dD2', 'dKOD2', 'dKOD1', 'dDD12'])
    list_of_base_gridfunction_names_in_derivs, list_of_deriv_operators = \
        extract_from_list_of_deriv_vars__base_gfs_and_deriv_ops_lists(list_of_deriv_vars)
    # Step 2b: Verify that every base gridfunction is registered; error out
    #          otherwise. Set membership replaces the original O(gfs) scan
    #          per derivative.
    registered_gf_names = {str(gf.name) for gf in gri.glb_gridfcs_list}
    for basegf in list_of_base_gridfunction_names_in_derivs:
        if basegf not in registered_gf_names:
            print("Error: Attempting to take the derivative of "+basegf+", which is not a registered gridfunction.")
            print(" Make sure your gridfunction name does not have any underscores in it!")
            sys.exit(1)
    # Step 2c: Verify each derivative operator is supported; error out otherwise.
    for deriv_op in list_of_deriv_operators:
        if not any(derivID in deriv_op for derivID in ("dD", "dupD", "ddnD", "dKOD")):
            print("Error: Valid derivative operator in "+deriv_op+" not found.")
            sys.exit(1)
    # Step 3: Evaluate the finite-difference stencil for each derivative
    #         operator. Each stencil consists of two parts:
    #         1) the coefficient (stored as a rational number), and
    #         2) the index corresponding to the coefficient, stored as a
    #            string: in 3D the empty string is (i,j,k), "ip1" is
    #            (i+1,j,k), "ip1kp1" is (i+1,j,k+1), etc.
    fdcoeffs = []
    fdstencl = []
    for deriv_op in list_of_deriv_operators:
        coeffs, stencl = compute_fdcoeffs_fdstencl(deriv_op)
        fdcoeffs.append(coeffs)
        fdstencl.append(stencl)
    # Step 4: Create C code to read gridfunctions from memory.
    read_from_memory_Ccode = read_gfs_from_memory(list_of_base_gridfunction_names_in_derivs, fdstencl, sympyexpr_list,
                                                  FDparams)
    # Step 5: Construct the C code, wrapping it in braces if requested
    #         (see Step 0.b for why the braces are added here).
    Coutput = ""
    if outCparams.includebraces == "True":
        Coutput = outCparams.preindent + "{\n"
    Coutput = construct_Ccode(sympyexpr_list, list_of_deriv_vars,
                              list_of_base_gridfunction_names_in_derivs, list_of_deriv_operators,
                              fdcoeffs, fdstencl, read_from_memory_Ccode, FDparams, Coutput)
    if outCparams.includebraces == "True":
        Coutput += outCparams.preindent+"}"
    # Step 6: Output the C code in the desired format: stdout, string, or file.
    if filename == "stdout":
        print(Coutput)
    elif filename == "returnstring":
        return Coutput
    else:
        # Output to the file specified by filename, honoring the requested
        # access mode ("w" to overwrite, "a" to append).
        with open(filename, outCparams.outCfileaccess) as file:
            file.write(Coutput)
        successstr = ""
        if outCparams.outCfileaccess == "a":
            successstr = "Appended "
        elif outCparams.outCfileaccess == "w":
            successstr = "Wrote "
        print(successstr + "to file \"" + filename + "\"")
################
# TO BE DEPRECATED:
def output_finite_difference_functions_h(path=os.path.join(".")):
    """
    Write finite_difference_functions.h, collecting every registered C
    function marked __FD_OPERATOR_FUNC__ into a single header, then purge
    those functions from the outputC registries so they are not also emitted
    (and built by the Makefile) as separate C files.

    :param path: directory in which to create finite_difference_functions.h.
    """
    header_path = os.path.join(path, "finite_difference_functions.h")
    with open(header_path, "w") as hdr:
        hdr.write("""
#ifndef __FD_FUNCTIONS_H__
#define __FD_FUNCTIONS_H__
#include "math.h"
#include "stdio.h"
#include "stdlib.h"
""")
        # Pick compiler attributes; the Einstein Toolkit provides its own macros.
        if par.parval_from_str("grid::GridFuncMemAccess") == "ETK":
            UNUSED = "CCTK_ATTRIBUTE_UNUSED"
            NOINLINE = "CCTK_ATTRIBUTE_NOINLINE"
        else:
            UNUSED = "__attribute__((unused))"
            NOINLINE = "__attribute__((noinline))"
        hdr.write("#define _UNUSED " + UNUSED + "\n")
        hdr.write("#define _NOINLINE " + NOINLINE + "\n")
        # Emit every FD operator function. Many _NegativeOne_'s get optimized
        # away in the SIMD postprocessing step, so mark them unused to
        # suppress the compiler warnings.
        for body in outC_function_dict.values():
            if "__FD_OPERATOR_FUNC__" in body:
                hdr.write(body.replace("const REAL_SIMD_ARRAY _NegativeOne_ =",
                                       "const REAL_SIMD_ARRAY "+UNUSED+" _NegativeOne_ ="))
        # Clear all FD functions from the outputC registries now that they
        # live in finite_difference_functions.h; otherwise outputC would emit
        # them as individual C codes and try to build them in the Makefile.
        names_to_pop = []
        funcs_to_remove = []
        for func in outC_function_master_list:
            if "__FD_OPERATOR_FUNC__" in func.desc:
                if func.name not in names_to_pop:
                    names_to_pop.append(func.name)
                if func not in funcs_to_remove:
                    funcs_to_remove.append(func)
        for func in funcs_to_remove:
            outC_function_master_list.remove(func)
        for name in names_to_pop:
            outC_function_dict.pop(name)
            if name in outC_function_prototype_dict:
                outC_function_prototype_dict.pop(name)
        hdr.write("#endif // #ifndef __FD_FUNCTIONS_H__\n")
################
def register_C_functions_and_NRPy_basic_defines(NGHOSTS_account_for_onezone_upwind=False, enable_SIMD=True):
    """
    Register finite_difference's contribution to NRPy_basic_defines.h.

    :param NGHOSTS_account_for_onezone_upwind: when True, add one extra ghost
        zone on top of FD_CENTDERIVS_ORDER/2 (upwinding in e.g. BSSN needs it).
    :param enable_SIMD: when False, also emit the scalar UPWIND_ALG() macro,
        which is otherwise provided by the SIMD machinery.
    """
    # Ghost-zone count is half the centered finite-difference order,
    # plus one when one-zone upwinding must be accommodated.
    NGHOSTS = int(par.parval_from_str("finite_difference::FD_CENTDERIVS_ORDER")/2)
    if NGHOSTS_account_for_onezone_upwind:
        NGHOSTS += 1
    pieces = ["""
// Set the number of ghost zones
// Note that upwinding in e.g., BSSN requires that NGHOSTS = FD_CENTDERIVS_ORDER/2 + 1 <- Notice the +1.
"""]
    pieces.append("#define NGHOSTS " + str(NGHOSTS) + "\n")
    if not enable_SIMD:
        pieces.append("""
// When enable_SIMD = False, this is the UPWIND_ALG() macro:
#define UPWIND_ALG(UpwindVecU) UpwindVecU > 0.0 ? 1.0 : 0.0\n""")
    outC_NRPy_basic_defines_h_dict["finite_difference"] = "".join(pieces)
#######################################################
# FINITE-DIFFERENCE COEFFICIENT ALGORITHM
# Define the to-be-inverted matrix, A.
# We define A row-by-row, according to the prescription
# derived in notes/notes.pdf, via the following pattern
# that applies for arbitrary order.
#
# As an example, consider a 5-point finite difference
# stencil (4th-order accurate), where we wish to compute
# some derivative at the center point.
#
# Then A is given by:
#
# -2^0 -1^0 1 1^0 2^0
# -2^1 -1^1 0 1^1 2^1
# -2^2 -1^2 0 1^2 2^2
# -2^3 -1^3 0 1^3 2^3
# -2^4 -1^4 0 1^4 2^4
#
# Then right-multiplying A^{-1}
# by (1 0 0 0 0)^T will yield 0th deriv. stencil
# by (0 1 0 0 0)^T will yield 1st deriv. stencil
# by (0 0 1 0 0)^T will yield 2nd deriv. stencil
# etc.
#
# Next suppose we want an upwinded, 4th-order accurate
# stencil. For this case, A is given by:
#
# -1^0 1 1^0 2^0 3^0
# -1^1 0 1^1 2^1 3^1
# -1^2 0 1^2 2^2 3^2
# -1^3 0 1^3 2^3 3^3
# -1^4 0 1^4 2^4 3^4
#
# ... and similarly for the downwinded derivative.
#
# Finally, let's consider a 3rd-order accurate
# stencil. This would correspond to an in-place
# upwind stencil with stencil radius of 2 gridpoints,
# where other, centered derivatives are 4th-order
# accurate. For this case, A is given by:
#
# -1^0 1 1^0 2^0
# -1^1 0 1^1 2^1
# -1^2 0 1^2 2^2
# -1^3 0 1^3 2^3
# -1^4 0 1^4 2^4
#
# ... and similarly for the downwinded derivative.
#
# The | |
- 0.00153332*m.x228 - 0.0100498*m.x229 - 0.00785713*m.x230 + 0.00664334*m.x231
+ 0.00636725*m.x232 + 0.00180223*m.x233 - 0.00360195*m.x234 + 0.0366567*m.x235
- 0.0185973*m.x236 + 0.014965*m.x237 + 0.000920465*m.x238 - 0.00905371*m.x239
+ 0.0171861*m.x240 - 0.000803595*m.x241 - 0.0131086*m.x242 - 0.00149067*m.x243
+ 0.00578221*m.x244 + 0.00779234*m.x245 + 0.0108641*m.x246 + 0.0238284*m.x247
+ 0.00571637*m.x248 + 0.0216772*m.x249 + 0.00964961*m.x250 + 0.00274662*m.x251
+ 0.0197406*m.x252 - 0.00348555*m.x253 + 0.135419*m.x254 + 0.0148808*m.x255
- 0.00521642*m.x256 + 0.00301132*m.x257 + 0.0059386*m.x258 + 0.000656537*m.x259
- 5.81815E-5*m.x260 + 0.00610465*m.x261 + 0.00544527*m.x262 + 0.0153454*m.x263
+ 0.0292089*m.x264 + 0.0160595*m.x265 - 0.00416091*m.x266 - 0.0114877*m.x267
- 0.00518739*m.x268 + 0.00285023*m.x269 - 0.00263203*m.x270 + 0.00784535*m.x271
+ 0.00552304*m.x272 - 0.00966017*m.x273 + 0.00209512*m.x274 - 0.00647057*m.x275
+ 0.0011822*m.x276 + 0.0120375*m.x277 - 0.00082898*m.x278 + 0.00474529*m.x279
+ 0.00506484*m.x280 + 0.0257774*m.x281 - 0.00377692*m.x282 + 0.00714516*m.x283
+ 0.0077099*m.x284 + 0.0105595*m.x285 + 0.0161573*m.x286 + 0.00452026*m.x287
+ 0.00366557*m.x288 + 0.0131458*m.x289 - 0.00873615*m.x290 + 0.0046782*m.x291
+ 0.00459631*m.x292 + 0.0212014*m.x293 + 0.0117374*m.x294 + 0.0229518*m.x295
+ 0.00232247*m.x296 - 0.0141535*m.x297 + 0.00517337*m.x298 + 0.0102284*m.x299
+ 0.016647*m.x300 + 0.00970317*m.x301 + 0.0100764*m.x302 + 0.00923673*m.x303 == 0)
m.c158 = Constraint(expr= - m.x53 + 0.0163137*m.x204 + 0.0557033*m.x205 - 0.00120817*m.x206 + 0.0146599*m.x207
+ 0.0295561*m.x208 - 0.0144879*m.x209 + 0.00773796*m.x210 - 0.0266695*m.x211
+ 0.0132613*m.x212 - 0.0102711*m.x213 + 0.0593808*m.x214 + 0.0146264*m.x215
+ 0.00654495*m.x216 + 0.0206173*m.x217 - 0.0164481*m.x218 - 0.00659662*m.x219
+ 0.00896247*m.x220 + 0.020479*m.x221 - 0.0094595*m.x222 + 0.0515904*m.x223
+ 0.00288958*m.x224 + 0.00302148*m.x225 + 0.00456304*m.x226 + 0.0179012*m.x227
- 0.00274677*m.x228 + 0.0373415*m.x229 + 0.00317892*m.x230 + 0.00813124*m.x231
+ 0.0177429*m.x232 + 0.0269342*m.x233 + 0.0141086*m.x234 - 0.00105083*m.x235
+ 0.00879757*m.x236 - 0.0255534*m.x237 + 0.0125861*m.x238 + 0.0143571*m.x239
+ 0.0129119*m.x240 + 0.00351671*m.x241 + 0.0891112*m.x242 + 0.000467774*m.x243
+ 0.0121199*m.x244 + 0.0336911*m.x245 - 0.0111579*m.x246 - 0.00200986*m.x247
+ 0.0314978*m.x248 + 0.0180178*m.x249 + 0.023352*m.x250 - 0.00112164*m.x251 + 0.0196807*m.x252
+ 0.0225156*m.x253 + 0.0148808*m.x254 + 0.570686*m.x255 - 0.0118855*m.x256 - 0.014196*m.x257
+ 0.011069*m.x258 + 0.0175376*m.x259 + 0.0238431*m.x260 - 0.0397472*m.x261 - 0.0442114*m.x262
+ 0.0518827*m.x263 + 0.0161886*m.x264 + 0.00172998*m.x265 - 0.0149321*m.x266
- 0.00526354*m.x267 + 0.0191389*m.x268 - 0.000438258*m.x269 - 0.00523691*m.x270
- 0.00972975*m.x271 - 0.0150659*m.x272 - 0.0484007*m.x273 + 0.0154489*m.x274
+ 0.0227933*m.x275 + 0.00649497*m.x276 + 0.0365298*m.x277 + 0.00147956*m.x278
- 0.0171972*m.x279 + 0.0090494*m.x280 + 0.00449672*m.x281 + 0.000262274*m.x282
- 0.00945913*m.x283 + 0.0453251*m.x284 - 0.000808541*m.x285 + 0.0226841*m.x286
+ 0.0177547*m.x287 + 0.0128398*m.x288 + 0.0231386*m.x289 - 0.0113699*m.x290 + 0.0889439*m.x291
+ 0.0105083*m.x292 + 0.00390748*m.x293 + 0.00684597*m.x294 + 0.0131893*m.x295
- 0.0139354*m.x296 - 0.0247692*m.x297 + 0.0243459*m.x298 + 0.0752523*m.x299
- 0.00276693*m.x300 - 0.00836392*m.x301 + 0.0169651*m.x302 + 0.0207104*m.x303 == 0)
m.c159 = Constraint(expr= - m.x54 - 0.000494175*m.x204 + 0.00892139*m.x205 - 0.0113822*m.x206 + 0.0202259*m.x207
+ 0.00841613*m.x208 + 0.0254505*m.x209 + 0.0122267*m.x210 + 0.00886183*m.x211
- 0.00967266*m.x212 + 0.000261336*m.x213 + 0.000709314*m.x214 + 0.0570949*m.x215
+ 0.01696*m.x216 - 0.0067449*m.x217 + 0.00992241*m.x218 + 0.00760297*m.x219 + 0.0230109*m.x220
- 0.00154837*m.x221 - 0.0318332*m.x222 - 0.00645678*m.x223 + 0.0206887*m.x224
+ 0.00339277*m.x225 + 0.00172033*m.x226 + 0.00828721*m.x227 + 0.011526*m.x228
+ 0.00554618*m.x229 + 0.0107528*m.x230 + 0.00525742*m.x231 - 0.00438567*m.x232
+ 0.0137397*m.x233 + 0.0367283*m.x234 + 0.00408317*m.x235 - 0.00320472*m.x236
+ 0.0104359*m.x237 - 0.00390865*m.x238 + 0.0151841*m.x239 + 0.0235719*m.x240
+ 0.0145538*m.x241 + 0.0358926*m.x242 + 0.0104228*m.x243 - 0.00778263*m.x244
+ 0.0131565*m.x245 - 0.010758*m.x246 + 0.00469393*m.x247 + 0.0144722*m.x248
- 0.00109436*m.x249 + 0.000843914*m.x250 - 0.0111344*m.x251 + 0.00493219*m.x252
+ 0.00631849*m.x253 - 0.00521642*m.x254 - 0.0118855*m.x255 + 0.226022*m.x256
+ 0.0134414*m.x257 + 0.0132028*m.x258 + 0.0182593*m.x259 + 0.00378479*m.x260
+ 0.00402915*m.x261 + 0.00220607*m.x262 + 0.0103192*m.x263 + 0.0152444*m.x264
+ 0.0220514*m.x265 + 0.0445908*m.x266 - 0.00977257*m.x267 + 0.00947946*m.x268
+ 0.0081976*m.x269 + 0.00899719*m.x270 + 0.0216871*m.x271 + 0.0232*m.x272 + 0.0135089*m.x273
+ 0.0147635*m.x274 - 0.0169658*m.x275 + 0.00689011*m.x276 + 0.00530597*m.x277
+ 0.000210327*m.x278 - 0.00222996*m.x279 + 0.00353714*m.x280 + 0.0260232*m.x281
- 0.0188128*m.x282 - 0.00162038*m.x283 - 0.00901467*m.x284 + 0.031558*m.x285
+ 0.0131334*m.x286 + 0.010006*m.x287 - 0.00713827*m.x288 + 0.00638031*m.x289
+ 0.0140088*m.x290 + 0.0192336*m.x291 + 0.00082309*m.x292 + 0.00967333*m.x293
+ 0.000291585*m.x294 + 0.0165254*m.x295 + 0.00824013*m.x296 - 0.00561804*m.x297
+ 0.0192153*m.x298 - 0.00817059*m.x299 + 0.0078892*m.x300 + 0.00463932*m.x301
- 0.00593801*m.x302 + 0.00325836*m.x303 == 0)
m.c160 = Constraint(expr= - m.x55 + 0.0164977*m.x204 - 0.00637132*m.x205 + 0.0115937*m.x206 + 0.00222715*m.x207
+ 0.00832278*m.x208 + 0.00288051*m.x209 + 0.0155811*m.x210 + 0.0170693*m.x211
+ 0.0167367*m.x212 + 0.00274854*m.x213 + 0.00766591*m.x214 + 0.00451784*m.x215
+ 0.09051*m.x216 + 0.0455555*m.x217 + 0.0214377*m.x218 + 0.0166459*m.x219 + 0.0380049*m.x220
+ 0.0140183*m.x221 + 0.0316379*m.x222 - 0.0190381*m.x223 + 0.00659477*m.x224
+ 0.00760958*m.x225 + 0.00174846*m.x226 + 0.00342253*m.x227 + 9.44861E-5*m.x228
+ 0.0074701*m.x229 - 0.00906739*m.x230 + 0.015111*m.x231 - 0.00680131*m.x232
+ 0.00116831*m.x233 + 0.00320485*m.x234 - 0.0130126*m.x235 - 0.0046504*m.x236
+ 0.0058258*m.x237 + 0.00253837*m.x238 + 0.0144667*m.x239 + 0.0176837*m.x240
+ 0.0131903*m.x241 + 0.0274205*m.x242 + 0.00959745*m.x243 + 0.0206125*m.x244
+ 0.0127459*m.x245 + 0.0162983*m.x246 + 0.00134527*m.x247 + 0.0343682*m.x248
+ 0.00665208*m.x249 + 0.0110552*m.x250 + 0.0327084*m.x251 + 0.00452567*m.x252
+ 0.00655318*m.x253 + 0.00301132*m.x254 - 0.014196*m.x255 + 0.0134414*m.x256 + 0.438941*m.x257
+ 0.011138*m.x258 + 0.0267434*m.x259 + 0.0153667*m.x260 - 0.0139925*m.x261 + 0.00410561*m.x262
+ 0.0255464*m.x263 + 0.0476449*m.x264 + 0.0288895*m.x265 + 0.00855635*m.x266
- 0.0298097*m.x267 - 0.0193235*m.x268 - 0.00212818*m.x269 + 0.0152037*m.x270
+ 0.0243197*m.x271 + 0.0286571*m.x272 + 0.0238634*m.x273 + 0.00835242*m.x274
- 0.0044184*m.x275 + 0.0108029*m.x276 + 0.00721874*m.x277 + 0.0240962*m.x278
+ 0.0333277*m.x279 - 0.00435648*m.x280 + 0.0866383*m.x281 + 0.00354499*m.x282
+ 0.00280407*m.x283 - 0.0171587*m.x284 + 0.0167951*m.x285 + 0.0217323*m.x286
+ 0.0074812*m.x287 - 0.0200391*m.x288 + 0.00634167*m.x289 - 0.00043216*m.x290
- 0.000981924*m.x291 + 0.0100724*m.x292 + 0.00947697*m.x293 + 0.0122578*m.x294
+ 0.0215961*m.x295 + 0.0101139*m.x296 + 0.00689054*m.x297 + 0.0214997*m.x298
- 0.00197476*m.x299 + 0.0136175*m.x300 + 0.0236766*m.x301 + 0.00337669*m.x302
+ 0.0035861*m.x303 == 0)
m.c161 = Constraint(expr= - m.x56 + 0.00852804*m.x204 + 0.028098*m.x205 - 0.0206532*m.x206 - 0.018228*m.x207
+ 0.00806585*m.x208 - 0.00244405*m.x209 + 0.00782426*m.x210 - 0.0010862*m.x211
+ 0.000760504*m.x212 + 0.0082671*m.x213 + 0.0182084*m.x214 - 0.0264793*m.x215
- 0.00361466*m.x216 - 0.0220985*m.x217 + 0.0122468*m.x218 + 0.00111556*m.x219
+ 0.00594814*m.x220 - 0.0157061*m.x221 + 0.0193443*m.x222 + 0.0138196*m.x223
- 0.00192784*m.x224 - 0.00714421*m.x225 + 0.0155517*m.x226 + 0.0144765*m.x227
+ 0.0205903*m.x228 - 0.00778581*m.x229 + 0.00425862*m.x230 + 0.0266351*m.x231
+ 0.0104672*m.x232 + 0.00270328*m.x233 - 0.0033721*m.x234 + 0.00691299*m.x235
- 0.0112877*m.x236 + 0.0144468*m.x237 + 0.0021617*m.x238 + 0.00865979*m.x239
+ 0.00165906*m.x240 + 0.00498598*m.x241 + 0.00568814*m.x242 + 0.0216421*m.x243
- 0.00233532*m.x244 + 0.00581905*m.x245 + 0.00775024*m.x246 + 0.00374232*m.x247
+ 0.0244461*m.x248 + 0.0151547*m.x249 + 0.00964941*m.x250 - 0.0151267*m.x251
+ 0.000714771*m.x252 + 0.0189254*m.x253 + 0.0059386*m.x254 + 0.011069*m.x255
+ 0.0132028*m.x256 + 0.011138*m.x257 + 0.196677*m.x258 - 0.0016753*m.x259 + 0.0210475*m.x260
+ 0.0394424*m.x261 - 0.0137174*m.x262 + 0.0251559*m.x263 + 0.004979*m.x264 + 0.0286546*m.x265
+ 0.00245343*m.x266 - 0.0029273*m.x267 - 0.00576086*m.x268 + 0.0142978*m.x269
+ 0.000630997*m.x270 + 0.00317061*m.x271 - 0.0123761*m.x272 + 0.021312*m.x273
+ 0.00566315*m.x274 + 0.0328952*m.x275 - 0.00795682*m.x276 + 0.00577634*m.x277
+ 0.00671703*m.x278 + 0.0141594*m.x279 + 0.0189637*m.x280 - 0.0099332*m.x281
+ 0.0100466*m.x282 + 0.00647759*m.x283 + 0.0250933*m.x284 - 0.000881292*m.x285
+ 0.0263854*m.x286 + 0.0134424*m.x287 + 0.00802506*m.x288 + 0.00331125*m.x289
+ 0.00732372*m.x290 + 0.00825968*m.x291 + 0.0142932*m.x292 + 0.00984404*m.x293
- 0.00774969*m.x294 - 0.00584549*m.x295 - 0.00254807*m.x296 - 0.00996077*m.x297
- 0.00480671*m.x298 + 0.017175*m.x299 + 0.00103887*m.x300 - 2.46303E-5*m.x301
+ 0.00495815*m.x302 + 0.0225131*m.x303 == 0)
m.c162 = Constraint(expr= - m.x57 + 0.0112867*m.x204 + 0.0142655*m.x205 + 0.0357135*m.x206 + 0.0386991*m.x207
+ 0.0196795*m.x208 + 0.0133542*m.x209 + 0.03853*m.x210 + 0.0164155*m.x211 - 0.0145817*m.x212
+ 0.0215893*m.x213 + 0.0215114*m.x214 + 0.00837984*m.x215 + 0.0141785*m.x216
+ 0.00916835*m.x217 + 0.0297611*m.x218 + 0.0024162*m.x219 + 0.00156841*m.x220
+ 0.0426323*m.x221 + 0.00259346*m.x222 - 0.0199045*m.x223 + 0.00381282*m.x224
+ 0.020719*m.x225 + 0.00739069*m.x226 + 0.030732*m.x227 + 0.0219997*m.x228 + 0.0251306*m.x229
+ 0.00867625*m.x230 + 0.0408723*m.x231 - 0.00397893*m.x232 + 0.0137686*m.x233
+ 0.0182858*m.x234 + 0.0230993*m.x235 + 0.0388425*m.x236 + 0.012515*m.x237 + 0.00289448*m.x238
+ 0.00669218*m.x239 + 0.0824428*m.x240 + 0.0347475*m.x241 - 0.00209651*m.x242
+ 0.0118242*m.x243 + 0.037583*m.x244 + 0.0246255*m.x245 + 0.00853849*m.x246
+ 0.00559832*m.x247 + 0.0119362*m.x248 + 0.00745197*m.x249 + 0.00680548*m.x250
+ 0.0557771*m.x251 + 0.00310895*m.x252 + 0.0128203*m.x253 + 0.000656537*m.x254
+ 0.0175376*m.x255 + 0.0182593*m.x256 + 0.0267434*m.x257 - 0.0016753*m.x258 + 0.181831*m.x259
+ 0.0178859*m.x260 + 0.00593901*m.x261 + 0.0130043*m.x262 + 0.0296284*m.x263
+ 0.0122646*m.x264 + 0.00897029*m.x265 + 0.0101287*m.x266 - 0.0230152*m.x267
+ 0.0212254*m.x268 + 0.0151995*m.x269 + 0.00961517*m.x270 + 0.0216584*m.x271
+ 0.00890793*m.x272 + 0.0156085*m.x273 + 0.0179047*m.x274 + 0.0195461*m.x275
+ 0.0202055*m.x276 + 0.00312413*m.x277 - 0.0085337*m.x278 + 0.00691393*m.x279
+ 0.019103*m.x280 + 0.0121945*m.x281 - 0.01046*m.x282 + 7.7416E-5*m.x283 + 0.0195852*m.x284
+ 0.0203511*m.x285 + 0.00683799*m.x286 + 0.0150426*m.x287 + 0.0201186*m.x288
+ 0.0161444*m.x289 + 0.0127031*m.x290 + 0.0366677*m.x291 + 0.00820119*m.x292
- 0.00665374*m.x293 + 0.00551264*m.x294 + 0.0332128*m.x295 + 0.000520073*m.x296
- 0.00757188*m.x297 + 0.00641418*m.x298 + 0.00692204*m.x299 + 0.0175234*m.x300
- 0.0128168*m.x301 + 0.0164063*m.x302 + 0.00875325*m.x303 == 0)
m.c163 = Constraint(expr= - m.x58 + 0.0247222*m.x204 + 0.0264826*m.x205 - 0.00418977*m.x206 + 0.0209281*m.x207
+ 0.0144357*m.x208 + 0.0320697*m.x209 + 0.0225766*m.x210 - 0.00380167*m.x211
+ 0.00627821*m.x212 - 0.0026586*m.x213 + 0.0402573*m.x214 + 0.0137972*m.x215
+ 0.0210091*m.x216 + 0.022344*m.x217 + 0.0176119*m.x218 + 0.00726513*m.x219 + 0.0291383*m.x220
+ 0.00626505*m.x221 + 0.019997*m.x222 + 0.0230939*m.x223 + 0.0130988*m.x224 + 0.013493*m.x225
+ 0.0279297*m.x226 + 0.0159408*m.x227 + 0.0126357*m.x228 - 0.00245187*m.x229
- 0.0023455*m.x230 - 0.000700731*m.x231 - 0.00363432*m.x232 + 0.017504*m.x233
- 0.0042177*m.x234 + 0.0192279*m.x235 - 0.00375455*m.x236 + 0.00320118*m.x237
+ 0.00470079*m.x238 + 0.0128782*m.x239 + 0.0133704*m.x240 + 0.0244545*m.x241
+ 0.00931511*m.x242 + 0.0207329*m.x243 + 0.0180524*m.x244 + 0.0224446*m.x245
- 0.00130433*m.x246 - 0.00188392*m.x247 + 0.00907288*m.x248 + 0.01615*m.x249
| |
"""
Spiral plotly express documentation module.
"""
import inspect
from textwrap import TextWrapper
getfullargspec = inspect.getfullargspec
colref_type = "str or int or Series or array-like"
colref_desc = (
"Either a name of a column in `data_frame`,"
" or a pandas Series or array-like object."
)
colref_list_type = "list of str or int, or Series or array-like"
colref_list_desc = (
"Either names of columns in `data_frame`,"
" or pandas Series, or array-like objects."
)
docs = {
"data_frame": [
"DataFrame or array-like or dict",
"This argument needs to be passed for column names (and not keyword"
" names) to be used. Array-like and dict are transformed internally to"
" a pandas DataFrame. Optional: if missing, a DataFrame gets"
" constructed under the hood using the other arguments.",
],
"x": [
colref_type,
colref_desc,
"Values from this column or array-like are used to position marks along"
" the x-axis in Cartesian coordinates. For horizontal histograms, these"
" values are used as inputs to `histfunc`.",
],
"y": [
colref_type,
colref_desc,
"Values from this column or array-like are used to position marks along"
" the y-axis in Cartesian coordinates. For vertical histograms, these"
" values are used as inputs to `histfunc`.",
],
"z": [
colref_type,
colref_desc,
"Values from this column or array-like are used to position marks along"
" the z-axis in Cartesian coordinates. For `density_heatmap` and"
" `density_contour` these values are used as the inputs to `histfunc`.",
],
"a": [
colref_type,
colref_desc,
"Values from this column or array-like are used to position marks along"
" the a-axis in ternary coordinates.",
],
"b": [
colref_type,
colref_desc,
"Values from this column or array-like are used to position marks along"
" the b-axis in ternary coordinates.",
],
"c": [
colref_type,
colref_desc,
"Values from this column or array-like are used to position marks along"
" the c-axis in ternary coordinates.",
],
"r": [
colref_type,
colref_desc,
"Values from this column or array-like are used to position marks along"
" the radial axis in polar coordinates.",
],
"theta": [
colref_type,
colref_desc,
"Values from this column or array-like are used to position marks along"
" the angular axis in polar coordinates.",
],
"values": [
colref_type,
colref_desc,
"Values from this column or array-like are used to set values"
" associated to sectors.",
],
"parents": [
colref_type,
colref_desc,
"Values from this column or array-like are used as parents in sunburst"
" and treemap charts.",
],
"ids": [
colref_type,
colref_desc,
"Values from this column or array-like are used to set ids of sectors",
],
"path": [
colref_list_type,
colref_list_desc,
"List of columns names or columns of a rectangular dataframe defining"
" the hierarchy of sectors, from root to leaves. An error is raised if"
" path AND ids or parents is passed",
],
"lat": [
colref_type,
colref_desc,
"Values from this column or array-like are used to position marks"
" according to latitude on a map.",
],
"lon": [
colref_type,
colref_desc,
"Values from this column or array-like are used to position marks"
" according to longitude on a map.",
],
"locations": [
colref_type,
colref_desc,
"Values from this column or array-like are to be interpreted according"
" to `locationmode` and mapped to longitude/latitude.",
],
"dimensions": [
colref_list_type,
colref_list_desc,
"Values from these columns are used for multidimensional visualization.",
],
"dimensions_max_cardinality": [
"int (default 50)",
"When `dimensions` is `None` and `data_frame` is provided, "
"columns with more than this number of unique values are excluded from"
" the output. Not used when `dimensions` is passed.",
],
"error_x": [
colref_type,
colref_desc,
"Values from this column or array-like are used to size x-axis error"
" bars. If `error_x_minus` is `None`, error bars will be symmetrical,"
" otherwise `error_x` is used for the positive direction only.",
],
"error_x_minus": [
colref_type,
colref_desc,
"Values from this column or array-like are used to size x-axis error"
" bars in the negative direction. Ignored if `error_x` is `None`.",
],
"error_y": [
colref_type,
colref_desc,
"Values from this column or array-like are used to size y-axis error"
" bars. If `error_y_minus` is `None`, error bars will be symmetrical,"
" otherwise `error_y` is used for the positive direction only.",
],
"error_y_minus": [
colref_type,
colref_desc,
"Values from this column or array-like are used to size y-axis error"
" bars in the negative direction. Ignored if `error_y` is `None`.",
],
"error_z": [
colref_type,
colref_desc,
"Values from this column or array-like are used to size z-axis error"
" bars. If `error_z_minus` is `None`, error bars will be symmetrical,"
" otherwise `error_z` is used for the positive direction only.",
],
"error_z_minus": [
colref_type,
colref_desc,
"Values from this column or array-like are used to size z-axis error"
" bars in the negative direction. Ignored if `error_z` is `None`.",
],
"color": [
colref_type,
colref_desc,
"Values from this column or array-like are used to assign color to marks.",
],
"opacity": ["float", "Value between 0 and 1. Sets the opacity for markers."],
"line_dash": [
colref_type,
colref_desc,
"Values from this column or array-like are used to assign dash-patterns"
" to lines.",
],
"line_group": [
colref_type,
colref_desc,
"Values from this column or array-like are used to group rows of"
" `data_frame` into lines.",
],
"symbol": [
colref_type,
colref_desc,
"Values from this column or array-like are used to assign symbols to marks.",
],
"size": [
colref_type,
colref_desc,
"Values from this column or array-like are used to assign mark sizes.",
],
"radius": ["int (default is 30)", "Sets the radius of influence of each point."],
"hover_name": [
colref_type,
colref_desc,
"Values from this column or array-like appear in bold in the hover tooltip.",
],
"hover_data": [
colref_list_type,
colref_list_desc,
"Values from these columns appear as extra data in the hover tooltip.",
],
"custom_data": [
colref_list_type,
colref_list_desc,
"Values from these columns are extra data, to be used in widgets or"
" Dash callbacks for example. This data is not user-visible but is"
" included in events emitted by the figure (lasso selection etc.)",
],
"text": [
colref_type,
colref_desc,
"Values from this column or array-like appear in the figure as text labels.",
],
"names": [
colref_type,
colref_desc,
"Values from this column or array-like are used as labels for sectors.",
],
"locationmode": [
"str",
"One of 'ISO-3', 'USA-states', or 'country names'",
"Determines the set of locations used to match entries in `locations`"
" to regions on the map.",
],
"facet_row": [
colref_type,
colref_desc,
"Values from this column or array-like are used to assign marks to"
" faceted subplots in the vertical direction.",
],
"facet_col": [
colref_type,
colref_desc,
"Values from this column or array-like are used to assign marks to"
" faceted subplots in the horizontal direction.",
],
"facet_col_wrap": [
"int",
"Maximum number of facet columns. Wraps the column variable at this"
" width, so that the column facets span multiple rows. Ignored if 0,"
" and forced to 0 if `facet_row` or a `marginal` is set.",
],
"animation_frame": [
colref_type,
colref_desc,
"Values from this column or array-like are used to assign marks to"
" animation frames.",
],
"animation_group": [
colref_type,
colref_desc,
"Values from this column or array-like are used to provide"
" object-constancy across animation frames: rows with matching"
" `animation_group`'s will be treated as if they describe the same"
" object in each frame.",
],
"symbol_sequence": [
"list of str",
"Strings should define valid plotly.js symbols. When `symbol` is set,"
" values in that column are assigned symbols by cycling through"
" `symbol_sequence` in the order described in `category_orders`, unless"
" the value of `symbol` is a key in `symbol_map`.",
],
"symbol_map": [
"dict with str keys and str values (default `{}`)",
"String values should define plotly.js symbols",
"Used to override `symbol_sequence` to assign a specific symbols to"
" marks corresponding with specific values. Keys in `symbol_map` should"
" be values in the column denoted by `symbol`.",
],
"line_dash_map": [
"dict with str keys and str values (default `{}`)",
"Strings values define plotly.js dash-patterns.",
"Used to override `line_dash_sequences` to assign a specific"
" dash-patterns to lines corresponding with | |
* math.pow(stress_frac, m1)*gamma1*(1-gamma_val)\
*self._fraction[idx]
def get_damage_slope2(self, idx, curve, int_press, ext_press):
    """Fatigue damage contribution from the second slope (m2) of the
    two-slope S-N curve for load condition *idx*.

    Mirrors ``get_damage_slope1`` but weights the Weibull stress
    distribution with the *lower* incomplete-gamma term ``gamma_val``.
    Returns 0 when the stress fraction for this condition is zero.
    """
    m2 = snc.get_paramter(curve, 'm2')
    log_a2 = snc.get_paramter(curve, 'log a2')
    k = snc.get_paramter(curve, 'k')
    slope = snc.get_paramter(curve, 'slope')
    # Total number of stress cycles over the design life for this condition.
    cycles = self._design_life * 365 * 24 * 3600 / self._period[idx]
    # Thickness correction; no reduction below the 25 mm reference thickness.
    thk_eff = math.log10(max(1, self.plate_th / 25)) * k
    # Stress range at the change-of-slope (knee) point of the S-N curve.
    slope_ch = math.exp(math.log(math.pow(10, log_a2 - m2 * thk_eff) / slope) / m2)
    gamma2 = self.__get_gamma2(idx)
    weibull = self._weibull[idx]
    stress_frac = self.__get_stress_fraction(idx, int_press, ext_press)
    if stress_frac == 0:
        # No stress range -> no damage from this load condition.
        return 0
    # Gamma-distribution CDF term (finding GAMMADIST).
    x = math.pow(slope_ch / stress_frac, weibull)
    alpha = 1 + m2 / weibull
    gamma_val = gammadist.cdf(x, alpha)
    return cycles / math.pow(10, log_a2 - m2 * thk_eff) * math.pow(stress_frac, m2) \
           * gamma2 * gamma_val * self._fraction[idx]
def get_total_damage(self, int_press=(0, 0, 0), ext_press=(0, 0, 0)):
    """Sum the fatigue damage from both S-N slopes over the three
    load conditions.

    Conditions with a zero time fraction or a zero period are skipped
    (a zero period would divide by zero in the slope calculations).
    """
    total = 0
    for idx in range(3):
        # Guard clause: skip unused or ill-defined load conditions.
        if self._fraction[idx] == 0 or self._period[idx] == 0:
            continue
        total += self.get_damage_slope1(idx, self._sn_curve, int_press[idx], ext_press[idx]) + \
                 self.get_damage_slope2(idx, self._sn_curve, int_press[idx], ext_press[idx])
    return total
def set_commmon_properties(self, fatigue_dict: dict):
    """Set the fatigue properties common to all calculation cases.

    Only a subset of the fatigue inputs is stored here; the remaining
    ones (SN-curve, Weibull, Period, SCF, CorrLoc, Order) are handled by
    ``set_fatigue_properties``. Each value is stored both on a private
    attribute and in ``self.fatigue_dict``.
    """
    # (private attribute, input-dictionary key) pairs for the common subset.
    common = (
        ('_acc', 'Accelerations'),
        ('_no_of_cycles', 'n0'),
        ('_design_life', 'Design life'),
        ('_fraction', 'Fraction'),
        ('_dff', 'DFF'),
    )
    for attr, key in common:
        value = fatigue_dict[key]
        setattr(self, attr, value)
        self.fatigue_dict[key] = value
def set_fatigue_properties(self, fatigue_dict: dict):
    """Set all fatigue properties from *fatigue_dict*.

    Each value is stored both on a private attribute (used by the damage
    calculations) and in ``self.fatigue_dict`` (kept for export /
    inspection via ``get_fatigue_properties``).
    """
    # (private attribute, input-dictionary key) for every fatigue property.
    mapping = (
        ('_sn_curve', 'SN-curve'),
        ('_acc', 'Accelerations'),
        ('_weibull', 'Weibull'),
        ('_period', 'Period'),
        ('_k_factor', 'SCF'),
        ('_corr_loc', 'CorrLoc'),
        ('_no_of_cycles', 'n0'),
        ('_design_life', 'Design life'),
        ('_fraction', 'Fraction'),
        ('_case_order', 'Order'),
        ('_dff', 'DFF'),
    )
    for attr, key in mapping:
        value = fatigue_dict[key]
        setattr(self, attr, value)
        self.fatigue_dict[key] = value
def get_fatigue_properties(self):
    """Return the stored fatigue properties as a dictionary keyed by
    input name (as set by ``set_fatigue_properties``)."""
    return self.fatigue_dict
def get_accelerations(self):
    """Return the tuple of accelerations."""
    return self._acc
def get_dff(self):
    """Return the Design Fatigue Factor (DFF)."""
    return self._dff
def get_design_life(self):
    """Return the design life (years)."""
    return self._design_life
class PULSpanel():
'''
Takes care of puls runs
'''
def __init__(self, run_dict: dict = {}, puls_acceptance: float = 0.87, puls_sheet_location: str = None):
super(PULSpanel, self).__init__()
self._all_to_run = run_dict
self._run_results = {}
self._puls_acceptance = puls_acceptance
self._puls_sheet_location = puls_sheet_location
self._all_uf = {'buckling': list(), 'ultimate': list()}
@property
def all_uf(self):
    """Usage factors collected from the last PULS results: a dict with
    'buckling' and 'ultimate' lists of unique factors."""
    return self._all_uf

@property
def puls_acceptance(self):
    """Allowable PULS usage factor (acceptance criterion)."""
    return self._puls_acceptance

@puls_acceptance.setter
def puls_acceptance(self, val):
    self._puls_acceptance = val

@property
def puls_sheet_location(self):
    """Path to the PULS Excel sheet used when running calculations."""
    return self._puls_sheet_location

@puls_sheet_location.setter
def puls_sheet_location(self, val):
    self._puls_sheet_location = val
def set_all_to_run(self, val):
    """Replace the dict of PULS runs to execute (line name -> run definition)."""
    self._all_to_run = val
def get_all_to_run(self):
    """Return the dict of PULS runs to execute."""
    return self._all_to_run
def get_run_results(self):
    """Return the accumulated PULS run results (line name -> result dict)."""
    return self._run_results
def set_run_results(self, val):
    """Replace the stored PULS results and collect the numeric usage
    factors into ``self._all_uf``.

    Bug fixed: the original guard was ``any([key == 'sheet location',
    type(...) != dict, ...])`` — a *list* is fully evaluated before
    ``any`` runs, so for the 'sheet location' entry (a plain string) the
    sub-dict indexing raised TypeError before the guard could skip it.
    The checks below short-circuit instead.
    """
    self._run_results = val
    for key, result in self._run_results.items():
        # Skip non-result entries before touching their sub-structures.
        if key == 'sheet location' or not isinstance(result, dict):
            continue
        if not isinstance(result.get('Buckling strength'), dict) or \
                not isinstance(result.get('Ultimate capacity'), dict):
            continue
        buckling_uf = result['Buckling strength']['Actual usage Factor'][0]
        ultimate_uf = result['Ultimate capacity']['Actual usage Factor'][0]
        # Only numeric results are collected; PULS uses strings/None as
        # placeholders for panels that could not be evaluated.
        if isinstance(buckling_uf, float) and isinstance(ultimate_uf, float):
            self._all_uf['buckling'].append(buckling_uf)
            self._all_uf['ultimate'].append(ultimate_uf)
    self._all_uf['buckling'] = np.unique(self._all_uf['buckling']).tolist()
    self._all_uf['ultimate'] = np.unique(self._all_uf['ultimate']).tolist()
def run_all(self, store_results = False):
    '''
    Returning following results.:

    Identification: name of line/run
    Plate geometry: dict_keys(['Length of panel', 'Stiffener spacing', 'Plate thick.'])
    Primary stiffeners: dict_keys(['Number of stiffeners', 'Stiffener type', 'Stiffener boundary', 'Stiff. Height',
    'Web thick.', 'Flange width', 'Flange thick.', 'Flange ecc.', 'Tilt angle'])
    Secondary stiffeners. dict_keys(['Number of sec. stiffeners', 'Secondary stiffener type', 'Stiffener boundary',
    'Stiff. Height', 'Web thick.', 'Flange width', 'Flange thick.'])
    Model imperfections. dict_keys(['Imp. level', 'Plate', 'Stiffener', 'Stiffener tilt'])
    Material: dict_keys(['Modulus of elasticity', "Poisson's ratio", 'Yield stress plate', 'Yield stress stiffener'])
    Aluminium prop: dict_keys(['HAZ pattern', 'HAZ red. factor'])
    Applied loads: dict_keys(['Axial stress', 'Trans. stress', 'Shear stress', 'Pressure (fixed)'])
    Bound cond.: dict_keys(['In-plane support'])
    Global elastic buckling: dict_keys(['Axial stress', 'Trans. Stress', 'Trans. stress', 'Shear stress'])
    Local elastic buckling: dict_keys(['Axial stress', 'Trans. Stress', 'Trans. stress', 'Shear stress'])
    Ultimate capacity: dict_keys(['Actual usage Factor', 'Allowable usage factor', 'Status'])
    Failure modes: dict_keys(['Plate buckling', 'Global stiffener buckling', 'Torsional stiffener buckling',
    'Web stiffener buckling'])
    Buckling strength: dict_keys(['Actual usage Factor', 'Allowable usage factor', 'Status'])
    Local geom req (PULS validity limits): dict_keys(['Plate slenderness', 'Web slend', 'Web flange ratio',
    'Flange slend ', 'Aspect ratio'])
    CSR-Tank requirements (primary stiffeners): dict_keys(['Plating', 'Web', 'Web-flange', 'Flange', 'stiffness'])

    :return:
    '''
    # Local import keeps the Excel interface optional until a run is executed.
    import ANYstructure_local.excel_inteface as pulsxl
    iterator = self._all_to_run
    newfile = self._puls_sheet_location
    my_puls = pulsxl.PulsExcel(newfile, visible=False)
    #my_puls.set_multiple_rows(20, iterator)
    # Batch-write all runs into the sheet; presumably run_sp / run_up address
    # the stiffened- and unstiffened-panel rows respectively — TODO confirm.
    run_sp, run_up = my_puls.set_multiple_rows_batch(iterator)
    my_puls.calculate_panels(sp=run_sp, up=run_up)
    #all_results = my_puls.get_all_results()
    all_results = my_puls.get_all_results_batch(sp = run_sp, up=run_up)
    for id, data in all_results.items():
        self._run_results[id] = data
    # Workbook is discarded; results live in self._run_results only.
    my_puls.close_book(save=False)
    # Rebuild the usage-factor summary from scratch for the updated results.
    self._all_uf = {'buckling': list(), 'ultimate': list()}
    for key in self._run_results.keys():
        try:
            # Collect only numeric usage factors; strings/None mark panels
            # that PULS could not evaluate.
            if all([type(self._run_results[key]['Buckling strength']['Actual usage Factor'][0]) == float,
                    type(self._run_results[key]['Ultimate capacity']['Actual usage Factor'][0]) == float]):
                self._all_uf['buckling'].append(self._run_results[key]['Buckling strength']
                                                ['Actual usage Factor'][0])
                self._all_uf['ultimate'].append(self._run_results[key]['Ultimate capacity']
                                                ['Actual usage Factor'][0])
        except TypeError:
            # Some entries hold placeholders instead of result dicts; keep going.
            print('Got a type error. Life will go on. Key for PULS run results was', key)
            print(self._run_results[key])
    self._all_uf['buckling'] = np.unique(self._all_uf['buckling']).tolist()
    self._all_uf['ultimate'] = np.unique(self._all_uf['ultimate']).tolist()
    if store_results:
        # NOTE(review): Windows-specific path separators; this will not work
        # on other operating systems.
        store_path = os.path.dirname(os.path.abspath(__file__))+'\\PULS\\Result storage\\'
        with open(store_path+datetime.datetime.now().strftime("%Y%m%d-%H%M%S")+'_UP.json', 'w') as file:
            file.write(json.dumps(all_results, ensure_ascii=False))
    return all_results
def get_utilization(self, line, method, acceptance = 0.87):
    """Return the usage factor for *line* divided by *acceptance*, or
    None when no usable result exists.

    :param method: 'buckling' selects 'Buckling strength'; anything else
        selects 'Ultimate capacity' (original dispatch preserved).

    Bug fixed: the 'ultimate' branch of the original None-check read
    ``['Buckling strength']`` (copy-paste error), so a line with a valid
    ultimate-capacity factor but a None buckling factor wrongly
    returned None.
    """
    if line not in self._run_results:
        return None
    key = 'Buckling strength' if method == 'buckling' else 'Ultimate capacity'
    result = self._run_results[line][key]['Actual usage Factor'][0]
    # PULS reports strings / None as placeholders when a panel could not
    # be evaluated; treat those as "no utilization available".
    if isinstance(result, str) or result is None:
        return None
    return result / acceptance
# def run_all_multi(self):
#
# tasks = []
#
# if len(self._all_to_run) > 20:
# processes = 10#max(cpu_count() - 1, 1)
#
# def chunks(data, SIZE=10000):
# it = iter(data)
# for i in range(0, len(data), SIZE):
# yield {k: data[k] for k in islice(it, SIZE)}
#
# # Sample run:
#
# for item in chunks({key: value for key, value in ex.run_dict.items()}, int(len(self._all_to_run)/processes)):
# tasks.append(item)
# else:
# tasks.append(self._all_to_run)
# # [print(task) for task in tasks]
# # print(self._all_to_run)
# # quit()
# queue = multiprocessing.SimpleQueue()
#
# for idx, name in enumerate(tasks):
# p = Process(target=self.run_all_multi_sub, args=(name, queue, idx+1))
# p.start()
# p.join()
# for task in tasks:
# print(queue.get())
# def run_all_multi_sub(self, iterator, queue = None, idx = 0):
# '''
# Returning following results.:
#
# Identification: name of line/run
# Plate geometry: dict_keys(['Length of panel', 'Stiffener spacing', 'Plate thick.'])
# Primary stiffeners: dict_keys(['Number of stiffeners', 'Stiffener type', 'Stiffener boundary', 'Stiff. Height',
# 'Web thick.', 'Flange width', 'Flange thick.', 'Flange ecc.', 'Tilt angle'])
# Secondary stiffeners. dict_keys(['Number of sec. stiffeners', 'Secondary stiffener type', 'Stiffener boundary',
# 'Stiff. Height', 'Web thick.', 'Flange width', 'Flange thick.'])
# Model imperfections. dict_keys(['Imp. level', 'Plate', 'Stiffener', 'Stiffener tilt'])
# Material: dict_keys(['Modulus of elasticity', "Poisson's ratio", 'Yield stress plate', 'Yield stress stiffener'])
# Aluminium prop: dict_keys(['HAZ pattern', 'HAZ red. factor'])
# Applied loads: dict_keys(['Axial stress', 'Trans. stress', 'Shear stress', 'Pressure (fixed)'])
# Bound cond.: dict_keys(['In-plane support'])
# Global elastic buckling: dict_keys(['Axial stress', 'Trans. Stress', 'Trans. stress', 'Shear stress'])
# Local elastic buckling: dict_keys(['Axial stress', 'Trans. Stress', 'Trans. stress', 'Shear stress'])
# Ultimate capacity: dict_keys(['Actual usage Factor', 'Allowable usage factor', 'Status'])
# Failure modes: dict_keys(['Plate buckling', 'Global stiffener buckling', 'Torsional stiffener buckling',
# 'Web stiffener buckling'])
# Buckling strength: dict_keys(['Actual usage Factor', 'Allowable usage factor', 'Status'])
# Local geom req (PULS validity limits): dict_keys(['Plate slenderness', 'Web slend', 'Web flange ratio',
# 'Flange slend ', 'Aspect ratio'])
# CSR-Tank requirements (primary stiffeners): dict_keys(['Plating', 'Web', 'Web-flange', 'Flange', 'stiffness'])
#
# :return:
# '''
# old_file = os.path.dirname(os.path.abspath(__file__))+'\\PULS\\PulsExcel_new - Copy (1).xlsm'
# new_file = os.path.dirname(os.path.abspath(__file__))+'\\PULS\\PulsExcel_new - Copy multi ('+str(idx)+').xlsm'
# shutil.copy(old_file, new_file)
# #time.sleep(idx*5)
# pythoncom.CoInitialize()
#
# my_puls = pulsxl.PulsExcel(new_file, visible=False)
# try:
# my_puls.set_multiple_rows_batch(20, iterator)
# my_puls.calculate_panels()
# all_results = my_puls.get_all_results_batch()
# my_puls.close_book(save=True)
# queue.put(all_results)
# os.remove(new_file)
# except (BaseException, AttributeError):
# my_puls.close_book(save=False)
# queue.put(None)
def get_puls_line_results(self, line):
    """Return the stored PULS result dict for *line*, or None if the
    line has no stored result."""
    # dict.get gives exactly the original membership-test-then-index
    # behavior in one lookup.
    return self._run_results.get(line)
def get_string(self, line, uf = 0.87):
    """Build a human-readable summary of the PULS results for *line*.

    :param line: line/run identifier; must exist in the stored results.
    :param uf: acceptance factor the usage factors are divided by.
    :return: multi-line summary string.
    """
    results = self._run_results[line]

    def _section_status(section):
        # 'Ok' only when every sub-check in the section reports 'Ok'.
        return 'Ok' if all(val[0] == 'Ok' for val in results[section].values()) else 'Not ok'

    loc_geom = _section_status('Local geom req (PULS validity limits)')
    csr_geom = _section_status('CSR-Tank requirements (primary stiffeners)')
    summary = [
        'PULS results',
        '',
        'Ultimate capacity usage factor: ' + str(results['Ultimate capacity']['Actual usage Factor'][0] / uf),
        'Buckling strength usage factor: ' + str(results['Buckling strength']['Actual usage Factor'][0] / uf),
        'Local geom req (PULS validity limits): ' + loc_geom,
        'CSR-Tank requirements (primary stiffeners): ' + csr_geom,
    ]
    return '\n'.join(summary)
def result_changed(self, id):
    """Invalidate (drop) the cached PULS result for *id*; a no-op when
    no result is stored for that id."""
    # pop with a default replicates the original membership-guarded pop.
    self._run_results.pop(id, None)
def generate_random_results(self, batch_size: int = 1000, stf_type: str | |
for deployed VM"
)
self.assertNotEqual(
len(vms),
0,
"List VMs should return valid response for deployed VM"
)
vm = vms[1]
self.debug(
"VM state after enabling maintenance on first host: %s" %
vm.state)
if vm.state in [
"Stopping",
"Stopped",
"Running",
"Starting",
"Migrating"
]:
if vm.state == "Running":
break
else:
time.sleep(self.services["sleep"])
timeout = timeout - 1
else:
self.fail(
"VM migration from one-host-to-other failed while enabling maintenance"
)
for vm in vms:
self.debug(
"VM states after enabling maintenance mode on host: %s - %s" %
(first_host, vm.state))
self.assertEqual(
vm.state,
"Running",
"Deployed VM should be in Running state"
)
# Spawn an instance on other host
virtual_machine_3 = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id
)
vms = VirtualMachine.list(
self.apiclient,
id=virtual_machine_3.id,
listall=True
)
self.assertEqual(
isinstance(vms, list),
True,
"List VMs should return valid response for deployed VM"
)
self.assertNotEqual(
len(vms),
0,
"List VMs should return valid response for deployed VM"
)
vm = vms[0]
self.debug("Deployed VM on host: %s" % vm.hostid)
self.debug("VM 3 state: %s" % vm.state)
self.assertEqual(
vm.state,
"Running",
"Deployed VM should be in Running state"
)
# Should be able to SSH VM
try:
self.debug("SSH into VM: %s" % virtual_machine.id)
ssh = virtual_machine.get_ssh_client(
ipaddress=public_ip.ipaddress.ipaddress)
except Exception as e:
self.fail("SSH Access failed for %s: %s" % \
(virtual_machine.ipaddress, e)
)
self.debug("Canceling host maintenance for ID: %s" % second_host)
cmd = cancelHostMaintenance.cancelHostMaintenanceCmd()
cmd.id = second_host
self.apiclient.cancelHostMaintenance(cmd)
self.debug("Maintenance mode canceled for host: %s" % second_host)
self.debug("Waiting for SSVMs to come up")
wait_for_ssvms(
self.apiclient,
zoneid=self.zone.id,
podid=self.pod.id,
)
return
@attr(tags = ["advanced", "advancedns", "multihost"])
def test_02_host_maintenance_mode_with_activities(self):
"""Test host maintenance mode with activities
"""
# Validate the following
# 1. Create Vms. Acquire IP. Create port forwarding & load balancing
# rules for Vms.
# 2. While activities are ongoing: Create snapshots, recurring
# snapshots, create templates, download volumes, Host 1: put to
# maintenance mode. All Vms should failover to Host 2 in cluster
# Vms should be in running state. All port forwarding rules and
# load balancing Rules should work.
# 3. After failover to Host 2 succeeds, deploy Vms. Deploy Vms on host
# 2 should succeed. All ongoing activities in step 3 should succeed
# 4. Host 1: cancel maintenance mode.
# 5. While activities are ongoing: Create snapshots, recurring
# snapshots, create templates, download volumes, Host 2: put to
# maintenance mode. All Vms should failover to Host 1 in cluster.
# 6. After failover to Host 1 succeeds, deploy VMs. Deploy Vms on
# host 1 should succeed. All ongoing activities in step 6 should
# succeed.
hosts = Host.list(
self.apiclient,
zoneid=self.zone.id,
resourcestate='Enabled',
type='Routing'
)
self.assertEqual(
isinstance(hosts, list),
True,
"List hosts should return valid host response"
)
self.assertGreaterEqual(
len(hosts),
2,
"There must be two hosts present in a cluster"
)
self.debug("Checking HA with hosts: %s, %s" % (
hosts[0].name,
hosts[1].name
))
self.debug("Deploying VM in account: %s" % self.account.name)
# Spawn an instance in that network
virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id
)
vms = VirtualMachine.list(
self.apiclient,
id=virtual_machine.id,
listall=True
)
self.assertEqual(
isinstance(vms, list),
True,
"List VMs should return valid response for deployed VM"
)
self.assertNotEqual(
len(vms),
0,
"List VMs should return valid response for deployed VM"
)
vm = vms[0]
self.debug("Deployed VM on host: %s" % vm.hostid)
self.assertEqual(
vm.state,
"Running",
"Deployed VM should be in RUnning state"
)
networks = Network.list(
self.apiclient,
account=self.account.name,
domainid=self.account.domainid,
listall=True
)
self.assertEqual(
isinstance(networks, list),
True,
"List networks should return valid list for the account"
)
network = networks[0]
self.debug("Associating public IP for account: %s" %
self.account.name)
public_ip = PublicIPAddress.create(
self.apiclient,
accountid=self.account.name,
zoneid=self.zone.id,
domainid=self.account.domainid,
networkid=network.id
)
self.debug("Associated %s with network %s" % (
public_ip.ipaddress.ipaddress,
network.id
))
self.debug("Creating PF rule for IP address: %s" %
public_ip.ipaddress.ipaddress)
nat_rule = NATRule.create(
self.apiclient,
virtual_machine,
self.services["natrule"],
ipaddressid=public_ip.ipaddress.id
)
self.debug("Creating LB rule on IP with NAT: %s" %
public_ip.ipaddress.ipaddress)
# Create Load Balancer rule on IP already having NAT rule
lb_rule = LoadBalancerRule.create(
self.apiclient,
self.services["lbrule"],
ipaddressid=public_ip.ipaddress.id,
accountid=self.account.name
)
self.debug("Created LB rule with ID: %s" % lb_rule.id)
# Should be able to SSH VM
try:
self.debug("SSH into VM: %s" % virtual_machine.id)
ssh = virtual_machine.get_ssh_client(
ipaddress=public_ip.ipaddress.ipaddress)
except Exception as e:
self.fail("SSH Access failed for %s: %s" % \
(virtual_machine.ipaddress, e)
)
# Get the Root disk of VM
volumes = list_volumes(
self.apiclient,
virtualmachineid=virtual_machine.id,
type='ROOT',
listall=True
)
volume = volumes[0]
self.debug(
"Root volume of VM(%s): %s" % (
virtual_machine.name,
volume.name
))
# Create a snapshot from the ROOTDISK
self.debug("Creating snapshot on ROOT volume: %s" % volume.name)
snapshot = Snapshot.create(self.apiclient, volumes[0].id)
self.debug("Snapshot created: ID - %s" % snapshot.id)
snapshots = list_snapshots(
self.apiclient,
id=snapshot.id,
listall=True
)
self.assertEqual(
isinstance(snapshots, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
snapshots,
None,
"Check if result exists in list snapshots call"
)
self.assertEqual(
snapshots[0].id,
snapshot.id,
"Check snapshot id in list resources call"
)
# Generate template from the snapshot
self.debug("Generating template from snapshot: %s" % snapshot.name)
template = Template.create_from_snapshot(
self.apiclient,
snapshot,
self.services["templates"]
)
self.debug("Created template from snapshot: %s" % template.id)
templates = list_templates(
self.apiclient,
templatefilter=\
self.services["templates"]["templatefilter"],
id=template.id
)
self.assertEqual(
isinstance(templates, list),
True,
"List template call should return the newly created template"
)
self.assertEqual(
templates[0].isready,
True,
"The newly created template should be in ready state"
)
first_host = vm.hostid
self.debug("Enabling maintenance mode for host %s" % vm.hostid)
cmd = prepareHostForMaintenance.prepareHostForMaintenanceCmd()
cmd.id = first_host
self.apiclient.prepareHostForMaintenance(cmd)
self.debug("Waiting for SSVMs to come up")
wait_for_ssvms(
self.apiclient,
zoneid=self.zone.id,
podid=self.pod.id,
)
timeout = self.services["timeout"]
# Poll and check state of VM while it migrates from one host to another
while True:
vms = VirtualMachine.list(
self.apiclient,
id=virtual_machine.id,
listall=True
)
self.assertEqual(
isinstance(vms, list),
True,
"List VMs should return valid response for deployed VM"
)
self.assertNotEqual(
len(vms),
0,
"List VMs should return valid response for deployed VM"
)
vm = vms[0]
self.debug("VM 1 state: %s" % vm.state)
if vm.state in ["Stopping",
"Stopped",
"Running",
"Starting",
"Migrating"]:
if vm.state == "Running":
break
else:
time.sleep(self.services["sleep"])
timeout = timeout - 1
else:
self.fail(
"VM migration from one-host-to-other failed while enabling maintenance"
)
second_host = vm.hostid
self.assertEqual(
vm.state,
"Running",
"VM should be in Running state after enabling host maintenance"
)
# Should be able to SSH VM
try:
self.debug("SSH into VM: %s" % virtual_machine.id)
ssh = virtual_machine.get_ssh_client(
ipaddress=public_ip.ipaddress.ipaddress)
except Exception as e:
self.fail("SSH Access failed for %s: %s" % \
(virtual_machine.ipaddress, e)
)
self.debug("Deploying VM in account: %s" % self.account.name)
# Spawn an instance on other host
virtual_machine_2 = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id
)
vms = VirtualMachine.list(
self.apiclient,
id=virtual_machine_2.id,
listall=True
)
self.assertEqual(
isinstance(vms, list),
True,
"List VMs should return valid response for deployed VM"
)
self.assertNotEqual(
len(vms),
0,
"List VMs should return valid response for deployed VM"
)
vm = vms[0]
self.debug("Deployed VM on host: %s" % vm.hostid)
self.debug("VM 2 state: %s" % vm.state)
self.assertEqual(
vm.state,
"Running",
"Deployed VM should be in Running state"
)
self.debug("Canceling host maintenance for ID: %s" % first_host)
cmd = cancelHostMaintenance.cancelHostMaintenanceCmd()
cmd.id = first_host
self.apiclient.cancelHostMaintenance(cmd)
self.debug("Maintenance mode canceled for host: %s" % first_host)
# Get the Root disk of VM
volumes = list_volumes(
self.apiclient,
virtualmachineid=virtual_machine_2.id,
type='ROOT',
listall=True
)
volume = volumes[0]
self.debug(
"Root volume of VM(%s): %s" % (
virtual_machine_2.name,
volume.name
))
# Create a snapshot from the ROOTDISK
self.debug("Creating snapshot on ROOT volume: %s" % volume.name)
snapshot = Snapshot.create(self.apiclient, volumes[0].id)
self.debug("Snapshot created: ID - %s" % snapshot.id)
snapshots = list_snapshots(
self.apiclient,
id=snapshot.id,
listall=True
)
self.assertEqual(
isinstance(snapshots, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
snapshots,
None,
"Check if result exists in list snapshots call"
)
self.assertEqual(
snapshots[0].id,
snapshot.id,
"Check snapshot id in list resources call"
)
# Generate template from the snapshot
self.debug("Generating template from snapshot: %s" % snapshot.name)
template = Template.create_from_snapshot(
self.apiclient,
snapshot,
self.services["templates"]
)
self.debug("Created template from snapshot: %s" % template.id)
templates = list_templates(
self.apiclient,
templatefilter=\
self.services["templates"]["templatefilter"],
id=template.id
)
self.assertEqual(
isinstance(templates, list),
True,
"List template call should return the newly created template"
)
self.assertEqual(
templates[0].isready,
True,
"The newly created template should be in ready state"
)
self.debug("Enabling maintenance mode for host %s" % | |
# tronx/helpers/utils.py
import time
import re
import shlex
import os
import asyncio
import html
import math
import aiohttp
import random
import json
from time import sleep
from hachoir.metadata import extractMetadata
from hachoir.parser import createParser
from PIL import Image
from math import ceil
from typing import List, Union
from re import escape, sub
from enum import IntEnum, unique
from tronx import (
app,
USER_ID,
Config,
)
from pyrogram.types import Message, User, InlineKeyboardButton
from pyrogram.errors import RPCError, MessageNotModified, FloodWait
HELP_EMOJI = " "
LAYER_FEED_CHAT = None
LAYER_UPDATE_INTERVAL = None
LAYER_UPDATE_MESSAGE_CAPTION = None
BTN_URL_REGEX = re.compile(r"(\[([^\[]+?)\]\(buttonurl:(?:/{0,2})(.+?)(:same)?\))")
@unique
class Types(IntEnum):
    """Kinds of Telegram message content recognized by the note helpers.

    Integer values are stable identifiers (persisted in saved notes),
    so members must not be renumbered.
    """
    TEXT = 1
    DOCUMENT = 2
    PHOTO = 3
    VIDEO = 4
    STICKER = 5
    AUDIO = 6
    VOICE = 7
    VIDEO_NOTE = 8
    ANIMATION = 9
    # Telegram ships animated stickers as documents with a special MIME type.
    ANIMATED_STICKER = 10
    CONTACT = 11
def long(m: Message):
text = len(m.command)
return text if text else None
# help menu builder
def helpdex(page_number, loaded_modules, prefix):
    """Build the inline-keyboard rows (pairs of buttons) for one help page.

    page_number: requested page; wrapped modulo the number of pages.
    loaded_modules: module names; names starting with "_" are hidden.
    prefix: callback-data prefix used for the Prev/Next buttons.
    Returns a list of button tuples (rows) suitable for an inline keyboard.
    """
    rows = 4
    column = 2
    help_modules = []
    for mod in loaded_modules:
        # Modules prefixed with "_" are internal and not shown in the menu.
        if not mod.startswith("_"):
            help_modules.append(mod)
    help_modules = sorted(help_modules)
    modules = [
        InlineKeyboardButton(
            text="{} {}".format(
                HELP_EMOJI,
                x.replace("_", " ").title(),
            ),
            callback_data="modulelist_{}|{}".format(x, page_number),
        )
        for x in help_modules
    ]
    # Pair buttons two per row; an odd trailing button gets its own row.
    twins = list(zip(modules[::column], modules[1::column]))
    if len(modules) % column == 1:
        twins.append((modules[-1],))
    # NOTE(review): num_pages is 0 when no modules are visible, which would
    # make the modulo below raise ZeroDivisionError — confirm callers never
    # pass an empty module list.
    num_pages = ceil(len(twins) / rows)
    mod_page = page_number % num_pages
    if len(twins) > rows:
        # Slice out the requested page and append a Prev/Back/Next nav row.
        twins = twins[
            mod_page * rows : rows * (mod_page + 1)
        ] + [
            (
                InlineKeyboardButton(
                    text="❰ Prev",
                    callback_data="{}_prev({})".format(
                        prefix, mod_page
                    ),
                ),
                InlineKeyboardButton(text="Back", callback_data=f"open-start-dex"),
                InlineKeyboardButton(
                    text="Next ❱",
                    callback_data="{}_next({})".format(
                        prefix, mod_page
                    ),
                ),
            )
        ]
    return twins
# get type of message
def get_message_type(msg, include_text=True):
    """Classify *msg*; return ``(content, message_type)``.

    content: the Telegram file_id for media (None for text / no match).
    message_type: a ``Types`` member, or None when nothing matched.
    With ``include_text=False`` text/captions are skipped so that only
    media is reported (how the note helpers call it).
    """
    content = None
    message_type = None
    if include_text is True:
        if msg.text or msg.caption:
            # Text carries no file_id; the caller reads msg.text itself.
            content = None
            message_type = Types.TEXT
    elif msg.sticker:
        content = msg.sticker.file_id
        message_type = Types.STICKER
    elif msg.document:
        # Animated stickers arrive as documents with this special MIME type.
        if msg.document.mime_type == "application/x-bad-tgsticker":
            message_type = Types.ANIMATED_STICKER
        else:
            message_type = Types.DOCUMENT
        content = msg.document.file_id
    elif msg.photo:
        content = msg.photo.file_id  # last elem = best quality
        message_type = Types.PHOTO
    elif msg.audio:
        content = msg.audio.file_id
        message_type = Types.AUDIO
    elif msg.voice:
        content = msg.voice.file_id
        message_type = Types.VOICE
    elif msg.video:
        content = msg.video.file_id
        message_type = Types.VIDEO
    elif msg.video_note:
        content = msg.video_note.file_id
        message_type = Types.VIDEO_NOTE
    elif msg.animation:
        content = msg.animation.file_id
        message_type = Types.ANIMATION
    return content, message_type
def get_note_type(msg):
    """Parse a save-note command message.

    Returns ``(note_name, text, message_type, content)`` on success,
    a 5-tuple of ``None`` when there is no payload, and bare ``None``
    when there is neither inline text nor a replied-to message.
    """
    reply = msg.reply_to_message
    note_name = None
    message_type = None
    content = None
    text = None
    # Bug fix: long() returns None for an empty command, and `None <= 1`
    # raises TypeError on Python 3 — coerce to 0 before comparing.
    if (long(msg) or 0) <= 1:
        # NOTE(review): this early exit returns five values while the
        # success path returns four — confirm how callers unpack it.
        return None, None, None, None, None
    if msg.text:
        raw_text = msg.text.markdown
    else:
        raw_text = msg.caption.markdown
    note_name = raw_text.split()[1]
    # Determine the note contents - inline text, or the replied message.
    if long(msg) >= 3:
        text = raw_text.split(None, 2)[2]
        message_type = Types.TEXT
    elif reply:
        if reply.text:
            text = reply.text.markdown
            message_type = Types.TEXT
        # Media (if any) overrides; with include_text=False this returns
        # (None, None) for a pure-text reply.
        content, message_type = get_message_type(reply, include_text=False)
    else:
        return
    return note_name, text, message_type, content
def fetch_note_type(msg):
    """Extract ``(note_name, text, message_type, content)`` from *msg* itself.

    note_name is always None here; text is the markdown body for text
    messages, then get_message_type (media only) may override the type.
    """
    note_name, text, message_type, content = None, None, None, None
    if msg:
        if msg.text:
            text = msg.text.markdown
            message_type = Types.TEXT
        content, message_type = get_message_type(msg, include_text=False)
    return note_name, text, message_type, content
async def CheckAdmin(m: Message):
    """Return True when the invoking user is an admin or creator of the chat."""
    ranks = ["administrator", "creator"]
    data = await app.get_chat_member(
        chat_id=m.chat.id,
        user_id=m.from_user.id
    )
    # Membership test already yields the boolean; the original inverted
    # ternary (`False if not ... in ... else True`) was redundant.
    return data.status in ranks
async def CheckReplyAdmin(m: Message):
    """Ensure the command is a reply to someone else's message.

    Returns True when valid; otherwise edits an error notice, waits two
    seconds, deletes the command message, and returns None.
    """
    if not m.reply_to_message:
        await m.edit(f"`.{m.command[0]}` needs to be a reply")
        # Bug fix: time.sleep() blocked the whole event loop for 2 s;
        # use the awaitable asyncio.sleep instead.
        await asyncio.sleep(2)
        await m.delete()
    elif m.reply_to_message.from_user.is_self:
        await m.edit(f"I can't {m.command[0]} myself.")
        await asyncio.sleep(2)
        await m.delete()
    else:
        return True
async def RestrictFailed(m: Message):
    """Report that a restriction action failed, then delete the command."""
    # Bug fix: the original referenced an undefined name ``message``
    # (NameError at runtime); use the parameter ``m`` and the command
    # word, matching CheckReplyAdmin's message style.
    await m.edit(f"I can't {m.command[0]} this user.")
    # Non-blocking sleep so the event loop keeps running.
    await asyncio.sleep(2)
    await m.delete()
class AioHttp:
    """Small aiohttp convenience wrappers.

    Each call opens and closes its own ClientSession, so these are for
    occasional requests, not high-volume polling.
    """
    @staticmethod
    async def get_json(link):
        """GET *link* and decode the body as JSON (honors content type)."""
        async with aiohttp.ClientSession() as session:
            async with session.get(link) as resp:
                return await resp.json()
    @staticmethod
    async def get_text(link):
        """GET *link* and return the body as text."""
        async with aiohttp.ClientSession() as session:
            async with session.get(link) as resp:
                return await resp.text()
    @staticmethod
    async def get_json_from_text(link):
        """GET *link* and parse the text body as JSON.

        Useful when the server sends JSON with a non-JSON content type,
        which would make ``resp.json()`` raise.
        """
        async with aiohttp.ClientSession() as session:
            async with session.get(link) as resp:
                text = await resp.text()
                return json.loads(text)
    @staticmethod
    async def get_raw(link):
        """GET *link* and return the raw response bytes."""
        async with aiohttp.ClientSession() as session:
            async with session.get(link) as resp:
                return await resp.read()
    @staticmethod
    async def get_url(link):
        """GET *link* and return the final URL (after any redirects)."""
        async with aiohttp.ClientSession() as session:
            async with session.get(link) as resp:
                return resp.url
def clear_string(msg: str):
    """Strip common HTML and Markdown formatting markers from *msg*.

    Removes <code>/<i>/<b>/<u> tags and **bold**/__underline__/`code`
    wrappers, keeping the wrapped text.
    """
    # Bug fix: the replacement "\g<1>" was written in a plain string where
    # "\g" is an invalid escape sequence (SyntaxWarning on modern Python,
    # slated to become an error) — use raw strings throughout.
    msg = re.sub(r"\<code\>(.*)\<\/code\>", r"\g<1>", msg)
    msg = re.sub(r"\<i\>(.*)\<\/i\>", r"\g<1>", msg)
    msg = re.sub(r"\<b\>(.*)\<\/b\>", r"\g<1>", msg)
    msg = re.sub(r"\<u\>(.*)\<\/u\>", r"\g<1>", msg)
    msg = re.sub(r"\*\*(.*)\*\*", r"\g<1>", msg)
    msg = re.sub(r"\_\_(.*)\_\_", r"\g<1>", msg)
    msg = re.sub(r"\`(.*)\`", r"\g<1>", msg)
    return msg
def quote_html(text: str) -> str:
    """Escape ``&``, ``<`` and ``>`` for safe HTML embedding.

    Quotes are deliberately left alone (``quote=False``) since the text
    is placed in element content, not attributes.
    """
    return html.escape(text, quote=False)
async def progress_for_pyrogram(current, total, ud_type, message, start):
    """ generic progress display for Telegram Upload / Download status """
    # Throttle edits: refresh roughly every 10 seconds, plus one final
    # update when the transfer completes, to stay under flood limits.
    now = time.time()
    diff = now - start
    if round(diff % 10.00) == 0 or current == total:
        # if round(current / total * 100, 0) % 5 == 0:
        percentage = current * 100 / total
        speed = current / diff
        # time_formatter expects milliseconds.
        elapsed_time = round(diff) * 1000
        time_to_completion = round((total - current) / speed) * 1000
        estimated_total_time = elapsed_time + time_to_completion
        elapsed_time = time_formatter(milliseconds=elapsed_time)
        estimated_total_time = time_formatter(milliseconds=estimated_total_time)
        # 20-segment bar: one filled dot per 5%.
        progress = "**[{0}{1}]** \n**Progress**: __{2}%__\n".format(
            "".join(["●" for i in range(math.floor(percentage / 5))]),
            "".join(["○" for i in range(20 - math.floor(percentage / 5))]),
            round(percentage, 2),
        )
        tmp = progress + "**Done:** __{0} of {1}__\n**Speed:** __{2}/s__\n**ETA:** __{3}__\n".format(
            humanbytes(current),
            humanbytes(total),
            humanbytes(speed),
            estimated_total_time if estimated_total_time != "" else "0 s",
        )
        try:
            await message.edit(f"{ud_type}\n {tmp}")
        except (MessageNotModified, FloodWait):
            # Best-effort display only; ignore edit throttling errors.
            pass
def humanbytes(size: int) -> str:
    """ converts bytes into human readable format """
    # https://stackoverflow.com/a/49361727/4723940
    # Falsy input (0 / None) renders as an empty string.
    if not size:
        return ""
    units = {0: " ", 1: "Ki", 2: "Mi", 3: "Gi", 4: "Ti"}
    step = 2 ** 10
    exponent = 0
    while size > step:
        size /= step
        exponent += 1
    return f"{round(size, 2)} {units[exponent]}B"
# --------------------------------
def get_size_recursive(directory):
    """Total size in bytes of *directory*, recursing into subdirectories.

    A plain file yields its own size; an unreadable path yields 0.
    Entries that are neither regular files nor directories are skipped.
    """
    total_bytes = 0
    try:
        for entry in os.scandir(directory):
            if entry.is_file():
                total_bytes += entry.stat().st_size
            elif entry.is_dir():
                total_bytes += get_size_recursive(entry.path)
    except NotADirectoryError:
        # Not a directory at all: report the file's size.
        return os.path.getsize(directory)
    except PermissionError:
        # Unreadable folder: count it as empty rather than failing.
        return 0
    return total_bytes
def get_size_format(b, factor=1024, suffix="B"):
    """Scale *b* through K/M/G/... steps of *factor*, formatted to 2 dp."""
    for prefix in ("", "K", "M", "G", "T", "P", "E", "Z"):
        if b < factor:
            return f"{b:.2f}{prefix}{suffix}"
        b /= factor
    # Beyond zetta: fall through to yotta.
    return f"{b:.2f}Y{suffix}"
def get_directory_size(location):
    """Human-readable total size of *location* (file or directory tree)."""
    total_bytes = get_size_recursive(location)
    return get_size_format(total_bytes)
def cleanhtml(raw_html):
    """Strip HTML tags from *raw_html* with a naive non-greedy regex."""
    return re.sub("<.*?>", "", raw_html)
def escape_markdown(text):
    """Backslash-escape the Markdown control characters ``* _ ` [``."""
    pattern = r"([\*_`\[])"
    return re.sub(pattern, r"\\\1", text)
def mention_html(user_id, name):
    """HTML user-mention link using the Telegram ``tg://user`` deep link."""
    safe_name = html.escape(name)
    return f'<a href="tg://user?id={user_id}">{safe_name}</a>'
def mention_markdown(user_id, name):
    """Markdown user-mention link using the Telegram ``tg://user`` deep link."""
    return f"[{escape_markdown(name)}](tg://user?id={user_id})"
def parse_button(text):
    """Split button markup out of a note body.

    Scans *text* for ``[label](buttonurl:...)`` patterns (BTN_URL_REGEX)
    and returns ``(note_data, buttons)`` where note_data is the text with
    the button markup removed and buttons is a list of
    ``(label, url, same_line)`` triples.
    """
    markdown_note = text
    prev = 0
    note_data = ""
    buttons = []
    for match in BTN_URL_REGEX.finditer(markdown_note):
        # Check if btnurl is escaped
        n_escapes = 0
        to_check = match.start(1) - 1
        while to_check > 0 and markdown_note[to_check] == "\\":
            n_escapes += 1
            to_check -= 1
        # if even, not escaped -> create button
        if n_escapes % 2 == 0:
            # create a thruple with button label, url, and newline status
            buttons.append((match.group(2), match.group(3), bool(match.group(4))))
            note_data += markdown_note[prev:match.start(1)]
            prev = match.end(1)
        # if odd, escaped -> move along
        else:
            note_data += markdown_note[prev:to_check]
            prev = match.start(1) - 1
    else:
        # for/else: runs once the scan finishes — append the tail of the text.
        note_data += markdown_note[prev:]
    return note_data, buttons
def build_keyboard(buttons):
    """One InlineKeyboardButton per ``(label, data, ...)`` tuple.

    NOTE(review): ``btn[1]`` is the URL field produced by parse_button but
    is passed as ``callback_data`` — looks intentional for this bot;
    verify against the callers.
    """
    return [
        InlineKeyboardButton(btn[0], callback_data=btn[1])
        for btn in buttons
    ]
def time_formatter(milliseconds: int) -> str:
    """Format a millisecond count as e.g. ``'1d, 2h, 3m, 4s, 5ms'``.

    Zero-valued components are omitted; 0 ms yields ''.
    """
    seconds, milliseconds = divmod(int(milliseconds), 1000)
    minutes, seconds = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    parts = []
    for value, unit in ((days, "d"), (hours, "h"), (minutes, "m"),
                        (seconds, "s"), (milliseconds, "ms")):
        if value:
            parts.append(f"{value}{unit}")
    return ", ".join(parts)
def time_parser(start, end=None) -> int:
    """Describe a duration (or ``end - start`` delta) in words.

    Components overlap by design (days are not reduced modulo months),
    matching the original behavior. A zero duration is reported as
    milliseconds.
    """
    duration = start if end is None else end - start
    months = duration // 2678400
    day_count = duration // 86400
    hour_count = duration // 3600 % 24
    minute_count = duration // 60 % 60
    second_count = duration % 60
    described = ""
    if months:
        described += f"{months} month, "
    if day_count:
        described += f"{day_count} days, "
    if hour_count:
        described += f"{hour_count} hours, "
    if minute_count:
        described += f"{minute_count} minutes, "
    if second_count:
        described += f"{second_count} seconds"
    if not described:
        described = f"{duration} miliseconds"
    return described
def convert_size(size_bytes):
    """Human-readable size string over powers of 1024 ('0B' for zero)."""
    if size_bytes == 0:
        return "0B"
    names = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
    exponent = int(math.floor(math.log(size_bytes, 1024)))
    scaled = round(size_bytes / math.pow(1024, exponent), 2)
    return f"{scaled} {names[exponent]}"
def ReplyCheck(m: Message):
reply_id = False
reply = m.reply_to_message
if reply:
reply_id = reply.message_id if reply else m.message_id if not m.from_user.is_self else False
return reply_id
def get_arg(m):
    """Everything after the command word as a single string ('' if none)."""
    raw = m.text
    # Drop a stray space right after the prefix character.
    if raw[1] == " ":
        raw = raw.replace(" ", "", 1)
    words = raw[1:].replace("\n", " \n").split(" ")
    remainder = " ".join(words[1:])
    if remainder.strip() == "":
        return ""
    return remainder
def get_args(m):
    """Shell-style argument list after the command word.

    Returns False when the update has no usable text, [] when there are
    no arguments, the raw remainder string when shlex cannot tokenize it
    (e.g. unbalanced quotes), otherwise the list of non-empty tokens.
    """
    # Bug fix: the original left ``message`` unbound when ``m`` had no
    # ``.text`` attribute, so ``if not message`` raised NameError.
    message = None
    try:
        message = m.text
    except AttributeError:
        pass
    if not message:
        return False
    parts = message.split(maxsplit=1)
    if len(parts) <= 1:
        return []
    payload = parts[1]
    try:
        tokens = shlex.split(payload)
    except ValueError:
        return payload  # Cannot split, let's assume that it's just one long message
    return [tok for tok in tokens if tok]
def speed_convert(size):
    """Scale a transfer rate through Kb/s–Tb/s steps of 1024."""
    units = {
        0: '',
        1: 'Kb/s',
        2: 'Mb/s',
        3: 'Gb/s',
        4: 'Tb/s'}
    step = 2 ** 10
    idx = 0
    while size > step:
        size /= step
        idx += 1
    return f"{round(size, 2)} {units[idx]}"
def get_readable_time(seconds: int) -> str:
    """Render a seconds count as e.g. '1h:2m:3s' (days split out; '' for 0)."""
    suffixes = ["s", "m", "h", "days"]
    chunks = []
    for step in range(1, 5):
        # First two steps divide by 60 (s→m→h), then by 24 (h→days).
        remainder, value = divmod(seconds, 60) if step < 3 else divmod(seconds, 24)
        if seconds == 0 and remainder == 0:
            break
        chunks.append(str(int(value)) + suffixes[step - 1])
        seconds = int(remainder)
    readable = ""
    if len(chunks) == 4:
        readable += chunks.pop() + ", "
    chunks.reverse()
    readable += ":".join(chunks)
    return readable
async def is_thumb_image_exists(file_name: str):
    """Choose a thumbnail path for an upload.

    Priority: a custom ``thumb_image.jpg`` in ``Config.TEMP_DICT`` if one
    exists; otherwise, for video files, a frame grabbed at a random
    timestamp; otherwise None.
    """
    thumb_image_path = os.path.join(Config.TEMP_DICT, "thumb_image.jpg")
    if os.path.exists(thumb_image_path):
        thumb_image_path = os.path.join(Config.TEMP_DICT, "thumb_image.jpg")
    elif file_name is not None and file_name.lower().endswith(("mp4", "mkv", "webm")):
        metadata = extractMetadata(createParser(file_name))
        duration = 0
        if metadata.has("duration"):
            duration = metadata.get("duration").seconds
        # get a random TTL from the duration
        # NOTE(review): a duration of 0 makes randint(0, -1) raise
        # ValueError — confirm all handled videos report a duration.
        ttl = str(random.randint(0, duration - 1))
        thumb_image_path = gen_tg_thumbnail(await take_screen_shot(file_name, ttl))
    else:
        thumb_image_path = None
    return thumb_image_path
def gen_tg_thumbnail(downloaded_file_name: str) -> str:
    """Normalize an image into a Telegram-ready JPEG thumbnail, in place.

    Converts to RGB, resizes to a width of 320 keeping the height that
    hachoir reports, and re-saves over the same path.
    """
    Image.open(downloaded_file_name).convert("RGB").save(downloaded_file_name)
    metadata = extractMetadata(createParser(downloaded_file_name))
    height = 0
    if metadata.has("height"):
        height = metadata.get("height")
    img = Image.open(downloaded_file_name)
    # Bug fix: PIL's resize() returns a NEW image — the original dropped
    # the result and saved the unresized picture.
    # NOTE(review): height stays 0 when hachoir reports none, which would
    # produce a (320, 0) image — confirm inputs always carry a height.
    img = img.resize((320, height))
    img.save(downloaded_file_name, "JPEG")
    return downloaded_file_name
async def run_command(shell_command: List) -> (str, str):
    """Run an argv list without a shell; return stripped (stdout, stderr)."""
    proc = await asyncio.create_subprocess_exec(
        *shell_command,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
    )
    raw_out, raw_err = await proc.communicate()
    return raw_out.decode().strip(), raw_err.decode().strip()
async def extract_user(m: Message) -> | |
1]
[0 1]
sage: w._h_inverse_transition_matrices[2]
[ 1 -1]
[ 0 1]
sage: w._h_transition_matrices.keys()
[0, 1, 2]
"""
l = len(self._h_transition_matrices)
if l <= n:
from sage.combinat.partition import Partitions_n
from sage.misc.cachefunc import cached_function
@cached_function
def wsum(m): # expansion of h_m in w-basis, for m > 0
return self._from_dict({lam: 1 for lam in Partitions_n(m)})
for i in range(l, n + 1):
self._precompute_cache(i, self._h_to_self_cache,
self._h_from_self_cache,
self._h_transition_matrices,
self._h_inverse_transition_matrices,
wsum)
def _precompute_e(self, n):
    """
    Compute the transition matrices between ``self`` and the elementary
    symmetric basis in the homogeneous components of degree `n`
    (and in those of smaller degree, if not already computed).
    The result is not returned, but rather stored in the cache.
    This assumes that the ``coerce_e`` keyword has been set to
    ``True`` in the initialization of ``self`` (otherwise the cache
    does not exist).
    INPUT:
    - ``n`` -- nonnegative integer
    EXAMPLES:
    The examples below demonstrate how the caches of ``w`` are built
    step by step using the ``_precompute_e`` method. Thus they rely on
    an untouched Witt symmetric basis that hasn't already seen some
    of its cache filled by other computations. We obtain such a basis
    by choosing a ground ring unlikely to appear elsewhere::
        sage: Sym = SymmetricFunctions(ZZ['hell', 'yeah'])
        sage: w = Sym.Witt(coerce_e=True)
        sage: l = lambda c: [ (i[0],[j for j in sorted(i[1].items())]) for i in sorted(c.items())]
        sage: l(w._e_to_self_cache)
        []
        sage: w._precompute_e(0)
        sage: l(w._e_to_self_cache)
        [([], [([], 1)])]
        sage: w._precompute_e(1)
        sage: l(w._e_to_self_cache)
        [([], [([], 1)]), ([1], [([1], 1)])]
        sage: w._precompute_e(2)
        sage: l(w._e_to_self_cache)
        [([], [([], 1)]),
         ([1], [([1], 1)]),
         ([1, 1], [([1, 1], 1)]),
         ([2], [([2], -1)])]
        sage: w._e_transition_matrices[2]
        [-1 0]
        [ 0 1]
        sage: w._e_inverse_transition_matrices[2]
        [-1 0]
        [ 0 1]
    """
    # Caches are filled degree by degree; ``l`` is the first degree whose
    # transition matrices have not yet been computed.
    l = len(self._e_transition_matrices)
    if l <= n:
        from sage.combinat.partition import Partitions
        from sage.misc.cachefunc import cached_function
        @cached_function
        def wsum_e(m): # expansion of e_m in w-basis, for m > 0
            # e_m is a signed sum of w_lam over strict partitions lam of m
            # (max_slope=-1), with sign (-1)^(m + length(lam)).
            return self._from_dict({lam: (-1 if (m + len(lam)) % 2 == 1 else 1)
                                    for lam in Partitions(m, max_slope=-1)})
        for i in range(l, n + 1):
            self._precompute_cache(i, self._e_to_self_cache,
                                   self._e_from_self_cache,
                                   self._e_transition_matrices,
                                   self._e_inverse_transition_matrices,
                                   wsum_e)
def _precompute_p(self, n):
    """
    Compute the transition matrices between ``self`` and the powersum
    basis in the homogeneous components of degree `n`
    (and in those of smaller degree, if not already computed).
    The result is not returned, but rather stored in the cache.
    This assumes that the ``coerce_p`` keyword has been set to
    ``True`` in the initialization of ``self`` (otherwise the cache
    does not exist).
    INPUT:
    - ``n`` -- nonnegative integer
    EXAMPLES:
    The examples below demonstrate how the caches of ``w`` are built
    step by step using the ``_precompute_p`` method. Thus they rely on
    an untouched Witt symmetric basis that hasn't already seen some
    of its cache filled by other computations. We obtain such a basis
    by choosing a ground ring unlikely to appear elsewhere::
        sage: Sym = SymmetricFunctions(QQ['hell', 'yeah'])
        sage: w = Sym.Witt(coerce_h=False, coerce_e=True, coerce_p=True)
        sage: l = lambda c: [ (i[0],[j for j in sorted(i[1].items())]) for i in sorted(c.items())]
        sage: l(w._p_to_self_cache)
        []
        sage: w._precompute_p(0)
        sage: l(w._p_to_self_cache)
        [([], [([], 1)])]
        sage: w._precompute_p(1)
        sage: l(w._p_to_self_cache)
        [([], [([], 1)]), ([1], [([1], 1)])]
        sage: w._precompute_p(2)
        sage: l(w._p_to_self_cache)
        [([], [([], 1)]), ([1], [([1], 1)]), ([1, 1], [([1, 1], 1)]), ([2], [([1, 1], 1), ([2], 2)])]
        sage: w._p_transition_matrices[2]
        [2 1]
        [0 1]
        sage: w._p_inverse_transition_matrices[2]
        [ 1/2 -1/2]
        [   0    1]
    """
    # Caches are filled degree by degree; ``l`` is the first degree whose
    # transition matrices have not yet been computed.
    l = len(self._p_transition_matrices)
    if l <= n:
        # NOTE(review): ``sage.rings.arith`` was relocated in newer Sage
        # releases (``sage.arith``) — confirm against the target version.
        from sage.rings.arith import divisors
        from sage.combinat.partition import Partition
        from sage.misc.cachefunc import cached_function
        @cached_function
        def wsum_p(m): # expansion of p_m in w-basis, for m > 0
            # p_m = sum over divisors d of m of d * w_{(d, d, ..., d)}
            # with m/d parts equal to d.
            return self._from_dict({Partition([d] * (m // d)): d
                                    for d in divisors(m)})
        for i in range(l, n + 1):
            self._precompute_cache(i, self._p_to_self_cache,
                                   self._p_from_self_cache,
                                   self._p_transition_matrices,
                                   self._p_inverse_transition_matrices,
                                   wsum_p)
def _h_to_w_on_basis(self, lam):
    r"""
    Expand the complete homogeneous symmetric function ``h[lam]`` in
    the Witt basis ``self``, for ``lam`` a partition.

    The h-cache must exist, i.e. ``self`` was initialized with the
    ``coerce_h`` keyword set to ``True``.

    INPUT:

    - ``lam`` -- a partition

    OUTPUT: the element of ``self`` equal to ``h[lam]``

    EXAMPLES::

        sage: Sym = SymmetricFunctions(QQ)
        sage: h = Sym.homogeneous()
        sage: w = Sym.w()
        sage: w._h_to_w_on_basis(Partition([]))
        w[]
        sage: h(w._h_to_w_on_basis(Partition([3,1]))) == h[3,1]
        True
    """
    self._precompute_h(sum(lam))
    return self._from_dict(self._h_to_self_cache[lam])
def _w_to_h_on_basis(self, lam):
    r"""
    Expand the Witt symmetric function ``w[lam]`` in the complete
    homogeneous basis, for ``lam`` a partition.

    The h-cache must exist, i.e. ``self`` was initialized with the
    ``coerce_h`` keyword set to ``True``.

    INPUT:

    - ``lam`` -- a partition

    OUTPUT: the expansion of ``w[lam]`` in the complete homogeneous
    basis of ``self.realization_of()``

    EXAMPLES::

        sage: Sym = SymmetricFunctions(QQ)
        sage: h = Sym.homogeneous()
        sage: w = Sym.w()
        sage: w._w_to_h_on_basis(Partition([]))
        h[]
        sage: w(w._w_to_h_on_basis(Partition([3,1]))) == w[3,1]
        True
    """
    self._precompute_h(sum(lam))
    return self._h._from_dict(self._h_from_self_cache[lam])
def _e_to_w_on_basis(self, lam):
    r"""
    Expand the elementary symmetric function ``e[lam]`` in the Witt
    basis ``self``, for ``lam`` a partition.

    The e-cache must exist, i.e. ``self`` was initialized with the
    ``coerce_e`` keyword set to ``True``.

    INPUT:

    - ``lam`` -- a partition

    OUTPUT: the element of ``self`` equal to ``e[lam]``

    EXAMPLES::

        sage: Sym = SymmetricFunctions(QQ)
        sage: e = Sym.elementary()
        sage: w = Sym.w(coerce_e=True)
        sage: w._e_to_w_on_basis(Partition([]))
        w[]
        sage: e(w._e_to_w_on_basis(Partition([3,1]))) == e[3,1]
        True
    """
    self._precompute_e(sum(lam))
    return self._from_dict(self._e_to_self_cache[lam])
def _w_to_e_on_basis(self, lam):
    r"""
    Expand the Witt symmetric function ``w[lam]`` in the elementary
    symmetric basis, for ``lam`` a partition.

    The e-cache must exist, i.e. ``self`` was initialized with the
    ``coerce_e`` keyword set to ``True``.

    INPUT:

    - ``lam`` -- a partition

    OUTPUT: the expansion of ``w[lam]`` in the elementary symmetric
    basis of ``self.realization_of()``

    EXAMPLES::

        sage: Sym = SymmetricFunctions(QQ)
        sage: e = Sym.elementary()
        sage: w = Sym.w(coerce_e=True)
        sage: w._w_to_e_on_basis(Partition([]))
        e[]
        sage: w(w._w_to_e_on_basis(Partition([3,1]))) == w[3,1]
        True
    """
    self._precompute_e(sum(lam))
    return self._e._from_dict(self._e_from_self_cache[lam])
def _p_to_w_on_basis(self, lam):
    r"""
    Expand the powersum symmetric function ``p[lam]`` in the Witt
    basis ``self``, for ``lam`` a partition.

    The p-cache must exist, i.e. ``self`` was initialized with the
    ``coerce_p`` keyword set to ``True``.

    INPUT:

    - ``lam`` -- a partition

    OUTPUT: the element of ``self`` equal to ``p[lam]``

    EXAMPLES::

        sage: Sym = SymmetricFunctions(QQ)
        sage: p = Sym.power()
        sage: w = Sym.w(coerce_p=True)
        sage: w._p_to_w_on_basis(Partition([]))
        w[]
        sage: p(w._p_to_w_on_basis(Partition([3,1]))) == p[3,1]
        True
    """
    self._precompute_p(sum(lam))
    return self._from_dict(self._p_to_self_cache[lam])
def _w_to_p_on_basis(self, lam):
r"""
Return the Witt symmetric function ``w[lam]`` expanded in the
powersum basis, where ``lam`` is a partition.
This assumes that the ``coerce_p`` keyword has been set to ``True`` in
the initialization of ``self`` (otherwise the cache does not exist).
INPUT:
- ``lam`` -- a partition
OUTPUT:
- the expansion of ``w[lam]`` in the powersum
basis of ``self.realization_of()``
EXAMPLES::
sage: Sym = SymmetricFunctions(QQ)
sage: p = Sym.power()
sage: w = Sym.w(coerce_p=True)
sage: w._w_to_p_on_basis(Partition([]))
p[]
sage: w._w_to_p_on_basis(Partition([4,2,1]))
3/16*p[1, 1, 1, 1, 1, 1, 1] - 5/16*p[2, 1, 1, 1, 1, 1] + 3/16*p[2, | |
import argparse
from pathlib import Path
import networkx as nx
import nxmetis
import torch
import torch.nn as nn
import torch.multiprocessing as mp
from torch_geometric.data import Data, DataLoader, Batch
from torch_geometric.nn import SAGEConv, GATConv, GlobalAttention, graclus, avg_pool, global_mean_pool
from torch_geometric.utils import to_networkx, k_hop_subgraph, degree
import numpy as np
from numpy import random
import scipy
from scipy.sparse import coo_matrix
from scipy.io import mmread
from scipy.spatial import Delaunay
#import random_p
import copy
import math
import timeit
import os
from itertools import combinations
import ctypes
libscotch = ctypes.cdll.LoadLibrary('scotch/build/libSCOTCHWrapper.so')
# Networkx geometric Delaunay mesh with n random points in the unit square
def graph_delaunay_from_points(points):
    """Networkx graph of the Delaunay triangulation of the 2-D *points*.

    qhull_options="QJ" joggles the input so degenerate point sets still
    triangulate.
    """
    triangulation = Delaunay(points, qhull_options="QJ")
    edge_set = set()
    for simplex in triangulation.simplices:
        # Every pair of vertices in a triangle is an edge.
        edge_set.update(combinations(simplex, 2))
    return nx.Graph(list(edge_set))
# Pytorch geometric Delaunay mesh with n random points in the unit square
def random_delaunay_graph(n):
    """Pytorch-geometric Delaunay mesh on n uniform points in the unit square.

    Node features are the two-way one-hot [1, 0] for every vertex.
    """
    pts = np.random.random_sample((n, 2))
    mesh_graph = graph_delaunay_from_points(pts)
    adj = nx.to_scipy_sparse_matrix(mesh_graph, format='coo')
    feats = [[1., 0.] for _ in range(mesh_graph.number_of_nodes())]
    edge_index = torch.tensor([adj.row, adj.col], dtype=torch.long)
    node_feats = torch.tensor(np.array(feats), dtype=torch.float)
    return Data(x=node_feats, edge_index=edge_index)
# Build a pytorch geometric graph with features [1,0] form a networkx graph
def torch_from_graph(g):
    """Torch graph with [1, 0] features built from a networkx graph *g*.

    One random vertex of minimum degree is flipped to the other side to
    seed the partition.
    """
    adj = nx.to_scipy_sparse_matrix(g, format='coo')
    feats = [[1., 0.] for _ in range(g.number_of_nodes())]
    edge_index = torch.tensor([adj.row, adj.col], dtype=torch.long)
    node_feats = torch.tensor(np.array(feats), dtype=torch.float)
    data = Data(x=node_feats, edge_index=edge_index)
    degs = np.sum(adj.todense(), axis=0)
    candidates = np.where(degs == np.min(degs))[0]
    seed_vertex = np.random.choice(candidates)
    change_vertex(data, seed_vertex)
    return data
# Build a pytorch geometric graph with features [1,0] form a sparse matrix
def torch_from_sparse(adj_sparse):
    """Torch graph with [1, 0] features from a scipy COO adjacency matrix."""
    feats = [[1., 0.] for _ in range(adj_sparse.shape[0])]
    edge_index = torch.tensor([adj_sparse.row, adj_sparse.col], dtype=torch.long)
    node_feats = torch.tensor(np.array(feats), dtype=torch.float)
    return Data(x=node_feats, edge_index=edge_index)
# Cut of the input graph
def cut(graph):
    """Number of edges crossing the partition (each counted once).

    Each undirected edge appears twice in edge_index, hence the /2.
    """
    src, dst = graph.edge_index[0], graph.edge_index[1]
    crossing = (graph.x[src, :2] != graph.x[dst, :2]).all(axis=-1)
    return torch.sum(crossing).detach().item() / 2
# Change the feature of the selected vertex
def change_vertex(state, vertex):
    """Toggle the two-way one-hot partition label of *vertex* in place."""
    if (state.x[vertex, :2] == torch.tensor([1., 0.])).all():
        new_label = (0., 1.)
    else:
        new_label = (1., 0.)
    state.x[vertex, 0] = torch.tensor(new_label[0])
    state.x[vertex, 1] = torch.tensor(new_label[1])
    return state
# Normalized cut of the input graph
def normalized_cut(graph):
    """Normalized cut of the current partition; 2 when a side is empty."""
    cut_value, vol_a, vol_b = volumes(graph)
    if vol_a == 0 or vol_b == 0:
        # Degenerate partition: return the worst possible value.
        return 2
    return cut_value * (1 / vol_a + 1 / vol_b)
# Coarsen a pytorch geometric graph, then find the cut with METIS and
# interpolate it back
def partition_metis_refine(graph):
    """Coarsen *graph* one level with graclus, 2-partition the coarse graph
    with METIS, and interpolate the labels back onto *graph* (in place)."""
    cluster = graclus(graph.edge_index)
    coarse_graph = avg_pool(
        cluster,
        Batch(
            batch=graph.batch,
            x=graph.x,
            edge_index=graph.edge_index))
    coarse_graph_nx = to_networkx(coarse_graph, to_undirected=True)
    _, parts = nxmetis.partition(coarse_graph_nx, 2)
    mparts = np.array(parts)
    # One-hot labels: part 0 -> [1,0], part 1 -> [0,1].
    coarse_graph.x[np.array(parts[0])] = torch.tensor([1., 0.])
    coarse_graph.x[np.array(parts[1])] = torch.tensor([0., 1.])
    # ``inverse`` maps every fine vertex to its coarse cluster index.
    _, inverse = torch.unique(cluster, sorted=True, return_inverse=True)
    graph.x = coarse_graph.x[inverse]
    return graph
# Subgraph around the cut
def k_hop_graph_cut(graph, k, g, va, vb):
    """Extract the k-hop subgraph around the current cut.

    Returns ``(g_red, node_index)`` where g_red is a Batch whose features
    are [label0, label1, is_boundary, volA/|E|, volB/|E|] and node_index
    maps the subgraph's vertices back to *graph*.
    NOTE(review): the parameter ``g`` is unused here — confirm whether
    callers rely on the signature.
    """
    # Source endpoints of edges whose two endpoints carry different labels.
    nei = torch.where((graph.x[graph.edge_index[0], :2] !=
                       graph.x[graph.edge_index[1], :2]).all(axis=-1))[0]
    neib = graph.edge_index[0][nei]
    data_cut = k_hop_subgraph(neib, k, graph.edge_index, relabel_nodes=True)
    data_small = k_hop_subgraph(
        neib,
        k - 1,
        graph.edge_index,
        relabel_nodes=True)
    # Boundary = vertices reached at exactly hop k (in the k-hop set but
    # not in the (k-1)-hop set).
    nodes_boundary = list(
        set(data_cut[0].numpy()).difference(data_small[0].numpy()))
    boundary_features = torch.tensor([1. if i.item(
    ) in nodes_boundary else 0. for i in data_cut[0]]).reshape(data_cut[0].shape[0], 1)
    e = torch.ones(data_cut[0].shape[0], 1)
    nnz = graph.num_edges
    # Append the boundary flag and the two normalized partition volumes.
    features = torch.cat((graph.x[data_cut[0]], boundary_features, torch.true_divide(
        va, nnz) * e, torch.true_divide(vb, nnz) * e), 1)
    g_red = Batch(
        batch=torch.zeros(
            data_cut[0].shape[0],
            dtype=torch.long),
        x=features,
        edge_index=data_cut[1])
    return g_red, data_cut[0]
# Volumes of the partitions
def volumes(graph):
    """Return ``(cut, vol_A, vol_B)``: the cut size and the degree-volumes
    of the two partitions encoded in the first two feature columns."""
    in_a = torch.where(
        (graph.x[:, :2] == torch.tensor([1.0, 0.0])).all(axis=-1))[0]
    in_b = torch.where(
        (graph.x[:, :2] != torch.tensor([1.0, 0.0])).all(axis=-1))[0]
    # Bug fix: dtype was torch.uint8, which silently overflows for any
    # vertex of degree > 255 (and for the summed volumes); use torch.long.
    degs = degree(
        graph.edge_index[0],
        num_nodes=graph.x.size(0),
        dtype=torch.long)
    vol_a = torch.sum(degs[in_a]).detach().item()
    vol_b = torch.sum(degs[in_b]).detach().item()
    # Each undirected edge appears twice in edge_index, hence the /2.
    cut = torch.sum((graph.x[graph.edge_index[0],
                             :2] != graph.x[graph.edge_index[1],
                                            :2]).all(axis=-1)).detach().item() / 2
    return cut, vol_a, vol_b
# Full valuation of the DRL model
def ac_eval_coarse_full_drl(ac, graph, k, ac2):
    """Multilevel DRL partitioning: coarsen with graclus down to <=100
    nodes, partition the coarsest graph with policy ``ac2``, then
    uncoarsen level by level, refining the cut with policy ``ac``."""
    g = graph.clone()
    info = []
    edge_info = []
    # Coarsening phase: remember each clustering and edge set so the
    # labels can be interpolated back up later.
    while g.num_nodes > 100:
        edge_info.append(g.edge_index)
        cluster = graclus(g.edge_index)
        info.append(cluster)
        g1 = avg_pool(
            cluster,
            Batch(
                batch=g.batch,
                x=g.x,
                edge_index=g.edge_index))
        g = g1
    gnx = to_networkx(g, to_undirected=True)
    g = torch_from_graph(gnx)
    g.batch = torch.zeros(g.num_nodes, dtype=torch.long)
    # Initial partition on the coarsest graph (1% balance slack).
    g = ac_eval(ac2, g, 0.01)
    # Uncoarsening phase: project labels to the finer level and refine.
    while len(info) > 0:
        cluster = info.pop()
        _, inverse = torch.unique(cluster, sorted=True, return_inverse=True)
        g.x = g.x[inverse]
        g.edge_index = edge_info.pop()
        _, volA, volB = volumes(g)
        gnx = to_networkx(g, to_undirected=True)
        g = ac_eval_refine(ac, g, k, gnx, volA, volB)
    return g
# Full valuation of the DRL model repeated for trials number of times.
# Then the best partition is returned
def ac_eval_coarse_full_trials_drl(ac, graph, k, trials, ac2):
    """Run the full DRL evaluation *trials* times and return the partition
    with the smallest normalized cut."""
    graph_test = graph.clone()
    best = ac_eval_coarse_full_drl(ac, graph_test, k, ac2)
    best_ncut = normalized_cut(best)
    for _ in range(1, trials):
        candidate = ac_eval_coarse_full_drl(ac, graph_test, k, ac2)
        # Compute the metric once per candidate (the original evaluated
        # normalized_cut twice on improvement).
        candidate_ncut = normalized_cut(candidate)
        if candidate_ncut < best_ncut:
            best_ncut = candidate_ncut
            best = candidate
    return best
# Full valuation of the DRL_METIS model repeated for trials number of
# times. Then the best partition is returned
def ac_eval_coarse_full_trials(ac, graph, k, trials):
    """Run the full DRL+METIS evaluation *trials* times and return the
    partition with the smallest normalized cut."""
    graph_test = graph.clone()
    best = ac_eval_coarse_full(ac, graph_test, k)
    best_ncut = normalized_cut(best)
    for _ in range(1, trials):
        candidate = ac_eval_coarse_full(ac, graph_test, k)
        # Compute the metric once per candidate (the original evaluated
        # normalized_cut twice on improvement).
        candidate_ncut = normalized_cut(candidate)
        if candidate_ncut < best_ncut:
            best_ncut = candidate_ncut
            best = candidate
    return best
# Full valuation of the DRL_METIS model
def ac_eval_coarse_full(ac, graph, k):
    """Multilevel DRL+METIS partitioning: coarsen with graclus down to
    <=100 nodes, partition the coarsest graph with METIS, then uncoarsen
    level by level, refining the cut with policy ``ac``."""
    g = graph.clone()
    info = []
    edge_info = []
    # Coarsening phase: remember each clustering and edge set so the
    # labels can be interpolated back up later.
    while g.num_nodes > 100:
        edge_info.append(g.edge_index)
        cluster = graclus(g.edge_index)
        info.append(cluster)
        g1 = avg_pool(
            cluster,
            Batch(
                batch=g.batch,
                x=g.x,
                edge_index=g.edge_index))
        g = g1
    gnx = to_networkx(g, to_undirected=True)
    # Initial partition on the coarsest graph via METIS.
    g = partition_metis(g, gnx)
    # Uncoarsening phase: project labels to the finer level and refine.
    while len(info) > 0:
        cluster = info.pop()
        _, inverse = torch.unique(cluster, sorted=True, return_inverse=True)
        g.x = g.x[inverse]
        g.edge_index = edge_info.pop()
        _, volA, volB = volumes(g)
        gnx = to_networkx(g, to_undirected=True)
        g = ac_eval_refine(ac, g, k, gnx, volA, volB)
    return g
# Partitioning of a pytorch geometric graph obtained with METIS
def partition_metis(graph, graph_nx):
    """Write the 2-way METIS partition of *graph_nx* into graph.x one-hots
    (part 0 -> [1,0], part 1 -> [0,1]); returns *graph* modified in place."""
    # Dead locals removed: the objective value and the np.array copy of
    # ``parts`` were computed but never used.
    _, parts = nxmetis.partition(graph_nx, 2)
    graph.x[parts[0]] = torch.tensor([1., 0.])
    graph.x[parts[1]] = torch.tensor([0., 1.])
    return graph
# Refining the cut on the subgraph around the cut
def ac_eval_refine(ac, graph_t, k, gnx, volA, volB):
    """Greedily flip vertices near the cut and keep the best prefix of flips.

    Builds the k-hop subgraph around the current cut, then repeatedly flips
    the vertex the policy rates highest, tracking the cumulative decrease of
    the normalized cut. Finally only the prefix of flips up to the peak
    cumulative reward is replayed onto a fresh clone of the input graph.

    :param ac: actor-critic policy network (called on the subgraph)
    :param graph_t: partitioned pytorch-geometric graph (not modified)
    :param k: number of hops around the cut used for the subgraph
    :param gnx: networkx view of the same graph (used for degree lookups)
    :param volA: current volume of partition A
    :param volB: current volume of partition B
    :return: a clone of `graph_t` with the best flip prefix applied
    """
    graph = graph_t.clone()
    g0 = graph_t.clone()
    data = k_hop_graph_cut(graph, k, gnx, volA, volB)
    graph_cut, positions = data[0], data[1]
    # The episode length is bounded by the current cut size.
    len_episod = int(cut(graph))
    peak_reward = 0
    peak_time = 0
    total_reward = 0
    actions = []
    e = torch.ones(graph_cut.num_nodes, 1)  # NOTE(review): unused here — candidate for removal
    nnz = graph.num_edges  # normalizer for the volume features written below
    cut_sub = len_episod
    for i in range(len_episod):
        with torch.no_grad():
            policy = ac(graph_cut)
        probs = policy.view(-1).clone().detach().numpy()
        # Greedy action: flip the vertex with the highest policy score.
        flip = np.argmax(probs)
        # `positions` maps subgraph indices back to full-graph vertex ids.
        dv = gnx.degree[positions[flip].item()]
        old_nc = cut_sub * (torch.true_divide(1, volA) +
                            torch.true_divide(1, volB))
        # Move the flipped vertex's degree between the partition volumes.
        if graph_cut.x[flip, 0] == 1.:
            volA = volA - dv
            volB = volB + dv
        else:
            volA = volA + dv
            volB = volB - dv
        new_nc, cut_sub = update_nc(
            graph, gnx, cut_sub, positions[flip].item(), volA, volB)
        # Reward = decrease of the normalized cut caused by this flip.
        total_reward += (old_nc - new_nc).item()
        actions.append(flip)
        change_vertex(graph_cut, flip)
        change_vertex(graph, positions[flip])
        # Refresh the normalized volume features on every subgraph node.
        graph_cut.x[:, 3] = torch.true_divide(volA, nnz)
        graph_cut.x[:, 4] = torch.true_divide(volB, nnz)
        # Flipping the same vertex twice in a row undoes itself: stop early.
        if i >= 1 and actions[-1] == actions[-2]:
            break
        if total_reward > peak_reward:
            peak_reward = total_reward
            peak_time = i + 1
    # Replay only the flips up to the peak cumulative reward.
    for t in range(peak_time):
        g0 = change_vertex(g0, positions[actions[t]])
    return g0
# Compute the update for the normalized cut
def update_nc(graph, gnx, cut_total, v1, va, vb):
    """Incrementally update the cut size after vertex `v1` changed side.

    Every neighbor of `v1` now on the opposite side means one former cut edge
    removed (+1 towards the balance), every neighbor on the same side means a
    new cut edge (-1); the balance is subtracted from the running cut size.

    :param graph: pytorch-geometric graph with one-hot labels in `graph.x`
    :param gnx: networkx-style adjacency (``gnx[v1]`` iterates neighbors)
    :param cut_total: cut size before the flip of `v1`
    :param v1: id of the flipped vertex
    :param va: volume of partition A after the flip
    :param vb: volume of partition B after the flip
    :return: (new normalized cut, new cut size)
    """
    side = graph.x[v1, 0]
    balance = sum(1 if graph.x[v, 0] != side else -1 for v in gnx[v1])
    cut_new = cut_total - balance
    norm_cut = cut_new * (torch.true_divide(1, va) +
                          torch.true_divide(1, vb))
    return norm_cut, cut_new
# Evaluation of the DRL model on the coarsest graph
def ac_eval(ac, graph, perc):
    """Sample one flipping episode of the policy `ac` and return a partition.

    Runs the policy for roughly half the nodes (plus a balance slack of
    ``perc * num_nodes``), records the cut after every flip inside the
    balance window, then rolls back to the flip prefix with the minimum cut.

    :param ac: actor-critic model.  NOTE(review): unpacked here as
        ``policy, _ = ac(...)`` while other call sites use the return value
        directly — confirm which interface the model actually exposes.
    :param graph: pytorch-geometric graph (cloned, not modified)
    :param perc: balance tolerance as a fraction of the node count
    :return: the partitioned clone
    """
    graph_test = graph.clone()
    error_bal = math.ceil(graph_test.num_nodes * perc)
    cuts = []
    nodes = []
    # Run the episode
    for i in range(int(graph_test.num_nodes / 2 - 1 + error_bal)):
        policy, _ = ac(graph_test)
        policy = policy.view(-1).detach().numpy()
        # NOTE(review): stdlib random.choice accepts no `p` kwarg — this
        # presumably relies on numpy's random module being bound to `random`
        # at import time; verify, otherwise this raises TypeError.
        flip = random.choice(torch.arange(0, graph_test.num_nodes), p=policy)
        graph_test = change_vertex(graph_test, flip)
        # Inside the balance window: record cut size and the flipped node.
        if i >= int(graph_test.num_nodes / 2 - 1 - error_bal):
            cuts.append(cut(graph_test))
            nodes.append(flip)
    if len(cuts) > 0:
        # Indices (within the window) achieving the minimum cut.
        stops = np.argwhere(cuts == np.min(cuts))
        stops = stops.reshape((stops.shape[0],))
        if len(stops) == 1:
            # Undo every flip after the best stopping point.
            graph_test.x[nodes[stops[0] + 1:]] = torch.tensor([1., 0.])
        else:
            # Several minima: pick the one closest to the window's middle.
            diff = [np.abs(i - int(len(stops) / 2 - 1)) for i in stops]
            min_dist = np.argwhere(diff == np.min(diff))
            min_dist = min_dist.reshape((min_dist.shape[0],))
            stop = np.random.choice(stops[min_dist])
            graph_test.x[nodes[stop + 1:]] = torch.tensor([1., 0.])
    return graph_test
# Partitioning provided by SCOTCH
def scotch_partition(g):
    """Bipartition `g` in place by calling the SCOTCH library via ctypes.

    Relies on a module-level `libscotch` handle exposing
    ``WRAPPER_SCOTCH_graphPart``. The CSR adjacency arrays are passed as raw
    pointers; `part` receives a 0/1 label per node, which is then written
    one-hot into `g.x`.

    :param g: pytorch-geometric graph; `g.x` is overwritten
    :return: the same graph object, partitioned
    """
    gnx = to_networkx(g, to_undirected=True)
    # NOTE(review): nx.to_scipy_sparse_matrix was removed in networkx >= 3.0
    # (replaced by to_scipy_sparse_array) — confirm the pinned version.
    a = nx.to_scipy_sparse_matrix(gnx, format="csr", dtype=np.float32)
    n = g.num_nodes
    part = np.zeros(n, dtype=np.int32)  # output buffer filled by SCOTCH
    libscotch.WRAPPER_SCOTCH_graphPart(
        ctypes.c_int(n),
        ctypes.c_void_p(a.indptr.ctypes.data),
        ctypes.c_void_p(a.indices.ctypes.data),
        ctypes.c_void_p(part.ctypes.data)
    )
    g.x[np.where(part == 0)] = torch.tensor([1., 0.])
    g.x[np.where(part == 1)] = torch.tensor([0., 1.])
    return g
# Deep neural network for the DRL agent
class Model(torch.nn.Module):
def __init__(self, units):
super(Model, self).__init__()
self.units = units
self.common_layers = 1
self.critic_layers = 1
self.actor_layers = 1
self.activation = torch.tanh
self.conv_first = SAGEConv(5, self.units)
self.conv_common = nn.ModuleList(
[SAGEConv(self.units, self.units)
for i in range(self.common_layers)]
)
self.conv_actor = nn.ModuleList(
[SAGEConv(self.units,
| |
from django.template import Template
from django.template.context import Context
from django.test import RequestFactory
from cms.test_utils.testcases import CMSTestCase
from cms.test_utils.util.mock import AttributeObject
from cms.toolbar.toolbar import CMSToolbar
from cms.toolbar.utils import get_object_edit_url, get_object_preview_url
from menus.menu_pool import menu_pool
from djangocms_versioning.constants import (
ARCHIVED,
DRAFT,
PUBLISHED,
UNPUBLISHED,
)
from djangocms_navigation.cms_menus import CMSMenu
from djangocms_navigation.test_utils import factories
from .utils import add_toolbar_to_request, disable_versioning_for_navigation
class CMSMenuTestCase(CMSTestCase):
    """Tests for ``CMSMenu``: navigation-node generation and root selection.

    Fix in this revision: two test methods were missing the ``test_`` prefix,
    so unittest discovery silently never executed them. They are renamed
    below; if they turn out to fail, they were presumably parked on purpose
    and should be marked with ``@skip`` (with a reason) instead.
    """

    def setUp(self):
        """Build a request with a CMS toolbar and a menu renderer."""
        self.language = "en"
        self.request = RequestFactory().get("/")
        self.user = factories.UserFactory()
        self.request.user = self.user
        self.request.toolbar = CMSToolbar(self.request)
        self.renderer = menu_pool.get_renderer(self.request)
        self.menu = CMSMenu(self.renderer)

    def assertNavigationNodeEqual(self, node, **kwargs):
        """Helper method for asserting NavigationNode objects"""
        self.assertEqual(node.title, kwargs["title"])
        self.assertEqual(node.url, kwargs["url"])
        self.assertEqual(node.id, kwargs["id"])
        self.assertEqual(node.parent_id, kwargs["parent_id"])
        self.assertDictEqual(node.attr, kwargs["attr"])

    @disable_versioning_for_navigation()
    def test_get_nodes(self):
        """Nodes of two menu trees are returned depth-first per menu."""
        menu_contents = factories.MenuContentFactory.create_batch(2, language=self.language)
        child1 = factories.ChildMenuItemFactory(parent=menu_contents[0].root)
        child2 = factories.ChildMenuItemFactory(parent=menu_contents[1].root)
        grandchild = factories.ChildMenuItemFactory(parent=child1)
        nodes = self.menu.get_nodes(self.request)
        self.assertEqual(len(nodes), 5)
        self.assertNavigationNodeEqual(
            nodes[0],
            title="",
            url="",
            id=menu_contents[0].menu.root_id,
            parent_id=None,
            attr={},
        )
        self.assertNavigationNodeEqual(
            nodes[1],
            title="",
            url="",
            id=menu_contents[1].menu.root_id,
            parent_id=None,
            attr={},
        )
        self.assertNavigationNodeEqual(
            nodes[2],
            title=child1.title,
            url=child1.content.get_absolute_url(),
            id=child1.id,
            parent_id=menu_contents[0].menu.root_id,
            attr={"link_target": child1.link_target, "soft_root": False},
        )
        self.assertNavigationNodeEqual(
            nodes[3],
            title=grandchild.title,
            url=grandchild.content.get_absolute_url(),
            id=grandchild.id,
            parent_id=child1.id,
            attr={"link_target": grandchild.link_target, "soft_root": False},
        )
        self.assertNavigationNodeEqual(
            nodes[4],
            title=child2.title,
            url=child2.content.get_absolute_url(),
            id=child2.id,
            parent_id=menu_contents[1].menu.root_id,
            attr={"link_target": child2.link_target, "soft_root": False},
        )

    # Renamed from `get_nodes_for_versioning_enabled`: without the `test_`
    # prefix the unittest runner never discovered or ran this test.
    def test_get_nodes_for_versioning_enabled(self):
        """With versioning on, nodes of published menu versions are returned."""
        menu_versions = factories.MenuVersionFactory.create_batch(2, state=PUBLISHED)
        child1 = factories.ChildMenuItemFactory(parent=menu_versions[0].content.root)
        child2 = factories.ChildMenuItemFactory(parent=menu_versions[1].content.root)
        grandchild = factories.ChildMenuItemFactory(parent=child1)
        nodes = self.menu.get_nodes(self.request)
        self.assertEqual(len(nodes), 5)
        self.assertNavigationNodeEqual(
            nodes[0],
            title="",
            url="",
            id=menu_versions[0].content.menu.root_id,
            parent_id=None,
            attr={},
        )
        self.assertNavigationNodeEqual(
            nodes[1],
            title="",
            url="",
            id=menu_versions[1].content.menu.root_id,
            parent_id=None,
            attr={},
        )
        self.assertNavigationNodeEqual(
            nodes[2],
            title=child1.title,
            url=child1.content.get_absolute_url(),
            id=child1.id,
            parent_id=menu_versions[0].content.menu.root_id,
            attr={"link_target": child1.link_target, "soft_root": False},
        )
        self.assertNavigationNodeEqual(
            nodes[3],
            title=grandchild.title,
            url=grandchild.content.get_absolute_url(),
            id=grandchild.id,
            parent_id=child1.id,
            attr={"link_target": grandchild.link_target, "soft_root": False},
        )
        self.assertNavigationNodeEqual(
            nodes[4],
            title=child2.title,
            url=child2.content.get_absolute_url(),
            id=child2.id,
            parent_id=menu_versions[1].content.menu.root_id,
            attr={"link_target": child2.link_target, "soft_root": False},
        )

    # Renamed from `get_nodes_with_soft_root_for_versioning_enabled`: without
    # the `test_` prefix the unittest runner never discovered or ran this test.
    def test_get_nodes_with_soft_root_for_versioning_enabled(self):
        """
        Check get_nodes with a soft root node
        """
        menu_versions = factories.MenuVersionFactory.create_batch(2, state=PUBLISHED)
        child1 = factories.ChildMenuItemFactory(parent=menu_versions[0].content.root)
        child2 = factories.ChildMenuItemFactory(parent=menu_versions[1].content.root)
        grandchild = factories.ChildMenuItemFactory(parent=child1, soft_root=True)
        nodes = self.menu.get_nodes(self.request)
        self.assertEqual(len(nodes), 5)
        self.assertNavigationNodeEqual(
            nodes[0],
            title="",
            url="",
            id=menu_versions[0].content.menu.root_id,
            parent_id=None,
            attr={},
        )
        self.assertNavigationNodeEqual(
            nodes[1],
            title="",
            url="",
            id=menu_versions[1].content.menu.root_id,
            parent_id=None,
            attr={},
        )
        self.assertNavigationNodeEqual(
            nodes[2],
            title=child1.title,
            url=child1.content.get_absolute_url(),
            id=child1.id,
            parent_id=menu_versions[0].content.menu.root_id,
            attr={"link_target": child1.link_target, "soft_root": False},
        )
        self.assertNavigationNodeEqual(
            nodes[3],
            title=grandchild.title,
            url=grandchild.content.get_absolute_url(),
            id=grandchild.id,
            parent_id=child1.id,
            attr={"link_target": grandchild.link_target, "soft_root": True},
        )
        self.assertNavigationNodeEqual(
            nodes[4],
            title=child2.title,
            url=child2.content.get_absolute_url(),
            id=child2.id,
            parent_id=menu_versions[1].content.menu.root_id,
            attr={"link_target": child2.link_target, "soft_root": False},
        )

    def test_get_roots_with_draft_mode_not_active(self):
        """This test to check versioning would group all the versions
        of menu content and return latest of all distinct menu content
        when renderer draft_mode_active is false
        """
        menucontent_1_v1 = factories.MenuVersionFactory(content__language=self.language, state=ARCHIVED)
        factories.MenuVersionFactory(
            content__menu=menucontent_1_v1.content.menu, content__language=self.language, state=DRAFT
        )
        menucontent_2_v1 = factories.MenuVersionFactory(content__language=self.language, state=PUBLISHED)
        factories.MenuVersionFactory(state=UNPUBLISHED)
        # Assert to check draft_mode_active is false
        self.assertFalse(self.request.toolbar.edit_mode_active)
        roots = self.menu.get_roots(self.request)
        # Renderer should only render published menucontent
        self.assertEqual(roots.count(), 1)
        self.assertListEqual(list(roots), [menucontent_2_v1.content.root])

    def test_get_roots_with_toolbar_edit_mode_active(self):
        """This test to check versioning would group all the versions
        of menu content and return latest of all distinct menu content
        when renderer draft_mode_active is True
        """
        menucontent_1_v1 = factories.MenuVersionFactory(content__language=self.language, state=ARCHIVED)
        menucontent_1_v2 = factories.MenuVersionFactory(
            content__menu=menucontent_1_v1.content.menu, content__language=self.language, state=DRAFT
        )
        menucontent_2_v1 = factories.MenuVersionFactory(content__language=self.language, state=PUBLISHED)
        factories.MenuVersionFactory(content__language=self.language, state=UNPUBLISHED)
        # setting toolbar to edit_mode_active
        self.request.toolbar.edit_mode_active = True
        menu = CMSMenu(self.renderer)
        roots = menu.get_roots(self.request)
        self.assertEqual(roots.count(), 2)
        self.assertListEqual(
            list(roots), [menucontent_1_v2.content.root, menucontent_2_v1.content.root]
        )

    @disable_versioning_for_navigation()
    def test_get_roots_with_versioning_disabled(self):
        """This test will check while versioning disabled it should assert
        against all menu content created
        """
        menucontent_1 = factories.MenuContentFactory(language="en")
        menucontent_2 = factories.MenuContentFactory(language="en")
        menucontent_3 = factories.MenuContentFactory(language="en")
        child1 = factories.ChildMenuItemFactory(parent=menucontent_1.root)
        factories.ChildMenuItemFactory(parent=menucontent_2.root)
        factories.ChildMenuItemFactory(parent=child1)
        roots = self.menu.get_roots(self.request)
        self.assertEqual(roots.count(), 3)
        self.assertListEqual(
            list(roots), [menucontent_1.root, menucontent_2.root, menucontent_3.root]
        )

    def test_draft_menu_on_draft_page(self):
        """
        Ensure that a draft page renders a draft menu when it exists.
        """
        menu = factories.MenuFactory()
        menu_cont_published = factories.MenuContentWithVersionFactory(
            menu=menu,
            version__state=PUBLISHED,
            language=self.language
        )
        menu_cont_draft = factories.MenuContentWithVersionFactory(
            menu=menu,
            version__state=DRAFT,
            language=self.language
        )
        page = factories.PageFactory()
        pagecontent_published = factories.PageContentWithVersionFactory(
            page=page,
            language=self.language,
            version__created_by=self.get_superuser(),
            version__state=PUBLISHED,
        )
        pagecontent_draft = factories.PageContentWithVersionFactory(
            page=page,
            language=self.language,
            version__created_by=self.get_superuser(),
            version__state=DRAFT,
        )
        draft_child = factories.ChildMenuItemFactory(parent=menu_cont_draft.root, content=pagecontent_draft.page)
        published_child = factories.ChildMenuItemFactory(
            parent=menu_cont_published.root,
            content=pagecontent_published.page
        )
        self.assertEqual(pagecontent_draft.page, pagecontent_published.page)
        self.assertEqual(menu_cont_draft.menu, menu_cont_published.menu)
        # Node added in draft menu version is rendered in page draft view only
        draft_page_endpoint = get_object_edit_url(pagecontent_draft)
        with self.login_user_context(self.get_superuser()):
            response = self.client.get(draft_page_endpoint)
            self.assertEqual(response.status_code, 200)
            self.assertIn(draft_child.title, str(response.content))
            self.assertNotIn(published_child.title, str(response.content))

    def test_draft_menu_on_published_page(self):
        """
        Ensure that a published page only shows a published menu
        """
        menu = factories.MenuFactory()
        menu_cont_published = factories.MenuContentWithVersionFactory(
            menu=menu,
            version__state=PUBLISHED,
            language=self.language
        )
        menu_cont_draft = factories.MenuContentWithVersionFactory(
            menu=menu,
            version__state=DRAFT,
            language=self.language
        )
        page = factories.PageFactory()
        pagecontent_published = factories.PageContentWithVersionFactory(
            page=page,
            language=self.language,
            version__created_by=self.get_superuser(),
            version__state=PUBLISHED,
        )
        pagecontent_draft = factories.PageContentWithVersionFactory(
            page=page,
            language=self.language,
            version__created_by=self.get_superuser(),
            version__state=DRAFT,
        )
        draft_child = factories.ChildMenuItemFactory(parent=menu_cont_draft.root, content=pagecontent_draft.page)
        published_child = factories.ChildMenuItemFactory(
            parent=menu_cont_published.root,
            content=pagecontent_published.page
        )
        self.assertEqual(pagecontent_draft.page, pagecontent_published.page)
        self.assertEqual(menu_cont_draft.menu, menu_cont_published.menu)
        # Node added in draft menu version is not rendered in published view, only published menu nodes are rendered
        with self.login_user_context(self.get_superuser()):
            response = self.client.get(pagecontent_published.page.get_absolute_url())
            self.assertEqual(response.status_code, 200)
            self.assertIn(published_child.title, str(response.content))
            self.assertNotIn(draft_child.title, str(response.content))

    def test_published_menu_in_preview_mode(self):
        """
        Ensure that a page preview mode renders a published menu.
        """
        menu = factories.MenuFactory()
        menu_cont_published = factories.MenuContentWithVersionFactory(
            menu=menu,
            version__state=PUBLISHED,
            language=self.language
        )
        menu_cont_draft = factories.MenuContentWithVersionFactory(
            menu=menu,
            version__state=DRAFT,
            language=self.language
        )
        page = factories.PageFactory()
        pagecontent_published = factories.PageContentWithVersionFactory(
            page=page,
            language=self.language,
            version__created_by=self.get_superuser(),
            version__state=PUBLISHED,
        )
        pagecontent_draft = factories.PageContentWithVersionFactory(
            page=page,
            language=self.language,
            version__created_by=self.get_superuser(),
            version__state=DRAFT,
        )
        draft_child = factories.ChildMenuItemFactory(parent=menu_cont_draft.root, content=pagecontent_draft.page)
        published_child = factories.ChildMenuItemFactory(
            parent=menu_cont_published.root,
            content=pagecontent_published.page
        )
        self.assertEqual(pagecontent_draft.page, pagecontent_published.page)
        self.assertEqual(menu_cont_draft.menu, menu_cont_published.menu)
        preview_page_endpoint = get_object_preview_url(pagecontent_draft)
        with self.login_user_context(self.get_superuser()):
            response = self.client.get(preview_page_endpoint)
            # preview mode renders published menu with draft page in preview mode
            self.assertEqual(response.status_code, 200)
            self.assertIn(published_child.title, str(response.content))
            self.assertNotIn(draft_child.title, str(response.content))
class SoftrootTests(CMSTestCase):
"""
Tree in fixture :
root
aaa
aaa1
ccc
ddd
aaa2
bbb
In the fixture, all pages are visible, "published" and
NOT-"soft_root".
What is a soft root?
A soft root is a page that acts as the root for a menu navigation tree.
Typically, this will be a page that is the root of a significant new
section on your site.
When the soft root feature is enabled, the navigation menu for any page
will start at the nearest soft root, rather than at the real root of
the site’s page hierarchy.
This feature is useful when your site has deep page hierarchies (and
therefore multiple levels in its navigation trees). In such a case, you
usually don’t want to present site visitors with deep menus of nested
items.
"""
def setUp(self):
self.language = 'en'
self.client.force_login(self.get_superuser())
self.user = factories.UserFactory()
self.root_pagecontent = factories.PageContentWithVersionFactory(
language=self.language,
version__created_by=self.get_superuser(),
title="root",
menu_title="root",
page_title="root",
version__state=PUBLISHED,
)
self.aaa_pagecontent = factories.PageContentWithVersionFactory(
language=self.language,
version__created_by=self.get_superuser(),
title="aaa",
menu_title="aaa",
page_title="aaa",
version__state=PUBLISHED
)
self.ddd_pagecontent = factories.PageContentWithVersionFactory(
language=self.language,
version__created_by=self.get_superuser(),
title="ddd",
menu_title="ddd",
page_title="ddd",
version__state=PUBLISHED
)
self.aaa1_pagecontent = factories.PageContentWithVersionFactory(
language=self.language,
version__created_by=self.get_superuser(),
title="aaa1",
menu_title="aaa1",
page_title="aaa1",
version__state=PUBLISHED
)
self.aaa2_pagecontent = factories.PageContentWithVersionFactory(
language=self.language,
version__created_by=self.get_superuser(),
title="aaa2",
menu_title="aaa2",
page_title="aaa2",
version__state=PUBLISHED
)
self.bbb_pagecontent = factories.PageContentWithVersionFactory(
language=self.language,
version__created_by=self.get_superuser(),
title="bbb",
menu_title="bbb",
page_title="bbb",
version__state=PUBLISHED
)
self.ccc_pagecontent = factories.PageContentWithVersionFactory(
language=self.language,
version__created_by=self.get_superuser(),
title="ccc",
menu_title="ccc",
page_title="ccc",
version__state=PUBLISHED
)
def assertTreeQuality(self, a, b, *attrs):
"""
Checks that the node-lists a and b are the same for attrs.
This is recursive over the tree
"""
msg = '%r != %r with %r, %r' % (len(a), len(b), a, b)
self.assertEqual(len(a), len(b), msg)
for n1, n2 in zip(a, b):
for attr in attrs:
a1 = getattr(n1, attr)
a2 = getattr(n2, attr)
msg = '%r != %r with %r, %r (%s)' % (a1, a2, n1, n2, attr)
self.assertEqual(a1, a2, msg)
self.assertTreeQuality(n1.children, n2.children, *attrs)
def test_menu_without_softroots(self):
"""
Tree in fixture :
root
aaa
aaa1
ccc
ddd
aaa2
bbb
tag: show_menu 0 100 0 100
expected result 1:
0:root
1:aaa
2:aaa1
3:ccc
4:ddd
5:aaa2
6:bbb
"""
menu_content = factories.MenuContentWithVersionFactory(version__state=PUBLISHED, language=self.language)
root = factories.ChildMenuItemFactory(parent=menu_content.root, content=self.root_pagecontent.page)
aaa = factories.ChildMenuItemFactory(parent=root, content=self.aaa_pagecontent.page)
aaa1 = factories.ChildMenuItemFactory(parent=aaa, content=self.aaa1_pagecontent.page)
ccc = factories.ChildMenuItemFactory(parent=aaa1, content=self.ccc_pagecontent.page)
ddd = factories.ChildMenuItemFactory(parent=ccc, content=self.ddd_pagecontent.page)
aaa2 = factories.ChildMenuItemFactory(parent=aaa, content=self.aaa2_pagecontent.page)
bbb = factories.ChildMenuItemFactory(parent=root, content=self.bbb_pagecontent.page)
page = self.aaa_pagecontent.page
page_context = self.get_context(page.get_absolute_url(), page=page)
context = add_toolbar_to_request(page_context, self.aaa_pagecontent, view_mode="edit")
tpl = Template("{% load menu_tags %}{% show_menu 0 100 100 100 %}")
tpl.render(context)
hard_root = context['children']
mock_tree = [
AttributeObject(title=root.title, level=0, children=[
AttributeObject(title=aaa.title, level=1, children=[
AttributeObject(title=aaa1.title, level=2, children=[
AttributeObject(title=ccc.title, level=3, children=[
AttributeObject(title=ddd.title, level=4, children=[])
])
]),
AttributeObject(title=aaa2.title, level=2, children=[])
]),
AttributeObject(title=bbb.title, level=1, children=[])
])
]
self.assertTreeQuality(hard_root, mock_tree, 'level', 'title')
def test_menu_with_node_hidden(self):
"""
Checks the menu Navigation when a parent node of hidden node is rendered.
Tree in fixture :
root
aaa
aaa1( Hide_node = True)
ccc
ddd
aaa2
bbb
tag: show_menu 0 100 0 100
expected result when rendering node (aaa) parent of hidden node(aaa1):
0:root
1:aaa
5:aaa2
6:bbb
"""
menu_content = factories.MenuContentWithVersionFactory(version__state=PUBLISHED, language=self.language)
root = factories.ChildMenuItemFactory(parent=menu_content.root, content=self.root_pagecontent.page)
aaa = factories.ChildMenuItemFactory(parent=root, content=self.aaa_pagecontent.page)
aaa1 = factories.ChildMenuItemFactory(parent=aaa, content=self.aaa1_pagecontent.page, hide_node=True)
ccc = factories.ChildMenuItemFactory(parent=aaa1, content=self.ccc_pagecontent.page)
factories.ChildMenuItemFactory(parent=ccc, content=self.ddd_pagecontent.page)
aaa2 = factories.ChildMenuItemFactory(parent=aaa, content=self.aaa2_pagecontent.page)
bbb = factories.ChildMenuItemFactory(parent=root, content=self.bbb_pagecontent.page)
page = self.aaa_pagecontent.page
page_context = self.get_context(page.get_absolute_url(), page=page)
context = add_toolbar_to_request(page_context, self.aaa_pagecontent, view_mode="edit")
tpl = Template("{% load menu_tags %}{% show_menu 0 100 100 100 %}")
tpl.render(context)
| |
clear any recent bombs
for bomb in player.bombs:
if bomb.recently_exploded == True:
self.clear_bomb(bomb)
del bomb # don't think this works?
player.bombs = [] # will need to fix this for when players have multiple bombs
# get player's action
action = player_actions[player.number]
# get player's new position if action is taken
new_position = tuple([sum(x) for x in zip(self.ACTIONS_DICT[player_actions[player.number]],player.prev_position)])
if self.check_if_valid(action, player.prev_position, new_position):
player.position = new_position # valid move, so update player's position
if action == actions.BOMB:
if player.num_bombs > 0:
player.bombs.append(Bomb(player.position, self.get_tiles_in_range(player.position), player.number, self.MAX_TIMER)) # create a bomb instance
player.num_bombs -= 1 # one less bomb available for the player
self.board[player.position] = self.BOARD_DICT[self.ON_BOMB_LIST[player.number]] # place bomb on map
elif action == actions.NONE:
pass
else:
# move
self.board[player.position] = self.BOARD_DICT[self.PLAYER_LIST[player.number]]
if not self.board[player.prev_position] == self.BOARD_DICT[self.ON_BOMB_LIST[player.number]]:
# clear previous position only if it wasn't a just-placed bomb
self.board[player.prev_position] = self.BOARD_DICT['empty']
else:
# player has left behind a bomb
self.board[player.prev_position] = self.BOARD_DICT['bomb']
else:
# return some invalid move penalty
'''
print("<<<<An Unvalid move is played>>>>")
print("action:", action)
print("curr_pos", player.prev_position)
print("new_pos", new_position)
print("playerNumber:", player.number)
'''
player.score += self.get_reward('invalid_move')
# update timer of any bombs
for bomb in player.bombs:
bomb_list.append(bomb)
bomb.update_timer()
if bomb.timer == 0: # bomb explodes
# check if any player is in range of the bomb
is_game_over, player_hit = self.check_if_game_over(bomb.tiles_in_range)
if is_game_over:
self.done = True
self.players[player_hit].score += self.get_reward('lose')
num_blocks = self.explode_bomb(bomb) # update bomb objects and map
player.score += self.get_reward('destroy_blocks', num_blocks)
player.num_bombs += 1 # return bomb to the player
return self.board, self.done, self.players, bomb_list
def state_check_if_valid(self, action, curr_pos, new_pos, current_board):
##Armin
######################### add logic for 'none' after recent bomb
### merge
if (action == actions.NONE) or (action == actions.BOMB):
is_valid = True
elif (new_pos[0] < 0 or new_pos[1] < 0):
# trying to move through left or top boundary
is_valid = False
elif new_pos[0] >= self.rows or new_pos[1] >= self.cols:
# trying to move through right or bottom boundary
is_valid = False
elif (current_board[tuple(new_pos)] == self.BOARD_DICT['empty']) or (current_board[tuple(new_pos)] == self.BOARD_DICT['exploding_tile']):
is_valid = True
else:
is_valid = False
return is_valid
def check_if_valid(self, action, curr_pos, new_pos):
######################### add logic for 'none' after recent bomb
### merge
if (action == actions.NONE) or (action == actions.BOMB):
is_valid = True
elif (new_pos[0] < 0 or new_pos[1] < 0):
# trying to move through left or top boundary
is_valid = False
elif new_pos[0] >= self.rows or new_pos[1] >= self.cols:
# trying to move through right or bottom boundary
is_valid = False
elif (self.board[tuple(new_pos)] == self.BOARD_DICT['empty']) or (self.board[tuple(new_pos)] == self.BOARD_DICT['exploding_tile']):
is_valid = True
else:
is_valid = False
return is_valid
def check_if_game_over(self,tiles):
is_game_over = False # did a player get hit
player_hit = None # which player
for tile in tiles:
if (self.board[tile] == self.BOARD_DICT['player1']) or (self.board[tile] == self.BOARD_DICT['p1_on_bomb']):
is_game_over = True
player_hit = 0
if (self.board[tile] == self.BOARD_DICT['player2']) or (self.board[tile] == self.BOARD_DICT['p2_on_bomb']):
is_game_over = True
player_hit = 1
return is_game_over, player_hit
def state_check_if_game_over(self,tiles, current_board):
'''ARMIN state_based'''
is_game_over = False # did a player get hit
player_hit = None # which player
for tile in tiles:
if (current_board[tile] == self.BOARD_DICT['player1']) or (current_board[tile] == self.BOARD_DICT['p1_on_bomb']):
is_game_over = True
player_hit = 0
if (current_board[tile] == self.BOARD_DICT['player2']) or (current_board[tile] == self.BOARD_DICT['p2_on_bomb']):
is_game_over = True
player_hit = 1
return is_game_over, player_hit
###################################
###### BOMB HELPER FUNCTIONS ######
###################################
def get_tiles_in_range(self, position):
'''
get surrounding 4 tiles impacted near bomb
'''
tile_up = (position[0]-1,position[1])
tile_down = (position[0]+1,position[1])
tile_left = (position[0],position[1]-1)
tile_right = (position[0],position[1]+1)
long_range = False
if (long_range):
## making explosions go till the edge
## explosions stops if they hit a block
bomb_range = []
#going right
#xPosition = position[1]
#yPosition = position[0]
for xPosition in range(position[1], self.cols):
tempTile = (position[0], xPosition)
if (self.board[tempTile] == self.BOARD_DICT['hard_block']):
break
elif(self.board[tempTile] == self.BOARD_DICT['soft_block']):
bomb_range.append(tempTile)
break
else:
bomb_range.append(tempTile)
#going left
for xPosition in range(position[1], -1, -1):
tempTile = (position[0], xPosition)
if (self.board[tempTile] == self.BOARD_DICT['hard_block']):
break
elif(self.board[tempTile] == self.BOARD_DICT['soft_block']):
bomb_range.append(tempTile)
break
else:
bomb_range.append(tempTile)
#going down
for yPosition in range(position[0], self.rows):
tempTile = (yPosition, position[1])
if (self.board[tempTile] == self.BOARD_DICT['hard_block']):
break
elif(self.board[tempTile] == self.BOARD_DICT['soft_block']):
bomb_range.append(tempTile)
break
else:
bomb_range.append(tempTile)
#going up
for yPosition in range(position[0],-1 , -1):
tempTile = (yPosition, position[1])
if (self.board[tempTile] == self.BOARD_DICT['hard_block']):
break
elif(self.board[tempTile] == self.BOARD_DICT['soft_block']):
bomb_range.append(tempTile)
break
else:
bomb_range.append(tempTile)
else:
#Single block
bomb_range = [tile_up, tile_down, tile_left, tile_right, position]
tiles_to_remove = []
for tile in bomb_range:
if (tile[0] < 0 or tile[1] < 0 or tile[0] >= self.rows or tile[1] >= self.cols or
self.board[tile] == self.BOARD_DICT['hard_block']):
# exclude tiles that cross the border of the board
# or contain indestructible object
tiles_to_remove.append(tile)
for tile in tiles_to_remove:
bomb_range.remove(tile)
return bomb_range
def explode_bomb(self, bomb):
'''
reset bomb parameters and return number of blocks destroyed
'''
#### fix bomb behavior - inputs are tiles & position only, not bomb object
num_blocks = 0
# update tiles that have been impacted
for tile in bomb.tiles_in_range:
if self.board[tile] == self.BOARD_DICT['soft_block']:
num_blocks+=1
self.board[tile] = self.BOARD_DICT['exploding_tile']
self.board[bomb.position] = self.BOARD_DICT['exploding_bomb']
bomb.explode()
return num_blocks
def state_explode_bomb(self, bomb, current_board):
'''
Armin
reset bomb parameters and return number of blocks destroyed
'''
#### fix bomb behavior - inputs are tiles & position only, not bomb object
num_blocks = 0
# update tiles that have been impacted
for tile in bomb.tiles_in_range:
if current_board[tile] == self.BOARD_DICT['soft_block']:
num_blocks+=1
current_board[tile] = self.BOARD_DICT['exploding_tile']
current_board[bomb.position] = self.BOARD_DICT['exploding_bomb']
bomb.explode()
return num_blocks
def clear_bomb(self, bomb):
'''
clear map after recent bomb
'''
self.board[bomb.position] = self.BOARD_DICT['empty']
for tile in bomb.tiles_in_range:
if (self.board[tile] != self.BOARD_DICT['player1']) and (self.board[tile] != self.BOARD_DICT['player2']):
self.board[tile] = self.BOARD_DICT['empty']
bomb.clear()
def state_clear_bomb(self, bomb, current_board):
'''
Armin
clear map after recent bomb
'''
current_board[bomb.position] = self.BOARD_DICT['empty']
for tile in bomb.tiles_in_range:
if (current_board[tile] != self.BOARD_DICT['player1']) and (current_board[tile] != self.BOARD_DICT['player2']):
current_board[tile] = self.BOARD_DICT['empty']
bomb.clear()
def get_reward(self, item, num_blocks=0):
'''
reward system:
+1 destroy block
-10 invalid move
-1000 lose game
+100 win game
'''
if item == 'destroy_blocks':
return num_blocks * self.REWARDS_DICT[item]
else:
return self.REWARDS_DICT[item]
    def reset(self,num_players=2):
        '''
        Initialize and return a fresh starting board.

        Players are placed in the corners, hard blocks on every odd (row, col)
        cell, and ~30% of the remaining cells receive random soft blocks,
        keeping the two cells next to each spawn corner of players 1 and 2
        free so those players are never walled in.

        :param num_players: number of players to spawn (max 4)
        :return: (board ndarray, list of Player objects)
        '''
        ### move num_players to environment level
        # initialize board
        self.board = np.zeros((self.rows,self.cols)).astype(int)
        self.players = [] # stores player objects
        self.tiles_in_range = [] # stores position of surrounding spaces near a bomb --> should be owned by bomb?
        self.done = False # checks if game over
        self.turn_i = 0
        # number of soft blocks to place (~30% of the board)
        num_soft_blocks = int(math.floor(0.3*self.cols*self.rows))
        # initialize players in the four corners
        assert num_players <= 4
        starting_positions = [(0,0), (self.rows-1, self.cols-1), (0, self.cols-1), (self.rows-1, 0)]
        for i in range(num_players):
            self.players.append(Player(i, starting_positions[i], self.MAX_BOMBS))
        # update map with player locations
        player_list = ['player1', 'player2', 'player3', 'player4']
        for player in range(len(self.players)):
            self.board[self.players[player].position] = self.BOARD_DICT[player_list[player]]
        # place hard blocks on every odd (row, col) cell
        self.board[1::2,1::2] = self.BOARD_DICT['hard_block']
        ## place soft blocks (random)
        # flatten array (flat indices: index = row * cols + col)
        flat_board = np.reshape(self.board,-1)
        # get positions that can be filled (currently empty cells)
        open_pos = [i for i in range(len(flat_board)) if flat_board[i] == 0]
        # the two cells to the right of and the two below player1's corner
        # can't be filled; NOTE(review): .remove raises ValueError if a cell
        # isn't empty — assumes rows/cols are large enough. TODO confirm.
        open_pos.remove(1)
        open_pos.remove(2)
        open_pos.remove(self.cols)
        open_pos.remove(self.cols*2)
        # the two cells to the left of and the two above player2's corner can't be filled
        open_pos.remove(self.cols * self.rows - 2)
        open_pos.remove(self.cols * self.rows - 3)
        open_pos.remove(self.cols * self.rows - self.cols*2 - 1)
        open_pos.remove(self.cols * self.rows - self.cols - 1)
        # choose a random subset from open spots (stdlib random.sample)
        rand_pos = random.sample(open_pos,num_soft_blocks)
        flat_board[rand_pos] = self.BOARD_DICT['soft_block']
        self.board = np.reshape(flat_board,(self.rows,self.cols))
        return self.board, self.players
def render(self, graphical=True):
self.turn_i = self.turn_i +1
folder = "./temp_photo"
os.makedirs(folder,exist_ok=True)
# renders bomberman environment
print_ascii = True
if self.config_data["print_ascii"] == 'True':
if os.name == 'nt':
os.system('cls')
# for mac and linux(here, os.name is 'posix')
else:
os.system('clear')
print(self)
#if self.config_data["graphical"] == 'True':
# render with graphics
if graphical:
flattened_map = np.reshape(self.board,-1)
# get rows
map_rows=[]
map_rows.append(np.concatenate(([img_wall_top_mid for i in range(self.cols)]),axis=1))
mid_wall = np.concatenate(([img_wall for i in range((2))]),axis=1)
map_rows.append(np.concatenate((mid_wall,img_banner_wall,img_banner_wall,img_banner_wall,mid_wall),axis=1))
for row in range(self.rows):
map_rows.append(np.concatenate(([dict_img[i] for i in self.board[row]]),axis=1))
temp_cols = np.concatenate(([img_wall_bot for i in range(self.cols)]),axis=1)
map_rows.append(temp_cols)
full_map = np.concatenate(([i for i in map_rows]),axis=0)
lhs = np.concatenate(([img_wall_left for i in range(self.rows+1)]),axis=0)
lhs = np.concatenate((img_wall_top_left,lhs,img_wall_side_front_left),axis=0)
rhs = np.concatenate(([img_wall_right for i in range(self.rows+1)]),axis=0)
rhs = np.concatenate((img_wall_top_right,rhs,img_wall_side_front_right),axis=0)
| |
# × <EMAIL>
# <EMAIL>
# region: Time Functions and Variables --------------------------------------------------
import datetime as dt
import pytz
class processTime():
    """
    Timer utility: captures the construction moment in several
    pre-formatted variants and reports elapsed time on demand.

    (Fixes the previous docstring, which described a ``startTime``
    argument the constructor never accepted.)

    Attributes set at construction:
        startTimer   -- naive ``datetime`` of instantiation
        miamitime    -- US/Eastern wall time, "%Y-%m-%d %H:%M:%S" (requires pytz)
        NowTimeStamp -- integer POSIX timestamp of ``startTimer``
        todayfrmtd   -- "%Y-%m-%d"
        scrapedateid -- "%Y-%m-%d %H:%M:%S"
        monthDay     -- "%Y-%b-%d"
        hourMins     -- "%H:%M"
        intDay       -- day of month as int
        thisWeek     -- ISO week number as int
    """

    def __init__(self):
        self.startTimer = dt.datetime.now()
        # NOTE(review): depends on the third-party pytz package imported at module level
        self.miamitime = dt.datetime.now(pytz.timezone('US/Eastern')).strftime("%Y-%m-%d %H:%M:%S") #dt.datetime.utcnow()
        self.NowTimeStamp = int(dt.datetime.timestamp(self.startTimer))
        self.todayfrmtd = self.startTimer.strftime("%Y-%m-%d")
        self.scrapedateid = self.startTimer.strftime("%Y-%m-%d %H:%M:%S")
        self.monthDay = self.startTimer.strftime('%Y-%b-%d')
        self.hourMins = self.startTimer.strftime('%H:%M')
        self.intDay = int(self.startTimer.strftime('%d'))
        self.thisWeek = int(self.startTimer.isocalendar()[1])

    @staticmethod
    def _elapsed(since, fmt):
        """Seconds elapsed since *since*: 's' -> float seconds, else "Xm and Ys"."""
        total = round((dt.datetime.now() - since).total_seconds(), 2)
        if fmt == 's':
            return total
        return "{}m and {}s".format(int(total // 60), int(total % 60))

    def TotalRunningTime(self, format='s'):
        """
        Spits out total running time, default is 's' in seconds.\n
        Any other value in format will return mins and seconds.
        """
        return self._elapsed(self.startTimer, format)

    def SubProcRunTime(self, SubProcStartTime, format='s'):
        """Elapsed time since *SubProcStartTime* (same formats as TotalRunningTime)."""
        return self._elapsed(SubProcStartTime, format)
class DictTime:
    """Date helpers: Spanish day-of-week names, formatting, and ISO-week math."""

    def __init__(self):
        # ISO weekday number (1=Monday) -> Spanish three-letter day name
        self.dictTime = {1:'LUN',2:'MAR',3:'MIE',4:'JUE',5:'VIE',6:'SAB',7:'DOM',}

    def DoW(self, Date):
        """
        returns Name of Day of week (LUN-DOM)
        Date input as timestamp
        """
        isoWeekDay = Date.isoweekday()
        return self.dictTime.get(isoWeekDay)

    def Tstmp2Str(self, Date, Format="ymd"):
        """
        parses timestamp to given format, can be ymd, ymdhms, or hms
        Date input as timestamp; returns None on bad input or unknown format
        """
        inputedFormat = {"ymd": "%Y-%m-%d", 'ymdhms': "%Y-%m-%d %H:%M:%S", "hms": "%H:%M:%S"}.get(Format)
        try:
            return Date.strftime(inputedFormat)
        except (AttributeError, TypeError, ValueError):
            # bad Date object, or unknown Format (inputedFormat is None);
            # narrowed from a bare except that hid unrelated failures
            return None

    def WeekNum(self, Date):
        """
        returns isocalendar week number
        Date input as timestamp
        """
        return int(Date.isocalendar()[1])  # weeknum of dep lt

    def weeks_for_year(self, year):
        """
        Returns number of weeks for a given year
        """
        # Dec 28 is always in the last ISO week of its year
        last_week = dt.datetime(year, 12, 28)
        return last_week.isocalendar()[1]

    def RelativeWeek(self, date):
        """
        Defines weeks from LY as negative distance from current year.\n
        For example:\n
        \tlast week 2019 = 52. Relative to 2020 it would be week 0.
        \tweek 51 year 2019 = week -1 2020

        *date* is an iterable of "%Y-%m-%d %H:%M:%S" strings.
        """
        results_to_return = []
        dttoday = dt.datetime.today()
        thisyear = dttoday.isocalendar()[0]
        for ldate in date:
            lldate = dt.datetime.strptime(ldate, "%Y-%m-%d %H:%M:%S")
            llyear, llweek, *_ = lldate.isocalendar()
            # BUG FIX: weeks_for_year was called as a bare name (NameError at
            # runtime) — it is a method of this class.
            subtract_weeks = sum(self.weeks_for_year(x) for x in range(llyear, thisyear))
            results_to_return.append(llweek - subtract_weeks)
        return results_to_return
import re
def reTime(x):
    """
    wrapper for re.search that finds the first HH:MM time element in a
    string; returns "-" when no time is present.
    """
    # raw string for the regex (idiomatic; avoids escape-sequence surprises)
    foundem = re.search(r'\d{2}:\d{2}', x)
    return foundem.group(0) if foundem else "-"
def reStatus(x):
    """
    wrapper for re.search that finds the first alphabetic [A-Za-z]+ run
    (the status word) in a string; returns "-" when none is found.
    """
    # BUG FIX: the original class [A-Z|a-z] also matched a literal '|' —
    # inside [...] the pipe is not an alternation operator.
    foundem = re.search(r'[A-Za-z]+', x)
    return foundem.group(0) if foundem else "-"
def ToTimeIfPossible(Timestamp):
    """
    Formats a POSIX timestamp as '%Y-%m-%d %H:%M:%S' (local time) if
    possible, otherwise returns None.
    """
    # 'is None' rather than '== None': identity is the correct idiom
    if Timestamp is None:
        return None
    return dt.datetime.fromtimestamp(Timestamp).strftime('%Y-%m-%d %H:%M:%S')
# endregion
# ---------------------------------------------------------------------------------------
# region: Get info from GoogleSheets ----------------------------------------------------
# https://docs.google.com/spreadsheets/d/1y-3K9e47GWZTt3iPAH4tgRfey2IgZCp2a1-FD5adSQ8/edit?usp=sharing
# https://gspread.readthedocs.io/en/latest/oauth2.html
class GetGoogleSheetInfo:
    """
    Thin wrapper around a gspread spreadsheet connection.

    WhichProcess arg accepts one of "EUR" or "NAM"\n
    Output is list of dicts, each structured as {'Airline': x, 'Code': y, 'Type': z}
    MUST HAVE ALREADY SHARED SHEET WITH OWNER OF CREDENTIAL
    """
    from google.oauth2.service_account import Credentials
    import gspread
    from pandas import DataFrame as pdDataFrame
    # <EMAIL>

    def __init__(self, spreadhseet, credentials='credentials.json'):
        # authorize against Sheets + Drive and open the named spreadsheet
        self.scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']
        self.json_credentials = self.Credentials.from_service_account_file(credentials, scopes=self.scope)
        self.gc = self.gspread.authorize(self.json_credentials)
        self.gss = self.gc.open(spreadhseet)

    def getWS(self, WhichProcess, asDF=True):
        """Worksheet contents as a DataFrame (default) or a list of 2-column pairs."""
        rows = self.gss.worksheet(WhichProcess).get_all_values()
        if asDF:
            return self.pdDataFrame(rows[1:], columns=rows[0])
        # header row dropped; assumes exactly two columns per row — confirm sheet layout
        return [[first, second] for first, second in rows[1:]]

    def Airlines(self, WhichProcess, asDF=True):
        """Airline roster as a DataFrame or as a list of Airline/Code/Type dicts."""
        wks = self.gss.worksheet(WhichProcess)
        rows = wks.get_all_values()
        if asDF:
            CompetenciaList = self.pdDataFrame(rows[1:], columns=rows[0])
        else:
            # (second fetch mirrors the original call pattern)
            CompetenciaList = [{'Airline': a, 'Code': c, 'Type': t} for a, c, t in wks.get_all_values()[1:]]
        # CompetenciaList = {y: {'Airline': x, 'Tipo': z} for x,y,z in wks.get_all_values()}
        return CompetenciaList

    def Mails(self, Testing):
        """All rows of the "Correos" sheet, or a stub address when Testing."""
        wks = self.gss.worksheet("Correos")
        if Testing:
            return ["<EMAIL>"]
        return wks.get_all_values()

    def Hauls(self):
        """Two-column "Hauls" sheet folded into a dict (first col -> second col)."""
        pairs = self.gss.worksheet("Hauls").get_all_values()
        return {key: val for key, val in pairs}

    def Geo(self):
        """"Geographic" sheet as a DataFrame with its first row as header."""
        rows = self.gss.worksheet("Geographic").get_all_values()
        return self.pdDataFrame(rows[1:], columns=rows[0])

    def JoinWachas(self, guachas, AllAirlines):
        """Left-join airline NAME onto *guachas* by CODE; rows returned as lists."""
        joined = guachas.merge(AllAirlines[["NAME", "CODE"]], how="left", on="CODE")
        return joined[["NAME", "CODE", "TYPE", "ARCFT", "REGNUM"]].values.tolist()
# endregion
# ---------------------------------------------------------------------------------------
# region: Post to Slack -----------------------------------------------------------------
from slacker import Slacker
from slack_progress import SlackProgress # https://github.com/bcicen/slack-progress
class SlackMsg:
    """Posts messages and progress bars to a Slack channel via Slacker."""
    def __init__(self, cnl="ruteo-diariofr24"):
        # token lives in the local (uncommitted) keys module
        from keys import slack_token
        # QUESTION ... who's api token is this?
        self.api_token = slack_token
        self.channel = cnl
        self.username = "HAL 9000"
        self.slack = Slacker(self.api_token)
    def Post(self, text):
        """Send *text* to the configured channel and echo it to the console."""
        self.slack.chat.post_message(channel=self.channel, text=text, username=self.username)
        print(text)
    def Hello(self, monthDay, hourMins):
        """Post the standard process-start banner with date and time."""
        smsg = "{}\n\n{}: Corriendo proceso Ruteo diario a las {}.\n...".format("- "*20, monthDay, hourMins)
        self.Post(text=smsg)
    def SlackProgressBar(self, total):
        """Create a Slack progress bar expecting *total* steps."""
        self.intotal = total
        self.sp = SlackProgress(self.api_token, self.channel)
        self.pbar = self.sp.new(total=self.intotal)
    def SlackProgressBarUpdater(self, posupdate):
        """Advance the progress bar to absolute position *posupdate*."""
        self.pbar.pos = posupdate
# endregion
# ---------------------------------------------------------------------------------------
# region: GetInfo from fr24 -------------------------------------------------------------
# define function that gets RegNum info for each airline
class DefaultHeaders:
    """Default HTTP request headers; the User-Agent alternates by day of month."""
    def __init__(self):
        # two candidate UAs; parity of today's day-of-month picks one
        agents = [
            "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:52.0) Gecko/20100101 Firefox/52.0",
            "Mozilla/5.0 (Windows; U; Windows NT 5.1; cs; rv:1.9.0.8) Gecko/2009032609 Firefox/3.0.8"
        ]
        self.UserAgent = agents[processTime().intDay % 2]
        self.defaultheaders = {"User-Agent": self.UserAgent}
import requests
from bs4 import BeautifulSoup
# Disable warnings
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
from pandas import DataFrame as pdDataFrame
class fr24ws:
"""
modules: GetRegNums(), LoopOverAlnCodes(), Flights2DataFrame(), Geo2DataFrame()
"""
def __init__(self, Testing=False):
    """Wire up headers, timers and the Slack channel (test channel when Testing)."""
    self.defaultHeaders = DefaultHeaders().defaultheaders
    self.subprcstime = processTime()
    self.pageTimeStamp = self.subprcstime.NowTimeStamp
    if Testing:
        self.fr24wsSlackMsg = SlackMsg(cnl="code-tests")
    else:
        self.fr24wsSlackMsg = SlackMsg()
def GetRegNums(self, airlineinfo):
    """
    airlineinfo must be a dict as such: {'Airline': 'ABX', 'Code': 'gb-abx', 'Type': 'cao'}
    'Code' key's value MUST be a fr24 established code for desired airline (generally formatted as iata-icao: xx-xxx)

    Scrapes the airline's fleet page and returns a list of
    [airline, code, type, aircraft_model, regnum] rows (empty when the
    page has no fleet listing).
    """
    headers = self.defaultHeaders
    arlrgnloopstart = dt.datetime.now()
    airline = airlineinfo['Airline']
    aircode = airlineinfo['Code']
    airtype = airlineinfo['Type']
    AirlineRegNumList = []
    url0 = 'https://www.flightradar24.com/data/airlines/'+aircode+'/fleet'
    req1 = requests.get(url0, headers=headers, verify=False)
    soup = BeautifulSoup(req1.content, 'html.parser')
    dltag = soup.find('dl', attrs={'id': 'list-aircraft'})
    # BUG FIX: a bare "except: pass" previously swallowed every failure;
    # only the expected cases are handled now — no fleet listing on the
    # page (dltag is None -> AttributeError) or a malformed table
    # (IndexError when tbodies don't line up with the model headers).
    try:
        AircraftModelList = dltag.find_all('dt', attrs={'class': None})
        TableBodyList = dltag.find_all('tbody')
        for i in range(len(AircraftModelList)):
            aircraftmodel = AircraftModelList[i].find('div').text
            tbodyList = [x.text.strip() for x in TableBodyList[i].find_all('a', attrs={'class': 'regLinks'})]
            for regnum in tbodyList:
                AirlineRegNumList.append([airline, aircode, airtype, aircraftmodel, regnum])
    except (AttributeError, IndexError):
        # no usable fleet data for this airline; return what was collected
        pass
    smsg = f"Finished getting {airline} ({aircode}) in {self.subprcstime.SubProcRunTime(arlrgnloopstart)}"
    print(smsg)
    return AirlineRegNumList
# Get RegNums for each Airline by code
def LoopOverAlnCodes(self, Airlinefr24CodeDict):
    """
    Get RegNums for each Airline by code (generally iata-icao)
    """
    self.fr24wsSlackMsg.Post(text="Getting Registration Numbers for required Airlines...")
    AllRegNumsList = []
    # accumulate every airline's registration rows into one flat list
    for AirlineDict in Airlinefr24CodeDict:
        AllRegNumsList.extend(self.GetRegNums(AirlineDict))
    self.fr24wsSlackMsg.Post(text="Finished scrapping Aircraft Rgn")
    return AllRegNumsList
# Get flight info per RegNum
def GetFlightInfo(self, AircraftRegNums):
"""
Get flight info per RegNum(s), AircraftRegNums arg must be a list.
returns datatable, geoListOfLists
"""
from keys import fr_token
headers=self.defaultHeaders
# headers=defaultHeaders
spageTimeStamp=self.pageTimeStamp
# spageTimeStamp=pageTimeStamp
LoopTimeStart=dt.datetime.now()
datatable = []
geoListOfLists = []
ptoken = fr_token
loopLen = len(AircraftRegNums)
# innerLoopSlackMsg=SlackMsg()
smsg="Looping over Registration Numbers..."
self.fr24wsSlackMsg.Post(text=smsg)
self.fr24wsSlackMsg.SlackProgressBar(total=loopLen)
num=0
for RegNumInfo in AircraftRegNums:
iRegNumTimeStart=dt.datetime.now()
num+=1
# numTimer = datetime.now()
# RegNumInfo=AircraftRegNums[0]
aln, cde, typ, act, rgn = RegNumInfo
url1 = 'https://www.flightradar24.com/data/aircraft/' + rgn
url2 = f'https://api.flightradar24.com/common/v1/flight/list.json?query={rgn}&fetchBy=reg&page=1&pk=&limit=100&token={ptoken}×tamp={str(spageTimeStamp)}'
s = requests.session()
r = s.get(url1, headers=headers, verify=False)
cookie = r.cookies.get_dict()
headersFull = {"User-Agent": headers["User-Agent"], "Content-Type": "application/json", "x-fetch": "true"}
response = None
while response is None:
try:
response = s.get(url2, cookies=cookie, headers=headersFull, verify=False).json()
except:
pass
try:
data = response['result']['response']['data']
except KeyError:
data = None
if data != None:
# row=data[0]
for row in data:
# initialize variables on each loop (clean them)
# get data from json
callsn = row['identification']['callsign'] # callsign
fltnum = row['identification']['number']['default'] # flight number
statusRaw = row['status']['text'] # status
# clean and separate status data
status = reStatus(statusRaw)
statusTime = reTime(statusRaw)
# utc of departure
deptime1 = row['time']['real']['departure']
deplocaltime = ToTimeIfPossible(deptime1) # None if deptime1 == None else dt.datetime.fromtimestamp(deptime1).strftime('%Y-%m-%d %H:%M:%S')
# utc of event
arrtime1 = row['time']['real']['arrival']
arrlocaltime = ToTimeIfPossible(arrtime1) # None if arrtime1 == None else dt.datetime.fromtimestamp(arrtime1).strftime('%Y-%m-%d %H:%M:%S')
# Origin info
try:
orginfo = row['airport']['origin']
orgato = orginfo['code']['iata']
orgctrycode = orginfo['position']['country']['code']
orgoffset = orginfo['timezone']['offset']
deptimeUTC = ToTimeIfPossible(deptime1 - orgoffset) # dt.datetime.fromtimestamp(deptime1 - orgoffset).strftime('%Y-%m-%d %H:%M:%S')
except TypeError:
orgato = None
orgctrycode = None
orgoffset = None
deptimeUTC = None
# Destino info
try:
desinfo = row['airport']['destination']
desato = desinfo['code']['iata']
desctrycode = desinfo['position']['country']['code']
desoffset = desinfo['timezone']['offset']
arrtimeUTC = ToTimeIfPossible(arrtime1 - desoffset) # None if arrtime1 == None else dt.datetime.fromtimestamp(arrtime1 - desoffset).strftime('%Y-%m-%d %H:%M:%S') # Accepts None for Estimated arrival cases
except TypeError:
desato = None
desctrycode = None
desoffset = None
arrtimeUTC = None
# list with info to append to "datatable" (list of lists, then change to a DF)
flightLoopData = [aln, typ, act, rgn, callsn, fltnum, orgato, desato, deptimeUTC, arrtimeUTC, deplocaltime, arrlocaltime, status, statusTime, ]
datatable.append(flightLoopData)
geoListOfLists.extend((orginfo, desinfo))
# info to print to console, just to know where the process is at
# rgnTime = processTime(numTimer)
# totalTime = processTime(startTimer, 'f')
print(f"{num}/{loopLen} in {self.subprcstime.SubProcRunTime(iRegNumTimeStart)} | |
# -*- coding: utf-8 -*-
"""XLMRoberta for sentiment analysis (Spanish).ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1MnNGwewJl7rDg1gx6WhQ_pub--sTJ0Dy
"""
# This sentiment analysis program is modeled off the BERT tutorial by <NAME> and <NAME> found here:
# https://colab.research.google.com/drive/1pTuQhug6Dhl9XalKB0zUGf4FIdYFlpcX
# Citation:
# <NAME> and <NAME>. (2019, July 22). BERT Fine-Tuning Tutorial with PyTorch. Retrieved from http://www.mccormickml.com
# For questions regarding fair usage of this notebook, please contact the secondary author at [EMAIL REMOVED] and the source coauthor at <EMAIL>
import torch
# Select the compute device: prefer the GPU when CUDA is available,
# otherwise fall back to the CPU.
if torch.cuda.is_available():
    device = torch.device("cuda")
    print('There are %d GPU(s) available.' % torch.cuda.device_count())
    print('We will use the GPU:', torch.cuda.get_device_name(0))
else:
    print('No GPU available, using the CPU instead.')
    device = torch.device("cpu")
import numpy as np # data structure library
import math # handy for operations we'll be using
import time # used in running a timer during our training phase
import re # used for searching text and identifying patterns therein
from bs4 import BeautifulSoup # used in reading data from text files and cleaning it up
import random
import pandas as pd
# The library we'll be using to import and build our model (XLMRobertaForSequenceClassification)
# BUG FIX: "!pip install transformers" is IPython/Colab shell magic and is a
# SyntaxError in a plain .py file.  Install the dependency beforehand:
#     pip install transformers
# Reads in our training tweets (hydrated from tweet IDs found here: https://www.clarin.si/repository/xmlui/handle/11356/1054)
train_data = pd.read_json("Data/spanish_tweets_twarc_NEW.json")
train_data.to_csv(path_or_buf='Data/spanish_tweets_twarc_NEW.csv') # Converts the above file to a CSV
features = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", "30", "31"] # The hydrated tweet IDs come loaded with a ton of information, most of it useless for our purposes—thus the purely numerical column names.
train_data = pd.read_csv("Data/spanish_tweets_twarc_NEW.csv", index_col=False, header=None, names=features, engine='python', encoding='utf-8') # Note the encoding: utf-8 works well with European languages like Spanish
# Loads in a file containing the hand-labeled tweet sentiments and IDs (Note: the first imported file contains tweets and their IDs; this one contains tweet sentiments and their IDs. Our goal now is to match tweets with their sentiments).
features = ["Tweet ID", "sentiment", "_"]
sentiment_and_ids = pd.read_csv("Data/Spanish_Twitter_sentiment.csv", index_col=False, header=None, names=features, engine='python', encoding='utf-8')
# features = ["1", "sentiment", "3"]
# sentiments = pd.read_csv("Data/Spanish_Twitter_sentiment.csv", header=None, names=features, engine='python', encoding='utf-8')
# NOTE(review): column '5' is treated as tweet text and '3' as tweet ID
# throughout the script — confirm against the CSV layout.
sentiments = sentiment_and_ids["sentiment"][1:] # Gives us a dataframe with just the sentiments
hydrated_tweets = train_data['5'][1:] # Gives us a dataframe with just the tweets
print(len(sentiment_and_ids)) # Should be 275589
tweet_ids = sentiment_and_ids['Tweet ID'][1:] # Gives us a dataframe with just the IDs
hydrated_tweet_ids = train_data['3'][1:] # Gives us a dataframe with just the IDs of the tweets that were successfully hydrated
# materialize as plain lists for the index-based cleaning below
tweet_ids = list(tweet_ids)
hydrated_tweet_ids = list(hydrated_tweet_ids)
# Unfortunately, all of the data we imported is quite messy and needs to be thoroughly cleaned. This nasty-looking function
# goes through and collects the indices of duplicates in the data files so we can remove them. And no, not all of the duplicates were
# sequential; I tried writing a much nicer linear-time function, but it left a few stragglers :( Thus this gross piece of work.
from collections import Counter # Used to construct multiset to find duplicates
def get_duplicate_indices(glist):
    """
    Return the indices of every duplicate occurrence in *glist*.

    For each value that appears more than once, its first (minimum-index)
    occurrence is kept and the indices of all later occurrences are
    returned, grouped by value in first-appearance order — exactly the
    output of the original implementation.

    PERF FIX: the original rescanned the whole list once per duplicated
    value (quadratic, hence its progress prints); a single pass that
    records each value's positions gives the same result in O(n).
    """
    positions = {}  # value -> ascending list of its indices
    for idx, item in enumerate(glist):
        positions.setdefault(item, []).append(idx)
    duplicate_indices = []
    for idxs in positions.values():
        if len(idxs) > 1:
            # drop the first occurrence; the rest are duplicates
            duplicate_indices.extend(idxs[1:])
    return duplicate_indices
new_duplicate_ids = get_duplicate_indices(tweet_ids) # Finds duplicates in tweet IDs
duplicate_indices = new_duplicate_ids
duplicate_indices_3 = get_duplicate_indices(hydrated_tweet_ids) # Finds duplicates in hydrated tweet IDs
# dict.fromkeys drops repeated indices while preserving their order
duplicate_indices_2 = list(dict.fromkeys(duplicate_indices_3))
# Creates a list of unique tweet IDs
new_tweet_ids = []
for i in range(len(tweet_ids)):
    if i not in duplicate_indices:
        new_tweet_ids.append(tweet_ids[i])
# Creates lists for the training labels and hydrated tweet IDs containing unique data points
new_sentiment_list = []
new_hydrated_tweet_ids= []
# NOTE(review): `sentiments` is a pandas Series sliced from label 1, so
# sentiments[j] is label-based indexing with j running 1..len — confirm.
for j in range(1, len(sentiments)+1):
    if j-1 not in duplicate_indices:
        new_sentiment_list.append(sentiments[j])
for k in range(len(hydrated_tweet_ids)):
    if k not in duplicate_indices_2:
        new_hydrated_tweet_ids.append(hydrated_tweet_ids[k])
tweet_ids = new_tweet_ids
hydrated_tweet_ids = new_hydrated_tweet_ids
sentiments = new_sentiment_list
# Another problem with our data is that many of the tweets whose IDs were contained in the original dataset failed to be hydrated (using two different hydration programs, Hydrator and twarc).
# This cell collects up all the IDs of tweets that failed to be hydrated
missing_indices = []
for i in range(len(tweet_ids)):
    print(i)
    if tweet_ids[i] not in hydrated_tweet_ids:
        missing_indices.append(i)
# Gives us only the sentiments of tweets that were hydrated
more_new_sentiments = []
for i in range(len(sentiments)):
    if i not in missing_indices:
        more_new_sentiments.append(sentiments[i])
# One last problem with the data was that there was some random noise in the list of hydrated tweet IDs.
# If something in the ID dataset isn't the proper length, throw it out.
# BUG FIX: the original removed items from hydrated_tweet_ids while
# iterating over it, which skips the element after each removal — that is
# why a second, "purposeful duplicate" pass was needed (and even two
# passes can miss runs of adjacent malformed IDs).  Building a filtered
# list in one pass removes every malformed ID reliably.
hydrated_tweet_ids = [tweet_id for tweet_id in hydrated_tweet_ids if len(str(tweet_id)) == 18] # 18 = length of each ID
print(len(hydrated_tweet_ids)) # Should be 50907
sentiments = more_new_sentiments
tweet_ids = hydrated_tweet_ids
print(len(sentiments)==len(tweet_ids)) # Should print True.
# You can use this to check that the tweet IDs and sentiments are finally aligned
print(tweet_ids[len(tweet_ids)-10:len(tweet_ids)])
print(sentiments[len(tweet_ids)-10:len(tweet_ids)])
# Creates a dictionary with IDs as keys and tweet TEXTS (column '5') as
# values — despite its name, id_sentiment_dict does not hold sentiments.
id_sentiment_dict = {}
for tweet_id in tweet_ids:
    # look up each ID's row position, then fetch the tweet text from it
    index = list(train_data['3']).index(tweet_id)
    id_sentiment_dict[tweet_id] = train_data['5'][index]
train_tweets = [id_sentiment_dict[tweet_id] for tweet_id in id_sentiment_dict] # Gives us just the tweets we want, after throwing out all the unusable data
tweets = train_tweets
def clean_tweet(tweet):
    """Normalize a raw tweet: strip markup, mentions, URLs and odd characters."""
    text = BeautifulSoup(tweet, "lxml").get_text()  # xml-formatted text -> plain text
    # each (pattern, replacement) is applied in order, matching the
    # original sequence of re.sub calls exactly
    substitutions = (
        (r"@[A-Za-z0-9]+", " "),                  # user references ("@username")
        (r"https?://[A-Za-z0-9./]+", " "),        # URLs
        (r"[^A-Za-z.!?áéíóúüñ¿ÁÉÍÓÚÜÑ']", " "),   # non-standard characters
        (r" +", " "),                             # collapse excess whitespace
    )
    for pattern, replacement in substitutions:
        text = re.sub(pattern, replacement, text)
    return text
clean_train_data = [clean_tweet(tweets[i]) for i in range(len(tweets))] # Gives us our cleaned tweets
tweets = clean_train_data
training_data_labels = list(sentiments) # fetches the sentiment values from our training dataset
# map string labels onto ints: 'Positive' -> 1, 'Negative' -> 0
# ('Neutral' stays a string here and is filtered out just below)
for i in range(len(training_data_labels)):
    if training_data_labels[i] == 'Positive':
        training_data_labels[i] = 1
    if training_data_labels[i] == "Negative":
        training_data_labels[i] = 0
# Throws out all the neutral sentiments in our dataset
neutral_indices = []
new_training_data_labels = []
for i in range(len(training_data_labels)):
    if training_data_labels[i] == 'Neutral':
        neutral_indices.append(i)
for j in range(len(training_data_labels)):
    if j not in neutral_indices:
        new_training_data_labels.append(training_data_labels[j])
training_data_labels = new_training_data_labels
print(len(training_data_labels)) # Should be 29334
print(set(training_data_labels)) # Should be {0, 1}
# Throws out all the neutral-sentiments tweets in our dataset
new_tweets = []
for i in range(len(tweets)):
    if i not in neutral_indices:
        new_tweets.append(tweets[i])
tweets = new_tweets
# BUG FIX: the original line had an extra closing parenthesis
# ("...labels)))"), which is a SyntaxError in plain Python.
print(len(tweets) == len(training_data_labels)) # Should print True
sentiments = training_data_labels
# Gives us the tally of positive- and negative-sentiment tweets in our dataset. You can even them out if you'd like, but
# we actually got decent results without doing so.
# BUG FIX: count1 was initialized to 1 instead of 0, overstating the
# positive tally by one (which would also skew the optional class
# balancing below that relies on count1 - count0).
count0 = 0
count1 = 0
for label in training_data_labels:
    if label == 0:
        count0 += 1
    if label == 1:
        count1 += 1
print(count0, count1)
# # Use the following commented-out cells to even out the number of positive- and negative-sentiment tweets
# extra_pos_indices = []
# i = 0
# for j in range(len(training_data_labels)):
# if training_data_labels[j] == 1 and i < (count1 - count0):
# extra_pos_indices.append(j)
# i += 1
# elif i >= (count1 - count0):
# break
# print(len(extra_pos_indices))
# new_training_data_labels = []
# for i in range(len(training_data_labels)):
# if i not in extra_pos_indices:
# new_training_data_labels.append(training_data_labels[i])
# training_data_labels = new_training_data_labels
# new_tweets = []
# for i in range(len(tweets)):
# if i not in extra_pos_indices:
# new_tweets.append(tweets[i])
# tweets = new_tweets
# count0 = 0
# count1 = 1
# for label in training_data_labels:
# if label == 0:
# count0 += 1
# if label == 1:
# count1 += 1
# print(count0, count1) # Should be the exact same
# We're going to use the XLMRobertaTokenizer. Documentation: https://huggingface.co/transformers/model_doc/xlmroberta.html
from transformers import XLMRobertaTokenizer
# Loads the pretrained multilingual tokenizer (downloads weights on first use)
tokenizer = XLMRobertaTokenizer.from_pretrained('xlm-roberta-large')
# CODE AND COMMENTS IN THIS CELL ARE ATTRIBUTABLE TO <NAME> AND <NAME>, NOT THE SECONDARY AUTHOR ([NAME REMOVED])
max_len = 0
# For every sentence...
for tweet in | |
# -*- coding:utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
from odoo.exceptions import UserError, ValidationError
from odoo.tools.safe_eval import safe_eval
from datetime import datetime, date
class HrPayrollStructure(models.Model):
_inherit = 'hr.payroll.structure'
_description = 'Salary Structure'
@api.model
def _get_default_rule_ids(self):
if self.env.company.country_code == 'CM':
# CM
return [
(0, 0, {
'name': 'Basic Salary',
'sequence': 1,
'code': 'BASIC',
'category_id': self.env.ref('hr_payroll.BASIC').id,
'condition_select': 'none',
'amount_select': 'code',
'amount_python_compute': 'result = payslip.paid_amount',
}),
(0, 0, {
'name': "Prime d'ancienneté",
'sequence': 2,
'code': 'PA',
'category_id': self.env.ref('hr_payroll.ALW').id,
'condition_select': 'none',
'amount_select': 'percentage',
'amount_percentage_base': 'BASIC',
'quantity': '1.0',
'amount_percentage': '12',
}),
(0, 0, {
'name': "Indemnité de representation",
'sequence': 2,
'code': 'IR',
'category_id': self.env.ref('hr_payroll.ALW').id,
'condition_select': 'none',
'amount_select': 'fix',
'quantity': '1.0',
'amount_fix': '12500',
}),
(0, 0, {
'name': "Indemnité de logement",
'sequence': 2,
'code': 'IL',
'category_id': self.env.ref('hr_payroll.ALW').id,
'condition_select': 'none',
'amount_select': 'percentage',
'amount_percentage_base': 'BASIC',
'quantity': '1.0',
'amount_percentage': '40',
}),
(0, 0, {
'name': "Prime de Risque",
'sequence': 2,
'code': 'PR',
'category_id': self.env.ref('hr_payroll.ALW').id,
'condition_select': 'none',
'amount_select': 'fix',
'quantity': '1.0',
'amount_fix': '15000',
}),
(0, 0, {
'name': "Avantage En Nature Vehicule",
'sequence': 2,
'code': 'ANV',
'category_id': self.env.ref('hr_payroll.ALW').id,
'condition_select': 'none',
'amount_select': 'fix',
'quantity': '1.0',
'amount_fix': '100000',
}),
(0, 0, {
'name': "<NAME>",
'sequence': 5,
'code': 'ALW',
'category_id': self.env.ref('hr_payroll.ALW').id,
'condition_select': 'none',
'amount_select': 'code',
'amount_python_compute': 'result = BASIC + PA + IR + IL + PR + ANV',
}),
(0, 0, {
'name': "Taxe sur Developpement Local S",
'sequence': 6,
'code': 'TDLS',
'category_id': self.env.ref('hr_payroll.DED').id,
'condition_select': 'none',
'amount_select': 'fix',
'quantity': '1.0',
'amount_fix': '2520',
}),
(0, 0, {
'name': "Credit Foncier Salarial S",
'sequence': 6,
'code': 'CFSS',
'category_id': self.env.ref('hr_payroll.DED').id,
'condition_select': 'none',
'amount_select': 'percentage',
'amount_percentage_base': 'BASIC',
'quantity': '1.0',
'amount_percentage': '1',
}),
(0, 0, {
'name': "Retenu CNPS S",
'sequence': 6,
'code': 'CNPSS',
'category_id': self.env.ref('hr_payroll.DED').id,
'condition_select': 'none',
'amount_select': 'percentage',
'amount_percentage_base': 'BASIC',
'quantity': '1.0',
'amount_percentage': '4.2',
}),
(0, 0, {
'name': "Redevance Audio-Visuel S",
'sequence': 6,
'code': 'RAVS',
'category_id': self.env.ref('hr_payroll.DED').id,
'condition_select': 'none',
'amount_select': 'fix',
'quantity': '1.0',
'amount_fix': '8450',
}),
(0, 0, {
'name': "Retenue IRRP S",
'sequence': 6,
'code': 'RIS',
'category_id': self.env.ref('hr_payroll.DED').id,
'condition_select': 'none',
'amount_select': 'fix',
'quantity': '1.0',
'amount_fix': '64337',
}),
(0, 0, {
'name': "<NAME>",
'sequence': 6,
'code': 'CACS',
'category_id': self.env.ref('hr_payroll.DED').id,
'condition_select': 'none',
'amount_select': 'percentage',
'amount_percentage_base': 'BASIC',
'quantity': '1.0',
'amount_percentage': '10',
}),
(0, 0, {
'name': "Accident de Travail S",
'sequence': 6,
'code': 'ATS',
'category_id': self.env.ref('hr_payroll.DED').id,
'condition_select': 'none',
'amount_select': 'percentage',
'amount_percentage_base': 'BASIC',
'quantity': '1.0',
'amount_percentage': '0',
}),
(0, 0, {
'name': "Allocation Familiale S",
'sequence': 6,
'code': 'AFS',
'category_id': self.env.ref('hr_payroll.DED').id,
'condition_select': 'none',
'amount_select': 'percentage',
'amount_percentage_base': 'BASIC',
'quantity': '1.0',
'amount_percentage': '0',
}),
(0, 0, {
'name': "<NAME>",
'sequence': 6,
'code': 'CFPS',
'category_id': self.env.ref('hr_payroll.DED').id,
'condition_select': 'none',
'amount_select': 'percentage',
'amount_percentage_base': 'BASIC',
'quantity': '1.0',
'amount_percentage': '0',
}),
(0, 0, {
'name': "Fond National de l'Emploi S",
'sequence': 6,
'code': 'FNES',
'category_id': self.env.ref('hr_payroll.DED').id,
'condition_select': 'none',
'amount_select': 'percentage',
'amount_percentage_base': 'BASIC',
'quantity': '1.0',
'amount_percentage': '0',
}),
(0, 0, {
'name': 'Total Cotisation S',
'sequence': 10,
'code': 'TCS',
'category_id': self.env.ref('hr_payroll.DED').id,
'condition_select': 'none',
'amount_select': 'code',
'amount_python_compute': 'result = TDLS + CFSS + CNPSS + RAVS+ RIS + CACS + ATS + AFS + CFPS + FNES',
}),
(0, 0, {
'name': "Taxe sur Developpement Local P",
'sequence': 6,
'code': 'TDLP',
'category_id': self.env.ref('hr_payroll.DED').id,
'condition_select': 'none',
'amount_select': 'fix',
'quantity': '1.0',
'amount_fix': '0',
}),
(0, 0, {
'name': "Retenue Mutuelle",
'sequence': 6,
'code': 'RM',
'category_id': self.env.ref('hr_payroll.DED').id,
'condition_select': 'none',
'amount_select': 'fix',
'quantity': '1.0',
'amount_fix': '7000',
}),
(0, 0, {
'name': "<NAME>",
'sequence': 6,
'code': 'CFSP',
'category_id': self.env.ref('hr_payroll.DED').id,
'condition_select': 'none',
'amount_select': 'percentage',
'amount_percentage_base': 'BASIC',
'quantity': '1.0',
'amount_percentage': '0',
}),
(0, 0, {
'name': "Retenu CNPS P",
'sequence': 6,
'code': 'CNPSP',
'category_id': self.env.ref('hr_payroll.DED').id,
'condition_select': 'none',
'amount_select': 'percentage',
'amount_percentage_base': 'BASIC',
'quantity': '1.0',
'amount_percentage': '4.2',
}),
(0, 0, {
'name': "<NAME>",
'sequence': 6,
'code': 'RAVP',
'category_id': self.env.ref('hr_payroll.DED').id,
'condition_select': 'none',
'amount_select': 'fix',
'quantity': '1.0',
'amount_fix': '0',
}),
(0, 0, {
'name': "Retenue IRRP P",
'sequence': 6,
'code': 'RIP',
'category_id': self.env.ref('hr_payroll.DED').id,
'condition_select': 'none',
'amount_select': 'fix',
'quantity': '1.0',
'amount_fix': '0',
}),
(0, 0, {
'name': "<NAME>",
'sequence': 6,
'code': 'CACP',
'category_id': self.env.ref('hr_payroll.DED').id,
'condition_select': 'none',
'amount_select': 'percentage',
'amount_percentage_base': 'BASIC',
'quantity': '1.0',
'amount_percentage': '0',
}),
(0, 0, {
'name': "<NAME>",
'sequence': 6,
'code': 'ATP',
'category_id': self.env.ref('hr_payroll.DED').id,
'condition_select': 'none',
'amount_select': 'percentage',
'amount_percentage_base': 'BASIC',
'quantity': '1.0',
'amount_percentage': '1.75',
}),
(0, 0, {
'name': "<NAME>",
'sequence': 6,
'code': 'AFP',
'category_id': self.env.ref('hr_payroll.DED').id,
'condition_select': 'none',
'amount_select': 'percentage',
'amount_percentage_base': 'BASIC',
'quantity': '1.0',
'amount_percentage': '7',
}),
(0, 0, {
'name': "<NAME>",
'sequence': 6,
'code': 'CFPP',
'category_id': self.env.ref('hr_payroll.DED').id,
'condition_select': 'none',
'amount_select': 'percentage',
'amount_percentage_base': 'BASIC',
'quantity': '1.0',
'amount_percentage': '1.5',
}),
(0, 0, {
'name': "Fond National de l'Emploi P",
'sequence': 6,
'code': 'FNEP',
'category_id': self.env.ref('hr_payroll.DED').id,
'condition_select': 'none',
'amount_select': 'percentage',
'amount_percentage_base': 'BASIC',
'quantity': '1.0',
'amount_percentage': '1',
}),
(0, 0, {
'name': 'Total Cotisation P',
'sequence': 10,
'code': 'TCP',
'category_id': self.env.ref('hr_payroll.DED').id,
'condition_select': 'none',
'amount_select': 'code',
'amount_python_compute': 'result = TDLP + CFSP + CNPSP + RAVP+ RIP + CACP + ATP + AFP + CFPP + FNEP',
}),
(0, 0, {
'name': 'Gross',
'sequence': 100,
'code': 'GROSS',
'category_id': self.env.ref('hr_payroll.GROSS').id,
'condition_select': 'none',
'amount_select': 'code',
'amount_python_compute': 'result = categories.BASIC + categories.ALW',
}),
(0, 0, {
'name': 'Net Salary',
'sequence': 200,
'code': 'NET',
'category_id': self.env.ref('hr_payroll.NET').id,
'condition_select': 'none',
'amount_select': 'code',
'amount_python_compute': 'result = ALW - TCS',
})
]
if self.env.company.country_code == 'FR':
# FR
return [
(0, 0, {
'name': 'Basic Salary',
'sequence': 1,
'code': 'BASIC',
'category_id': self.env.ref('hr_payroll.BASIC').id,
'condition_select': 'none',
'amount_select': 'code',
'amount_python_compute': 'result = payslip.paid_amount',
}),
(0, 0, {
'name': 'HEURES ABSENCES ENTREE/SORTIE ',
'sequence': 1,
'code': 'HAES',
'category_id': self.env.ref('hr_payroll.BASIC').id,
'condition_select': 'none',
'amount_select': 'code',
'amount_python_compute': 'if worked_days.LEAVE90:result = 4.41 *(worked_days.LEAVE90.number_of_hours) else:result = 0 ',
}),
(0, 0, {
'name': 'Basic Brut',
'sequence': 2,
'code': 'SB',
'category_id': self.env.ref('hr_payroll.GROSS').id,
'condition_select': 'none',
'amount_select': 'code',
'amount_python_compute': 'result = BASIC - HAES',
}),
(0, 0, {
'name': "Sécurité sociale-Maladie Maternité Invalidité Décès E",
'sequence': 6,
'code': 'SSMMIDE',
'category_id': self.env.ref('hr_payroll.DED').id,
'condition_select': 'none',
'amount_select': 'percentage',
'amount_percentage_base': 'SB',
'quantity': '1',
'amount_percentage': '0',
}),
(0, 0, {
'name': "Sécurité sociale-Maladie Maternité Invalidité Décès P",
'sequence': 6,
'code': 'SSMMIDP',
'category_id': self.env.ref('hr_payroll.COMP').id,
'condition_select': 'none',
'amount_select': 'percentage',
'amount_percentage_base': 'SB',
'quantity': '1',
'amount_percentage': '7',
}),
(0, 0, {
'name': "Complément Incapacité Invalidité Décès TA E",
'sequence': 6,
'code': 'CIIDTE',
'category_id': self.env.ref('hr_payroll.DED').id,
'condition_select': 'none',
'amount_select': 'percentage',
'amount_percentage_base': 'SB',
'quantity': '1',
'amount_percentage': '0.55304',
}),
(0, 0, {
'name': "Complément Incapacité Invalidité Décès TA P",
'sequence': 6,
'code': 'CIIDTP',
'category_id': self.env.ref('hr_payroll.COMP').id,
'condition_select': 'none',
'amount_select': 'percentage',
'amount_percentage_base': 'SB',
'quantity': '1',
'amount_percentage': '0.55304',
}),
(0, 0, {
'name': "ACCIDENTS DU TRAVAIL & MAL. PROFESSIONNELLES",
'sequence': 6,
'code': 'ATMP',
'category_id': self.env.ref('hr_payroll.COMP').id,
'condition_select': 'none',
'amount_select': 'percentage',
'amount_percentage_base': 'SB',
'quantity': '1',
'amount_percentage': '0.9',
}),
(0, 0, {
'name': "Sécurité Sociale plafonnée E",
'sequence': 6,
'code': 'SSPE',
'category_id': self.env.ref('hr_payroll.DED').id,
'condition_select': 'none',
'amount_select': 'percentage',
'amount_percentage_base': 'SB',
'quantity': '100.0',
'amount_percentage': '0',
}),
(0, 0, {
'name': "Sécurité Sociale plafonnée P",
'sequence': 6,
'code': 'SSPP',
'category_id': self.env.ref('hr_payroll.COMP').id,
'condition_select': 'none',
'amount_select': 'percentage',
'amount_percentage_base': 'SB',
'quantity': '1.0',
'amount_percentage': '8.5',
}),
(0, 0, {
'name': "Sécurité Sociale déplafonnée E",
'sequence': 6,
'code': 'SSDE',
'category_id': self.env.ref('hr_payroll.DED').id,
'condition_select': 'none',
'amount_select': 'percentage',
'amount_percentage_base': 'SB',
'quantity': '1.0',
'amount_percentage': '0',
}),
(0, 0, {
'name': "Sécurité Sociale déplafonnée P",
'sequence': 6,
'code': 'SSDP',
'category_id': self.env.ref('hr_payroll.COMP').id,
'condition_select': 'none',
'amount_select': 'percentage',
'amount_percentage_base': 'SB',
'quantity': '1',
'amount_percentage': '1.9',
}),
(0, 0, {
'name': "<NAME>",
'sequence': 6,
'code': 'CTE',
'category_id': self.env.ref('hr_payroll.DED').id,
'condition_select': 'none',
'amount_select': 'percentage',
'amount_percentage_base': 'SB',
'quantity': '1.0',
'amount_percentage': '0',
}),
(0, 0, {
'name': "<NAME>",
'sequence': 6,
'code': 'CTP',
'category_id': self.env.ref('hr_payroll.COMP').id,
'condition_select': 'none',
'amount_select': 'percentage',
'amount_percentage_base': 'SB',
'quantity': '1',
'amount_percentage': '6.1',
}),
(0, 0, {
'name': "<NAME>",
'sequence': 6,
'code': 'FAME',
'category_id': self.env.ref('hr_payroll.DED').id,
'condition_select': 'none',
'amount_select': 'percentage',
'amount_percentage_base': 'SB',
'quantity': '1.0',
'amount_percentage': '0',
}),
(0, 0, {
'name': "<NAME>",
'sequence': 6,
'code': 'FAMP',
'category_id': self.env.ref('hr_payroll.COMP').id,
'condition_select': 'none',
'amount_select': 'percentage',
'amount_percentage_base': 'SB',
'quantity': '1',
'amount_percentage': '3.5',
}),
(0, 0, {
'name': "<NAME>",
'sequence': 6,
'code': 'CHOME',
'category_id': self.env.ref('hr_payroll.DED').id,
'condition_select': 'none',
'amount_select': 'percentage',
'amount_percentage_base': 'SB',
'quantity': '1.0',
'amount_percentage': '0',
}),
(0, 0, {
'name': "<NAME>",
'sequence': 6,
'code': 'CHOMP',
'category_id': self.env.ref('hr_payroll.COMP').id,
'condition_select': 'none',
'amount_select': 'percentage',
'amount_percentage_base': 'SB',
'quantity': '1',
'amount_percentage': '4.05',
}),
(0, 0, {
'name': | |
# Repository: rsdoherty/azure-sdk-for-python
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import msrest.serialization
class AccessUri(msrest.serialization.Model):
    """A disk access SAS uri.

    Server-populated model: every field is read-only, and any value supplied
    by the client is ignored when the request is serialized.

    :ivar access_sas: A SAS uri for accessing a disk.
    :vartype access_sas: str
    """

    _validation = {
        'access_sas': {'readonly': True},
    }

    _attribute_map = {
        'access_sas': {'key': 'accessSAS', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Read-only; filled in from the service response during deserialization
        self.access_sas = None
class CreationData(msrest.serialization.Model):
    """Data used when creating a disk.

    All required parameters must be populated in order to send to Azure.

    :param create_option: Required. Enumerates the possible sources of a disk's
     creation. Possible values include: "Empty", "Attach", "FromImage",
     "Import", "Copy", "Restore", "Upload".
    :type create_option: str or ~azure.mgmt.compute.v2018_09_30.models.DiskCreateOption
    :param storage_account_id: When createOption is Import, the Azure Resource
     Manager identifier of the storage account containing the blob to import as
     a disk. Required only when the blob is in a different subscription.
    :type storage_account_id: str
    :param image_reference: Disk source information.
    :type image_reference: ~azure.mgmt.compute.v2018_09_30.models.ImageDiskReference
    :param source_uri: When createOption is Import, the URI of a blob to be
     imported into a managed disk.
    :type source_uri: str
    :param source_resource_id: When createOption is Copy, the ARM id of the
     source snapshot or disk.
    :type source_resource_id: str
    """

    _validation = {
        'create_option': {'required': True},
    }

    _attribute_map = {
        'create_option': {'key': 'createOption', 'type': 'str'},
        'storage_account_id': {'key': 'storageAccountId', 'type': 'str'},
        'image_reference': {'key': 'imageReference', 'type': 'ImageDiskReference'},
        'source_uri': {'key': 'sourceUri', 'type': 'str'},
        'source_resource_id': {'key': 'sourceResourceId', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.create_option = kwargs['create_option']  # required: KeyError if absent
        self.storage_account_id = kwargs.get('storage_account_id')
        self.image_reference = kwargs.get('image_reference')
        self.source_uri = kwargs.get('source_uri')
        self.source_resource_id = kwargs.get('source_resource_id')
class Resource(msrest.serialization.Model):
    """The Resource model definition.

    Variables are only populated by the server, and will be ignored when
    sending a request. All required parameters must be populated in order to
    send to Azure.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Required. Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Read-only identity fields are populated by the service
        self.id = None
        self.name = None
        self.type = None
        self.location = kwargs['location']  # required: KeyError if absent
        self.tags = kwargs.get('tags')
class Disk(Resource):
    """Disk resource.

    Variables are only populated by the server, and will be ignored when
    sending a request. All required parameters must be populated in order to
    send to Azure.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Required. Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :ivar managed_by: A relative URI containing the ID of the VM that has the
     disk attached.
    :vartype managed_by: str
    :param sku: The disks sku name. Can be Standard_LRS, Premium_LRS,
     StandardSSD_LRS, or UltraSSD_LRS.
    :type sku: ~azure.mgmt.compute.v2018_09_30.models.DiskSku
    :param zones: The Logical zone list for Disk.
    :type zones: list[str]
    :ivar time_created: The time when the disk was created.
    :vartype time_created: ~datetime.datetime
    :param os_type: The Operating System type. Possible values include:
     "Windows", "Linux".
    :type os_type: str or ~azure.mgmt.compute.v2018_09_30.models.OperatingSystemTypes
    :param hyper_v_generation: The hypervisor generation of the Virtual
     Machine. Applicable to OS disks only. Possible values include: "V1", "V2".
    :type hyper_v_generation: str or ~azure.mgmt.compute.v2018_09_30.models.HyperVGeneration
    :param creation_data: Disk source information. CreationData information
     cannot be changed after the disk has been created.
    :type creation_data: ~azure.mgmt.compute.v2018_09_30.models.CreationData
    :param disk_size_gb: Mandatory (size of the VHD to create) when
     creationData.createOption is Empty. Otherwise a present value indicates a
     resize; resizes are only allowed while the disk is not attached to a
     running VM and can only increase the disk's size.
    :type disk_size_gb: int
    :param encryption_settings_collection: Encryption settings collection used
     for Azure Disk Encryption; can contain multiple encryption settings per
     disk or snapshot.
    :type encryption_settings_collection:
     ~azure.mgmt.compute.v2018_09_30.models.EncryptionSettingsCollection
    :ivar provisioning_state: The disk provisioning state.
    :vartype provisioning_state: str
    :param disk_iops_read_write: The number of IOPS allowed for this disk; only
     settable for UltraSSD disks. One operation can transfer between 4k and
     256k bytes.
    :type disk_iops_read_write: long
    :param disk_m_bps_read_write: The bandwidth allowed for this disk; only
     settable for UltraSSD disks. MBps means millions of bytes per second (ISO
     notation, powers of 10).
    :type disk_m_bps_read_write: int
    :ivar disk_state: The state of the disk. Possible values include:
     "Unattached", "Attached", "Reserved", "ActiveSAS", "ReadyToUpload",
     "ActiveUpload".
    :vartype disk_state: str or ~azure.mgmt.compute.v2018_09_30.models.DiskState
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'managed_by': {'readonly': True},
        'time_created': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'disk_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'managed_by': {'key': 'managedBy', 'type': 'str'},
        'sku': {'key': 'sku', 'type': 'DiskSku'},
        'zones': {'key': 'zones', 'type': '[str]'},
        'time_created': {'key': 'properties.timeCreated', 'type': 'iso-8601'},
        'os_type': {'key': 'properties.osType', 'type': 'str'},
        'hyper_v_generation': {'key': 'properties.hyperVGeneration', 'type': 'str'},
        'creation_data': {'key': 'properties.creationData', 'type': 'CreationData'},
        'disk_size_gb': {'key': 'properties.diskSizeGB', 'type': 'int'},
        'encryption_settings_collection': {'key': 'properties.encryptionSettingsCollection', 'type': 'EncryptionSettingsCollection'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'disk_iops_read_write': {'key': 'properties.diskIOPSReadWrite', 'type': 'long'},
        'disk_m_bps_read_write': {'key': 'properties.diskMBpsReadWrite', 'type': 'int'},
        'disk_state': {'key': 'properties.diskState', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Read-only properties are populated only from service responses
        self.managed_by = None
        self.time_created = None
        self.provisioning_state = None
        self.disk_state = None
        # Client-settable properties
        self.sku = kwargs.get('sku')
        self.zones = kwargs.get('zones')
        self.os_type = kwargs.get('os_type')
        self.hyper_v_generation = kwargs.get('hyper_v_generation')
        self.creation_data = kwargs.get('creation_data')
        self.disk_size_gb = kwargs.get('disk_size_gb')
        self.encryption_settings_collection = kwargs.get('encryption_settings_collection')
        self.disk_iops_read_write = kwargs.get('disk_iops_read_write')
        self.disk_m_bps_read_write = kwargs.get('disk_m_bps_read_write')
class DiskList(msrest.serialization.Model):
    """The List Disks operation response.

    All required parameters must be populated in order to send to Azure.

    :param value: Required. A list of disks.
    :type value: list[~azure.mgmt.compute.v2018_09_30.models.Disk]
    :param next_link: The uri to fetch the next page of disks. Call ListNext()
     with this to fetch the next page of disks.
    :type next_link: str
    """

    _validation = {
        'value': {'required': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[Disk]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.value = kwargs['value']  # required: KeyError if absent
        self.next_link = kwargs.get('next_link')
class DiskSku(msrest.serialization.Model):
    """The disks sku name. Can be Standard_LRS, Premium_LRS, StandardSSD_LRS, or UltraSSD_LRS.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :param name: The sku name. Possible values include: "Standard_LRS",
     "Premium_LRS", "StandardSSD_LRS", "UltraSSD_LRS".
    :type name: str or ~azure.mgmt.compute.v2018_09_30.models.DiskStorageAccountTypes
    :ivar tier: The sku tier.
    :vartype tier: str
    """

    _validation = {
        'tier': {'readonly': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'tier': {'key': 'tier', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.name = kwargs.get('name')
        self.tier = None  # read-only, server-populated
class DiskUpdate(msrest.serialization.Model):
"""Disk update resource.
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param sku: The disks sku name. Can be Standard_LRS, Premium_LRS, StandardSSD_LRS, or
UltraSSD_LRS.
:type sku: ~azure.mgmt.compute.v2018_09_30.models.DiskSku
:param os_type: the Operating System type. Possible values include: "Windows", "Linux".
:type os_type: str or ~azure.mgmt.compute.v2018_09_30.models.OperatingSystemTypes
:param disk_size_gb: If creationData.createOption is Empty, this field is mandatory and it
indicates the size of the VHD to create. If this field is present for updates or creation with
other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a
running VM, and can only increase the disk's size.
:type disk_size_gb: int
:param encryption_settings_collection: Encryption settings collection used be Azure Disk
Encryption, | |
# File: qtoggleserver/core/device/attrs.py
import asyncio
import copy
import datetime
import hashlib
import logging
import re
import socket
import sys
import time
from typing import Optional
from qtoggleserver import system
from qtoggleserver import version
from qtoggleserver.conf import settings
from qtoggleserver.core.typing import Attributes, AttributeDefinitions, GenericJSONDict
from qtoggleserver.utils import json as json_utils
from qtoggleserver.utils.cmd import run_set_cmd
from . import events as device_events
logger = logging.getLogger(__name__)
# Definitions of the device attributes exposed through the API.
# Each entry maps an attribute name to a definition dict; recognized keys:
#   type        -- JSON type: 'string', 'number' or a list type such as ['string']
#   modifiable  -- whether the attribute can be written (bool or callable)
#   persisted   -- whether written values are persisted (defaults to the value
#                  of `modifiable`; see set_attrs())
#   enabled     -- callable deciding whether the attribute is exposed at all
#   standard    -- True for standard attributes; False forces exposing the
#                  full definition (see the inline comments below)
#   min/max/pattern/integer/choices -- value constraints
# Callable values are resolved once, lazily, in get_attrdefs().
ATTRDEFS = {
    'name': {
        'type': 'string',
        'modifiable': True,
        'persisted': True,
        'min': 1,
        'max': 32,
        'pattern': r'^[_a-zA-Z][_a-zA-Z0-9-]{0,31}$',
        'standard': True
    },
    'display_name': {
        'type': 'string',
        'modifiable': True,
        'max': 64,
        'standard': True
    },
    'version': {
        'type': 'string',
        'standard': True
    },
    'api_version': {
        'type': 'string',
        'standard': True
    },
    'vendor': {
        'type': 'string',
        'standard': True
    },
    # Passwords are write-only; values are hashed before being stored (see set_attrs())
    'admin_password': {
        'type': 'string',
        'modifiable': True,
        'max': 32,
        'standard': True
    },
    'normal_password': {
        'type': 'string',
        'modifiable': True,
        'max': 32,
        'standard': True
    },
    'viewonly_password': {
        'type': 'string',
        'modifiable': True,
        'max': 32,
        'standard': True
    },
    'flags': {
        'type': ['string'],
        'standard': True
    },
    'virtual_ports': {
        'type': 'number',
        'enabled': lambda: bool(settings.core.virtual_ports),
        'standard': True
    },
    'uptime': {
        'type': 'number',
        'standard': True
    },
    'date': {
        'type': 'number',
        'modifiable': lambda: system.date.has_set_date_support(),
        'persisted': False,
        'standard': False  # Having standard False here enables exposing of definition (needed for non-modifiable)
    },
    'timezone': {
        'type': 'string',
        'modifiable': True,
        'persisted': False,
        'choices': [{'value': zone} for zone in system.date.get_timezones()],
        'enabled': lambda: system.date.has_timezone_support(),
        'standard': False  # Having standard False here enables exposing of definition (needed for choices)
    },
    # WiFi attributes: only exposed when the system reports WiFi support
    'wifi_ssid': {
        'type': 'string',
        'max': 32,
        'modifiable': True,
        'persisted': False,
        'enabled': lambda: system.net.has_wifi_support(),
        'standard': True
    },
    'wifi_key': {
        'type': 'string',
        'max': 64,
        'modifiable': True,
        'persisted': False,
        'enabled': lambda: system.net.has_wifi_support(),
        'standard': True
    },
    'wifi_bssid': {
        'type': 'string',
        'pattern': r'^([a-fA-F0-9]{2}:[a-fA-F0-9]{2}:[a-fA-F0-9]{2}:[a-fA-F0-9]{2}:[a-fA-F0-9]{2}:[a-fA-F0-9]{2})?$',
        'modifiable': True,
        'persisted': False,
        'enabled': lambda: system.net.has_wifi_support(),
        'standard': True
    },
    'wifi_bssid_current': {
        'type': 'string',
        'modifiable': False,
        'persisted': False,
        'enabled': lambda: system.net.has_wifi_support(),
        'standard': True
    },
    'wifi_signal_strength': {
        'type': 'number',
        'modifiable': False,
        'persisted': False,
        'enabled': lambda: system.net.has_wifi_support(),
        'standard': True
    },
    # IP configuration attributes: only exposed when the system reports IP support
    'ip_address': {
        'type': 'string',
        'pattern': r'^(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})?$',
        'modifiable': True,
        'persisted': False,
        'enabled': lambda: system.net.has_ip_support(),
        'standard': True
    },
    'ip_netmask': {
        'type': 'number',
        'min': 0,
        'max': 31,
        'integer': True,
        'modifiable': True,
        'persisted': False,
        'enabled': lambda: system.net.has_ip_support(),
        'standard': True
    },
    'ip_gateway': {
        'type': 'string',
        'pattern': r'^(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})?$',
        'modifiable': True,
        'persisted': False,
        'enabled': lambda: system.net.has_ip_support(),
        'standard': True
    },
    'ip_dns': {
        'type': 'string',
        'pattern': r'^(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})?$',
        'modifiable': True,
        'persisted': False,
        'enabled': lambda: system.net.has_ip_support(),
        'standard': True
    },
    'ip_address_current': {
        'type': 'string',
        'modifiable': False,
        'persisted': False,
        'enabled': lambda: system.net.has_ip_support(),
        'standard': True
    },
    'ip_netmask_current': {
        'type': 'number',
        'modifiable': False,
        'persisted': False,
        'enabled': lambda: system.net.has_ip_support(),
        'standard': True
    },
    'ip_gateway_current': {
        'type': 'string',
        'modifiable': False,
        'persisted': False,
        'enabled': lambda: system.net.has_ip_support(),
        'standard': True
    },
    'ip_dns_current': {
        'type': 'string',
        'modifiable': False,
        'persisted': False,
        'enabled': lambda: system.net.has_ip_support(),
        'standard': True
    },
    # System health attributes
    'cpu_usage': {
        'type': 'number',
        'min': 0,
        'max': 100,
        'modifiable': False,
        'persisted': False,
        'standard': True
    },
    'mem_usage': {
        'type': 'number',
        'min': 0,
        'max': 100,
        'modifiable': False,
        'persisted': False,
        'standard': True
    },
    'storage_usage': {
        'type': 'number',
        'min': 0,
        'max': 100,
        'modifiable': False,
        'persisted': False,
        'enabled': lambda: system.storage.has_storage_support(),
        'standard': True
    },
    'temperature': {
        'type': 'number',
        'modifiable': False,
        'persisted': False,
        'enabled': lambda: system.temperature.has_temperature_support(),
        'min': lambda: settings.system.temperature.min,
        'max': lambda: settings.system.temperature.max,
        'standard': False  # Having standard False here enables exposing of definition (needed for min/max)
    },
    'battery_level': {
        'type': 'number',
        'min': 0,
        'max': 100,
        'modifiable': False,
        'persisted': False,
        'enabled': lambda: system.battery.has_battery_support(),
        'standard': True
    }
}
# SHA-256 of the empty string: the hash value meaning "no password set"
EMPTY_PASSWORD_HASH = hashlib.sha256(b'').hexdigest()
# WiFi RSSI thresholds (dBm) used by get_attrs() to map raw signal to a 0..3 strength level
WIFI_RSSI_EXCELLENT = -50
WIFI_RSSI_GOOD = -60
WIFI_RSSI_FAIR = -70
# Interval, in seconds, presumably used by the network attributes watch task
# (the task itself is defined outside this excerpt) -- TODO confirm
NETWORK_ATTRS_WATCH_INTERVAL = 10
# Default device name: the hostname, stripped of characters the 'name' attribute disallows
name: str = re.sub(r'[^a-zA-Z0-9_-]', '', socket.gethostname())
if not re.match('^[a-zA-Z_]', name):  # Make sure name starts with a letter or underscore
    name = f'q{name}'
name = name[:32]  # the 'name' attribute definition caps its length at 32
display_name: str = ''
# Password hashes (SHA-256 hex digests); None until loaded/set
admin_password_hash: Optional[str] = None
normal_password_hash: Optional[str] = None
viewonly_password_hash: Optional[str] = None
# Lazily-initialized caches: see get_schema() and get_attrdefs()
_schema: Optional[GenericJSONDict] = None
_attrdefs: Optional[AttributeDefinitions] = None
# Handle of the attributes watch task (created outside this excerpt)
_attrs_watch_task: Optional[asyncio.Task] = None
class DeviceAttributeError(Exception):
    """Raised when a device attribute cannot be handled as requested
    (e.g. an attempt to write a non-modifiable attribute).

    :param error: error identifier (e.g. ``'attribute-not-modifiable'``)
    :param attribute: name of the offending attribute
    """

    def __init__(self, error: str, attribute: str) -> None:
        # Forward a message to Exception so str()/logging show the details
        # instead of an empty message.
        super().__init__(f'{error}: {attribute}')
        self.error: str = error
        self.attribute: str = attribute
def get_attrdefs() -> AttributeDefinitions:
    """Return the device attribute definitions, resolving callable entries.

    The resolved definitions are cached in the module-level ``_attrdefs``;
    subsequent calls return the cached object.
    """
    global _attrdefs

    if _attrdefs is not None:
        return _attrdefs

    logger.debug('initializing attribute definitions')

    # Work on a private deep copy so that ATTRDEFS itself stays untouched,
    # then replace every callable value with its result.
    resolved = copy.deepcopy(ATTRDEFS)
    for attrdef in resolved.values():
        for key, value in list(attrdef.items()):
            if callable(value):
                attrdef[key] = value()

    _attrdefs = resolved
    return _attrdefs
def get_schema(loose: bool = False) -> GenericJSONDict:
    """Build the JSON schema validating writes to modifiable device attributes.

    :param loose: when true, additional properties are accepted and the schema
        is rebuilt on every call; the strict schema is cached in ``_schema``.
    """
    global _schema

    # Use cached value, but only when loose is false, as loose schema is never cached
    if _schema is not None and not loose:
        return _schema

    properties = {}
    for attr_name, attrdef in get_attrdefs().items():
        if not attrdef.get('modifiable'):
            continue

        prop = dict(attrdef)
        if not prop.pop('enabled', True):
            continue

        # Translate internal constraint names into JSON-schema keywords
        if prop['type'] == 'string':
            if 'min' in prop:
                prop['minLength'] = prop.pop('min')
            if 'max' in prop:
                prop['maxLength'] = prop.pop('max')
        elif prop['type'] == 'number':
            if prop.get('integer'):
                prop['type'] = 'integer'
            if 'min' in prop:
                prop['minimum'] = prop.pop('min')
            if 'max' in prop:
                prop['maximum'] = prop.pop('max')

        if 'choices' in attrdef:
            prop['enum'] = [choice['value'] for choice in prop.pop('choices')]

        # Internal bookkeeping keys are not part of the JSON schema
        for internal_key in ('persisted', 'modifiable', 'standard'):
            prop.pop(internal_key, None)

        properties[attr_name] = prop

    schema = {
        'type': 'object',
        'properties': properties,
        'additionalProperties': loose
    }

    if not loose:
        _schema = schema

    return schema
def get_attrs() -> Attributes:
    """Gather and return the current values of all device attributes.

    Conditional attributes (``wifi_*``, ``ip_*``, ``storage_usage``,
    ``temperature``, ``battery_level``, ...) are included only when the
    corresponding system support is detected.

    :return: a mapping of attribute name to current value
    """
    from qtoggleserver.core import api as core_api
    from qtoggleserver.core import history as core_history

    attrs = {
        'name': name,
        'display_name': display_name,
        'version': version.VERSION,
        'api_version': core_api.API_VERSION,
        'vendor': version.VENDOR,
        'uptime': system.uptime(),

        # Never disclose passwords; report only whether one is set.
        # (Comparing against EMPTY_PASSWORD_HASH restores the placeholder-mangled
        # expressions: an empty-string hash means "no password set".)
        'admin_password': '' if admin_password_hash == EMPTY_PASSWORD_HASH else 'set',
        'normal_password': '' if normal_password_hash == EMPTY_PASSWORD_HASH else 'set',
        'viewonly_password': '' if viewonly_password_hash == EMPTY_PASSWORD_HASH else 'set'
    }

    # Feature flags advertised to API clients
    flags = ['expressions']
    if settings.system.fwupdate.driver:
        flags.append('firmware')
    if settings.core.backup_support:
        flags.append('backup')
    if core_history.is_enabled():
        flags.append('history')
    if settings.core.listen_support:
        flags.append('listen')
    if settings.slaves.enabled:
        flags.append('master')
    if settings.reverse.enabled:
        flags.append('reverse')
    if settings.core.sequences_support:
        flags.append('sequences')
    if settings.core.ssl_support:
        flags.append('ssl')
    if settings.webhooks.enabled:
        flags.append('webhooks')
    attrs['flags'] = flags

    if settings.core.virtual_ports:
        attrs['virtual_ports'] = settings.core.virtual_ports

    if system.date.has_real_date_time():
        attrs['date'] = int(time.time())

    if system.date.has_timezone_support():
        attrs['timezone'] = system.date.get_timezone()

    if system.net.has_wifi_support():
        wifi_config = system.net.get_wifi_config()
        attrs['wifi_ssid'] = wifi_config['ssid']
        attrs['wifi_key'] = wifi_config['psk']
        attrs['wifi_bssid'] = wifi_config['bssid']
        if wifi_config['bssid_current']:
            attrs['wifi_bssid_current'] = wifi_config['bssid_current']

        rssi = wifi_config['rssi_current']
        if rssi:
            # Map raw RSSI (dBm) to a 0..3 signal strength level
            rssi = int(rssi)
            if rssi >= WIFI_RSSI_EXCELLENT:
                strength = 3
            elif rssi >= WIFI_RSSI_GOOD:
                strength = 2
            elif rssi >= WIFI_RSSI_FAIR:
                strength = 1
            else:
                strength = 0
            attrs['wifi_signal_strength'] = strength

    if system.net.has_ip_support():
        ip_config = system.net.get_ip_config()
        attrs['ip_address'] = ip_config['address']
        attrs['ip_netmask'] = int(ip_config['netmask'] or 0)
        attrs['ip_gateway'] = ip_config['gateway']
        attrs['ip_dns'] = ip_config['dns']
        if 'address_current' in ip_config:
            attrs['ip_address_current'] = ip_config['address_current']
        if 'netmask_current' in ip_config:
            attrs['ip_netmask_current'] = int(ip_config['netmask_current'] or 0)
        if 'gateway_current' in ip_config:
            attrs['ip_gateway_current'] = ip_config['gateway_current']
        if 'dns_current' in ip_config:
            attrs['ip_dns_current'] = ip_config['dns_current']

    attrs['cpu_usage'] = system.get_cpu_usage()
    attrs['mem_usage'] = system.get_mem_usage()

    if system.storage.has_storage_support():
        attrs['storage_usage'] = system.storage.get_storage_usage()

    if system.temperature.has_temperature_support():
        attrs['temperature'] = system.temperature.get_temperature()

    if system.battery.has_battery_support():
        attrs['battery_level'] = system.battery.get_battery_level()

    return attrs
def set_attrs(attrs: Attributes, ignore_extra: bool = False) -> bool:
core_device_attrs = sys.modules[__name__]
reboot_required = False
attrdefs = get_attrdefs()
wifi_attrs = {}
ip_attrs = {}
for n, value in attrs.items():
# A few attributes may carry sensitive information, so treat them separately and do not log their values
if n.count('password') or n == 'wifi_key':
logger.debug('setting device attribute %s', n)
else:
logger.debug('setting device attribute %s = %s', n, json_utils.dumps(value))
try:
attrdef = attrdefs[n]
except KeyError:
if ignore_extra:
continue
else:
raise
if not attrdef.get('modifiable'):
if not ignore_extra:
raise DeviceAttributeError('attribute-not-modifiable', n)
# Treat passwords separately, as they are not persisted as given, but hashed first
if n.endswith('_password') and hasattr(core_device_attrs, f'{n}_hash'):
# Call password set command, if available
if settings.core.passwords.set_cmd:
run_set_cmd(
settings.core.passwords.set_cmd,
cmd_name='password',
log_values=False,
username=n[:-9],
password=<PASSWORD>
)
value = hashlib.sha256(value.encode()).hexdigest()
n += '_hash'
setattr(core_device_attrs, n, value)
continue
elif n.endswith('_password_hash') and hasattr(core_device_attrs, n):
# FIXME: Password set command cannot be called with hash and we don't have clear-text password here.
# A solution would be to use sha256 crypt algorithm w/o salt for Unix password (watch for the special
# alphabet and for number of rounds defaulting to 5000)
setattr(core_device_attrs, n, value)
continue
persisted = attrdef.get('persisted', attrdef.get('modifiable'))
if persisted:
setattr(core_device_attrs, n, value)
if n == 'name' and settings.core.device_name.set_cmd:
run_set_cmd(settings.core.device_name.set_cmd, cmd_name='device name', name=value)
elif n == 'date' and system.date.has_set_date_support():
date = datetime.datetime.utcfromtimestamp(value)
system.date.set_date(date)
elif n == 'timezone' and system.date.has_timezone_support():
system.date.set_timezone(value)
elif n in ('wifi_ssid', 'wifi_key', 'wifi_bssid') and system.net.has_wifi_support():
k = n[5:]
k = {
'key': 'psk'
}.get(k, k)
wifi_attrs[k] = value
elif n in ('ip_address', 'ip_netmask', 'ip_gateway', 'ip_dns') and system.net.has_ip_support():
k = n[3:]
ip_attrs[k] = value
if wifi_attrs:
wifi_config = system.net.get_wifi_config()
for k, v in wifi_attrs.items():
wifi_config[k] = v
wifi_config = {k: v for k, v in wifi_config.items() if not k.endswith('_current')}
system.net.set_wifi_config(**wifi_config)
reboot_required = True
if ip_attrs:
ip_config = system.net.get_ip_config()
for k, v in ip_attrs.items():
| |
1 0] [ 0 1] [0 1] [-1 0] [ 0 -1] [-1 0]
[0 1], [-1 0], [ 0 -1], [-1 0], [1 0], [ 0 -1], [ 1 0], [ 0 1]
]
There are 24 orientation preserving isometries of the 3-cube::
sage: ncube_isometry_group(3)
[
[1 0 0] [ 1 0 0] [ 0 1 0] [ 0 0 -1] [ 1 0 0] [ 0 1 0]
[0 1 0] [ 0 0 1] [ 0 0 -1] [ 0 -1 0] [ 0 0 -1] [-1 0 0]
[0 0 1], [ 0 -1 0], [-1 0 0], [-1 0 0], [ 0 1 0], [ 0 0 1],
<BLANKLINE>
[ 1 0 0] [ 0 0 1] [0 1 0] [ 0 0 1] [ 0 0 -1] [ 0 -1 0]
[ 0 -1 0] [-1 0 0] [0 0 1] [ 0 -1 0] [-1 0 0] [-1 0 0]
[ 0 0 -1], [ 0 -1 0], [1 0 0], [ 1 0 0], [ 0 1 0], [ 0 0 -1],
<BLANKLINE>
[ 0 1 0] [ 0 0 1] [ 0 0 -1] [ 0 -1 0] [0 0 1] [ 0 -1 0]
[ 1 0 0] [ 0 1 0] [ 1 0 0] [ 0 0 1] [1 0 0] [ 1 0 0]
[ 0 0 -1], [-1 0 0], [ 0 -1 0], [-1 0 0], [0 1 0], [ 0 0 1],
<BLANKLINE>
[-1 0 0] [-1 0 0] [ 0 0 -1] [-1 0 0] [ 0 -1 0] [-1 0 0]
[ 0 1 0] [ 0 0 -1] [ 0 1 0] [ 0 0 1] [ 0 0 -1] [ 0 -1 0]
[ 0 0 -1], [ 0 -1 0], [ 1 0 0], [ 0 1 0], [ 1 0 0], [ 0 0 1]
]
TESTS::
sage: ncube_isometry_group(1)
[[1]]
sage: ncube_isometry_group(0)
Traceback (most recent call last):
...
ValueError: ['B', 0] is not a valid Cartan type
"""
from sage.combinat.root_system.weyl_group import WeylGroup
L = [w.matrix() for w in WeylGroup(['B', n])]
if orientation_preserving:
return [m for m in L if m.det() == 1]
else:
return L
@cached_function
def ncube_isometry_group_cosets(n, orientation_preserving=True):
    r"""
    Return the quotient of the isometry group of the `n`-cube by the
    isometry group of the rectangular parallelepiped.

    INPUT:

    - ``n`` -- positive integer, dimension of the space
    - ``orientation_preserving`` -- bool (optional, default: ``True``),
      whether the orientation is preserved

    OUTPUT:

    list of cosets, each coset being a sorted list of matrices

    EXAMPLES::

        sage: from sage.combinat.tiling import ncube_isometry_group_cosets
        sage: sorted(ncube_isometry_group_cosets(2))
        [[
        [-1 0] [1 0]
        [ 0 -1], [0 1]
        ], [
        [ 0 -1] [ 0 1]
        [ 1 0], [-1 0]
        ]]
        sage: sorted(ncube_isometry_group_cosets(2, False))
        [[
        [-1 0] [-1 0] [ 1 0] [1 0]
        [ 0 -1], [ 0 1], [ 0 -1], [0 1]
        ], [
        [ 0 -1] [ 0 -1] [ 0 1] [0 1]
        [-1 0], [ 1 0], [-1 0], [1 0]
        ]]

    ::

        sage: sorted(ncube_isometry_group_cosets(3))
        [[
        [-1 0 0] [-1 0 0] [ 1 0 0] [1 0 0]
        [ 0 -1 0] [ 0 1 0] [ 0 -1 0] [0 1 0]
        [ 0 0 1], [ 0 0 -1], [ 0 0 -1], [0 0 1]
        ], [
        [-1 0 0] [-1 0 0] [ 1 0 0] [ 1 0 0]
        [ 0 0 -1] [ 0 0 1] [ 0 0 -1] [ 0 0 1]
        [ 0 -1 0], [ 0 1 0], [ 0 1 0], [ 0 -1 0]
        ], [
        [ 0 -1 0] [ 0 -1 0] [ 0 1 0] [ 0 1 0]
        [-1 0 0] [ 1 0 0] [-1 0 0] [ 1 0 0]
        [ 0 0 -1], [ 0 0 1], [ 0 0 1], [ 0 0 -1]
        ], [
        [ 0 -1 0] [ 0 -1 0] [ 0 1 0] [0 1 0]
        [ 0 0 -1] [ 0 0 1] [ 0 0 -1] [0 0 1]
        [ 1 0 0], [-1 0 0], [-1 0 0], [1 0 0]
        ], [
        [ 0 0 -1] [ 0 0 -1] [ 0 0 1] [0 0 1]
        [-1 0 0] [ 1 0 0] [-1 0 0] [1 0 0]
        [ 0 1 0], [ 0 -1 0], [ 0 -1 0], [0 1 0]
        ], [
        [ 0 0 -1] [ 0 0 -1] [ 0 0 1] [ 0 0 1]
        [ 0 -1 0] [ 0 1 0] [ 0 -1 0] [ 0 1 0]
        [-1 0 0], [ 1 0 0], [ 1 0 0], [-1 0 0]
        ]]

    TESTS::

        sage: cosets = ncube_isometry_group_cosets(3, False)
        sage: len(cosets)
        6
        sage: [len(c) for c in cosets]
        [8, 8, 8, 8, 8, 8]
        sage: type(cosets[0][0])
        <... 'sage.matrix.matrix_rational_dense.Matrix_rational_dense'>
    """
    from sage.misc.misc_c import prod
    from sage.matrix.constructor import diagonal_matrix
    G = ncube_isometry_group(n, orientation_preserving)
    # Construct the subgroup H of G of diagonal matrices (the axis sign
    # changes), keeping only determinant-1 ones when orientation is preserved
    it = itertools.product((1,-1), repeat=n)
    if orientation_preserving:
        H = [diagonal_matrix(L) for L in it if prod(L) == 1]
    else:
        H = [diagonal_matrix(L) for L in it]
    G_todo = set(G)
    # Make sure that H is a subset of G; matrices must be immutable to be
    # hashable for the membership tests against the set G_todo
    for h in H:
        h.set_immutable()
    assert all(h in G_todo for h in H), "H must be a subset of G"
    # Construct the cosets: repeatedly pick a representative g not yet
    # covered and take its coset Hg (= gH, since H is checked to be normal)
    cosets = []
    for g in G:
        if g not in G_todo:
            continue
        left_coset = sorted(h*g for h in H)
        right_coset = sorted(g*h for h in H)
        assert left_coset == right_coset, "H must be a normal subgroup of G"
        for c in left_coset:
            c.set_immutable()
        G_todo.difference_update(left_coset)
        cosets.append(left_coset)
    return cosets
##############################
# Class Polyomino
##############################
class Polyomino(SageObject):
r"""
A polyomino in `\ZZ^d`.
The polyomino is the union of the unit square (or cube, or n-cube)
centered at those coordinates. Such an object should be connected, but
the code does not make this assumption.
INPUT:
- ``coords`` -- iterable of integer coordinates in `\ZZ^d`
- ``color`` -- string (default: ``'gray'``), color for display
EXAMPLES::
sage: from sage.combinat.tiling import Polyomino
sage: Polyomino([(0,0,0), (0,1,0), (1,1,0), (1,1,1)], color='blue')
Polyomino: [(0, 0, 0), (0, 1, 0), (1, 1, 0), (1, 1, 1)], Color: blue
"""
def __init__(self, coords, color='gray'):
r"""
Constructor.
See :mod:`Polyomino` for full documentation.
EXAMPLES::
sage: from sage.combinat.tiling import Polyomino
sage: Polyomino([(0,0,0), (0,1,0), (1,1,0), (1,1,1)], color='blue')
Polyomino: [(0, 0, 0), (0, 1, 0), (1, 1, 0), (1, 1, 1)], Color: blue
::
sage: from sage.combinat.tiling import Polyomino
sage: Polyomino([(0,0), (1,0), (2,0)])
Polyomino: [(0, 0), (1, 0), (2, 0)], Color: gray
"""
from sage.modules.free_module import FreeModule
from sage.rings.integer_ring import ZZ
if not isinstance(color, str):
raise TypeError("color = ({!r}) must be a string".format(color))
self._color = color
if not isinstance(coords, (tuple,list)):
coords = list(coords)
if not coords:
raise ValueError("Polyomino must be non empty")
self._dimension = ZZ(len(coords[0]))
self._free_module = FreeModule(ZZ, self._dimension)
self._blocs = coords
self._blocs = [self._free_module(bloc) for bloc in self._blocs]
for b in self._blocs:
b.set_immutable()
self._blocs = frozenset(self._blocs)
def _repr_(self):
r"""
String representation.
EXAMPLES::
sage: from sage.combinat.tiling import Polyomino
sage: Polyomino([(0,0,0), (0,1,0), (1,1,0), (1,1,1)], color='red')
Polyomino: [(0, 0, 0), (0, 1, 0), (1, 1, 0), (1, 1, 1)], Color: red
"""
s = "Polyomino: %s, " % sorted(self._blocs)
s += "Color: %s" % self._color
return s
    def color(self):
        r"""
        Return the color of the polyomino.

        OUTPUT:

        string

        EXAMPLES::

            sage: from sage.combinat.tiling import Polyomino
            sage: p = Polyomino([(0,0,0), (0,1,0), (1,1,0), (1,1,1)], color='blue')
            sage: p.color()
            'blue'
        """
        return self._color
    def frozenset(self):
        r"""
        Return the elements of `\ZZ^d` in the polyomino as a frozenset.

        OUTPUT:

        frozenset of immutable integer vectors

        EXAMPLES::

            sage: from sage.combinat.tiling import Polyomino
            sage: p = Polyomino([(0,0,0), (0,1,0), (1,1,0), (1,1,1)], color='red')
            sage: p.frozenset()
            frozenset({(0, 0, 0), (0, 1, 0), (1, 1, 0), (1, 1, 1)})
        """
        return self._blocs
@cached_method
def sorted_list(self):
r"""
Return the color of | |
pertinent to rez
...
}
}
Each requirement has had its package name converted to the rez equivalent.
The 'variant_requires' key contains requirements specific to the current
variant.
TODO: Currently there is no way to reflect extras that may have been chosen
for this pip package. We need to wait for rez "package features" before this
will be possible. You probably shouldn't use extras presently.
Args:
installed_dist (`distlib.database.InstalledDistribution`): Distribution
to convert.
python_version (`Version`): Python version used to perform the
installation.
name_casings (list of str): A list of pip package names in their correct
casings (eg, 'Foo' rather than 'foo'). Any requirement whose name
case-insensitive-matches a name in this list, is set to that name.
This is needed because pip package names are case insensitive, but
rez is case-sensitive. So a package may list a requirement for package
'foo', when in fact the package that pip has downloaded is called 'Foo'.
Be sure to provide names in PIP format, not REZ format (the pip package
'foo-bah' will be converted to 'foo_bah' in rez).
Returns:
Dict: See example above.
"""
_system = System()
result_requires = []
result_variant_requires = []
# create cased names lookup
name_mapping = dict((x.lower(), x) for x in (name_casings or []))
# requirements such as platform, arch, os, and python
sys_requires = set(["python"])
# assume package is platform- and arch- specific if it isn't pure python
is_pure_python = is_pure_python_package(installed_dist)
if not is_pure_python:
sys_requires.update(["platform", "arch"])
# evaluate wrt python version, which may not be the current interpreter version
marker_env = {
"python_full_version": str(python_version),
"python_version": str(python_version.trim(2)),
"implementation_version": str(python_version)
}
# Note: This is supposed to give a requirements list that has already been
# filtered down based on the extras requested at install time, and on any
# environment markers present. However, this is not working in distlib. The
# package gets assigned a LegacyMetadata metadata object (only if a package metadata
# version is not equal to 2.0) and in that code path, this filtering
# doesn't happen.
#
# See: vendor/distlib/metadata.py#line-892
#
requires = installed_dist.run_requires
# filter requirements
for req_ in requires:
reqs = normalize_requirement(req_)
for req in reqs:
# skip if env marker is present and doesn't evaluate
if req.marker and not req.marker.evaluate(environment=marker_env):
continue
# skip if req is conditional on extras that weren't requested
if req.conditional_extras and not \
(set(installed_dist.extras or []) & set(req.conditional_extras)):
continue
if req.conditional_extras:
print_warning(
"Skipping requirement %r - conditional requirements are "
"not yet supported", str(req)
)
continue
# Inspect marker(s) to see if this requirement should be varianted.
# Markers may also cause other system requirements to be added to
# the variant.
#
to_variant = False
if req.marker:
marker_reqs = get_marker_sys_requirements(str(req.marker))
if marker_reqs:
sys_requires.update(marker_reqs)
to_variant = True
# remap the requirement name
remapped = name_mapping.get(req.name.lower())
if remapped:
req.name = remapped
# convert the requirement to rez equivalent
rez_req = str(packaging_req_to_rez_req(req))
if to_variant:
result_variant_requires.append(rez_req)
else:
result_requires.append(rez_req)
# prefix variant with system requirements
sys_variant_requires = []
if "platform" in sys_requires:
sys_variant_requires.append("platform-%s" % _system.platform)
if "arch" in sys_requires:
sys_variant_requires.append("arch-%s" % _system.arch)
if "os" in sys_requires:
sys_variant_requires.append("os-%s" % _system.os)
if "python" in sys_requires:
# Add python variant requirement. Note that this is always MAJOR.MINOR,
# because to do otherwise would mean analysing any present env markers.
# This could become quite complicated, and could also result in strange
# python version ranges in the variants.
#
sys_variant_requires.append("python-%s" % str(python_version.trim(2)))
return {
"requires": result_requires,
"variant_requires": sys_variant_requires + result_variant_requires,
"metadata": {
"is_pure_python": is_pure_python
}
}
def convert_distlib_to_setuptools(installed_dist):
    """Get the setuptools equivalent of a distlib installed dist.

    Args:
        installed_dist (`distlib.database.InstalledDistribution`): Distribution
            to convert.

    Returns:
        `pkg_resources.DistInfoDistribution`: Equivalent setuptools dist
        object, or None if no matching distribution is found.
    """
    search_path = os.path.dirname(installed_dist.path)
    # return the first distribution found at that path with a matching key
    return next(
        (dist for dist in pkg_resources.find_distributions(search_path)
         if dist.key == installed_dist.key),
        None
    )
def get_marker_sys_requirements(marker):
    """Get the system requirements that an environment marker introduces.

    Consider:

        'foo (>1.2) ; python_version == "3" and platform_machine == "x86_64"'

    This example would cause a requirement on python, platform, and arch
    (platform as a consequence of requirement on arch).

    See:
    * vendor/packaging/markers.py:line=76
    * https://www.python.org/dev/peps/pep-0508/#id23

    Args:
        marker (str): Environment marker string, eg 'python_version == "3"'.

    Returns:
        List of str: System requirements (unversioned).
    """
    py = "python"
    plat = "platform"
    arch = "arch"

    # Maps each marker variable name (in its PEP-0345, PEP-0508 or legacy
    # setuptools spelling) to the system requirement(s) it implies.
    lookup = {
        # TODO There is no way to associate a python version with its implementation
        # currently (ie CPython etc). When we have "package features", we may be
        # able to manage this; ignore for now
        "implementation_name": [py],                # PEP-0508
        "implementation_version": [py],             # PEP-0508
        "platform_python_implementation": [py],     # PEP-0508
        "platform.python_implementation": [py],     # PEP-0345
        "python_implementation": [py],              # setuptools legacy. Same as platform_python_implementation
        "sys.platform": [plat],                     # PEP-0345
        "sys_platform": [plat],                     # PEP-0508

        # note that this maps to python's os.name, which does not mean distro
        # (as 'os' does in rez). See https://docs.python.org/2/library/os.html#os.name
        "os.name": [plat],                          # PEP-0345
        "os_name": [plat],                          # PEP-0508

        "platform.machine": [arch],                 # PEP-0345
        "platform_machine": [arch],                 # PEP-0508

        # TODO hmm, we never variant on plat version, let's leave this for now...
        "platform.version": [plat],                 # PEP-0345
        "platform_version": [plat],                 # PEP-0508

        # somewhat ambiguous cases
        "platform_system": [plat],                  # PEP-0508
        "platform_release": [plat],                 # PEP-0508
        "python_version": [py],                     # PEP-0508
        "python_full_version": [py]                 # PEP-0508
    }

    # the packaging lib already delimits the marker with whitespace, so a
    # plain split is enough to recover the variable names used in it
    tokens = set(marker.split())
    found = {req
             for varname, reqs in lookup.items() if varname in tokens
             for req in reqs}
    return list(found)
def normalize_requirement(requirement):
"""Normalize a package requirement.
Requirements from distlib packages can be a mix of string- or dict- based
formats, as shown here:
* https://www.python.org/dev/peps/pep-0508/#environment-markers
* https://legacy.python.org/dev/peps/pep-0426/#environment-markers
There's another confusing case that this code deals with. Consider these two
requirements:
# means: reportlab is a requirement of this package when the 'pdf' extra is requested
Requires-Dist: reportlab; extra == 'pdf'
means: this package requires libexample, with its 'test' extras
Requires-Dist: libexample[test]
See https://packaging.python.org/specifications/core-metadata/#provides-extra-multiple-use
The packaging lib doesn't do a good job of expressing this - the first form
of extras use just gets embedded in the environment marker. This function
parses the extra from the marker, and stores it onto the resulting
`packaging.Requirement` object in a 'conditional_extras' attribute. It also
removes the extra from the marker (otherwise the marker cannot evaluate).
Even though you can specify `environment` in `packaging.Marker.evaluate`,
you can only supply a single 'extra' key in the env, so this can't be used
to correctly evaluate if multiple extras were requested.
Args:
requirement (str or dict): Requirement, for eg from
`distlib.database.InstalledDistribution.run_requires`.
Returns:
List of `packaging.requirements.Requirement`: Normalized requirements.
Note that a list is returned, because the PEP426 format can define
multiple requirements.
"""
def reconstruct(req, marker_str=None, conditional_extras=None):
new_req_str = req.name
if req.specifier:
new_req_str += " (%s)" % str(req.specifier)
if marker_str is None and req.marker:
marker_str = str(req.marker)
if marker_str:
new_req_str += " ; " + marker_str
new_req = packaging_Requirement(new_req_str)
setattr(new_req, "conditional_extras", conditional_extras)
return new_req
# PEP426 dict syntax
# So only metadata that are of version 2.0 will be in dict. The other versions
# (1.0, 1.1, 1.2, 2.1) will be strings.
if isinstance(requirement, dict):
result = []
requires = requirement["requires"]
extra = requirement.get("extra")
marker_str = requirement.get("environment")
# conditional extra, equivalent to: 'foo ; extra = "doc"'
if extra:
conditional_extras = set([extra])
else:
conditional_extras = None
for req_str in requires:
req = packaging_Requirement(req_str)
new_req = reconstruct(req, marker_str, conditional_extras)
result.append(new_req)
return result
# string-based syntax
req = packaging_Requirement(requirement)
# detect case: "mypkg ; extra == 'dev'"
# note: packaging lib already delimits with whitespace
marker_str = str(req.marker)
marker_parts = marker_str.split()
# already in PEP508, packaging lib- friendly format
if "extra" not in marker_parts:
setattr(req, "conditional_extras", None)
return [req]
# Parse conditional extras out of marker
conditional_extras = set()
marker_str = marker_str.replace(" and ", " \nand ")
marker_str = marker_str.replace(" or ", " \nor ")
lines = marker_str.split('\n')
lines = [x.strip() for x in lines]
new_marker_lines = []
for line in lines:
if "extra" in line.split():
extra = line.split()[-1]
extra = extra.replace('"', '')
extra = extra.replace("'", '')
conditional_extras.add(extra)
else:
new_marker_lines.append(line)
# reconstruct requirement in new | |
de Necessidades',
'List Notes': 'Lista de Notas',
'List Offices': 'Lista de Escritórios',
'List Organizations': 'Listar Organizações',
'List Patients': 'List Patients',
'List Peers': 'LISTA DE PARES',
'List Personal Effects': 'Lista de objetos pessoais',
'List Persons': 'LISTA DE PESSOAS',
'List Photos': 'Lista de Fotos',
'List Population Statistics': 'Lista das Estatisticas da População',
'List Positions': 'Lista de Posições',
'List Problems': 'Lista de Problemas',
'List Projections': 'Lista de Projeções',
'List Projects': 'Listar Projectos',
'List Rapid Assessments': 'Listar Avaliações Rápidas',
'List Received Items': 'Listar Elementos Recebidos',
'List Received Shipments': 'Listar Carga Recebida',
'List Records': 'Listar Registros',
'List Registrations': 'Listar Registrações',
'List Relatives': 'List Relatives',
'List Reports': 'Relatórios de Listas',
'List Request Items': 'Pedido de Itens de lista',
'List Requested Skills': 'List Requested Skills',
'List Requests': 'LISTA DE PEDIDOS',
'List Resources': 'Listar Recursos',
'List Rivers': 'Lista de Rios',
'List Roles': 'Listar Funções',
'List Rooms': 'Listar Salas',
'List Scenarios': 'Listar cenários',
'List Sections': 'lista de Seções',
'List Sectors': 'Lista de Sectores',
'List Sent Items': 'Os itens da lista Enviada',
'List Sent Shipments': 'Embarques lista Enviada',
'List Service Profiles': 'Lista de serviços Perfis',
'List Settings': 'Lista de configurações',
'List Shelter Services': 'Lista de serviços de abrigo',
'List Shelter Types': 'Lista de Tipos De Abrigo',
'List Shelters': 'Lista de Abrigos',
'List Skill Equivalences': 'LISTA DE HABILIDADE Equivalências',
'List Skill Provisions': 'Listar suprimento de habilidades',
'List Skill Types': 'Lista de Tipos De Habilidade',
'List Skills': 'LISTA DE HABILIDADES',
'List Solutions': 'Listar Soluções',
'List Staff': 'Listar Pessoal',
'List Staff Members': 'Listar funcionários',
'List Staff Types': 'Listar Tipos De Equipe',
'List Status': 'Listar Status',
'List Subscriptions': 'Lista de Assinaturas',
'List Subsectors': 'Listar Subsetores',
'List Support Requests': 'Listar Pedidos de Suporte',
'List Survey Answers': 'Listar Respostas de Pesquisa',
'List Survey Questions': 'Listar Perguntas da Pesquisa',
'List Survey Sections': 'Listar Seções da Pesquisa',
'List Survey Series': 'Listar Séries de Pesquisa',
'List Survey Templates': 'Listar Modelos de Pesquisa',
'List Tasks': 'Lista de Tarefas',
'List Teams': 'Lista de Equipes',
'List Themes': 'Lista de Temas',
'List Tickets': 'lista de Bilhetes',
'List Tracks': 'Rastreia lista',
'List Trainings': 'Listar Treinamentos',
'List Units': 'Lista de Unidades',
'List Users': 'Mostrar usuários',
'List Vehicle Details': 'List Vehicle Details',
'List Vehicles': 'List Vehicles',
'List Volunteers': 'Mostrar Voluntários',
'List Warehouses': 'Mostrar Depósitos',
'List all': 'Mostrar tudo',
'List available Scenarios': 'Listar Cenários Disponíveis',
'List of CSV files': 'List of CSV files',
'List of CSV files uploaded': 'List of CSV files uploaded',
'List of Items': 'Lista de Itens',
'List of Missing Persons': 'Lista de pessoas desaparecidas',
'List of Peers': 'Lista de pares',
'List of Reports': 'Lista de Relatórios',
'List of Requests': 'Lista de Pedidos',
'List of Spreadsheets': 'Lista de Folhas de Cálculo',
'List of Spreadsheets uploaded': 'Lista de Folhas de Cálculo transferidas',
'List of Volunteers': 'Lista de Voluntários',
'List of Volunteers for this skill set': 'Lista de Voluntários para este conjunto de competências',
'List of addresses': 'Lista de endereços',
'List unidentified': 'Lista não identificada',
'List/Add': 'Lista/incluir',
'Lists "who is doing what & where". Allows relief agencies to coordinate their activities': 'Lista "quem está fazendo o que & aonde". Permite a agências humanitárias coordenar suas atividades',
'Live Help': 'Ajuda ao vivo',
'Livelihood': 'Subsistência',
'Load Cleaned Data into Database': 'Carregue Informações Claras no Banco de Dados',
'Load Raw File into Grid': 'Carregamento de arquivo bruto na Grid',
'Loading': 'Carregando',
'Local Name': 'Nome local',
'Local Names': 'Nomes locais',
'Location': 'Localização',
'Location 1': 'Local 1',
'Location 2': 'Local 2',
'Location Details': 'Detalhes da Localização',
'Location Hierarchy Level 0 Name': 'Nivel Local de hierarquia 0 nome',
'Location Hierarchy Level 1 Name': 'Nivel local de hierarquia 1 nome',
'Location Hierarchy Level 2 Name': 'Nivel local de hierarquia 2 nome',
'Location Hierarchy Level 3 Name': 'Hierarquia local Nível 3 Nome',
'Location Hierarchy Level 4 Name': 'Hierarquia local Nível 4 Nome',
'Location Hierarchy Level 5 Name': 'Hierarquia local Nível 5 Nome',
'Location added': 'Local incluído',
'Location cannot be converted into a group.': 'Local não pode ser convertido em um grupo.',
'Location deleted': 'Localidade excluída',
'Location details': 'Detalhes do Local',
'Location group cannot be a parent.': 'Localização de grupo não pode ser um pai.',
'Location group cannot have a parent.': 'Localização de grupo não tem um pai.',
'Location groups can be used in the Regions menu.': 'Grupos local pode ser utilizado no menu Regiões.',
'Location groups may be used to filter what is shown on the map and in search results to only entities covered by locations in the group.': 'Grupos locais podem ser utilizados para filtrar o que é mostrado no mapa e nos resultados da procura apenas as entidades locais abrangidas no grupo.',
'Location updated': 'Local atualizado',
'Location:': 'Localização:',
'Location: ': 'Location: ',
'Locations': 'Localizações',
'Locations of this level need to have a parent of level': 'Locais de esse nível precisa ter um pai de nível',
'Lockdown': 'BLOQUEIO',
'Log': 'registro',
'Log Entry Details': 'detalhes da entrada de registro',
'Log entry added': 'Entrada de Log incluída',
'Log entry deleted': 'Entrada de Log Excluída',
'Log entry updated': 'Entrada de Log de atualização',
'Login': 'login',
'Logistics': 'Logística',
'Logistics Management System': 'Sistema de Gestão de Logística',
'Logo': 'Logotipo',
'Logo file %s missing!': 'Arquivo de logotipo %s ausente!',
'Logout': 'Deslogar',
'Long Text': 'Texto Longo',
'Longitude': 'Longitude',
'Longitude is West - East (sideways).': 'Longitude é Oeste - Leste (lateral).',
'Longitude is West-East (sideways).': 'Longitude é leste-oeste (direções).',
'Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': 'Longitude é zero no primeiro meridiano (Greenwich Mean Time) e é positivo para o leste, em toda a Europa e Ásia. Longitude é negativo para o Ocidente, no outro lado do Atlântico e nas Américas.',
'Longitude is zero on the prime meridian (through Greenwich, United Kingdom) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': 'Longitude é zero no primeiro meridiano (por meio de Greenwich, Reino Unido) e é positivo para o leste, em toda a Europa e Ásia. Longitude é negativo para o Ocidente, no outro lado do Atlântico e nas Américas.',
'Longitude of Map Center': 'Longitude do Centro do Mapa',
'Longitude of far eastern end of the region of interest.': 'Longitude longe do Oeste no final da região de interesse.',
'Longitude of far western end of the region of interest.': 'Longitude de oeste longínquo no final da Região de interesse.',
'Longitude should be between': 'Longitude deve estar entre',
'Looting': 'Saques',
'Lost': 'Perdido',
'Lost Password': '<PASSWORD>',
'Low': 'Baixo',
'Magnetic Storm': 'Tempestade magnética',
'Major Damage': 'Grandes danos',
'Major expenses': 'Despesas principais',
'Major outward damage': 'Danos exteriores principais',
'Make Commitment': 'Ter obrigação',
'Make New Commitment': 'Fazer Novo Compromisso',
'Make Request': 'Fazer Pedido',
'Make preparations per the <instruction>': 'Fazer Preparações por',
'Male': 'masculino',
'Manage': 'Gerenciar',
'Manage Events': 'Manage Events',
'Manage Relief Item Catalogue': 'Gerenciar Catálogo de Item de Alívio',
'Manage Users & Roles': 'GERENCIAR Usuários & Funções',
'Manage Vehicles': 'Manage Vehicles',
'Manage Warehouses/Sites': 'GERENCIAR Armazéns/Sites',
'Manage Your Facilities': 'Gerenciar suas instalações',
'Manage requests for supplies, assets, staff or other resources. Matches against Inventories where supplies are requested.': 'Gerenciar pedidos de suprimentos, patrimônio, pessoal ou outros recursos. Corresponde aos estoques onde os suprimentos são solicitados.',
'Manage requests of hospitals for assistance.': 'GERENCIAR Pedidos de hospitais para obter assistência.',
'Manage volunteers by capturing their skills, availability and allocation': 'GERENCIAR voluntários por captura sua capacidade, Alocação e disponibilidade',
'Manager': 'Gerente',
'Managing Office': 'Gerenciando Office',
'Mandatory. In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).': 'Obrigatório. Em GeoServer, este é o nome Da Camada. No getCapabilities WFS, este é o nome da parte FeatureType após os dois pontos (:).',
'Mandatory. The URL to access the service.': 'Obrigatório. A URL para acessar o serviço.',
'Manual': 'Manual',
'Manual Synchronization': 'Sincronização Manual',
'Many': 'Muitos',
'Map': 'Mapa',
'Map Center Latitude': 'Latitude do Centro do Mapa',
'Map Center Longitude': 'Longitude do centro do mapa',
'Map Configuration': 'Configuração de Mapa',
'Map Configuration Details': 'Detalhes de configuração de mapa',
'Map Configuration added': 'Configuração de mapa incluído',
'Map Configuration deleted': 'Configuração de mapa excluído',
'Map Configuration removed': 'Configuração de mapa removido',
'Map Configuration updated': 'Configuração de mapa atualizada',
'Map Configurations': 'Configuracões de mapa',
'Map Height': 'Altura do Mapa',
'Map Service Catalog': 'Catálogo do serviço de mapas',
'Map Settings': 'Configurações do Mapa',
'Map Viewing Client': 'Cliente de visualização do mapa',
'Map Width': 'Largura do mapa',
'Map Zoom': 'Zoom do mapa',
'Map of Hospitals': 'Mapa de Hospitais',
'MapMaker Hybrid Layer': 'MapMaker Hybrid Layer',
'MapMaker Layer': 'MapMaker Layer',
'Maps': 'Maps',
'Marine Security': 'Segurança da marina',
'Marital Status': 'Estado Civil',
'Marker': 'Marcador',
'Marker Details': 'Detalhes do Marcador',
'Marker added': 'Marcador incluído',
'Marker deleted': 'Marcador removido',
'Marker updated': 'Marcador atualizado',
'Markers': 'Marcadores',
'Master': 'Master',
'Master Message Log': 'Mensagem de Log principal',
'Master Message Log to process incoming reports & requests': 'Log de Mensagem Principal para processar relatórios de entrada e pedidos',
'Match Percentage': 'Porcentagem de correspondência',
'Match Requests': 'Corresponder Pedidos',
'Match percentage indicates the % match between these two records': 'Porcentagem idêntica indica a | |
# Source repository: voloshanenko/smsgateway
#!/usr/bin/python
# Copyright 2015 <NAME> and <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.insert(0, "..")
from os import path
from datetime import datetime
from datetime import timedelta
import pytz
import sqlite3
import uuid
from common import config
from common import error
from common import smsgwglobals
import threading
smsdblock = threading.Lock()
class Database(object):
"""Base class for Database handling - SQLite3
Attributes:
configfile -- path to configuration file
to read [db] section from.
[db]
dbname = n0r1sk_smsgateway
loglevel = CRITICAL | ERROR | WARNING | INFO | DEBUG
logdirectory = absolut path to log directory
fallback is local \log directory
logfile = database.log
"""
__smsconfig = None
__path = path.abspath(path.join(path.dirname(__file__),
path.pardir))
__con = None
__cur = None
    # Constructor
    def __init__(self, configfile=(__path + "/conf/smsgw.conf")):
        """Open the configured SQLite database and ensure the schema exists.

        Reads the [db] section of *configfile* for the database name,
        connects, and creates the users/sms/stats tables when missing.
        """
        # read SmsConfigs
        self.__smsconfig = config.SmsConfig(configfile)
        dbname = self.__smsconfig.getvalue('dbname', 'n0r1sk_smsgateway', 'db')
        # the database file lives under <project>/common/sqlite/<dbname>.sqlite
        dbname = (self.__path + "/common/sqlite/" + dbname + ".sqlite")
        smsgwglobals.dblogger.info("SQLite: Database file used: %s", dbname)
        # connect to database
        smsgwglobals.dblogger.debug("SQLite: Connecting to database...")
        self.db_connect(dbname)
        # create tables and indexes if they do not exist yet
        self.create_table_users()
        self.create_table_sms()
        self.create_table_stats()
    # Destructor (called with "del <Databaseobj>")
    def __del__(self):
        """Close the SQLite connection when the object is destroyed."""
        # shutting down connections to SQLite
        self.__con.close()
# Connect to Database
def db_connect(self, dbname="db.sqlite"):
try:
self.__con = sqlite3.connect(dbname, check_same_thread=False)
# change row-factory to get
self.__con.row_factory = sqlite3.Row
self.__cur = self.__con.cursor()
except Exception as e:
smsgwglobals.dblogger.critical("SQLite: Unable to connect! " +
"[EXCEPTION]:%s", e)
raise error.DatabaseError('Connection problem!', e)
# Create table users
def create_table_users(self):
smsgwglobals.dblogger.info("SQLite: Create table 'users'")
query = ("CREATE TABLE IF NOT EXISTS users (" +
"user TEXT PRIMARY KEY UNIQUE, " +
"password TEXT, " +
"salt TEXT, " +
"changed TIMESTAMP)")
try:
smsdblock.acquire()
self.__cur.execute(query)
finally:
smsdblock.release()
# Create table and index for table sms
def create_table_sms(self):
smsgwglobals.dblogger.info("SQLite: Create table 'sms'")
# if smsid is not insertet it is automatically set to a free number
query = ("CREATE TABLE IF NOT EXISTS sms (" +
"smsid TEXT PRIMARY KEY, " +
"modemid TEXT, " +
"imsi TEXT, " +
"targetnr TEXT, " +
"content TEXT, " +
"priority INTEGER, " +
"appid TEXT, " +
"sourceip TEXT, " +
"xforwardedfor TEXT, " +
"smsintime TIMESTAMP, " +
"status INTEGER, " +
"statustime TIMESTAMP)"
)
try:
smsdblock.acquire()
self.__cur.execute(query)
finally:
smsdblock.release()
# index sms_status_modemid
query = ("CREATE INDEX IF NOT EXISTS sms_status_modemid " +
"ON sms (status, modemid)"
)
try:
smsdblock.acquire()
self.__cur.execute(query)
finally:
smsdblock.release()
# Create table stats
def create_table_stats(self):
smsgwglobals.dblogger.info("SQLite: Create table 'stats'")
query = ("CREATE TABLE IF NOT EXISTS stats (" +
"type TEXT PRIMARY KEY UNIQUE, " +
"lasttimestamp TIMESTAMP)")
try:
smsdblock.acquire()
self.__cur.execute(query)
finally:
smsdblock.release()
# Insert or replaces a stats timestamp data
def write_statstimestamp(self, timestamp, intype='SUC_SMS_STATS'):
"""Insert or replace a stats entry timestamp
"""
query = ("INSERT OR REPLACE INTO stats " +
"(type, lasttimestamp) " +
"VALUES (?, ?) ")
# set changed timestamp to utcnow if not set
try:
smsdblock.acquire()
smsgwglobals.dblogger.debug("SQLite: Write into stats" +
" :intype: " + str(intype) +
" :lasttimestamp: " + str(timestamp)
)
self.__cur.execute(query, (intype, timestamp))
self.__con.commit()
smsgwglobals.dblogger.debug("SQLite: Insert done!")
except Exception as e:
smsgwglobals.dblogger.critical("SQLite: " + query +
" failed! [EXCEPTION]:%s", e)
raise error.DatabaseError("Unable to INSERT stats! ", e)
finally:
smsdblock.release()
# Insert or replaces a users data
def write_users(self, user, password, salt, changed=None):
"""Insert or replace a users entry
Attributes: user ... text-the primary key - unique
password ... text-password
salt ... text-salt
changed ... datetime.utcnow-when changed
"""
query = ("INSERT OR REPLACE INTO users " +
"(user, password, salt, changed) " +
"VALUES (?, ?, ?, ?) ")
# set changed timestamp to utcnow if not set
if changed is None:
changed = datetime.utcnow()
try:
smsdblock.acquire()
smsgwglobals.dblogger.debug("SQLite: Write into users" +
" :user: " + user +
" :password-len: " +
str(len(password)) +
" :salt-len: " + str(len(salt)) +
" :changed: " + str(changed)
)
self.__cur.execute(query, (user, password, salt, changed))
self.__con.commit()
smsgwglobals.dblogger.debug("SQLite: Insert done!")
except Exception as e:
smsgwglobals.dblogger.critical("SQLite: " + query +
" failed! [EXCEPTION]:%s", e)
raise error.DatabaseError("Unable to INSERT user! ", e)
finally:
smsdblock.release()
# Insert sms
def insert_sms(self, modemid='00431234', imsi='1234567890', targetnr='+431234',
content='♠♣♥♦Test', priority=1, appid='demo',
sourceip='127.0.0.1', xforwardedfor='172.16.58.3',
smsintime=None, status=0, statustime=None, smsid=None):
"""Insert a fresh SMS out of WIS
Attributes: modemid ... string-countryexitcode+number (0043664123..)
imsi ... string-no SIM card IMSI
targetnr ... string-no country exit code (+436761234..)
content ... string-message
prioirty ... int-0 low, 1 middle, 2 high
appid ... sting (uuid) for consumer
sourceip ... string with ip (172.16.58.3)
xforwaredfor ... stirng with client ip
smsintime ... datetime.utcnow()
status ... int-0 new, ???
statustime ... datetime.utcnow()
"""
# check if smsid is empty string or None
if smsid is None or not smsid:
smsid = str(uuid.uuid1())
now = datetime.utcnow()
if smsintime is None:
smsintime = now
if statustime is None:
statustime = now
query = ("INSERT INTO sms " +
"(smsid, modemid, imsi, targetnr, content, priority, " +
"appid, sourceip, xforwardedfor, smsintime, " +
"status, statustime) " +
"VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)" +
"ON CONFLICT(smsid) DO UPDATE SET " +
"modemid=excluded.modemid, imsi=excluded.imsi, statustime=excluded.statustime, status=excluded.status")
try:
smsdblock.acquire()
smsgwglobals.dblogger.debug("SQLite: Insert SMS" +
" :smsid: " + smsid +
" :imsi: " + imsi +
" :modemid: " + modemid +
" :targetnr: " + targetnr +
" :content: " + content +
" :priority: " + str(priority) +
" :appid: " + appid +
" :sourceip: " + sourceip +
" :xforwardedfor: " + xforwardedfor +
" :smsintime: " + str(smsintime) +
" :status: " + str(status) +
" :statustime: " + str(statustime)
)
self.__con.execute(query, (smsid, modemid, imsi, targetnr,
content, priority,
appid, sourceip, xforwardedfor,
smsintime, status, statustime))
self.__con.commit()
smsgwglobals.dblogger.debug("SQLite: Insert done!")
except Exception as e:
smsgwglobals.dblogger.critical("SQLite: " + query +
" failed! [EXCEPTION]:%s", e)
raise error.DatabaseError("Unable to INSERT sms! ", e)
finally:
smsdblock.release()
# update sms (input is a list)
def update_sms(self, smslist=[]):
"""Updates Sms entries out of a list to reflect the new values
all columns of sms have to be set!
Attributes: smslsit ... list of sms in dictionary structure
(see read_sms)
"""
smsgwglobals.dblogger.debug("SQLite: Will update "
+ str(len(smslist)) + "sms.")
# for each sms in the list
for sms in smslist:
smsgwglobals.dblogger.debug("SQLite: Update SMS: " + str(sms))
query = ("UPDATE sms SET " +
"modemid = ?, " +
"imsi = ?, " +
"targetnr = ?, " +
"content = ?, " +
"priority = ?, " +
"appid = ?, " +
"sourceip = ?, " +
"xforwardedfor = ?, " +
"smsintime = ?, " +
"status = ?, " +
"statustime = ? " +
"WHERE smsid = ?"
)
try:
smsdblock.acquire()
self.__con.execute(query, (sms['modemid'], sms['imsi'], sms['targetnr'],
sms['content'], sms['priority'],
sms['appid'], sms['sourceip'],
sms['xforwardedfor'],
sms['smsintime'], sms['status'],
sms['statustime'], sms['smsid']))
self.__con.commit()
smsgwglobals.dblogger.debug("SQLite: Update for smsid: " +
str(sms['smsid']) + " done!")
except Exception as e:
smsgwglobals.dblogger.critical("SQLite: " + query +
" failed! [EXCEPTION]:%s", e)
raise error.DatabaseError("Unable to UPDATE sms! ", e)
finally:
smsdblock.release()
# Merge userlist with userlist out of db
def merge_users(self, userlist=[]):
""" Merges user entries from database with those given in userlist
older values are replaced and new ones are inserted on both sites
Attributes: userlist ... list of sms in dictionary structure
(see read_users)
Return: new userlist ... list of merged users out of DB
"""
# read db users
dbuserlist = self.read_users()
smsgwglobals.dblogger.debug("SQLite: Will merge " + str(len(userlist)) +
" given user with " + str(len(dbuserlist)) +
" user from db.")
# iterate userlist and update form db if entries there newer
mergeduserlist = []
for user in userlist:
smsgwglobals.dblogger.debug("SQLite: Now on user: " + user['user'])
for | |
<filename>sdk/python/pulumi_google_native/compute/alpha/reservation.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['ReservationInitArgs', 'Reservation']
@pulumi.input_type
class ReservationInitArgs:
    # NOTE(review): generated by the Pulumi SDK Generator (see file header) —
    # keep manual edits limited to comments.
    def __init__(__self__, *,
                 description: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 project: Optional[pulumi.Input[str]] = None,
                 request_id: Optional[pulumi.Input[str]] = None,
                 share_settings: Optional[pulumi.Input['ShareSettingsArgs']] = None,
                 specific_reservation: Optional[pulumi.Input['AllocationSpecificSKUReservationArgs']] = None,
                 specific_reservation_required: Optional[pulumi.Input[bool]] = None,
                 zone: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a Reservation resource.
        :param pulumi.Input[str] description: An optional description of this resource. Provide this property when you create the resource.
        :param pulumi.Input[str] name: The name of the resource, provided by the client when initially creating the resource. The resource name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
        :param pulumi.Input['ShareSettingsArgs'] share_settings: Share-settings for shared-reservation
        :param pulumi.Input['AllocationSpecificSKUReservationArgs'] specific_reservation: Reservation for instances with specific machine shapes.
        :param pulumi.Input[bool] specific_reservation_required: Indicates whether the reservation can be consumed by VMs with affinity for "any" reservation. If the field is set, then only VMs that target the reservation by name can consume from this reservation.
        :param pulumi.Input[str] zone: Zone in which the reservation resides. A zone must be provided if the reservation is created within a commitment.
        """
        # register only the arguments that were explicitly provided
        if description is not None:
            pulumi.set(__self__, "description", description)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if project is not None:
            pulumi.set(__self__, "project", project)
        if request_id is not None:
            pulumi.set(__self__, "request_id", request_id)
        if share_settings is not None:
            pulumi.set(__self__, "share_settings", share_settings)
        if specific_reservation is not None:
            pulumi.set(__self__, "specific_reservation", specific_reservation)
        if specific_reservation_required is not None:
            pulumi.set(__self__, "specific_reservation_required", specific_reservation_required)
        if zone is not None:
            pulumi.set(__self__, "zone", zone)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        An optional description of this resource. Provide this property when you create the resource.
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the resource, provided by the client when initially creating the resource. The resource name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def project(self) -> Optional[pulumi.Input[str]]:
        # presumably the GCP project to create the reservation in — the
        # upstream discovery document provides no description; confirm.
        return pulumi.get(self, "project")
    @project.setter
    def project(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "project", value)
    @property
    @pulumi.getter(name="requestId")
    def request_id(self) -> Optional[pulumi.Input[str]]:
        # presumably an idempotency token for the insert request — the
        # upstream discovery document provides no description; confirm.
        return pulumi.get(self, "request_id")
    @request_id.setter
    def request_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "request_id", value)
    @property
    @pulumi.getter(name="shareSettings")
    def share_settings(self) -> Optional[pulumi.Input['ShareSettingsArgs']]:
        """
        Share-settings for shared-reservation
        """
        return pulumi.get(self, "share_settings")
    @share_settings.setter
    def share_settings(self, value: Optional[pulumi.Input['ShareSettingsArgs']]):
        pulumi.set(self, "share_settings", value)
    @property
    @pulumi.getter(name="specificReservation")
    def specific_reservation(self) -> Optional[pulumi.Input['AllocationSpecificSKUReservationArgs']]:
        """
        Reservation for instances with specific machine shapes.
        """
        return pulumi.get(self, "specific_reservation")
    @specific_reservation.setter
    def specific_reservation(self, value: Optional[pulumi.Input['AllocationSpecificSKUReservationArgs']]):
        pulumi.set(self, "specific_reservation", value)
    @property
    @pulumi.getter(name="specificReservationRequired")
    def specific_reservation_required(self) -> Optional[pulumi.Input[bool]]:
        """
        Indicates whether the reservation can be consumed by VMs with affinity for "any" reservation. If the field is set, then only VMs that target the reservation by name can consume from this reservation.
        """
        return pulumi.get(self, "specific_reservation_required")
    @specific_reservation_required.setter
    def specific_reservation_required(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "specific_reservation_required", value)
    @property
    @pulumi.getter
    def zone(self) -> Optional[pulumi.Input[str]]:
        """
        Zone in which the reservation resides. A zone must be provided if the reservation is created within a commitment.
        """
        return pulumi.get(self, "zone")
    @zone.setter
    def zone(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "zone", value)
class Reservation(pulumi.CustomResource):
    # typing-only overload: construct from individual keyword arguments
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 project: Optional[pulumi.Input[str]] = None,
                 request_id: Optional[pulumi.Input[str]] = None,
                 share_settings: Optional[pulumi.Input[pulumi.InputType['ShareSettingsArgs']]] = None,
                 specific_reservation: Optional[pulumi.Input[pulumi.InputType['AllocationSpecificSKUReservationArgs']]] = None,
                 specific_reservation_required: Optional[pulumi.Input[bool]] = None,
                 zone: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Creates a new reservation. For more information, read Reserving zonal resources.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] description: An optional description of this resource. Provide this property when you create the resource.
        :param pulumi.Input[str] name: The name of the resource, provided by the client when initially creating the resource. The resource name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
        :param pulumi.Input[pulumi.InputType['ShareSettingsArgs']] share_settings: Share-settings for shared-reservation
        :param pulumi.Input[pulumi.InputType['AllocationSpecificSKUReservationArgs']] specific_reservation: Reservation for instances with specific machine shapes.
        :param pulumi.Input[bool] specific_reservation_required: Indicates whether the reservation can be consumed by VMs with affinity for "any" reservation. If the field is set, then only VMs that target the reservation by name can consume from this reservation.
        :param pulumi.Input[str] zone: Zone in which the reservation resides. A zone must be provided if the reservation is created within a commitment.
        """
        ...
    # typing-only overload: construct from a ReservationInitArgs object
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: Optional[ReservationInitArgs] = None,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Creates a new reservation. For more information, read Reserving zonal resources.
        :param str resource_name: The name of the resource.
        :param ReservationInitArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ReservationInitArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       description: Optional[pulumi.Input[str]] = None,
                       name: Optional[pulumi.Input[str]] = None,
                       project: Optional[pulumi.Input[str]] = None,
                       request_id: Optional[pulumi.Input[str]] = None,
                       share_settings: Optional[pulumi.Input[pulumi.InputType['ShareSettingsArgs']]] = None,
                       specific_reservation: Optional[pulumi.Input[pulumi.InputType['AllocationSpecificSKUReservationArgs']]] = None,
                       specific_reservation_required: Optional[pulumi.Input[bool]] = None,
                       zone: Optional[pulumi.Input[str]] = None,
                       __props__=None):
        # Shared constructor body: validates/normalizes the resource options,
        # builds the property bag and registers the resource with the engine.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # creating a new resource: collect the input properties
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = ReservationInitArgs.__new__(ReservationInitArgs)
            __props__.__dict__["description"] = description
            __props__.__dict__["name"] = name
            __props__.__dict__["project"] = project
            __props__.__dict__["request_id"] = request_id
            __props__.__dict__["share_settings"] = share_settings
            __props__.__dict__["specific_reservation"] = specific_reservation
            __props__.__dict__["specific_reservation_required"] = specific_reservation_required
            __props__.__dict__["zone"] = zone
            # output-only properties start as None and are filled in later
            __props__.__dict__["commitment"] = None
            __props__.__dict__["creation_timestamp"] = None
            __props__.__dict__["kind"] = None
            __props__.__dict__["satisfies_pzs"] = None
            __props__.__dict__["self_link"] = None
            __props__.__dict__["self_link_with_id"] = None
            __props__.__dict__["status"] = None
        super(Reservation, __self__).__init__(
            'google-native:compute/alpha:Reservation',
            resource_name,
            __props__,
            opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Reservation':
"""
Get an existing Reservation resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ReservationInitArgs.__new__(ReservationInitArgs)
__props__.__dict__["commitment"] = None
__props__.__dict__["creation_timestamp"] = None
__props__.__dict__["description"] = None
__props__.__dict__["kind"] = None
__props__.__dict__["name"] = None
__props__.__dict__["satisfies_pzs"] = None
__props__.__dict__["self_link"] = None
__props__.__dict__["self_link_with_id"] = None
__props__.__dict__["share_settings"] = None
__props__.__dict__["specific_reservation"] = None
__props__.__dict__["specific_reservation_required"] = None
__props__.__dict__["status"] = None
__props__.__dict__["zone"] = None
return Reservation(resource_name, opts=opts, __props__=__props__)
    # Output-only properties below (initialized to None in _internal_init).
    @property
    @pulumi.getter
    def commitment(self) -> pulumi.Output[str]:
        """
        Full or partial URL to a parent commitment. This field displays for reservations that are tied to a commitment.
        """
        return pulumi.get(self, "commitment")
    @property
    @pulumi.getter(name="creationTimestamp")
    def creation_timestamp(self) -> pulumi.Output[str]:
        """
        Creation timestamp in RFC3339 text format.
        """
        return pulumi.get(self, "creation_timestamp")
    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[str]:
        """
        An optional description of this resource. Provide this property when you create the resource.
        """
        return pulumi.get(self, "description")
@property
@pulumi.getter
def | |
        # presumably renders the fit-selection widget; the chosen row index
        # drives the detail view below — confirm against xp's API
        xp.to_streamlit('hip', 'selected_uids').display()
        fit = variable[int(selected[0])]
        st.subheader('Displaying results of fit #%i' % (int(selected[0]) + 1))
        # --------------------------------------------------------------------------------------------------------------
        # ------------------------------------------------- DISPLAY FIT -----------------------------------------------
        # --------------------------------------------------------------------------------------------------------------
        col1, col2 = st.columns(2)
        # -------------------------------------------- FITTING PLOT & EXPORT -------------------------------------------
        col1.markdown("""#### TRPL fitting""")
        # Plot the measured decays together with the fitted curves
        col1.plotly_chart(plot.plot_fit(fit['xs_data'], fit['ys_data'], fit['fit_ydata'], fit['N0s_labels']), use_container_width=True)
        # Export the fitted curves as CSV
        # NOTE(review): the column count uses the module-level ys_data while the
        # plotted data come from fit['ys_data'] — presumably the same length; confirm.
        header = np.concatenate([['Time (ns)', 'Intensity %i' % i] for i in range(1, len(ys_data) + 1)])
        export_data = utils.matrix_to_string([val for pair in zip(fit['xs_data'], fit['fit_ydata']) for val in pair], header)
        col1.download_button('Download data', export_data, 'pears_fit_data.csv')
        # ---------------------------------------- OPTIMISED PARAMETERS DISPLAY ----------------------------------------
        col1.markdown("""#### Parameters""")
        for label in fit['labels']:
            col1.markdown(label, unsafe_allow_html=True)
        # ----------------------------------------------- CONTRIBUTIONS ------------------------------------------------
        st.markdown("""#### Contributions""")
        contributions = pd.DataFrame(fit['contributions'], index=fit['N0s_labels']).transpose()
        st.markdown(contributions.to_html(escape=False) + '<br>', unsafe_allow_html=True)
        # Analysis
        for s in model.get_recommendations(fit['contributions']):
            st.warning(s)
        # -------------------------------------------- CARRIER ACCUMULATION -------------------------------------------
        # Only shown when the excitation repetition period was provided.
        if period:
            st.markdown("""#### Carrier accumulation""")
            nca = model.get_carrier_accumulation(fit['popts'], fit['N0s'], period)
            nca_df = pd.DataFrame(nca, index=['Carrier accumulation (%)'])
            st.markdown(nca_df.to_html(escape=False) + '<br>', unsafe_allow_html=True)
            # Analysis: warn above 5 % maximum accumulation
            max_nca = np.max(list(nca.values()))
            if max_nca > 5.:
                st.warning('This fit predicts significant carrier accumulation leading to a maximum %f %% difference '
                           'between the single pulse and multiple pulse TRPL decays. You might need to increase your '
                           'excitation repetition period to prevent potential carrier accumulation.' % max_nca)
            else:
                st.success('This fit does not predict significant carrier accumulation.')
        # -------------------------------------------- CONCENTRATIONS PLOT ---------------------------------------------
        col2.markdown("""#### Carrier concentrations""")
        concentrations = model.get_carrier_concentrations(fit['xs_data'], fit['popts'], period)
        conc_fig = plot.plot_carrier_concentrations(concentrations[0], concentrations[2], fit['N0s'], fit['N0s_labels'],
                                                    concentrations[1], model)
        col2.plotly_chart(conc_fig, use_container_width=True)
# ----------------------------------------------------------------------------------------------------------------------
# ---------------------------------------------------- DATA DISPLAY ----------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------
elif xs_data[0] is not None:
with results_container.container():
st.markdown("""#### Input data""")
if N0s is not None:
labels = utils.get_power_labels(N0s)
else:
labels = ['%i' % (i + 1) for i in range(len(ys_data))]
st.plotly_chart(plot.plot_fit(xs_data, ys_data, labels=labels), use_container_width=True)
# ----------------------------------------------------------------------------------------------------------------------
# ------------------------------------------------- GENERAL INFORMATION ------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------
# --------------------------------------------------- APP DESCRIPTION --------------------------------------------------
# "About" box, expanded by default until data have been uploaded.
with st.expander('About', xs_data[0] is None):
    st.info("""*Pears* is a web app to easily fit time-resolved photoluminescence (TRPL) data of perovskite materials.
    Two models can be used, which are extensively discussed [here](https://doi.org/10.1039/D0CP04950F).
    - The Bimolecular-Trapping model assumes no doping and that the trap states remain mostly empty over time
    - The Bimolecular-Trapping-Detrapping model considers bimolecular recombination, trapping and detrapping with the presence of doping.\n
    Two modes are available.
    - The "%s" mode can be used to fit experimental data given a set of guess parameters.
    - The "%s" mode runs the fitting optimisation for a range of guess parameters.
    If all the optimisations do not converge toward the same values, then the fitting is inaccurate due to the possibility of multiple solutions\n
    App created and maintained by [<NAME>](mailto:<EMAIL>) ([Twitter](https://twitter.com/emmanuel_pean)).
    Version 0.3.1 (last updated: 9th May 2022).
    Source code: https://github.com/Emmanuelpean/pears""" % (resources.fitting_mode, resources.analysis_mode))
# -------------------------------------------------- MODEL DESCRIPTION -------------------------------------------------
# Static documentation of the physical models, the fitting procedure and the
# derived analyses (carrier accumulation, contributions, grid fitting).
with st.expander('Model & computational details'):
    st.markdown("""The following information can be found with more details [here](https://doi.org/10.1039/D0CP04950F)
    (Note that the $\Delta$ notation for the photoexcited carriers was dropped here for
    simplicity purposes *e.g.* $\Delta n_e$ in the paper is $n_e$ here).""")
    st.markdown("""#### Models""")
    st.markdown("""Two charge carrier recombination models can be used whose rate equations for the different carrier
    concentrations are given below.""")
    col1, col2 = st.columns(2)
    with col1:
        st.markdown("<h3 style='text-align: center; '>Bimolecular-trapping model</h3>", unsafe_allow_html=True)
        st.markdown("""%s""" % utils.render_image(resources.btmodel_filename, 65), unsafe_allow_html=True)
        st.latex(r'n_e(t)=n_h(t)=n(t)')
        st.markdown('#')
        st.markdown('#####')
        st.latex(r'\frac{dn}{dt}=-k_Tn-k_Bn^2-k_An^3,\ \ \ n^p(t=0)=n^{p-1}(T)+N_0')
        st.markdown('#')
        st.markdown('#####')
        st.latex(r'I_{TRPL} \propto n^2')
        st.markdown("""where:
        * $n$ is the photoexcited carrier concentration (in $cm^{-3}$)
        * $k_B$ is the bimolecular recombination rate constant (in $cm^3/ns$)
        * $k_T$ is the trapping rate constant (in $ns^{-1}$)""")
    with col2:
        st.markdown("<h3 style='text-align: center; '>Bimolecular-trapping-detrapping model</h3>",
                    unsafe_allow_html=True)
        st.markdown("""%s""" % utils.render_image(resources.btdmodel_filename, 65), unsafe_allow_html=True)
        st.latex(r'\frac{dn_e}{dt}=-k_B n_e (n_h+p_0 )-k_T n_e [N_T-n_t ],\ \ \ n_e^p(t=0)=n_e^{p-1}(T)+N_0')
        st.latex(r'\frac{dn_t}{dt}=k_T n_e [N_T-n_t]-k_D n_t (n_h+p_0 ),\ \ \ n_t^p(t=0)=n_t^{p-1}(T)')
        st.latex(r'\frac{dn_h}{dt}=-k_B n_e (n_h+p_0 )-k_D n_t (n_h+p_0 ),\ \ \ n_h^p(t=0)=n_h^{p-1}(T)+N_0')
        st.latex(r'I_{TRPL} \propto n_e(n_h+p_0)')
        st.markdown("""where:
        * $n_e$ is the photoexcited electron concentration (in $cm^{-3}$)
        * $n_h$ is the photoexcited hole concentration (in $cm^{-3}$)
        * $n_t$ is the trapped electron concentration (in $cm^{-3}$)
        * $k_B$ is the bimolecular recombination rate constant (in $cm^3/ns$)
        * $k_T$ is the trapping rate constant (in $cm^3/ns$)
        * $k_D$ is the detrapping rate constant (in $cm^3/ns$)
        * $N_T$ is the trap state concentration (in $cm^{-3}$)
        * $p_0$ is the dark hole concentration (in $cm^{-3}$)""")
    st.markdown("""####""")
    st.markdown("""For both models, the photoexcited charge carrier concentration $N_0$ is the concentration of carriers
    excited by a single excitation pulse. The initial condition of a carrier concentration after excitation pulse $p$ is
    given by the sum of any remaining carriers $n_X^{p-1}(T)$ ($T$ is the excitation repetition period) just before
    excitation plus the concentration of carrier generated by the pulse $N_0$ (except for the trapped electrons).""")
    st.markdown("""#### Fitting""")
    st.markdown(r"""Fitting is carried using the least square optimisation. For a dataset containing $M$ curves,
    each containing $N_i$ data points, the residue $SS_{res}$ is:""")
    st.latex(r"""SS_{res}=\sum_i^M\sum_j^{N_i}\left(y_{i,j}-F(t_{i,j},A_{i})\right)^2""")
    st.markdown("""where $y_{i,j}$ is the intensity associated with time $t_{i,j}$ of point $j$ of curve $i$.
    $A_i$ are the model parameters associated with curve $i$ and $F$ is the fitting model given by:""")
    st.latex(r'F(t,I_0, y_0, k_B,...)=I_0 \frac{I_{TRPL}(t,k_B,...)}{I_{TRPL}(0, k_B,...)} + y_0')
    st.markdown(""" where $I_0$ is an intensity factor and $y_0$ is an intensity offset. Contrary to the other parameters of
    the models (e.g. $k_B$), $I_0$ and $y_0$ are not kept the same between the different TRPL curves *i.e.* the fitting
    models for curves $A$, $B$,... are:""")
    st.latex(r'F_A(t,I_0^A, y_0^A, k_B,...)=I_0^A \frac{I_{TRPL}(t,k_B,...)}{I_{TRPL}(0, k_B,...)} + y_0^A')
    st.latex(r'F_B(t,I_0^B, y_0^B, k_B,...)=I_0^B \frac{I_{TRPL}(t,k_B,...)}{I_{TRPL}(0, k_B,...)} + y_0^B')
    st.latex('...')
    st.markdown("""By default, $I_0$ and $y_0$ are respectively fixed at 1 and 0 (assuming no background noise and
    normalised intensity. The quality of the fit is estimated from the coefficient of determination $R^2$:""")
    st.latex(r'R^2=1-\frac{SS_{res}}{SS_{total}}')
    st.markdown(r"""where $SS_{total}$ is defined as the sum of the squared difference between each point and the
    average of all curves $\bar{y}$:""")
    st.latex(r"""SS_{total}=\sum_i^M\sum_j^{N_i}\left(y_{i,j}-\bar{y}\right)^2""")
    st.markdown("""For fitting, it is assumed that there is no carrier accumulation between excitation pulses due to the
    presence of non-recombined carriers from previous excitation pulses. This requires the TRPL decays to be measured
    with long enough excitation repetition periods such that all carriers can recombine.""")
    st.markdown("""#### Carrier accumulation""")
    st.markdown("""It is possible to calculate the expected effect of carrier accumulation on the TRPL from the
    parameters retrieved from the fits if the repetition period is provided. The carrier accumulation ($CA$) is
    calculated as the maximum difference between the simulated TRPL after the first ($p=1$) and stabilised ($p=s$) pulses:""")
    st.latex(r'CA=\max\left(\frac{I_{TRPL}^{p=1}(t)}{I_{TRPL}^{p=1}(0)}-\frac{I_{TRPL}^{p=s}(t)}{I_{TRPL}^{p=s}(0)}\right)')
    st.markdown("""The stabilised pulse is defined as when the electron and hole concentrations vary by less than
    10<sup>-3</sup> % of the photoexcited concentration between two consecutive pulses:""", unsafe_allow_html=True)
    st.latex(r'|n_e^p(t)-n_e^{p+1}(t)|<10^{-5} N_0')
    st.latex(r'|n_h^p(t)-n_h^{p+1}(t)|<10^{-5} N_0')
    st.markdown("""#### Contributions""")
    st.markdown("""The contribution of each process to the TRPL intensity variations over time is calculated from the
    fitted values (see Equations 22 to 25 [here](https://doi.org/10.1039/D0CP04950F)). It is important to ensure that the
    contribution of a process (e.g., trapping) is non-negligible so that the associated parameters (e.g., $k_T$
    in the case of the bimolecular-trapping model) are accurately retrieved.""", unsafe_allow_html=True)
    st.markdown("""#### Grid fitting""")
    st.markdown("""This mode runs the fitting process for a grid of guess values. The grid is generated from every
    possible combinations of guess values supplied (e.g. $k_B$: 10<sup>-20</sup>, 10<sup>-19</sup> and
    $k_T$: 10<sup>-3</sup>, 10<sup>-2</sup> yields 4 sets of guess values: (10<sup>-20</sup>, 10<sup>-3</sup>),
    (10<sup>-20</sup>, 10<sup>-2</sup>), (10<sup>-19</sup>, 10<sup>-3</sup>) and (10<sup>-19</sup>, 10<sup>-2</sup>)).
    Note that in the case of the bimolecular-trapping-detrapping model, only set of guess values satisfying $k_T>k_B$
    and $k_T>k_D$ are considered to keep the computational time reasonable. Fitting is then carried using each set of
    guess values as schematically represented below: %s
    In the case where all the optimisations converge towards a similar solution, it can be assumed that only 1 solution
    exist and that therefore the parameter values obtained accurately describe the system measured. However, if the fits
    converge toward multiple solutions, it is not possible to ascertain which solution represents the system accurately."""
    % utils.render_image(resources.opt_guess, 60, 'png'), unsafe_allow_html=True)
# ------------------------------------------------------- HOW TO -------------------------------------------------------
with st.expander('Getting started'):
st.markdown("""#### Example""")
data1_link = utils.generate_downloadlink(resources.test_file1, text='data set 1')
data2_link = utils.generate_downloadlink(resources.test_file2, text='data set 2')
st.markdown("""Follow these steps to fit TRPL decays.""")
st.markdown("""1. Upload your data and select the data format (text files and csv are supported).
Check | |
19, 18])
# Corner-point indices for rectangles 30-43, keyed by rectangle index.
_RECT_POINT_ROWS = {
    30: (0, 2, 6, 4),
    31: (1, 3, 7, 5),
    32: (12, 14, 18, 16),
    33: (13, 15, 19, 17),
    34: (1, 2, 6, 5),
    35: (13, 14, 18, 17),
    36: (4, 5, 13, 12),
    37: (6, 7, 15, 14),
    38: (0, 1, 5, 4),
    39: (2, 3, 7, 6),
    40: (12, 13, 17, 16),
    41: (14, 15, 19, 18),
    42: (10, 11, 14, 13),
    43: (5, 6, 9, 8),
}
for _row, _points in _RECT_POINT_ROWS.items():
    rectangle_points_definitions[_row, :] = np.array(_points)
# Bounding-line indices for each of the 44 rectangles (4 lines per rectangle),
# built in one literal instead of 44 row assignments into an np.zeros array.
rectangle_lines_definitions = np.array([
    [0, 5, 6, 9],
    [6, 9, 0, 4],
    [6, 9, 1, 5],
    [7, 9, 0, 5],
    [6, 8, 0, 5],
    [6, 8, 0, 4],
    [7, 9, 0, 4],
    [1, 5, 6, 8],
    [1, 5, 7, 9],
    [1, 4, 6, 9],
    [7, 8, 1, 5],
    [0, 4, 7, 8],
    [6, 8, 1, 4],
    [1, 4, 7, 9],
    [7, 8, 2, 5],
    [0, 3, 7, 8],
    [1, 4, 7, 8],
    [0, 1, 6, 9],
    [6, 9, 4, 5],
    [3, 5, 7, 8],
    [7, 8, 2, 0],
    [2, 4, 7, 8],
    [1, 3, 7, 8],
    [2, 3, 7, 8],
    [6, 7, 0, 5],
    [8, 9, 0, 5],
    [6, 7, 0, 4],
    [8, 9, 0, 4],
    [6, 7, 1, 5],
    [1, 5, 8, 9],
    [6, 8, 0, 1],
    [7, 9, 0, 1],
    [4, 5, 6, 8],
    [4, 5, 7, 9],
    [7, 8, 0, 1],
    [4, 5, 7, 8],
    [1, 4, 6, 7],
    [1, 4, 8, 9],
    [0, 1, 6, 7],
    [0, 1, 8, 9],
    [4, 5, 6, 7],
    [4, 5, 8, 9],
    [3, 4, 7, 8],
    [1, 2, 7, 8],
], dtype=float)
# Pixel-space (x, y) corner coordinates of each rectangle, one 4x2 quad per
# rectangle, corners listed top-left, top-right, bottom-right, bottom-left.
# NOTE(review): 214x428 appears to be the full reference surface size and
# 53/107/160/161/268/322 its subdivision lines - confirm with the caller.
rectangle_points_position_definition = np.array([
    [[0, 0], [214, 0], [214, 428], [0, 428]],          # 0
    [[0, 0], [214, 0], [214, 322], [0, 322]],          # 1
    [[0, 107], [214, 107], [214, 428], [0, 428]],      # 2
    [[53, 0], [214, 0], [214, 428], [53, 428]],        # 3
    [[0, 0], [161, 0], [161, 428], [0, 428]],          # 4
    [[0, 0], [161, 0], [161, 322], [0, 322]],          # 5
    [[53, 0], [214, 0], [214, 322], [53, 322]],        # 6
    [[0, 107], [161, 107], [161, 428], [0, 428]],      # 7
    [[53, 107], [214, 107], [214, 428], [53, 428]],    # 8
    [[0, 107], [214, 107], [214, 322], [0, 322]],      # 9
    [[53, 107], [161, 107], [161, 428], [53, 428]],    # 10
    [[53, 0], [161, 0], [161, 322], [53, 322]],        # 11
    [[0, 107], [161, 107], [161, 322], [0, 322]],      # 12
    [[53, 107], [214, 107], [214, 322], [53, 322]],    # 13
    [[53, 160], [161, 160], [161, 428], [53, 428]],    # 14
    [[53, 0], [161, 0], [161, 268], [53, 268]],        # 15
    [[53, 107], [161, 107], [161, 322], [53, 322]],    # 16
    [[0, 0], [214, 0], [214, 107], [0, 107]],          # 17
    [[0, 322], [214, 322], [214, 428], [0, 428]],      # 18
    [[53, 268], [161, 268], [161, 428], [53, 428]],    # 19
    [[53, 0], [161, 0], [161, 160], [53, 160]],        # 20
    [[53, 160], [161, 160], [161, 322], [53, 322]],    # 21
    [[53, 107], [161, 107], [161, 268], [53, 268]],    # 22
    [[53, 160], [161, 160], [161, 268], [53, 268]],    # 23
    [[0, 0], [53, 0], [53, 428], [0, 428]],            # 24
    [[161, 0], [214, 0], [214, 428], [161, 428]],      # 25
    [[0, 0], [53, 0], [53, 322], [0, 322]],            # 26
    [[161, 0], [214, 0], [214, 322], [161, 322]],      # 27
    [[0, 107], [53, 107], [53, 428], [0, 428]],        # 28
    [[161, 107], [214, 107], [214, 428], [161, 428]],  # 29
    [[0, 0], [161, 0], [161, 107], [0, 107]],          # 30
    [[53, 0], [214, 0], [214, 107], [53, 107]],        # 31
    [[0, 322], [161, 322], [161, 428], [0, 428]],      # 32
    [[53, 322], [214, 322], [214, 428], [53, 428]],    # 33
    [[53, 0], [161, 0], [161, 107], [53, 107]],        # 34
    [[53, 322], [161, 322], [161, 428], [53, 428]],    # 35
    [[0, 107], [53, 107], [53, 322], [0, 322]],        # 36
    [[161, 107], [214, 107], [214, 322], [161, 322]],  # 37
    [[0, 0], [53, 0], [53, 107], [0, 107]],            # 38
    [[161, 0], [214, 0], [214, 107], [161, 107]],      # 39
    [[0, 322], [53, 322], [53, 428], [0, 428]],        # 40
    [[161, 322], [214, 322], [214, 428], [161, 428]],  # 41
    [[53, 268], [161, 268], [161, 322], [53, 322]],    # 42
    [[53, 107], [161, 107], [161, 160], [53, 160]],    # 43
], dtype=float)
def nothing(x):
    """No-op callback: OpenCV trackbars/buttons require a handler even when unused."""
    pass
# --- GUI construction: main window, frame trackbar, and the Qt button panel.
cv2.namedWindow(Image_name)
cv2.createTrackbar(Trackbar_name, Image_name, 0, num_frames, nothing)
frame_counter = 0
# cv2.setTrackbarPos(Trackbar_name, Image_name, frame_counter)
# One button per labeled point (ids 0-19 mirror the 0-9/q-p keyboard keys).
cv2.createButton("0", point_choice, 0, cv2.QT_PUSH_BUTTON, 0)
cv2.createButton("1", point_choice, 1, cv2.QT_PUSH_BUTTON, 1)
cv2.createButton("2", point_choice, 2, cv2.QT_PUSH_BUTTON, 2)
cv2.createButton("3", point_choice, 3, cv2.QT_PUSH_BUTTON, 3)
cv2.createButton("4", point_choice, 4, cv2.QT_PUSH_BUTTON, 4)
cv2.createButton("5", point_choice, 5, cv2.QT_PUSH_BUTTON, 5)
cv2.createButton("6", point_choice, 6, cv2.QT_PUSH_BUTTON, 6)
cv2.createButton("7", point_choice, 7, cv2.QT_PUSH_BUTTON, 7)
cv2.createButton("8", point_choice, 8, cv2.QT_PUSH_BUTTON, 8)
cv2.createButton("9", point_choice, 9, cv2.QT_PUSH_BUTTON, 9)
cv2.createButton("q", point_choice, 10, cv2.QT_PUSH_BUTTON, 10)
cv2.createButton("w", point_choice, 11, cv2.QT_PUSH_BUTTON, 11)
cv2.createButton("e", point_choice, 12, cv2.QT_PUSH_BUTTON, 12)
cv2.createButton("r", point_choice, 13, cv2.QT_PUSH_BUTTON, 13)
cv2.createButton("t", point_choice, 14, cv2.QT_PUSH_BUTTON, 14)
cv2.createButton("y", point_choice, 15, cv2.QT_PUSH_BUTTON, 15)
cv2.createButton("u", point_choice, 16, cv2.QT_PUSH_BUTTON, 16)
cv2.createButton("i", point_choice, 17, cv2.QT_PUSH_BUTTON, 17)
cv2.createButton("o", point_choice, 18, cv2.QT_PUSH_BUTTON, 18)
cv2.createButton("p", point_choice, 19, cv2.QT_PUSH_BUTTON, 19)
# Dummy trackbar ("Rien" = "nothing") - presumably forces a new row in the Qt
# button panel so the AOI buttons below appear in their own group; confirm.
cv2.createTrackbar("Rien", "", 0, 1, nothing)
# One button per area-of-interest label.
cv2.createButton("Trampoline", looking_at_trampo, 0, cv2.QT_PUSH_BUTTON, 0)
cv2.createButton("Wall front", looking_at_wall_front, 0, cv2.QT_PUSH_BUTTON, 0)
cv2.createButton("Wall back", looking_at_wall_back, 0, cv2.QT_PUSH_BUTTON, 0)
cv2.createButton("Wall right", looking_at_wall_right, 0, cv2.QT_PUSH_BUTTON, 0)
cv2.createButton("Wall left", looking_at_wall_left, 0, cv2.QT_PUSH_BUTTON, 0)
cv2.createButton("Self", looking_at_self, 0, cv2.QT_PUSH_BUTTON, 0)
cv2.createButton("Ceiling", looking_at_ceiling, 0, cv2.QT_PUSH_BUTTON, 0)
cv2.createButton("Not an acrobatics", looking_at_not_an_acrobatics, 0, cv2.QT_PUSH_BUTTON, 0)
cv2.createButton("Jump", looking_at_jump, 0, cv2.QT_PUSH_BUTTON, 0)
cv2.setMouseCallback(Image_name, mouse_click)
# Resume a previous labeling session if a saved state exists for this movie.
gaze_position_labels_file = "/home/user/disk/Eye-tracking/PupilData/points_labeled/" + movie_name + "_labeling_points.pkl" # [:-4]
if os.path.exists(gaze_position_labels_file):
    file = open(gaze_position_labels_file, "rb")
    points_labels, active_points, curent_AOI_label, csv_eye_tracking = pickle.load(file)
    # Backfill AOI keys added after older sessions were saved.
    if "Wall right" not in curent_AOI_label.keys(): ############
        curent_AOI_label["Wall right"] = np.zeros((len(frames),))
        curent_AOI_label["Wall left"] = np.zeros((len(frames),))
        curent_AOI_label["Self"] = np.zeros((len(frames),))
    if "Jump" not in curent_AOI_label.keys(): ############
        curent_AOI_label["Jump"] = np.zeros((len(frames),))
# $$$$$$$$$$$$$$$$$$
# Show the first frame, downscaled by ratio_image, before entering the loop.
playVideo = True
image_clone = frames[frame_counter].copy()
width, height, rgb = np.shape(image_clone)
small_image = cv2.resize(image_clone, (int(round(width / ratio_image)), int(round(height / ratio_image))))
width_small, height_small, rgb_small = np.shape(small_image)
cv2.imshow(Image_name, small_image)
while playVideo == True:
key = cv2.waitKey(0) & 0xFF
if key == ord('0'):
point_choice(0, 0)
elif key == ord('1'):
point_choice(1, 1)
elif key == ord('2'):
point_choice(2, 2)
elif key == ord('3'):
point_choice(3, 3)
elif key == ord('4'):
point_choice(4, 4)
elif key == ord('5'):
point_choice(5, 5)
elif key == ord('6'):
point_choice(6, 6)
elif key == ord('7'):
point_choice(7, 7)
elif key == ord('8'):
point_choice(8, 8)
elif key == ord('9'):
point_choice(9, 9)
elif key == ord('q'):
point_choice(10, 10)
elif key == ord('w'):
point_choice(11, 11)
elif key == ord('e'):
point_choice(12, 12)
elif key == ord('r'):
point_choice(13, 13)
elif key == ord('t'):
point_choice(14, 14)
elif key == ord('y'):
point_choice(15, 15)
elif key == ord('u'):
point_choice(16, 16)
elif key == ord('i'):
point_choice(17, 17)
elif key == ord('o'):
point_choice(18, 18)
elif key == ord('p'):
point_choice(19, 19)
if frame_counter % 15: # s'il ya un probleme, au moins on n'a pas tout perdu
if not os.path.exists(f'/home/user/disk/Eye-tracking/Results/{json_info["wearer_name"]}'):
os.makedirs(f'/home/user/disk/Eye-tracking/Results/{json_info["wearer_name"]}')
with open(f'/home/user/disk/Eye-tracking/Results/{json_info["wearer_name"]}/{movie_name}_tempo_labeling_points.pkl', 'wb') as handle:
pickle.dump([points_labels, active_points, curent_AOI_label, csv_eye_tracking], handle)
# $$$$$$$$$$$$$$$$$$
frame_counter = cv2.getTrackbarPos(Trackbar_name, Image_name)
frames_clone = frames.copy()
image_clone = np.zeros(np.shape(frames_clone[frame_counter]), dtype=np.uint8)
image_clone[:] = frames_clone[frame_counter][:]
small_image = cv2.resize(image_clone, (int(round(width / ratio_image)), int(round(height / ratio_image))))
if key == ord(','): # if `<` then go back
if frame_counter != 0:
frame_counter -= 1
cv2.setTrackbarPos(Trackbar_name, Image_name, frame_counter)
image_clone = np.zeros(np.shape(frames_clone[frame_counter]), dtype=np.uint8)
image_clone[:] = frames_clone[frame_counter][:]
small_image = cv2.resize(image_clone, (int(round(width / ratio_image)), int(round(height / ratio_image))))
small_image_gray = cv2.cvtColor(small_image, cv2.COLOR_BGR2GRAY)
cv2.imshow(Image_name, small_image_gray)
draw_points_and_lines()
elif key == ord('.'): # if | |
in city:
if city["cityNameData"]["Orig"]:
cityKey = "_".join(x.upper() for x in city["CityName"].split(" "))
if cityKey in CITY_NAMES:
city["cityLocData"] = CITY_NAMES[cityKey]
def reorderedCityData(cityData, removeGaps=True):
    """Rebuild the raw city list into civ order (civ index, then per-civ city order).

    Duplicate names keep only the newest record; cities displaced during the
    insertion pass are recovered from the raw list; optionally the per-civ
    CivCityOrderIdx sequence is renumbered from 0 without gaps.

    :param cityData: dict with a "cities" list of per-city dicts.
    :param removeGaps: renumber CivCityOrderIdx per civ when True.
    :return: new list of city dict copies, each annotated with "OldIdx".
    """
    # NOTE: the local list deliberately shadows the function name.
    reorderedCityData = []
    usedCities = []
    replacedCities = []
    for idx, city in enumerate(cityData["cities"]):
        # Negative CivIndex marks razed/placeholder entries - skip them.
        if city["CivIndex"] < 0:
            continue
        CityName = city["CityName"]
        cityCopy = city.copy()
        cityCopy["OldIdx"] = idx
        # If city exists already remove it from list
        if CityName in usedCities:
            for idx2, insertedCity in enumerate(reorderedCityData):
                if insertedCity["CityName"] == CityName:
                    reorderedCityData.pop(idx2)
                    break
        else:
            usedCities.append(CityName)
        # Find a slot where to insert
        insertCity(reorderedCityData, cityCopy, replacedCities)
    # Second pass: cities that were displaced by a replacement may still exist;
    # re-create them from the newest raw record unless their tile was reused.
    replacedCities2 = []
    for cityName in replacedCities:
        buildOver = False
        for city in reorderedCityData:
            if city["CityName"] == cityName:
                break
        else:
            # for/else: only runs when the city is NOT already in the result.
            for idx, missingCity in enumerate(reversed(cityData["cities"])):
                if missingCity["CityName"] == cityName:
                    cityLoc = missingCity["LocationIdx"]
                    # A different city on the same tile means it was built over.
                    for cityForLocCheck in reversed(cityData["cities"]):
                        if cityName != cityForLocCheck["CityName"]:
                            if cityForLocCheck["LocationIdx"] == cityLoc:
                                buildOver = True
                    break
            else:
                print("Failed to find a missing city candidate at reorderedCityData")
                continue
            if buildOver:
                # print("New city on same tile")
                continue
            # Re-insert the missing city at the end of its tile-owner's order.
            newCity = missingCity.copy()
            maxCivCityOrderIdx, maxCivCityOrderIdx1 = findLastCivCityIdx(cityData, missingCity["tileOwner"])
            newCity["OldIdx"] = len(cityData["cities"]) - idx - 1
            newCity["CivIndex"] = missingCity["tileOwner"]
            newCity["CivCityOrderIdx"] = maxCivCityOrderIdx + 1
            newCity["CivCityOrderIdx1"] = maxCivCityOrderIdx1 + 1
            insertCity(reorderedCityData, newCity, replacedCities2)
    # Remove gaps from CivCityOrderIdx
    if removeGaps:
        first = True
        CurrentCivIndex = 0
        CurrentCivCityOrderIdx = 0
        for city in reorderedCityData:
            if first:
                first = False
                CurrentCivIndex = city["CivIndex"]
                CurrentCivCityOrderIdx = city["CivCityOrderIdx"]
                # Start from 0
                if CurrentCivCityOrderIdx != 0:
                    city["CivCityOrderIdx"] = 0
                    city["CivCityOrderIdxOld"] = CurrentCivCityOrderIdx
                    CurrentCivCityOrderIdx = 0
                continue
            if city["tileOwner"] == FREE_CITY_IDX:
                continue
            if CurrentCivIndex == city["CivIndex"]:
                CurrentCivCityOrderIdx += 1
                if CurrentCivCityOrderIdx != city["CivCityOrderIdx"]:
                    city["CivCityOrderIdxOld"] = city["CivCityOrderIdx"]
                    city["CivCityOrderIdx"] = CurrentCivCityOrderIdx
            else: # Next Civ
                CurrentCivIndex = city["CivIndex"]
                CurrentCivCityOrderIdx = city["CivCityOrderIdx"]
                # Start from 0
                if CurrentCivCityOrderIdx != 0:
                    city["CivCityOrderIdx"] = 0
                    city["CivCityOrderIdxOld"] = CurrentCivCityOrderIdx
                    CurrentCivCityOrderIdx = 0
    return reorderedCityData
def insertCity(reorderedCityData, city, replacedCities):
    """Insert *city* into the ordered list at its compareCity-determined slot.

    When a slot is already occupied by the same civ/order position, the old
    entry is replaced in place and its name recorded in *replacedCities*.
    """
    placed = False
    for pos, existing in enumerate(reorderedCityData):
        order = compareCity(city, existing)
        if order == 0:
            # Same slot: keep the newer record, remember what it displaced.
            replacedCities.append(existing["CityName"])
            reorderedCityData[pos] = city
            placed = True
            break
        if order > 0:
            reorderedCityData.insert(pos, city)
            placed = True
            break
    if not placed:
        # Sorts after everything currently in the list.
        reorderedCityData.append(city)
def findLastCivCityIdx(cityData, civIdx):
    """Return the highest (CivCityOrderIdx, CivCityOrderIdx1) used by *civIdx*.

    Both values are -1 when the civ owns no city in *cityData*.
    """
    owned = [c for c in cityData["cities"] if c["CivIndex"] == civIdx]
    best = max((c["CivCityOrderIdx"] for c in owned), default=-1)
    best1 = max((c["CivCityOrderIdx1"] for c in owned), default=-1)
    return best, best1
def cityHasExiststedAlreadyInCiv(CityName, CivIndex, cityData):
    """Check whether *CivIndex* has already owned a city called *CityName*.

    :return: (CivCityOrderIdx of the last matching record or -1,
              next free CivCityOrderIdx1 for the civ,
              index of the last matching record in the raw list or -1)
    """
    orderIdx = -1
    lastIdx = -1
    maxOrder1 = -1
    for pos, entry in enumerate(cityData["cities"]):
        if entry["CivIndex"] != CivIndex:
            continue
        # Track the civ-wide maximum regardless of the city name.
        maxOrder1 = max(maxOrder1, entry["CivCityOrderIdx1"])
        if entry["CityName"] == CityName:
            # Last match wins, mirroring the raw list's chronological order.
            orderIdx = entry["CivCityOrderIdx"]
            lastIdx = pos
    return orderIdx, maxOrder1 + 1, lastIdx
def compareCity(city1, city2):
    """Three-way ordering of two city records for ordered insertion.

    :return: 1 when *city1* sorts before *city2*, -1 when after,
             0 when both occupy the same civ/order slot (replace).
    """
    free1 = city1["tileOwner"] == FREE_CITY_IDX
    free2 = city2["tileOwner"] == FREE_CITY_IDX
    # Non-free cities always sort ahead of free cities.
    # TODO: Check that the order is not time related with Free Cities
    if free1 != free2:
        return -1 if free1 else 1
    # Both free or both owned: compare civ index first, then per-civ order.
    for key in ("CivIndex", "CivCityOrderIdx"):
        if city1[key] < city2[key]:
            return 1
        if city1[key] > city2[key]:
            return -1
    # Same slot: caller replaces the old record (player lost an existing city).
    return 0
class GameDataHandler:
    def __init__(self, dataFolder, fileExt=".Civ6Save"):
        """Prepare an empty handler for save files under *dataFolder*.

        :param dataFolder: directory scanned (non-recursively) for saves.
        :param fileExt: save-file extension to accept.
        """
        self.dataFolder = dataFolder
        self.recursive = False
        # Raw per-turn parse results, one slot per save file (filled by parseData).
        self.tileData = []
        self.cityData = []
        self.civData = []
        self.leaderData = []
        self.notifications = []
        self.diploStates = []
        self.wars = []
        self.incWars = []
        # Timeline events built by createEvents; events_orig is the unfiltered copy.
        self.events = []
        self.events_orig = []
        # Rendering lookups (brushes/colors per tile or city).
        self.borderColors = []
        self.borderColorsInner = []
        self.borderColorsSC = []
        self.cityColors = []
        self.envColors = []
        self.riverColors = []
        self.goodyHuts = []
        self.fileExt = fileExt
        self.pColors = civColors  # module-level player palette
        # Map size; -1 until parseData reads it from the save data.
        self.X = -1
        self.Y = -1
        self.neighbours_list = []
        self.majorCivs = 0
        self.minorCivs = 0
        # Derived per-turn statistics.
        self.cityCounts = []
        self.razedCityLocs = []
        self.civ_text = []
        self.civHexaCounts = []
        self.playersAlive = []
        self.minorOrigos = {}
        self.minorCivTypes = {}
        self.calculatingCivNames = False
    def parseData(self):
        """Parse every save file in dataFolder (in sorted order) and derive statistics.

        Files are parsed in a multiprocessing pool; each worker result is
        stored at its turn index via saveResult, so output order is stable
        regardless of completion order.
        """
        snapshot = DirectorySnapshot(self.dataFolder, self.recursive)
        count = 0
        filePaths = []
        for filePath in sorted(snapshot.paths):
            if self.fileExt == os.path.splitext(filePath)[1]:
                count += 1
                filePaths.append(filePath)
        # Pre-size the result lists so callbacks can write by index.
        self.tileData = [None] * count
        self.cityData = [None] * count
        self.civData = [None] * count
        self.leaderData = [None] * count
        self.notifications = [None] * count
        self.diploStates = [None] * count
        self.wars = [None] * count
        # self.saveResult(fileWorker(0, filePaths[0]))
        # self.calcMajorCivs()
        t0 = time.time()
        pool = mp.Pool()
        fileCount = len(filePaths)
        for ii, filePath in enumerate(filePaths):
            pool.apply_async(fileWorker, args=(ii, filePath, fileCount), callback=self.saveResult)
            # self.saveResult(fileWorker(ii, filePath, fileCount)) # debugging single thread
        pool.close()
        pool.join()
        # unique_notifications = self.checkUniqueNotifications()
        self.X, self.Y = self.getMapSize()
        # Precompute hex-neighbour indexes for every tile on the map.
        self.neighbours_list = []
        for ii in range(self.X*self.Y):
            self.neighbours_list.append(self.getNeighbourIndexes(ii))
        # Derived statistics; calcMajorCivs must run first, the later
        # calculations depend on majorCivs/minorCivs.
        self.calcMajorCivs()
        self.calcDiploStateWarPeaceDiff()
        self.calcCityCounts()
        self.calculateCivHexas()
        self.calcPlayersAlive()
        self.calculateCityStateOrigos()
        self.calcRazedCitys()
        self.calcIncrementalWars()
        print("Total time {} s for data parsing from {} files".format(time.time() - t0, count))
def createEvents(self):
self.events = []
self.createWarEvents()
self.createPeaceEvents()
self.createWinningConditionEvents()
self.createCityEvents()
self.createWonderEvents()
self.sortEvents()
self.events_orig = self.events.copy()
def filterEvents(self, filter_rules):
self.events = self.events_orig.copy()
for event in reversed(self.events):
if event["Type"] in filter_rules:
if filter_rules[event["Type"]]:
self.events.remove(event)
def createPeaceEvents(self):
colorhex = ''.join([format(int(c), '02x') for c in COLORS_PRISM["COLOR_STANDARD_WHITE_LT"]])
peaceIcon = "<font color=#" + colorhex + "> \U0001F54A </font>" # Dove, as white if no colored icon support
pCount = self.majorCivs + self.minorCivs
for idx in range(self.warPeaceDiffTable.shape[-1]):
for p1 in range(pCount):
for p2 in range(p1 + 1, pCount):
if self.warPeaceDiffTable[p1][p2][idx] == -1:
peaceType = "Peace Major"
if p1 >= self.majorCivs and p2 >= self.majorCivs:
peaceType = "Peace Minor Minor"
elif p1 >= self.majorCivs or p2 >= self.majorCivs:
peaceType = "Peace Minor"
event_txt = "[" + str(idx + 1) + "]: " + self.civ_text[p1].replace("<br>", "") + \
peaceIcon + self.civ_text[p2].replace("<br>", "")
event = {"TurnIdx": idx, "Type": peaceType, "Event": event_txt}
self.events.append(event)
def createWinningConditionEvents(self):
pass
def createCityEvents(self):
# Founded, captured, razed, revolt, flipped, freed etc...
pass
def createWonderEvents(self):
pass
def createWarEvents(self):
colorhex = ''.join([format(int(c), '02x') for c in COLORS_PRISM["COLOR_STANDARD_RED_DK"]])
warIcon = "<font color=#" + colorhex + "> \u2694 </font>" # Crossed swords, as red if no colored icon support
for ii, turn in enumerate(self.incWars):
for war in turn:
attIdx = war["Att"]
defIdx = war["Def"]
warType = "War Major"
if attIdx >= self.majorCivs and defIdx >= self.majorCivs:
warType = "War Minor Minor"
elif attIdx >= self.majorCivs or defIdx >= self.majorCivs:
warType = "War Minor"
event_txt = "[" + str(ii + 1) + "]: " + self.civ_text[attIdx].replace("<br>", "") +\
warIcon + self.civ_text[defIdx].replace("<br>", "")
event = {"TurnIdx": ii, "Type": warType, "Event": event_txt}
self.events.append(event)
def sortEvents(self):
def sortEventFunc(e):
return e["TurnIdx"]
self.events.sort(key=sortEventFunc)
def checkUniqueNotifications(self):
unique_notifications = []
for turn in self.notifications:
for notification in turn:
if notification["NotiName"] not in unique_notifications:
unique_notifications.append(notification["NotiName"])
return unique_notifications
def saveResult(self, result):
self.tileData[result[0]] = result[1]
self.cityData[result[0]] = result[2]
self.civData[result[0]] = result[3]
self.leaderData[result[0]] = result[4]
self.notifications[result[0]] = result[5]
self.diploStates[result[0]] = result[6]
self.wars[result[0]] = result[7]
def calcCityCounts(self):
self.cityCounts = []
for i, turn in enumerate(self.cityData):
cityCounts = [0] * self.majorCivs
usedCities = {}
for city in turn["cities"]:
civIndex = city["CivIndex"]
if civIndex >= self.majorCivs: # Skip if city state has been liberated
continue
cityCounts[civIndex] += 1
if city["CityName"] not in usedCities:
usedCities[city["CityName"]] = city["CivIndex"]
else:
cityCounts[usedCities[city["CityName"]]] -= 1
self.cityCounts.append(cityCounts)
    def calcRazedCitys(self):
        """For every turn, collect tile locations of cities that were razed.

        A location counts as razed when its (dead minor, or negative-CivIndex)
        record is not followed by any newer record of a live city on the same
        tile. Results go to self.razedCityLocs, one list per turn.
        """
        self.razedCityLocs = []
        for i, turn in enumerate(self.cityData):
            razedCitysAtTurn = []
            # For minor player ruins
            for minor in self.minorOrigos:
                if not self.playersAlive[i][minor]:
                    loc = self.minorOrigos[minor]
                    for cityNewer in turn["cities"]:
                        if loc == cityNewer["LocationIdx"] and cityNewer["CivIndex"] >= 0:
                            break
                    else:
                        # No newer city exists -> razed
                        razedCitysAtTurn.append(loc)
            # For major player ruins
            for idx, city in enumerate(turn["cities"]):
                # Negative CivIndex marks a dead/placeholder city record.
                if city["CivIndex"] < 0:
                    loc = city["LocationIdx"]
                    # Last record in the list: nothing newer can exist.
                    if idx + 1 >= len(turn["cities"]):
                        razedCitysAtTurn.append(loc)
                        continue
                    # Only records AFTER this one count as "newer" on the tile.
                    for cityNewer in turn["cities"][idx+1:]:
                        if loc == cityNewer["LocationIdx"] and cityNewer["CivIndex"] >= 0:
                            break
                    else:
                        # No newer city exists -> razed
                        razedCitysAtTurn.append(loc)
            self.razedCityLocs.append(razedCitysAtTurn)
def calculateOtherStuff(self):
t0 = time.time()
for turnIdx, turn in enumerate(self.tileData):
goodyHutsAtTurn = []
for ii, tile in enumerate(turn["tiles"]):
terrainType = tile["TerrainType"]
featureType = tile["FeatureType"]
GoodyHut = tile["GoodyHut"]
try:
if Features[GoodyHut]["FeatureType"] == "GoodyHut" or \
Features[GoodyHut]["FeatureType"] == "BarbCamp":
goodyHutsAtTurn.append(Features[GoodyHut]["color"])
else:
goodyHutsAtTurn.append(emptyBrush)
except:
print("Unknown feature: turnIdx: {}, x: {}, y: {}, goodyHut: {}".format(turnIdx, tile["x"], | |
<filename>pyglet/graphics/vertexdomain.py<gh_stars>0
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 <NAME>
# Copyright (c) 2008-2021 pyglet contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
"""Manage related vertex attributes within a single vertex domain.
A vertex "domain" consists of a set of attribute descriptions that together
describe the layout of one or more vertex buffers which are used together to
specify the vertices in a primitive. Additionally, the domain manages the
buffers used to store the data and will resize them as necessary to accommodate
new vertices.
Domains can optionally be indexed, in which case they also manage a buffer
containing vertex indices. This buffer is grown separately and has no size
relation to the attribute buffers.
Applications can create vertices (and optionally, indices) within a domain
with the :py:meth:`VertexDomain.create` method. This returns a
:py:class:`VertexList` representing the list of vertices created. The vertex
attribute data within the group can be modified, and the changes will be made
to the underlying buffers automatically.
The entire domain can be efficiently drawn in one step with the
:py:meth:`VertexDomain.draw` method, assuming all the vertices comprise
primitives of the same OpenGL primitive mode.
"""
import ctypes
import pyglet
from pyglet.gl import *
from pyglet.graphics import allocation, vertexattribute, vertexbuffer
def _nearest_pow2(v):
# From http://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2
# Credit: <NAME>
v -= 1
v |= v >> 1
v |= v >> 2
v |= v >> 4
v |= v >> 8
v |= v >> 16
return v + 1
# Map single-character attribute format codes (array-module style) to the
# corresponding OpenGL component data types.
_gl_types = {
    'b': GL_BYTE,
    'B': GL_UNSIGNED_BYTE,
    's': GL_SHORT,
    'S': GL_UNSIGNED_SHORT,
    'i': GL_INT,
    'I': GL_UNSIGNED_INT,
    'f': GL_FLOAT,
    'd': GL_DOUBLE,
}
class VertexDomain:
    """Management of a set of vertex lists.
    Construction of a vertex domain is usually done with the
    :py:func:`create_domain` function.
    """
    # Bumped on every buffer reallocation so VertexList region caches know to refresh.
    version = 0
    # Initial vertex capacity of the allocator (grows on demand).
    _initial_count = 16
    def __init__(self, attribute_meta):
        # attribute_meta maps attribute name -> dict with 'location', 'count'
        # and 'format' keys; format's first char selects the GL type and an
        # 'n' anywhere in it requests normalization.
        self.allocator = allocation.Allocator(self._initial_count)
        self.attributes = []
        self.buffer_attributes = [] # list of (buffer, attributes)
        for name, meta in attribute_meta.items():
            location = meta['location']
            count = meta['count']
            gl_type = _gl_types[meta['format'][0]]
            normalize = 'n' in meta['format']
            attribute = vertexattribute.VertexAttribute(name, location, count, gl_type, normalize)
            self.attributes.append(attribute)
            # Create buffer:
            attribute.buffer = vertexbuffer.create_buffer(attribute.stride * self.allocator.capacity)
            attribute.buffer.element_size = attribute.stride
            attribute.buffer.attributes = (attribute,)
            self.buffer_attributes.append((attribute.buffer, (attribute,)))
        # Create named attributes for each attribute
        self.attribute_names = {}
        for attribute in self.attributes:
            self.attribute_names[attribute.name] = attribute
    def __del__(self):
        # Break circular refs that Python GC seems to miss even when forced
        # collection.
        for attribute in self.attributes:
            try:
                del attribute.buffer
            except AttributeError:
                pass
    def safe_alloc(self, count):
        """Allocate vertices, resizing the buffers if necessary."""
        try:
            return self.allocator.alloc(count)
        except allocation.AllocatorMemoryException as e:
            # Out of room: grow every buffer to the next power of two that
            # fits the request, then retry once.
            capacity = _nearest_pow2(e.requested_capacity)
            self.version += 1
            for buffer, _ in self.buffer_attributes:
                buffer.resize(capacity * buffer.element_size)
            self.allocator.set_capacity(capacity)
            return self.allocator.alloc(count)
    def safe_realloc(self, start, count, new_count):
        """Reallocate vertices, resizing the buffers if necessary."""
        try:
            return self.allocator.realloc(start, count, new_count)
        except allocation.AllocatorMemoryException as e:
            # Same growth strategy as safe_alloc.
            capacity = _nearest_pow2(e.requested_capacity)
            self.version += 1
            for buffer, _ in self.buffer_attributes:
                buffer.resize(capacity * buffer.element_size)
            self.allocator.set_capacity(capacity)
            return self.allocator.realloc(start, count, new_count)
    def create(self, count, index_count=None):
        """Create a :py:class:`VertexList` in this domain.
        :Parameters:
            `count` : int
                Number of vertices to create.
            `index_count`: None
                Ignored for non indexed VertexDomains
        :rtype: :py:class:`VertexList`
        """
        start = self.safe_alloc(count)
        return VertexList(self, start, count)
    def draw(self, mode):
        """Draw all vertices in the domain.
        All vertices in the domain are drawn at once. This is the
        most efficient way to render primitives.
        :Parameters:
            `mode` : int
                OpenGL drawing mode, e.g. ``GL_POINTS``, ``GL_LINES``, etc.
        """
        for buffer, attributes in self.buffer_attributes:
            buffer.bind()
            for attribute in attributes:
                attribute.enable()
                attribute.set_pointer(attribute.buffer.ptr)
        # Allocated vertices may be fragmented into several regions.
        starts, sizes = self.allocator.get_allocated_regions()
        primcount = len(starts)
        if primcount == 0:
            pass
        elif primcount == 1:
            # Common case
            glDrawArrays(mode, starts[0], sizes[0])
        else:
            # Fragmented: issue all regions in one glMultiDrawArrays call.
            starts = (GLint * primcount)(*starts)
            sizes = (GLsizei * primcount)(*sizes)
            glMultiDrawArrays(mode, starts, sizes, primcount)
        for buffer, _ in self.buffer_attributes:
            buffer.unbind()
    def draw_subset(self, mode, vertex_list):
        """Draw a specific VertexList in the domain.
        The `vertex_list` parameter specifies a :py:class:`VertexList`
        to draw. Only primitives in that list will be drawn.
        :Parameters:
            `mode` : int
                OpenGL drawing mode, e.g. ``GL_POINTS``, ``GL_LINES``, etc.
            `vertex_list` : `VertexList`
                Vertex list to draw.
        """
        for buffer, attributes in self.buffer_attributes:
            buffer.bind()
            for attribute in attributes:
                attribute.enable()
                attribute.set_pointer(attribute.buffer.ptr)
        glDrawArrays(mode, vertex_list.start, vertex_list.count)
        for buffer, _ in self.buffer_attributes:
            buffer.unbind()
    @property
    def is_empty(self):
        # True when the allocator holds no allocated regions at all.
        return not self.allocator.starts
    def __repr__(self):
        return '<%s@%x %s>' % (self.__class__.__name__, id(self), self.allocator)
class VertexList:
    """A list of vertices within a :py:class:`VertexDomain`. Use
    :py:meth:`VertexDomain.create` to construct this list.
    """
    def __init__(self, domain, start, count):
        # NOTE: plain attribute assignment here is routed through __setattr__,
        # which falls back to object.__setattr__ until 'domain' exists.
        self.domain = domain
        self.start = start
        self.count = count
        # Per-attribute buffer-region cache, invalidated (via version
        # mismatch) whenever the domain reallocates its buffers.
        self._caches = {}
        self._cache_versions = {}
    def draw(self, mode):
        """Draw this vertex list in the given OpenGL mode.
        :Parameters:
            `mode` : int
                OpenGL drawing mode, e.g. ``GL_POINTS``, ``GL_LINES``, etc.
        """
        with pyglet.graphics.get_default_batch().vao:
            pyglet.graphics.get_default_group().set_state()
            self.domain.draw_subset(mode, self)
            pyglet.graphics.get_default_group().unset_state()
    def resize(self, count, index_count=None):
        """Resize this group.
        :Parameters:
            `count` : int
                New number of vertices in the list.
            `index_count`: None
                Ignored for non indexed VertexDomains
        """
        new_start = self.domain.safe_realloc(self.start, self.count, count)
        if new_start != self.start:
            # Copy contents to new location
            for attribute in self.domain.attributes:
                old = attribute.get_region(attribute.buffer, self.start, self.count)
                new = attribute.get_region(attribute.buffer, new_start, self.count)
                new.array[:] = old.array[:]
                new.invalidate()
        self.start = new_start
        self.count = count
        # Force __getattr__ to re-fetch regions on next access.
        for version in self._cache_versions:
            self._cache_versions[version] = None
    def delete(self):
        """Delete this group."""
        self.domain.allocator.dealloc(self.start, self.count)
    def migrate(self, domain):
        """Move this group from its current domain and add to the specified
        one. Attributes on domains must match. (In practice, used to change
        parent state of some vertices).
        :Parameters:
            `domain` : `VertexDomain`
                Domain to migrate this vertex list to.
        """
        assert list(domain.attribute_names.keys()) == list(self.domain.attribute_names.keys()),\
            'Domain attributes must match.'
        new_start = domain.safe_alloc(self.count)
        # Copy each attribute's data from the old domain's buffer to the new one.
        for key, old_attribute in self.domain.attribute_names.items():
            old = old_attribute.get_region(old_attribute.buffer, self.start, self.count)
            new_attribute = domain.attribute_names[key]
            new = new_attribute.get_region(new_attribute.buffer, new_start, self.count)
            new.array[:] = old.array[:]
            new.invalidate()
        self.domain.allocator.dealloc(self.start, self.count)
        self.domain = domain
        self.start = new_start
        # Cached regions point into the old domain's buffers - invalidate.
        for version in self._cache_versions:
            self._cache_versions[version] = None
    def set_attribute_data(self, i, data):
        # Bulk-set attribute i (by position in domain.attributes) for all vertices.
        attribute = self.domain.attributes[i]
        # TODO without region
        region = attribute.get_region(attribute.buffer, self.start, self.count)
        region.array[:] = data
        region.invalidate()
    def __getattr__(self, name):
        """dynamic access to vertex attributes, for backwards compatibility.
        """
        domain = self.domain
        # Refresh the cached region if the domain's buffers were reallocated.
        if self._cache_versions.get(name, None) != domain.version:
            attribute = domain.attribute_names[name]
            self._caches[name] = attribute.get_region(attribute.buffer, self.start, self.count)
            self._cache_versions[name] = domain.version
        region = self._caches[name]
        # Mark dirty on every access; the caller may write through the array.
        region.invalidate()
        return region.array
    def __setattr__(self, name, value):
        # Allow setting vertex attributes directly without overwriting them:
        if 'domain' in self.__dict__ and name in self.__dict__['domain'].attribute_names:
            getattr(self, name)[:] = value
            return
        super().__setattr__(name, value)
class IndexedVertexDomain(VertexDomain):
"""Management of a set of indexed vertex lists.
Construction of an indexed vertex domain is usually done with the
:py:func:`create_domain` function.
"""
_initial_index_count = 16
def __init__(self, attribute_meta, index_gl_type=GL_UNSIGNED_INT):
super(IndexedVertexDomain, self).__init__(attribute_meta)
self.index_allocator = allocation.Allocator(self._initial_index_count)
self.index_gl_type = index_gl_type
self.index_c_type = vertexattribute._c_types[index_gl_type]
self.index_element_size = ctypes.sizeof(self.index_c_type)
self.index_buffer = vertexbuffer.create_buffer(
self.index_allocator.capacity * self.index_element_size,
target=GL_ELEMENT_ARRAY_BUFFER)
    def safe_index_alloc(self, count):
        """Allocate indices, resizing the buffers if necessary."""
        try:
            return self.index_allocator.alloc(count)
        except allocation.AllocatorMemoryException as e:
            # Out of space: grow to the next power of two that fits the
            # request, bump the domain version so cached regions are
            # rebuilt, then retry the allocation once.
            capacity = _nearest_pow2(e.requested_capacity)
            self.version += 1
            self.index_buffer.resize(capacity * self.index_element_size)
            self.index_allocator.set_capacity(capacity)
            return self.index_allocator.alloc(count)
def safe_index_realloc(self, start, count, new_count):
"""Reallocate indices, resizing the buffers if necessary."""
try:
return self.index_allocator.realloc(start, count, new_count)
except allocation.AllocatorMemoryException as | |
)
class OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemoteArray(
object
):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote.to_proto(
i
)
for i in resources
]
@classmethod
def from_proto(self, resources):
return [
OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote.from_proto(
i
)
for i in resources
]
class OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs(object):
    """Wrapper for the PkgMsiSourceGcs proto: a GCS object reference."""
    def __init__(self, bucket: str = None, object: str = None, generation: int = None):
        """Store the GCS reference (bucket, object name, generation number)."""
        self.bucket = bucket
        self.object = object
        self.generation = generation
    @classmethod
    def to_proto(self, resource):
        """Convert a wrapper to its proto message; ``None`` maps to ``None``.

        Generator convention: a field is only set when its converted value is
        truthy, leaving unset fields at their proto defaults.
        """
        if not resource:
            return None
        res = (
            os_policy_assignment_pb2.OsconfigBetaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs()
        )
        if Primitive.to_proto(resource.bucket):
            res.bucket = Primitive.to_proto(resource.bucket)
        if Primitive.to_proto(resource.object):
            res.object = Primitive.to_proto(resource.object)
        if Primitive.to_proto(resource.generation):
            res.generation = Primitive.to_proto(resource.generation)
        return res
    @classmethod
    def from_proto(self, resource):
        """Build a wrapper from a proto message; ``None`` maps to ``None``."""
        if not resource:
            return None
        return OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs(
            bucket=Primitive.from_proto(resource.bucket),
            object=Primitive.from_proto(resource.object),
            generation=Primitive.from_proto(resource.generation),
        )
class OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcsArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs.to_proto(
i
)
for i in resources
]
@classmethod
def from_proto(self, resources):
return [
OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs.from_proto(
i
)
for i in resources
]
class OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository(object):
    """Wrapper for the Repository proto: exactly one of apt/yum/zypper/goo
    package-repository configurations."""
    def __init__(
        self, apt: dict = None, yum: dict = None, zypper: dict = None, goo: dict = None
    ):
        """Store the per-package-manager repository configs (dicts or None)."""
        self.apt = apt
        self.yum = yum
        self.zypper = zypper
        self.goo = goo
    @classmethod
    def to_proto(self, resource):
        """Convert a wrapper to its proto message; ``None`` maps to ``None``.

        Generator convention: each submessage is CopyFrom'd when its converted
        value is truthy, otherwise explicitly cleared with ``ClearField``.
        """
        if not resource:
            return None
        res = (
            os_policy_assignment_pb2.OsconfigBetaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository()
        )
        if OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt.to_proto(
            resource.apt
        ):
            res.apt.CopyFrom(
                OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt.to_proto(
                    resource.apt
                )
            )
        else:
            res.ClearField("apt")
        if OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum.to_proto(
            resource.yum
        ):
            res.yum.CopyFrom(
                OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum.to_proto(
                    resource.yum
                )
            )
        else:
            res.ClearField("yum")
        if OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper.to_proto(
            resource.zypper
        ):
            res.zypper.CopyFrom(
                OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper.to_proto(
                    resource.zypper
                )
            )
        else:
            res.ClearField("zypper")
        if OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo.to_proto(
            resource.goo
        ):
            res.goo.CopyFrom(
                OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo.to_proto(
                    resource.goo
                )
            )
        else:
            res.ClearField("goo")
        return res
    @classmethod
    def from_proto(self, resource):
        """Build a wrapper from a proto message; ``None`` maps to ``None``."""
        if not resource:
            return None
        return OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository(
            apt=OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt.from_proto(
                resource.apt
            ),
            yum=OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum.from_proto(
                resource.yum
            ),
            zypper=OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper.from_proto(
                resource.zypper
            ),
            goo=OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo.from_proto(
                resource.goo
            ),
        )
class OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository.to_proto(i)
for i in resources
]
@classmethod
def from_proto(self, resources):
return [
OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository.from_proto(i)
for i in resources
]
class OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt(object):
    """Wrapper for the RepositoryApt proto: an APT repository definition."""
    def __init__(
        self,
        archive_type: str = None,
        uri: str = None,
        distribution: str = None,
        components: list = None,
        gpg_key: str = None,
    ):
        """Store the APT repository fields (archive type, URI, distribution,
        component list, GPG key)."""
        self.archive_type = archive_type
        self.uri = uri
        self.distribution = distribution
        self.components = components
        self.gpg_key = gpg_key
    @classmethod
    def to_proto(self, resource):
        """Convert a wrapper to its proto message; ``None`` maps to ``None``.

        Generator convention: scalar fields are set and the repeated
        ``components`` field extended only when the converted value is truthy;
        ``archive_type`` goes through its enum converter.
        """
        if not resource:
            return None
        res = (
            os_policy_assignment_pb2.OsconfigBetaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt()
        )
        if OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnum.to_proto(
            resource.archive_type
        ):
            res.archive_type = OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnum.to_proto(
                resource.archive_type
            )
        if Primitive.to_proto(resource.uri):
            res.uri = Primitive.to_proto(resource.uri)
        if Primitive.to_proto(resource.distribution):
            res.distribution = Primitive.to_proto(resource.distribution)
        if Primitive.to_proto(resource.components):
            res.components.extend(Primitive.to_proto(resource.components))
        if Primitive.to_proto(resource.gpg_key):
            res.gpg_key = Primitive.to_proto(resource.gpg_key)
        return res
    @classmethod
    def from_proto(self, resource):
        """Build a wrapper from a proto message; ``None`` maps to ``None``."""
        if not resource:
            return None
        return OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt(
            archive_type=OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnum.from_proto(
                resource.archive_type
            ),
            uri=Primitive.from_proto(resource.uri),
            distribution=Primitive.from_proto(resource.distribution),
            components=Primitive.from_proto(resource.components),
            gpg_key=Primitive.from_proto(resource.gpg_key),
        )
class OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt.to_proto(i)
for i in resources
]
@classmethod
def from_proto(self, resources):
return [
OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt.from_proto(
i
)
for i in resources
]
class OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum(object):
    """Wrapper for the RepositoryYum proto: a YUM repository definition."""
    def __init__(
        self,
        id: str = None,
        display_name: str = None,
        base_url: str = None,
        gpg_keys: list = None,
    ):
        """Store the YUM repository fields (id, display name, base URL, GPG keys)."""
        self.id = id
        self.display_name = display_name
        self.base_url = base_url
        self.gpg_keys = gpg_keys
    @classmethod
    def to_proto(self, resource):
        """Convert a wrapper to its proto message; ``None`` maps to ``None``.

        Generator convention: fields are set (or the repeated ``gpg_keys``
        extended) only when the converted value is truthy.
        """
        if not resource:
            return None
        res = (
            os_policy_assignment_pb2.OsconfigBetaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum()
        )
        if Primitive.to_proto(resource.id):
            res.id = Primitive.to_proto(resource.id)
        if Primitive.to_proto(resource.display_name):
            res.display_name = Primitive.to_proto(resource.display_name)
        if Primitive.to_proto(resource.base_url):
            res.base_url = Primitive.to_proto(resource.base_url)
        if Primitive.to_proto(resource.gpg_keys):
            res.gpg_keys.extend(Primitive.to_proto(resource.gpg_keys))
        return res
    @classmethod
    def from_proto(self, resource):
        """Build a wrapper from a proto message; ``None`` maps to ``None``."""
        if not resource:
            return None
        return OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum(
            id=Primitive.from_proto(resource.id),
            display_name=Primitive.from_proto(resource.display_name),
            base_url=Primitive.from_proto(resource.base_url),
            gpg_keys=Primitive.from_proto(resource.gpg_keys),
        )
class OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYumArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum.to_proto(i)
for i in resources
]
@classmethod
def from_proto(self, resources):
return [
OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum.from_proto(
i
)
for i in resources
]
class OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper(object):
    """Wrapper for the RepositoryZypper proto: a Zypper repository definition."""
    def __init__(
        self,
        id: str = None,
        display_name: str = None,
        base_url: str = None,
        gpg_keys: list = None,
    ):
        """Store the Zypper repository fields (id, display name, base URL, GPG keys)."""
        self.id = id
        self.display_name = display_name
        self.base_url = base_url
        self.gpg_keys = gpg_keys
    @classmethod
    def to_proto(self, resource):
        """Convert a wrapper to its proto message; ``None`` maps to ``None``.

        Generator convention: fields are set (or the repeated ``gpg_keys``
        extended) only when the converted value is truthy.
        """
        if not resource:
            return None
        res = (
            os_policy_assignment_pb2.OsconfigBetaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper()
        )
        if Primitive.to_proto(resource.id):
            res.id = Primitive.to_proto(resource.id)
        if Primitive.to_proto(resource.display_name):
            res.display_name = Primitive.to_proto(resource.display_name)
        if Primitive.to_proto(resource.base_url):
            res.base_url = Primitive.to_proto(resource.base_url)
        if Primitive.to_proto(resource.gpg_keys):
            res.gpg_keys.extend(Primitive.to_proto(resource.gpg_keys))
        return res
    @classmethod
    def from_proto(self, resource):
        """Build a wrapper from a proto message; ``None`` maps to ``None``."""
        if not resource:
            return None
        return OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper(
            id=Primitive.from_proto(resource.id),
            display_name=Primitive.from_proto(resource.display_name),
            base_url=Primitive.from_proto(resource.base_url),
            gpg_keys=Primitive.from_proto(resource.gpg_keys),
        )
class OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypperArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper.to_proto(
i
)
for i in resources
]
@classmethod
def from_proto(self, resources):
return [
OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper.from_proto(
i
)
for i in resources
]
class OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo(object):
    """Wrapper for the RepositoryGoo proto: a Googet repository (name, URL)."""
    def __init__(self, name: str = None, url: str = None):
        """Store the Googet repository fields (name, URL)."""
        self.name = name
        self.url = url
    @classmethod
    def to_proto(self, resource):
        """Convert a wrapper to its proto message; ``None`` maps to ``None``.

        Generator convention: a field is only set when its converted value is
        truthy.
        """
        if not resource:
            return None
        res = (
            os_policy_assignment_pb2.OsconfigBetaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo()
        )
        if Primitive.to_proto(resource.name):
            res.name = Primitive.to_proto(resource.name)
        if Primitive.to_proto(resource.url):
            res.url = Primitive.to_proto(resource.url)
        return res
    @classmethod
    def from_proto(self, resource):
        """Build a wrapper from a proto message; ``None`` maps to ``None``."""
        if not resource:
            return None
        return OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo(
            name=Primitive.from_proto(resource.name),
            url=Primitive.from_proto(resource.url),
        )
class OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGooArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo.to_proto(i)
for i in resources
]
@classmethod
def from_proto(self, resources):
return [
OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo.from_proto(
i
)
for i in resources
]
class OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec(object):
    """Wrapper for the Exec proto: validate and enforce script specifications."""
    def __init__(self, validate: dict = None, enforce: dict = None):
        """Store the validate/enforce sub-configurations (dicts or None)."""
        self.validate = validate
        self.enforce = enforce
    @classmethod
    def to_proto(self, resource):
        """Convert a wrapper to its proto message; ``None`` maps to ``None``.

        Generator convention: each submessage is CopyFrom'd when its converted
        value is truthy, otherwise explicitly cleared with ``ClearField``.
        """
        if not resource:
            return None
        res = (
            os_policy_assignment_pb2.OsconfigBetaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec()
        )
        if OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate.to_proto(
            resource.validate
        ):
            res.validate.CopyFrom(
                OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate.to_proto(
                    resource.validate
                )
            )
        else:
            res.ClearField("validate")
        if OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce.to_proto(
            resource.enforce
        ):
            res.enforce.CopyFrom(
                OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce.to_proto(
                    resource.enforce
                )
            )
        else:
            res.ClearField("enforce")
        return res
    @classmethod
    def from_proto(self, resource):
        """Build a wrapper from a proto message; ``None`` maps to ``None``."""
        if not resource:
            return None
        return OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec(
            validate=OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate.from_proto(
                resource.validate
            ),
            enforce=OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce.from_proto(
                resource.enforce
            ),
        )
class OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec.to_proto(i)
for i in resources
]
@classmethod
def from_proto(self, resources):
return [
OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec.from_proto(i)
for i in resources
]
class OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate(object):
    """Wrapper for the ExecValidate proto: a validation script (file or inline
    script, args, interpreter, output file path)."""
    def __init__(
        self,
        file: dict = None,
        script: str = None,
        args: list = None,
        interpreter: str = None,
        output_file_path: str = None,
    ):
        """Store the validation-script fields."""
        self.file = file
        self.script = script
        self.args = args
        self.interpreter = interpreter
        self.output_file_path = output_file_path
    @classmethod
    def to_proto(self, resource):
        """Convert a wrapper to its proto message; ``None`` maps to ``None``.

        Generator convention: the ``file`` submessage is CopyFrom'd or
        ClearField'd; scalar/repeated fields are set only when their converted
        value is truthy; ``interpreter`` goes through its enum converter.
        """
        if not resource:
            return None
        res = (
            os_policy_assignment_pb2.OsconfigBetaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate()
        )
        if OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile.to_proto(
            resource.file
        ):
            res.file.CopyFrom(
                OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile.to_proto(
                    resource.file
                )
            )
        else:
            res.ClearField("file")
        if Primitive.to_proto(resource.script):
            res.script = Primitive.to_proto(resource.script)
        if Primitive.to_proto(resource.args):
            res.args.extend(Primitive.to_proto(resource.args))
        if OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnum.to_proto(
            resource.interpreter
        ):
            res.interpreter = OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnum.to_proto(
                resource.interpreter
            )
        if Primitive.to_proto(resource.output_file_path):
            res.output_file_path = Primitive.to_proto(resource.output_file_path)
        return res
    @classmethod
    def from_proto(self, resource):
        """Build a wrapper from a proto message; ``None`` maps to ``None``."""
        if not resource:
            return None
        return OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate(
            file=OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile.from_proto(
                resource.file
            ),
            script=Primitive.from_proto(resource.script),
            args=Primitive.from_proto(resource.args),
            interpreter=OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnum.from_proto(
                resource.interpreter
            ),
            output_file_path=Primitive.from_proto(resource.output_file_path),
        )
class OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate.to_proto(i)
for i in resources
]
@classmethod
def from_proto(self, resources):
return [
OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate.from_proto(
i
)
for i in resources
]
class OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile(object):
    """Wrapper for the ExecValidateFile proto: a file source (remote, GCS, or
    local path) plus an allow-insecure flag."""
    def __init__(
        self,
        remote: dict = None,
        gcs: dict = None,
        local_path: str = None,
        allow_insecure: bool = None,
    ):
        """Store the file-source fields."""
        self.remote = remote
        self.gcs = gcs
        self.local_path = local_path
        self.allow_insecure = allow_insecure
    @classmethod
    def to_proto(self, resource):
        """Convert a wrapper to its proto message; ``None`` maps to ``None``.

        Generator convention: the ``remote``/``gcs`` submessages are
        CopyFrom'd or ClearField'd; scalar fields are set only when their
        converted value is truthy.
        """
        if not resource:
            return None
        res = (
            os_policy_assignment_pb2.OsconfigBetaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile()
        )
        if OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote.to_proto(
            resource.remote
        ):
            res.remote.CopyFrom(
                OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote.to_proto(
                    resource.remote
                )
            )
        else:
            res.ClearField("remote")
        if OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs.to_proto(
            resource.gcs
        ):
            res.gcs.CopyFrom(
                OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs.to_proto(
                    resource.gcs
                )
            )
        else:
            res.ClearField("gcs")
        if Primitive.to_proto(resource.local_path):
            res.local_path = Primitive.to_proto(resource.local_path)
        if Primitive.to_proto(resource.allow_insecure):
            res.allow_insecure = Primitive.to_proto(resource.allow_insecure)
        return res
    @classmethod
    def from_proto(self, resource):
        """Build a wrapper from a proto message; ``None`` maps to ``None``."""
        if not resource:
            return None
        return OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile(
            remote=OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote.from_proto(
                resource.remote
            ),
            gcs=OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs.from_proto(
                resource.gcs
            ),
            local_path=Primitive.from_proto(resource.local_path),
            allow_insecure=Primitive.from_proto(resource.allow_insecure),
        )
class OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile.to_proto(
i
)
for i in resources
]
@classmethod
def from_proto(self, resources):
return [
OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile.from_proto(
i
)
for i in resources
]
class OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote(object):
    """Wrapper for the ExecValidateFileRemote proto: a remote file reference
    (URI plus optional SHA-256 checksum)."""
    def __init__(self, uri: str = None, sha256_checksum: str = None):
        """Store the remote file fields (URI, SHA-256 checksum)."""
        self.uri = uri
        self.sha256_checksum = sha256_checksum
    @classmethod
    def to_proto(self, resource):
        """Convert a wrapper to its proto message; ``None`` maps to ``None``.

        Generator convention: a field is only set when its converted value is
        truthy.
        """
        if not resource:
            return None
        res = (
            os_policy_assignment_pb2.OsconfigBetaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote()
        )
        if Primitive.to_proto(resource.uri):
            res.uri = Primitive.to_proto(resource.uri)
        if Primitive.to_proto(resource.sha256_checksum):
            res.sha256_checksum = Primitive.to_proto(resource.sha256_checksum)
        return res
    @classmethod
    def from_proto(self, resource):
        """Build a wrapper from a proto message; ``None`` maps to ``None``."""
        if not resource:
            return None
        return (
            OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote(
                uri=Primitive.from_proto(resource.uri),
                sha256_checksum=Primitive.from_proto(resource.sha256_checksum),
            )
        )
class OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemoteArray(
object
):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote.to_proto(
i
)
for i in resources
]
@classmethod
def from_proto(self, resources):
return [
OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote.from_proto(
i
)
for i in resources
]
class OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs(object):
    def __init__(self, bucket: str = None, object: str = None, generation: int = None):
        """Store the GCS reference (bucket, object name, generation number)."""
        self.bucket = bucket
        self.object = object
        self.generation = generation
    @classmethod
    def to_proto(self, resource):
        """Convert a wrapper to its proto message; ``None`` maps to ``None``.

        Generator convention: a field is only set when its converted value is
        truthy.
        """
        if not resource:
            return None
        res = (
            os_policy_assignment_pb2.OsconfigBetaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs()
        )
        if Primitive.to_proto(resource.bucket):
            res.bucket = Primitive.to_proto(resource.bucket)
        if Primitive.to_proto(resource.object):
            res.object = Primitive.to_proto(resource.object)
        if Primitive.to_proto(resource.generation):
            res.generation = Primitive.to_proto(resource.generation)
        return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
| |
# <gh_stars>100-1000
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
try:
from ._models_py3 import AccountSasParameters
from ._models_py3 import ActiveDirectoryProperties
from ._models_py3 import AzureEntityResource
from ._models_py3 import AzureFilesIdentityBasedAuthentication
from ._models_py3 import BlobContainer
from ._models_py3 import BlobInventoryPolicy
from ._models_py3 import BlobInventoryPolicyDefinition
from ._models_py3 import BlobInventoryPolicyFilter
from ._models_py3 import BlobInventoryPolicyRule
from ._models_py3 import BlobInventoryPolicySchema
from ._models_py3 import BlobRestoreParameters
from ._models_py3 import BlobRestoreRange
from ._models_py3 import BlobRestoreStatus
from ._models_py3 import BlobServiceItems
from ._models_py3 import BlobServiceProperties
from ._models_py3 import ChangeFeed
from ._models_py3 import CheckNameAvailabilityResult
from ._models_py3 import CloudErrorBody
from ._models_py3 import CorsRule
from ._models_py3 import CorsRules
from ._models_py3 import CustomDomain
from ._models_py3 import DateAfterCreation
from ._models_py3 import DateAfterModification
from ._models_py3 import DeleteRetentionPolicy
from ._models_py3 import DeletedAccount
from ._models_py3 import DeletedAccountListResult
from ._models_py3 import DeletedShare
from ._models_py3 import Dimension
from ._models_py3 import Encryption
from ._models_py3 import EncryptionScope
from ._models_py3 import EncryptionScopeKeyVaultProperties
from ._models_py3 import EncryptionScopeListResult
from ._models_py3 import EncryptionService
from ._models_py3 import EncryptionServices
from ._models_py3 import Endpoints
from ._models_py3 import ErrorResponse
from ._models_py3 import ErrorResponseBody
from ._models_py3 import ExtendedLocation
from ._models_py3 import FileServiceItems
from ._models_py3 import FileServiceProperties
from ._models_py3 import FileShare
from ._models_py3 import FileShareItem
from ._models_py3 import FileShareItems
from ._models_py3 import GeoReplicationStats
from ._models_py3 import IPRule
from ._models_py3 import Identity
from ._models_py3 import ImmutabilityPolicy
from ._models_py3 import ImmutabilityPolicyProperties
from ._models_py3 import KeyVaultProperties
from ._models_py3 import LastAccessTimeTrackingPolicy
from ._models_py3 import LeaseContainerRequest
from ._models_py3 import LeaseContainerResponse
from ._models_py3 import LegalHold
from ._models_py3 import LegalHoldProperties
from ._models_py3 import ListAccountSasResponse
from ._models_py3 import ListBlobInventoryPolicy
from ._models_py3 import ListContainerItem
from ._models_py3 import ListContainerItems
from ._models_py3 import ListQueue
from ._models_py3 import ListQueueResource
from ._models_py3 import ListQueueServices
from ._models_py3 import ListServiceSasResponse
from ._models_py3 import ListTableResource
from ._models_py3 import ListTableServices
from ._models_py3 import ManagementPolicy
from ._models_py3 import ManagementPolicyAction
from ._models_py3 import ManagementPolicyBaseBlob
from ._models_py3 import ManagementPolicyDefinition
from ._models_py3 import ManagementPolicyFilter
from ._models_py3 import ManagementPolicyRule
from ._models_py3 import ManagementPolicySchema
from ._models_py3 import ManagementPolicySnapShot
from ._models_py3 import MetricSpecification
from ._models_py3 import Multichannel
from ._models_py3 import NetworkRuleSet
from ._models_py3 import ObjectReplicationPolicies
from ._models_py3 import ObjectReplicationPolicy
from ._models_py3 import ObjectReplicationPolicyFilter
from ._models_py3 import ObjectReplicationPolicyRule
from ._models_py3 import Operation
from ._models_py3 import OperationDisplay
from ._models_py3 import OperationListResult
from ._models_py3 import PrivateEndpoint
from ._models_py3 import PrivateEndpointConnection
from ._models_py3 import PrivateEndpointConnectionListResult
from ._models_py3 import PrivateLinkResource
from ._models_py3 import PrivateLinkResourceListResult
from ._models_py3 import PrivateLinkServiceConnectionState
from ._models_py3 import ProtocolSettings
from ._models_py3 import ProxyResource
from ._models_py3 import QueueServiceProperties
from ._models_py3 import Resource
from ._models_py3 import ResourceAccessRule
from ._models_py3 import RestorePolicyProperties
from ._models_py3 import Restriction
from ._models_py3 import RoutingPreference
from ._models_py3 import SKUCapability
from ._models_py3 import ServiceSasParameters
from ._models_py3 import ServiceSpecification
from ._models_py3 import Sku
from ._models_py3 import SkuInformation
from ._models_py3 import SmbSetting
from ._models_py3 import StorageAccount
from ._models_py3 import StorageAccountCheckNameAvailabilityParameters
from ._models_py3 import StorageAccountCreateParameters
from ._models_py3 import StorageAccountInternetEndpoints
from ._models_py3 import StorageAccountKey
from ._models_py3 import StorageAccountListKeysResult
from ._models_py3 import StorageAccountListResult
from ._models_py3 import StorageAccountMicrosoftEndpoints
from ._models_py3 import StorageAccountRegenerateKeyParameters
from ._models_py3 import StorageAccountUpdateParameters
from ._models_py3 import StorageQueue
from ._models_py3 import StorageSkuListResult
from ._models_py3 import SystemData
from ._models_py3 import Table
from ._models_py3 import TableServiceProperties
from ._models_py3 import TagFilter
from ._models_py3 import TagProperty
from ._models_py3 import TrackedResource
from ._models_py3 import UpdateHistoryProperty
from ._models_py3 import Usage
from ._models_py3 import UsageListResult
from ._models_py3 import UsageName
from ._models_py3 import VirtualNetworkRule
except (SyntaxError, ImportError):
from ._models import AccountSasParameters # type: ignore
from ._models import ActiveDirectoryProperties # type: ignore
from ._models import AzureEntityResource # type: ignore
from ._models import AzureFilesIdentityBasedAuthentication # type: ignore
from ._models import BlobContainer # type: ignore
from ._models import BlobInventoryPolicy # type: ignore
from ._models import BlobInventoryPolicyDefinition # type: ignore
from ._models import BlobInventoryPolicyFilter # type: ignore
from ._models import BlobInventoryPolicyRule # type: ignore
from ._models import BlobInventoryPolicySchema # type: ignore
from ._models import BlobRestoreParameters # type: ignore
from ._models import BlobRestoreRange # type: ignore
from ._models import BlobRestoreStatus # type: ignore
from ._models import BlobServiceItems # type: ignore
from ._models import BlobServiceProperties # type: ignore
from ._models import ChangeFeed # type: ignore
from ._models import CheckNameAvailabilityResult # type: ignore
from ._models import CloudErrorBody # type: ignore
from ._models import CorsRule # type: ignore
from ._models import CorsRules # type: ignore
from ._models import CustomDomain # type: ignore
from ._models import DateAfterCreation # type: ignore
from ._models import DateAfterModification # type: ignore
from ._models import DeleteRetentionPolicy # type: ignore
from ._models import DeletedAccount # type: ignore
from ._models import DeletedAccountListResult # type: ignore
from ._models import DeletedShare # type: ignore
from ._models import Dimension # type: ignore
from ._models import Encryption # type: ignore
from ._models import EncryptionScope # type: ignore
from ._models import EncryptionScopeKeyVaultProperties # type: ignore
from ._models import EncryptionScopeListResult # type: ignore
from ._models import EncryptionService # type: ignore
from ._models import EncryptionServices # type: ignore
from ._models import Endpoints # type: ignore
from ._models import ErrorResponse # type: ignore
from ._models import ErrorResponseBody # type: ignore
from ._models import ExtendedLocation # type: ignore
from ._models import FileServiceItems # type: ignore
from ._models import FileServiceProperties # type: ignore
from ._models import FileShare # type: ignore
from ._models import FileShareItem # type: ignore
from ._models import FileShareItems # type: ignore
from ._models import GeoReplicationStats # type: ignore
from ._models import IPRule # type: ignore
from ._models import Identity # type: ignore
from ._models import ImmutabilityPolicy # type: ignore
from ._models import ImmutabilityPolicyProperties # type: ignore
from ._models import KeyVaultProperties # type: ignore
from ._models import LastAccessTimeTrackingPolicy # type: ignore
from ._models import LeaseContainerRequest # type: ignore
from ._models import LeaseContainerResponse # type: ignore
from ._models import LegalHold # type: ignore
from ._models import LegalHoldProperties # type: ignore
from ._models import ListAccountSasResponse # type: ignore
from ._models import ListBlobInventoryPolicy # type: ignore
from ._models import ListContainerItem # type: ignore
from ._models import ListContainerItems # type: ignore
from ._models import ListQueue # type: ignore
from ._models import ListQueueResource # type: ignore
from ._models import ListQueueServices # type: ignore
from ._models import ListServiceSasResponse # type: ignore
from ._models import ListTableResource # type: ignore
from ._models import ListTableServices # type: ignore
from ._models import ManagementPolicy # type: ignore
from ._models import ManagementPolicyAction # type: ignore
from ._models import ManagementPolicyBaseBlob # type: ignore
from ._models import ManagementPolicyDefinition # type: ignore
from ._models import ManagementPolicyFilter # type: ignore
from ._models import ManagementPolicyRule # type: ignore
from ._models import ManagementPolicySchema # type: ignore
from ._models import ManagementPolicySnapShot # type: ignore
from ._models import MetricSpecification # type: ignore
from ._models import Multichannel # type: ignore
from ._models import NetworkRuleSet # type: ignore
from ._models import ObjectReplicationPolicies # type: ignore
from ._models import ObjectReplicationPolicy # type: ignore
from ._models import ObjectReplicationPolicyFilter # type: ignore
from ._models import ObjectReplicationPolicyRule # type: ignore
from ._models import Operation # type: ignore
from ._models import OperationDisplay # type: ignore
from ._models import OperationListResult # type: ignore
from ._models import PrivateEndpoint # type: ignore
from ._models import PrivateEndpointConnection # type: ignore
from ._models import PrivateEndpointConnectionListResult # type: ignore
from ._models import PrivateLinkResource # type: ignore
from ._models import PrivateLinkResourceListResult # type: ignore
from ._models import PrivateLinkServiceConnectionState # type: ignore
from ._models import ProtocolSettings # type: ignore
from ._models import ProxyResource # type: ignore
from ._models import QueueServiceProperties # type: ignore
from ._models import Resource # type: ignore
from ._models import ResourceAccessRule # type: ignore
from ._models import RestorePolicyProperties # type: ignore
from ._models import Restriction # type: ignore
from ._models import RoutingPreference # type: ignore
from ._models import SKUCapability # type: ignore
from ._models import ServiceSasParameters # type: ignore
from ._models import ServiceSpecification # type: ignore
from ._models import Sku # type: ignore
from ._models import SkuInformation # type: ignore
from ._models import SmbSetting # type: ignore
from ._models import StorageAccount # type: ignore
from ._models import StorageAccountCheckNameAvailabilityParameters # type: ignore
from ._models import | |
<filename>tpx3format/read.py
import logging
import struct
import h5py
import numpy as np
from lib.constants import *
import os
# TODO: Logging does not work for multiprocessing processes on Windows
logger = logging.getLogger('root')
def read_positions(f):
    """Index a .tpx3 stream: yield positions of pixel data blocks.

    Walks the stream header by header while tracking rollovers of the 16 bit
    SPIDR timer, and yields ``[offset, size_in_bytes, chip_nr, rollover]``
    for every block of pixel packets found.

    :param f: binary file object positioned at the start of the stream
    """
    control_events = []
    i = 0
    # SPIDR rollover bookkeeping: the 16 bit timer wraps, so we count wraps
    # and use two hysteresis flags to classify packets near the wrap point.
    rollover_counter = 0
    approaching_rollover = False
    leaving_rollover = False
    while True:
        b = f.read(8)
        cursor = f.tell()
        if not b:
            # Reached EOF
            break
        if len(b) < 8:
            logger.error("Truncated file, no full header at file position %d. Continuing with what we have." % f.tell())
            break
        header = struct.unpack('<bbbbbbbb', b)
        chip_nr = header[4]
        mode = header[5]
        # Check for mode
        if mode != 0:
            logger.error("Header packet with mode %d. Code has been developed for mode 0." % mode)
        # Payload size in bytes (little-endian 16 bit from header bytes 6 and 7)
        size = ((0xff & header[7]) << 8) | (0xff & header[6])
        # Read the first package of the data package to figure out its type
        pkg_data = f.read(8)
        if len(pkg_data) < 8:
            logger.error("Truncated file, no first data packet found at file position %d. Continuing with what we have." % f.tell())
            break
        pkg = struct.unpack("<Q", pkg_data)[0]
        pkg_type = pkg >> 60
        # The SPIDR time is 16 bit (65536). It has a rollover time of 26.843 seconds
        time = pkg & 0xffff
        rollover = rollover_counter
        # Check if the time is nearing the limit of the rollover
        if time > 0.9 * 65536.:
            if leaving_rollover:
                # We have already increased the rollover counter, so we need to reset it
                rollover = rollover_counter - 1
            elif not approaching_rollover:
                # We must be approaching it
                logger.debug("Approaching SPIDR timer rollover")
                approaching_rollover = True
        # We have been approaching the rollover, so if now see a low time, it probably is a rollover
        if approaching_rollover and time < 0.01 * 65536.:
            logger.debug("SPIDR timer rollover")
            approaching_rollover = False
            leaving_rollover = True
            rollover_counter += 1
            rollover = rollover_counter
        # We are leaving the rollover, but we're far away by now
        if leaving_rollover and time > 0.1 * 65536.:
            logger.debug("Leaving SPIDR timer rollover")
            approaching_rollover = False
            leaving_rollover = False
        # Parse the different package types
        if pkg_type == 0x7:
            control_event = parse_control_packet(pkg, size)
            if control_event:
                # NOTE(review): control_events is accumulated but never exposed
                # by this generator; kept for behavioral parity / debugging.
                control_events.append(control_event)
        elif pkg_type == 0x4:
            parse_heartbeat_packet(pkg, size)
            # TODO: Use heartbeat packages in calculating time
            # Heartbeat packages are always followed by a 0x7145 or 0x7144 control package, and then possibly
            # pixels. Continue to parse those pixels, but strip away the control package
            if size - (8 * 2) > 0:
                yield [cursor + 16, size - (8 * 2), chip_nr, rollover]
        elif pkg_type == 0x6:
            # (removed a stray dead `pass` statement that preceded this log call)
            logger.debug("TDC timestamp at position %d len %d" % (cursor, size))
            # TODO: Use TDC packages
            # tdc = parse_tdc_packet(pkg)
        elif pkg_type == 0xb:
            yield [cursor, size, chip_nr, rollover]
            i += 1
        else:
            logger.warning("Found packet with unknown type %d" % pkg_type)
        # Skip over the data packets and to the next header
        f.seek(cursor + size, 0)
def parse_heartbeat_packet(pkg, size):
    """Log the LSB or MSB half of a heartbeat timestamp packet."""
    subtype = pkg >> 56
    # Both halves carry a 32 bit payload starting at bit 16
    payload = (pkg >> 16) & 0xffffffff
    if subtype == 0x44:
        logger.debug('Heartbeat (LSB). lsb %d. len %d' % (payload, size))
    if subtype == 0x45:
        logger.debug('Heartbeat (MSB). msb %d. len %d' % (payload << 32, size))
    return
# TDC (Time to Digital Converter) packages can come from the external trigger
def parse_tdc_packet(pkg):
    """Decode the fields of a TDC timestamp packet and log them."""
    tdc_type = pkg >> 56
    trigger_counter = (pkg >> 44) & 0xfff
    coarse_time = (pkg >> 9) & 0x3ffffffff
    fine_stamp = (pkg >> 4) & 0xf
    logger.debug("TDC package. Type: 0x%04x. Counter: %d. Timestamp: %d. Stamp: %d" % (tdc_type, trigger_counter, coarse_time, fine_stamp))
    return
def parse_control_packet(pkg, size):
    """Decode a control packet; return [control_type, chip_id, spidr_time]."""
    # Get SPIDR time and CHIP ID
    spidr_time = pkg & 0xffff
    chip_id = (pkg >> 16) & 0xffff
    control_type = pkg >> 48
    if size / 8 > 1:
        logger.warning("Control data packet is followed by more data. This is unexpected")
    # Known control packet types and their debug-log templates
    templates = {
        CONTROL_END_OF_COMMAND: 'EndOfCommand on chip ID %04x at SPIDR_TIME %5d',
        CONTROL_END_OF_READOUT: 'EndOfReadOut on chip ID %04x at SPIDR_TIME %5d',
        CONTROL_END_OF_SEQUANTIAL_COMMAND: 'EndOfResetSequentialCommand on chip ID %04x at SPIDR_TIME %5d',
        CONTROL_OTHER_CHIP_COMMAND: 'OtherChipCommand on chip ID %04x at SPIDR_TIME %5d',
    }
    template = templates.get(control_type)
    if template is not None:
        logger.debug(template % (chip_id, spidr_time))
    else:
        logger.debug('Unknown control packet (0x%04x) on chip ID %04x at SPIDR_TIME %5d' % (pkg >> 48, chip_id, spidr_time))
    return [control_type, chip_id, spidr_time]
def check_tot_correction(correct_file):
    """Validate a ToT correction file without loading the full matrix.

    :param correct_file: path to the HDF5 correction file, or "0"/None to skip
    :return: True when usable (or no correction requested), otherwise an
        error string describing the problem.
    """
    if correct_file == "0" or correct_file is None:
        # No ToT correction requested
        return True
    if not os.path.exists(correct_file):
        return "ToT correction file (%s) does not exists" % correct_file
    # Use a context manager so the HDF5 handle is closed again (the original
    # code leaked the open file).
    with h5py.File(correct_file, 'r') as f:
        if 'tot_correction' not in f:
            return "ToT correction file %s does not contain a tot_correction matrix" % correct_file
        data = f['tot_correction']
        logger.info("Found ToT correction file that was created on %s" % data.attrs['creation_date'])
    return True
def read_tot_correction(correct_file):
    """Load the ToT correction matrix from an HDF5 file.

    :param correct_file: path to the HDF5 correction file, or "0"/None to skip
    :return: the correction matrix as a numpy array, or None when no
        correction was requested.
    """
    # Treat None like "0" for consistency with check_tot_correction(), which
    # already accepts both; previously None would crash h5py.File().
    if correct_file == "0" or correct_file is None:
        # No ToT correction requested
        return None
    # Context manager closes the file handle (was leaked before); the [()]
    # read materialises the dataset into memory before the file closes.
    with h5py.File(correct_file, 'r') as f:
        return f['tot_correction'][()]
def remove_cross_hits(hits):
    """Return *hits* with the pixels on the inter-chip cross edges removed."""
    # Edge rows/columns per chip that make up the cross region between the
    # four chips (chip orientation: 2 1 / 3 0).
    cross_edges = (
        (3, 'x', 255), (3, 'y', 255),
        (0, 'x', 0), (0, 'y', 255),
        (1, 'x', 255), (1, 'y', 255),
        (2, 'x', 0), (2, 'y', 255),
    )
    on_cross = np.zeros(len(hits), dtype=bool)
    for chip, axis, edge in cross_edges:
        on_cross |= (hits['chipId'] == chip) & (hits[axis] == edge)
    # Keep everything that is not on a cross edge
    return hits[~on_cross]
def apply_tot_correction(tot_correction, ToT, y, x, chip_id):
    """Look up the correction value for one hit as a plain Python scalar."""
    index = (ToT, y, x, chip_id)
    return tot_correction.item(index)
def apply_toa_railroad_correction_phase1_um(x, cToA, chipId):
    """Correct cToA for the railroad columns (pllConfig 30), UM variant."""
    # Columns 194..205 form the railroad region
    if 193 < x < 206:
        cToA -= 16
    # Chips 2, 3 and 0 in Maastricht/Basel: columns 204/205 get no net shift
    if chipId in (2, 3, 0) and x in (204, 205):
        cToA += 16
    # Chip 1 in Maastricht/Basel has extra shifted columns at 186/187
    if chipId == 1 and x in (186, 187):
        cToA -= 16
    return cToA
def apply_toa_railroad_correction_phase1_basel(x, cToA, chipId):
    """Correct cToA for the railroad columns (pllConfig 30), Basel variant."""
    # Columns 194..205 form the railroad region
    if 193 < x < 206:
        cToA -= 16
    # NOTE(review): original comment said "Chips 1, 3, 0" but the code only
    # checks 3 and 0 — behavior preserved here; confirm intended chip set.
    if chipId in (3, 0) and x in (204, 205):
        cToA += 16
    # Chip 2 has extra shifted columns at 186/187
    if chipId == 2 and x in (186, 187):
        cToA -= 16
    return cToA
def apply_toa_railroad_correction_phase2(x, cToA):
    """Correct cToA for the railroad columns (pllConfig 94)."""
    # Railroad columns for pllConfig 94
    if x in (196, 197, 200, 201, 204, 205):
        cToA -= 16
    return cToA
def apply_toa_phase2_correction(x, cToA):
    """Apply the PHASE 2 (pllConfig 94) per-column cToA offset."""
    # Columns with x % 4 in {2, 3} are shifted by half a superpixel
    if int(x % 4) in (2, 3):
        cToA -= 8
    return cToA
def calculate_image_shape(hits_cross_extra_offset):
    """Edge length of the combined image: four 256px chips plus cross padding."""
    return 2 * hits_cross_extra_offset + 512
def combine_chips(hits, hits_cross_extra_offset):
    """Remap per-chip (x, y) into the combined image coordinates, in place.

    Chips are oriented like this:
        2 1
        3 0
    """
    # Extra offset required for the widened cross pixels
    shift = 256 + 2 * hits_cross_extra_offset
    # ChipId 0: bottom-right, flipped vertically
    chip0 = hits['chipId'] == 0
    hits['x'][chip0] = hits['x'][chip0] + shift
    hits['y'][chip0] = 255 - hits['y'][chip0] + shift
    # ChipId 1: top-right, flipped horizontally
    chip1 = hits['chipId'] == 1
    hits['x'][chip1] = 255 - hits['x'][chip1] + shift
    # ChipId 2: top-left, flipped horizontally
    chip2 = hits['chipId'] == 2
    hits['x'][chip2] = 255 - hits['x'][chip2]
    # ChipId 3: bottom-left, flipped vertically
    chip3 = hits['chipId'] == 3
    hits['y'][chip3] = 255 - hits['y'][chip3] + shift
def marker_pixel(hits, pixel):
    """Strip hits belonging to a marker pixel and report its ToA range.

    :return: (hits_without_marker, min_toa, max_toa); (-1, -1) when the
        marker pixel produced no hits in this chunk.
    """
    is_marker = ((hits['chipId'] == pixel['chipId'])
                 & (hits['x'] == pixel['x'])
                 & (hits['y'] == pixel['y']))
    if not is_marker.any():
        # Marker pixel not found in chunk
        return hits, -1, -1
    marker_toas = hits['ToA'][is_marker]
    # Drop every hit of the marker pixel from the output
    return hits[~is_marker], marker_toas.min(), marker_toas.max()
def parse_data_packages(positions, f, tot_correction, settings):
# Allocate | |
*/
#~ int x, y; /* pointer x, y coordinates in event window */
#~ int x_root, y_root; /* coordinates relative to root */
#~ int mode; /* NotifyNormal, NotifyGrab, NotifyUngrab */
#~ int detail;
#~ /*
#~ * NotifyAncestor, NotifyVirtual, NotifyInferior,
#~ * NotifyNonlinear,NotifyNonlinearVirtual
#~ */
#~ Bool same_screen; /* same screen flag */
#~ Bool focus; /* boolean focus */
#~ unsigned int state; /* key or button mask */
#~ } XCrossingEvent;
#~ typedef XCrossingEvent XEnterWindowEvent;
#~ typedef XCrossingEvent XLeaveWindowEvent;
class XCrossingEvent(Structure):
    """ctypes mirror of Xlib's XCrossingEvent (EnterNotify/LeaveNotify).

    Field order must match the C typedef quoted above exactly.
    """
    _fields_ = [
        ('type', c_int),
        ('serial', c_ulong),
        ('send_event', Bool),
        ('display', POINTER(Display)),
        ('window', Window),
        ('root', Window),
        ('subwindow', Window),
        ('time', Time),
        ('x', c_int),
        ('y', c_int),
        ('x_root', c_int),
        ('y_root', c_int),
        ('mode', c_int),
        ('detail', c_int),
        ('same_screen', Bool),
        ('focus', Bool),
        ('state', c_uint),
    ]
# Xlib aliases: enter/leave window events share the XCrossingEvent layout.
XEnterWindowEvent = XCrossingEvent
XLeaveWindowEvent = XCrossingEvent
#~ typedef struct {
#~ int type; /* FocusIn or FocusOut */
#~ unsigned long serial; /* # of last request processed by server */
#~ Bool send_event; /* true if this came from a SendEvent request */
#~ Display *display; /* Display the event was read from */
#~ Window window; /* window of event */
#~ int mode; /* NotifyNormal, NotifyWhileGrabbed,
#~ NotifyGrab, NotifyUngrab */
#~ int detail;
#~ /*
#~ * NotifyAncestor, NotifyVirtual, NotifyInferior,
#~ * NotifyNonlinear,NotifyNonlinearVirtual, NotifyPointer,
#~ * NotifyPointerRoot, NotifyDetailNone
#~ */
#~ } XFocusChangeEvent;
#~ typedef XFocusChangeEvent XFocusInEvent;
#~ typedef XFocusChangeEvent XFocusOutEvent;
class XFocusChangeEvent(Structure):
    """ctypes mirror of Xlib's XFocusChangeEvent (FocusIn/FocusOut)."""
    _fields_ = [
        ('type', c_int),
        ('serial', c_ulong),
        ('send_event', Bool),
        ('display', POINTER(Display)),
        ('window', Window),
        ('mode', c_int),
        ('detail', c_int),
    ]
# Xlib aliases: focus-in/out events share the XFocusChangeEvent layout.
XFocusInEvent = XFocusChangeEvent
XFocusOutEvent = XFocusChangeEvent
#/* generated on EnterWindow and FocusIn when KeyMapState selected */
#~ typedef struct {
#~ int type;
#~ unsigned long serial; /* # of last request processed by server */
#~ Bool send_event; /* true if this came from a SendEvent request */
#~ Display *display; /* Display the event was read from */
#~ Window window;
#~ char key_vector[32];
#~ } XKeymapEvent;
class XKeymapEvent(Structure):
    """ctypes mirror of Xlib's XKeymapEvent (generated on EnterWindow/FocusIn)."""
    _fields_ = [
        ('type', c_int),
        ('serial', c_ulong),
        ('send_event', Bool),
        ('display', POINTER(Display)),
        ('window', Window),
        ('key_vector', c_char * 32),
    ]
#~ typedef struct {
#~ int type;
#~ unsigned long serial; /* # of last request processed by server */
#~ Bool send_event; /* true if this came from a SendEvent request */
#~ Display *display; /* Display the event was read from */
#~ Window window;
#~ int x, y;
#~ int width, height;
#~ int count; /* if non-zero, at least this many more */
#~ } XExposeEvent;
class XExposeEvent(Structure):
    """ctypes mirror of Xlib's XExposeEvent."""
    _fields_ = [
        ('type', c_int),
        ('serial', c_ulong),
        ('send_event', Bool),
        ('display', POINTER(Display)),
        ('window', Window),
        ('x', c_int),
        ('y', c_int),
        ('width', c_int),
        ('height', c_int),
        ('count', c_int),
    ]
#~ typedef struct {
#~ int type;
#~ unsigned long serial; /* # of last request processed by server */
#~ Bool send_event; /* true if this came from a SendEvent request */
#~ Display *display; /* Display the event was read from */
#~ Drawable drawable;
#~ int x, y;
#~ int width, height;
#~ int count; /* if non-zero, at least this many more */
#~ int major_code; /* core is CopyArea or CopyPlane */
#~ int minor_code; /* not defined in the core */
#~ } XGraphicsExposeEvent;
class XGraphicsExposeEvent(Structure):
    """ctypes mirror of Xlib's XGraphicsExposeEvent."""
    _fields_ = [
        ('type', c_int),
        ('serial', c_ulong),
        ('send_event', Bool),
        ('display', POINTER(Display)),
        ('drawable', Drawable),
        ('x', c_int),
        ('y', c_int),
        ('width', c_int),
        ('height', c_int),
        ('count', c_int),
        ('major_code', c_int),
        ('minor_code', c_int),
    ]
#~ typedef struct {
#~ int type;
#~ unsigned long serial; /* # of last request processed by server */
#~ Bool send_event; /* true if this came from a SendEvent request */
#~ Display *display; /* Display the event was read from */
#~ Drawable drawable;
#~ int major_code; /* core is CopyArea or CopyPlane */
#~ int minor_code; /* not defined in the core */
#~ } XNoExposeEvent;
class XNoExposeEvent(Structure):
    """ctypes mirror of Xlib's XNoExposeEvent."""
    _fields_ = [
        ('type', c_int),
        ('serial', c_ulong),
        ('send_event', Bool),
        ('display', POINTER(Display)),
        ('drawable', Drawable),
        ('major_code', c_int),
        ('minor_code', c_int),
    ]
#~ typedef struct {
#~ int type;
#~ unsigned long serial; /* # of last request processed by server */
#~ Bool send_event; /* true if this came from a SendEvent request */
#~ Display *display; /* Display the event was read from */
#~ Window window;
#~ int state; /* Visibility state */
#~ } XVisibilityEvent;
class XVisibilityEvent(Structure):
    """ctypes mirror of Xlib's XVisibilityEvent."""
    _fields_ = [
        ('type', c_int),
        ('serial', c_ulong),
        ('send_event', Bool),
        ('display', POINTER(Display)),
        ('window', Window),
        ('state', c_int),
    ]
#~ typedef struct {
#~ int type;
#~ unsigned long serial; /* # of last request processed by server */
#~ Bool send_event; /* true if this came from a SendEvent request */
#~ Display *display; /* Display the event was read from */
#~ Window parent; /* parent of the window */
#~ Window window; /* window id of window created */
#~ int x, y; /* window location */
#~ int width, height; /* size of window */
#~ int border_width; /* border width */
#~ Bool override_redirect; /* creation should be overridden */
#~ } XCreateWindowEvent;
class XCreateWindowEvent(Structure):
    """ctypes mirror of Xlib's XCreateWindowEvent."""
    _fields_ = [
        ('type', c_int),
        ('serial', c_ulong),
        ('send_event', Bool),
        ('display', POINTER(Display)),
        ('parent', Window),
        ('window', Window),
        ('x', c_int),
        ('y', c_int),
        ('width', c_int),
        ('height', c_int),
        ('border_width', c_int),
        ('override_redirect', Bool),
    ]
#~ typedef struct {
#~ int type;
#~ unsigned long serial; /* # of last request processed by server */
#~ Bool send_event; /* true if this came from a SendEvent request */
#~ Display *display; /* Display the event was read from */
#~ Window event;
#~ Window window;
#~ } XDestroyWindowEvent;
class XDestroyWindowEvent(Structure):
    """ctypes mirror of Xlib's XDestroyWindowEvent."""
    _fields_ = [
        ('type', c_int),
        ('serial', c_ulong),
        ('send_event', Bool),
        ('display', POINTER(Display)),
        ('event', Window),
        ('window', Window),
    ]
#~ typedef struct {
#~ int type;
#~ unsigned long serial; /* # of last request processed by server */
#~ Bool send_event; /* true if this came from a SendEvent request */
#~ Display *display; /* Display the event was read from */
#~ Window event;
#~ Window window;
#~ Bool from_configure;
#~ } XUnmapEvent;
class XUnmapEvent(Structure):
    """ctypes mirror of Xlib's XUnmapEvent."""
    _fields_ = [
        ('type', c_int),
        ('serial', c_ulong),
        ('send_event', Bool),
        ('display', POINTER(Display)),
        ('event', Window),
        ('window', Window),
        ('from_configure', Bool),
    ]
#~ typedef struct {
#~ int type;
#~ unsigned long serial; /* # of last request processed by server */
#~ Bool send_event; /* true if this came from a SendEvent request */
#~ Display *display; /* Display the event was read from */
#~ Window event;
#~ Window window;
#~ Bool override_redirect; /* boolean, is override set... */
#~ } XMapEvent;
class XMapEvent(Structure):
    """ctypes mirror of Xlib's XMapEvent."""
    _fields_ = [
        ('type', c_int),
        ('serial', c_ulong),
        ('send_event', Bool),
        ('display', POINTER(Display)),
        ('event', Window),
        ('window', Window),
        ('override_redirect', Bool),
    ]
#~ typedef struct {
#~ int type;
#~ unsigned long serial; /* # of last request processed by server */
#~ Bool send_event; /* true if this came from a SendEvent request */
#~ Display *display; /* Display the event was read from */
#~ Window parent;
#~ Window window;
#~ } XMapRequestEvent;
class XMapRequestEvent(Structure):
    """ctypes mirror of Xlib's XMapRequestEvent.

    The first Window member is named ``parent`` in the C typedef quoted
    above; this field was previously mis-named ``event``. The binary layout
    is unchanged (both are Window), only the attribute name is corrected.
    """
    _fields_ = [
        ('type', c_int),
        ('serial', c_ulong),
        ('send_event', Bool),
        ('display', POINTER(Display)),
        ('parent', Window),
        ('window', Window),
    ]
    # Backward-compatibility alias for code that still reads `.event`.
    event = property(lambda self: self.parent)
#~ typedef struct {
#~ int type;
#~ unsigned long serial; /* # of last request processed by server */
#~ Bool send_event; /* true if this came from a SendEvent request */
#~ Display *display; /* Display the event was read from */
#~ Window event;
#~ Window window;
#~ Window parent;
#~ int x, y;
#~ Bool override_redirect;
#~ } XReparentEvent;
class XReparentEvent(Structure):
    """ctypes mirror of Xlib's XReparentEvent."""
    _fields_ = [
        ('type', c_int),
        ('serial', c_ulong),
        ('send_event', Bool),
        ('display', POINTER(Display)),
        ('event', Window),
        ('window', Window),
        ('parent', Window),
        ('x', c_int),
        ('y', c_int),
        ('override_redirect', Bool),
    ]
#~ typedef struct {
#~ int type;
#~ unsigned long serial; /* # of last request processed by server */
#~ Bool send_event; /* true if this came from a SendEvent request */
#~ Display *display; /* Display the event was read from */
#~ Window event;
#~ Window window;
#~ int x, y;
#~ int width, height;
#~ int border_width;
#~ Window above;
#~ Bool override_redirect;
#~ } XConfigureEvent;
class XConfigureEvent(Structure):
_fields_ = [
('type', c_int),
('serial', c_ulong),
('send_event', Bool),
('display', POINTER(Display)),
('event', Window),
('window', Window),
('x', c_int),
('y', c_int),
('width', c_int),
('height', c_int),
| |
<filename>py/test/pytests/fingerprint_mcu.py
# Copyright 2018 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A factory test for the Fingerprint sensor.
Description
-----------
Tests that the fingerprint sensor is connected properly and has no defect
by executing commands through the fingerprint micro-controller.
Test Procedure
--------------
This is an automated test without user interaction,
it might use a rubber finger pressed against the sensor by a proper fixture.
Dependency
----------
The pytest supposes that the system as a fingerprint MCU exposed through the
kernel cros_ec driver as ``/dev/cros_fp``.
When available, it uses the vendor 'libfputils' shared library and its Python
helper to compute the image quality signal-to-noise ratio.
Examples
--------
Minimum runnable example to check if the fingerprint sensor is connected
properly and fits the default quality settings::
{
"pytest_name": "fingerprint_mcu"
}
To check if the sensor has at most 10 dead pixels and its HWID is 0x140c,
with bounds for the pixel grayscale median values and finger detection zones,
add this in test list::
{
"pytest_name": "fingerprint_mcu",
"args": {
"dead_pixel_max": 10,
"sensor_hwid": [
1234,
[5120, 65520]
],
"pixel_median": {
"cb_type1" : [180, 220],
"cb_type2" : [80, 120],
"icb_type1" : [15, 70],
"icb_type2" : [155, 210]
},
"detect_zones" : [
[8, 16, 15, 23], [24, 16, 31, 23], [40, 16, 47, 23],
[8, 66, 15, 73], [24, 66, 31, 73], [40, 66, 47, 73],
[8, 118, 15, 125], [24, 118, 31, 125], [40, 118, 47, 125],
[8, 168, 15, 175], [24, 168, 31, 175], [40, 168, 47, 175]
]
}
}
"""
import logging
import sys
import unittest
import numpy
from cros.factory.device import device_utils
from cros.factory.testlog import testlog
from cros.factory.test.utils import fpmcu_utils
from cros.factory.utils import type_utils
from cros.factory.utils.arg_utils import Arg
from cros.factory.utils import schema
# use the fingerprint image processing library if available
sys.path.extend(['/usr/local/opt/fpc', '/opt/fpc'])
try:
import fputils
libfputils = fputils.FpUtils()
except ImportError:
libfputils = None
# JSON schema for the `sensor_hwid` test argument: either a single HWID
# (int or null), or a list mixing exact int values and [min, max] ranges.
_ARG_SENSOR_HWID_SCHEMA = schema.JSONSchemaDict(
    'sensor hwid schema object',
    {
        'anyOf': [
            {
                'type': ['integer', 'null']
            },
            {
                'type': 'array',
                'items': {
                    'anyOf': [
                        {
                            'type': 'integer'
                        },
                        {
                            'type': 'array',
                            'items': {
                                'type': 'integer'
                            },
                            'minItems': 2,
                            'maxItems': 2
                        }
                    ]
                }
            }
        ]
    })
class FingerprintTest(unittest.TestCase):
"""Tests the fingerprint sensor."""
ARGS = [
Arg('max_dead_pixels', int,
'The maximum number of dead pixels on the fingerprint sensor.',
default=10),
Arg('max_dead_detect_pixels', int,
'The maximum number of dead pixels in the detection zone.',
default=0),
Arg('max_pixel_dev', int,
'The maximum deviation from the median for a pixel of a given type.',
default=35),
Arg('pixel_median', dict,
'Keys: "(cb|icb)_(type1|type2)", '
'Values: a list of [minimum, maximum] '
'Range constraints of the pixel median value of the checkerboards.',
default={}),
Arg('detect_zones', list,
'a list of rectangles [x1, y1, x2, y2] defining '
'the finger detection zones on the sensor.',
default=[]),
Arg('min_snr', float,
'The minimum signal-to-noise ratio for the image quality.',
default=0.0),
Arg('rubber_finger_present', bool,
'A Rubber finger is pressed against the sensor for quality testing.',
default=False),
Arg('max_reset_pixel_dev', int,
'The maximum deviation from the median per column for a pixel from '
'test reset image.',
default=55),
Arg('max_error_reset_pixels', int,
'The maximum number of error pixels in the test_reset image.',
default=5),
Arg('fpframe_retry_count', int,
'The maximum number of retry for fpframe.',
default=0),
]
# MKBP index for Fingerprint sensor event
EC_MKBP_EVENT_FINGERPRINT = '5'
  def setUp(self):
    """Create the DUT interface and the fingerprint MCU wrapper."""
    self._dut = device_utils.CreateDUTInterface()
    self._fpmcu = fpmcu_utils.FpmcuDevice(self._dut)
  def tearDown(self):
    """Put the sensor back in reset mode after the test run."""
    self._fpmcu.FpmcuCommand('fpmode', 'reset')
  def FpmcuTryWaitEvent(self, *args, **kwargs):
    """Wait for an MKBP event, logging (rather than raising) on failure.

    Best-effort by design: a timeout here is tolerated and the capture
    flow continues.
    """
    try:
      self._fpmcu.FpmcuCommand('waitevent', *args, **kwargs)
    except Exception as e:
      logging.error('Wait event fail: %s', e)
def FpmcuGetFpframe(self, *args, **kwargs):
# try fpframe command for at most (fpframe_retry_count + 1) times.
for num_retries in range(self.args.fpframe_retry_count + 1):
try:
img = self._fpmcu.FpmcuCommand('fpframe', *args, **kwargs)
break
except Exception as e:
if num_retries < self.args.fpframe_retry_count:
logging.info('Retrying fpframe %d times', num_retries + 1)
else:
# raise an exception if last attempt failed
raise e
return img
def IsDetectZone(self, x, y):
for x1, y1, x2, y2 in self.args.detect_zones:
if (x in range(x1, x2 + 1) and
y in range(y1, y2 + 1)):
return True
return False
def CheckPnmAndExtractPixels(self, pnm):
if not pnm:
raise type_utils.TestFailure('Failed to retrieve image')
lines = pnm.split('\n')
if lines[0].strip() != 'P2':
raise type_utils.TestFailure('Unsupported/corrupted image')
try:
# strip header/footer
pixel_lines = lines[3:-1]
except (IndexError, ValueError):
raise type_utils.TestFailure('Corrupted image')
return pixel_lines
def CalculateMedianAndDev(self, matrix):
# Transform the 2D array of triples in a 1-D array of triples
pixels = matrix.reshape((-1, 3))
median = numpy.median([v for v, x, y in pixels])
dev = [(abs(v - median), x, y) for v, x, y in pixels]
return median, dev
def ProcessCheckboardPixels(self, lines, parity):
# Keep only type-1 or type-2 pixels depending on parity
matrix = numpy.array([[(int(v), x, y) for x, v
in enumerate(l.strip().split())
if (x + y) % 2 == parity]
for y, l in enumerate(lines)])
return self.CalculateMedianAndDev(matrix)
  def CheckerboardTest(self, inverted=False):
    """Capture a (possibly inverted) checkerboard pattern and check quality.

    Validates dead-pixel counts (overall and inside the detect zones) and,
    when configured, the median grayscale range of each checkerboard type.

    Args:
      inverted: capture the inverted test pattern instead of the normal one.

    Raises:
      type_utils.TestFailure: when any quality criterion is violated.
    """
    full_name = 'Inv. checkerboard' if inverted else 'Checkerboard'
    short_name = 'icb' if inverted else 'cb'
    # trigger the checkerboard test pattern and capture it
    self._fpmcu.FpmcuCommand('fpmode', 'capture',
                             'pattern1' if inverted else 'pattern0')
    # wait for the end of capture (or timeout after 500 ms)
    self.FpmcuTryWaitEvent(self.EC_MKBP_EVENT_FINGERPRINT, '500')
    # retrieve the resulting image as a PNM
    pnm = self.FpmcuGetFpframe()
    pixel_lines = self.CheckPnmAndExtractPixels(pnm)
    # Build arrays of black and white pixels (aka Type-1 / Type-2)
    # Compute pixels parameters for each type
    median1, dev1 = self.ProcessCheckboardPixels(pixel_lines, 0)
    median2, dev2 = self.ProcessCheckboardPixels(pixel_lines, 1)
    all_dev = dev1 + dev2
    max_dev = numpy.max([d for d, _, _ in all_dev])
    # Count dead pixels (deviating too much from the median)
    dead_count = 0
    dead_detect_count = 0
    for d, x, y in all_dev:
      if d > self.args.max_pixel_dev:
        dead_count += 1
        if self.IsDetectZone(x, y):
          dead_detect_count += 1
    # Log everything first for debugging
    logging.info('%s type 1 median:\t%d', full_name, median1)
    logging.info('%s type 2 median:\t%d', full_name, median2)
    logging.info('%s max deviation:\t%d', full_name, max_dev)
    logging.info('%s dead pixels:\t%d', full_name, dead_count)
    logging.info('%s dead pixels in detect zones:\t%d',
                 full_name, dead_detect_count)
    testlog.UpdateParam(
        name='dead_pixels_%s' % short_name,
        description='Number of dead pixels',
        value_unit='pixels')
    if not testlog.CheckNumericParam(
        name='dead_pixels_%s' % short_name,
        value=dead_count,
        max=self.args.max_dead_pixels):
      raise type_utils.TestFailure('Too many dead pixels')
    testlog.UpdateParam(
        name='dead_detect_pixels_%s' % short_name,
        description='Dead pixels in detect zone',
        value_unit='pixels')
    if not testlog.CheckNumericParam(
        name='dead_detect_pixels_%s' % short_name,
        value=dead_detect_count,
        max=self.args.max_dead_detect_pixels):
      raise type_utils.TestFailure('Too many dead pixels in detect zone')
    # Check specified pixel range constraints
    t1 = "%s_type1" % short_name
    testlog.UpdateParam(
        name=t1,
        description='Median Type-1 pixel value',
        value_unit='8-bit grayscale')
    if t1 in self.args.pixel_median and not testlog.CheckNumericParam(
        name=t1,
        value=median1,
        min=self.args.pixel_median[t1][0],
        max=self.args.pixel_median[t1][1]):
      raise type_utils.TestFailure('Out of range Type-1 pixels')
    t2 = "%s_type2" % short_name
    testlog.UpdateParam(
        name=t2,
        description='Median Type-2 pixel value',
        value_unit='8-bit grayscale')
    if t2 in self.args.pixel_median and not testlog.CheckNumericParam(
        name=t2,
        value=median2,
        min=self.args.pixel_median[t2][0],
        max=self.args.pixel_median[t2][1]):
      raise type_utils.TestFailure('Out of range Type-2 pixels')
  def CalculateMedianAndDevPerColumns(self, matrix):
    """Return per-column medians and per-pixel deviations from that median.

    matrix is a 2D grid of (value, x, y) triples; columns are indexed by x.
    """
    # The data flow of the input matrix would be
    # 1. original matrix:
    #     [1, 2, 150]
    #     [1, 2, 150]
    #     [1, 2, 3  ]
    # 2. rotate 90 to access column in an index of array:
    #     [150, 150, 3]
    #     [2,   2,   2]
    #     [1,   1,   1]
    # 3. flipud first level of array to put first column at index 0:
    #     [1,   1,   1]
    #     [2,   2,   2]
    #     [150, 150, 3]
    # 4. medians per column - [1, 2, 150]
    # 5. devs per column:
    #     [0, 0, 0  ]
    #     [0, 0, 0  ]
    #     [0, 0, 147]
    matrix = numpy.rot90(matrix)
    matrix = numpy.flipud(matrix)
    medians = [numpy.median([v for v, x, y in l]) for l in matrix]
    devs = [[(abs(v - medians[x]), x, y) for v, x, y in l] for l in matrix]
    return medians, devs
def ProcessResetPixelImage(self, lines):
matrix = numpy.array([[(int(v), x, y) for x, v
in enumerate(l.strip().split())]
for y, l in enumerate(lines)])
return self.CalculateMedianAndDevPerColumns(matrix)
def ResetPixelTest(self):
# reset the sensor and leave it in reset state then capture the single
# frame.
self._fpmcu.FpmcuCommand('fpmode', 'capture', 'test_reset')
# wait for the end of capture (or timeout after 500 ms)
self.FpmcuTryWaitEvent(self.EC_MKBP_EVENT_FINGERPRINT, '500')
# retrieve the resulting image as a PNM
pnm = self.FpmcuGetFpframe()
pixel_lines = self.CheckPnmAndExtractPixels(pnm)
# Compute median value and the deviation of every pixels per column.
medians, devs = self.ProcessResetPixelImage(pixel_lines)
# Count error pixels (deviating too much from the median)
error_count = 0
max_dev_per_columns = [numpy.max([d for d, _, _ in col]) for col in devs]
for | |
import plotly.graph_objects as go
import disnake
from datetime import datetime, timedelta, timezone
from disnake.ext import commands
from utils.discord_utils import image_to_file, format_number
from utils.setup import db_stats, db_users
from utils.arguments_parser import parse_speed_args
from utils.table_to_image import table_to_image
from utils.time_converter import (
format_datetime,
format_timezone,
round_minutes_down,
str_to_td,
td_format,
)
from utils.plot_utils import add_glow, get_theme, fig2img, hex_to_rgba_string
from utils.image.image_utils import hex_str_to_int, v_concatenate
from utils.pxls.cooldown import get_best_possible
from utils.timezoneslib import get_timezone
from utils.utils import in_executor
class PxlsSpeed(commands.Cog):
    def __init__(self, bot: commands.Bot):
        # Keep a reference to the running bot for use by the commands below.
        self.bot: commands.Bot = bot
@commands.slash_command(name="speed")
async def _speed(
self,
inter: disnake.AppCmdInter,
usernames: str = None,
last: str = None,
canvas: bool = False,
groupby: str = commands.Param(
default=None, choices=["hour", "day", "week", "month", "canvas"]
),
progress: bool = False,
before: str = None,
after: str = None,
):
"""Show the speed of a pxls user with a graph.
Parameters
----------
usernames: A list pxls user name separated by a space. ('!' = your set username.)
last: Show the speed in the last x year/month/week/day/hour/minute/second. (format: ?y?mo?w?d?h?m?s)
canvas: To show the speed during the whole current canvas.
groupby: Show a bar chart for each hour/day/week/month/canvas/.
progress: To compare the progress instead of alltime/canvas stats.
before: To show the speed before a specific date (format: YYYY-mm-dd HH:MM)
after: To show the speed after a specific date (format: YYYY-mm-dd HH:MM)
"""
await inter.response.defer()
args = ()
if usernames:
args += tuple(usernames.split(" "))
if last:
args += ("-last", last)
if canvas:
args += ("-canvas",)
if groupby:
args += ("-groupby", groupby)
if progress:
args += ("-progress",)
if before:
args += ("-before",) + tuple(before.split(" "))
if after:
args += ("-after",) + tuple(after.split(" "))
await self.speed(inter, *args)
    @commands.command(
        name="speed",
        usage="<name> [-canvas] [-groupby [hour|day|week|month]] [-progress] [-last <?d?h?m?s>] [-before <date time>] [-after <date time>]",
        description="Show the speed of a pxls user with a graph.",
        help="""- `<names>`: list of pxls users names separated by a space (`!` = your set username)
        - `[-canvas|-c]`: show the canvas stats
        - `[-groupby|-g]`: show a bar chart for each `hour`, `day`, `week`, `month` or `canvas`
        - `[-progress|-p]`: compare the progress between users
        - `[-last ?y?mo?w?d?h?m?s]` Show the progress in the last x years/months/weeks/days/hours/minutes/seconds (default: 1d)
        - `[-before <date time>]`: show the speed before a date and time (format YYYY-mm-dd HH:MM)
        - `[-after <date time>]`: show the speed after a date and time (format YYYY-mm-dd HH:MM)""",
    )
    async def p_speed(self, ctx, *args):
        # Prefix-command wrapper: show a typing indicator while delegating
        # to the shared speed() implementation.
        async with ctx.typing():
            await self.speed(ctx, *args)
async def speed(self, ctx, *args):
"""Show the average speed of a user in the last x min, hours or days"""
# get the user theme
discord_user = await db_users.get_discord_user(ctx.author.id)
user_timezone = discord_user["timezone"]
current_user_theme = discord_user["color"] or "default"
font = discord_user["font"]
theme = get_theme(current_user_theme)
try:
param = parse_speed_args(args, get_timezone(user_timezone))
except ValueError as e:
return await ctx.send(f"❌ {e}")
# select the discord user's pxls username if it has one linked
names = param["names"]
pxls_user_id = discord_user["pxls_user_id"]
is_slash = not isinstance(ctx, commands.Context)
cmd_name = "user setname" if is_slash else "setname"
prefix = "/" if is_slash else ctx.prefix
usage_text = (
f"(You can set your default username with `{prefix}{cmd_name} <username>`)"
)
if len(names) == 0:
if pxls_user_id is None:
return await ctx.send(
"❌ You need to specify at least one username.\n" + usage_text
)
else:
name = await db_users.get_pxls_user_name(pxls_user_id)
names.append(name)
if "!" in names:
if pxls_user_id is None:
return await ctx.send(
"❌ You need to have a set username to use `!`.\n" + usage_text
)
else:
name = await db_users.get_pxls_user_name(pxls_user_id)
names = [name if u == "!" else u for u in names]
# check on date arguments
canvas_opt = param["canvas"]
groupby_opt = param["groupby"]
if param["before"] is None and param["after"] is None:
# if no date argument and -canvas : show the whole canvas
if param["last"] is None and canvas_opt:
old_time = datetime(1900, 1, 1, 0, 0, 0)
recent_time = datetime.now(timezone.utc)
else:
date = param["last"] or "1d"
input_time = str_to_td(date)
if not input_time:
return await ctx.send(
"❌ Invalid `last` parameter, format must be `?y?mo?w?d?h?m?s`."
)
input_time = input_time + timedelta(minutes=1)
recent_time = datetime.now(timezone.utc)
old_time = round_minutes_down(datetime.now(timezone.utc) - input_time)
else:
old_time = param["after"] or datetime.min
recent_time = param["before"] or datetime.max
# get the data we need
if groupby_opt and groupby_opt != "canvas":
(past_time, now_time, stats) = await db_stats.get_grouped_stats_history(
names, old_time, recent_time, groupby_opt, canvas_opt
)
elif groupby_opt == "canvas":
(past_time, now_time, stats) = await db_stats.get_stats_per_canvas(names)
else:
(past_time, now_time, stats) = await db_stats.get_stats_history(
names, old_time, recent_time, canvas_opt
)
# check that we found data
if len(stats) == 0:
msg = "❌ User{} not found.".format("s" if len(names) > 1 else "")
return await ctx.send(msg)
# check that we can calculate the speed
if past_time == now_time:
return await ctx.send("❌ The time frame given is too short.")
diff_time = now_time - past_time
nb_hour = diff_time / timedelta(hours=1)
# format the data to be displayed
formatted_data = []
found_but_no_data = False
for user in stats:
data = user[1]
if groupby_opt and groupby_opt != "canvas":
# truncate the first data if we're groupping by day/hour
data = data[1:]
if len(data) == 0:
continue
# last username
name = data[-1]["name"]
# current pixels
current_pixels = data[-1]["pixels"]
if groupby_opt:
if all([d["placed"] is None for d in data]):
# skip the user if all the values are None
continue
diff_pixels = sum([(d["placed"] or 0) for d in data])
else:
# find first non-null value
lowest_pixels = None
for d in data:
if d["pixels"] is not None:
lowest_pixels = d["pixels"]
break
if lowest_pixels is None or current_pixels is None:
# the user exists in the database but doesnt have data
# in the given time frame
found_but_no_data = True
continue
diff_pixels = current_pixels - lowest_pixels
# calculate the speed
speed_px_h = diff_pixels / nb_hour
speed_px_d = speed_px_h * 24
# format data for the graph
if groupby_opt:
if groupby_opt == "month":
dates = [
datetime.strptime(
stat["first_datetime"], "%Y-%m-%d %H:%M:%S"
).strftime("%b %Y")
for stat in data
]
user_timezone = None
elif groupby_opt == "week":
dates = []
for stat in data:
first_dt = datetime.strptime(
stat["first_datetime"], "%Y-%m-%d %H:%M:%S"
)
last_dt = first_dt + timedelta(days=6)
week_dates = (
f"{first_dt.strftime('%d-%b')} - {last_dt.strftime('%d-%b')}"
)
dates.append(week_dates)
user_timezone = None
elif groupby_opt == "day":
dates = [stat["first_datetime"][:10] for stat in data]
user_timezone = None
elif groupby_opt == "hour":
dates = [stat["first_datetime"][:13] for stat in data]
# convert the dates to the user's timezone
dates = [datetime.strptime(d, "%Y-%m-%d %H") for d in dates]
tz = get_timezone(user_timezone) or timezone.utc
dates = [
datetime.astimezone(d.replace(tzinfo=timezone.utc), tz)
for d in dates
]
elif groupby_opt == "canvas":
dates = ["C" + stat["canvas_code"] for stat in data]
user_timezone = None
pixels = [stat["placed"] for stat in data]
# remove the "None" values to calculate the min, max, avg
pixels_int_only = [p for p in pixels if p is not None]
if len(pixels_int_only) > 0:
min_pixels = min(pixels_int_only)
max_pixels = max(pixels_int_only)
average = sum(pixels_int_only) / len(pixels_int_only)
else:
min_pixels = max_pixels = average = None
else:
dates = [stat["datetime"] for stat in data]
if param["progress"]:
# substract the first value to each value so they start at 0
pixels = [
(
(stat["pixels"] - lowest_pixels)
if (stat["pixels"] is not None and lowest_pixels is not None)
else None
)
for stat in data
]
else:
pixels = [stat["pixels"] for stat in data]
user_data = [name, current_pixels, diff_pixels]
if groupby_opt:
user_data.append(average)
user_data.append(min_pixels)
user_data.append(max_pixels)
else:
user_data.append(speed_px_h)
user_data.append(speed_px_d)
user_data.append(dates)
user_data.append(pixels)
formatted_data.append(user_data)
if len(formatted_data) == 0:
if found_but_no_data and not canvas_opt:
msg = f"❌ User{'s' if len(names) > 1 else ''} not found in the all-time leaderboard.\n(try using `-canvas` to use the canvas data instead.)"
else:
msg = f"❌ User{'s' if len(names) > 1 else ''} not found."
return await ctx.send(msg)
# sort the data by the 3rd column (progress in the time frame)
formatted_data.sort(key=lambda x: x[2], reverse=True)
# create the headers needed for the table
table_colors = theme.get_palette(len(formatted_data))
# make the title
if groupby_opt:
title = "Speed {}".format("per " + groupby_opt if groupby_opt else "")
elif canvas_opt and param["last"] is None:
title = "Canvas Speed"
else:
title = "Speed"
diff_time = round_minutes_down(now_time) - round_minutes_down(past_time)
diff_time_str | |
<filename>models.py
import ipdb
import math
import numpy as np
import tensorflow as tf
# N_DIM_STATE = 4
# N_DIM_ACTIONS = 2
N_DIM_STATE = 210*160
N_DIM_ACTIONS = 9
def batch_norm_init(inits, size, name):
    """Create a trainable batch-norm parameter vector.

    Returns a tf.Variable of shape [size] whose every entry starts at
    `inits` (e.g. 0.0 for a beta/shift vector, 1.0 for a gamma/scale one).
    """
    filled = inits * tf.ones([size])
    return tf.Variable(filled, name=name)
def weight_init(shape, name):
    """Create a trainable weight matrix of the given (fan_in, fan_out) shape.

    Weights are drawn from N(0, 1/fan_in): stddev is 1/sqrt(shape[0]) so the
    activation variance stays roughly constant from layer to layer.

    BUGFIX: the original passed stddev=math.sqrt(shape[0]), which *grows*
    with fan-in (stddev ~183 for the 33600-wide input layer here) and blows
    up activations; the ladder-network reference implementation this file is
    based on scales initial weights by 1/sqrt(fan_in).
    """
    return tf.Variable(tf.random_normal(shape, stddev=1.0 / math.sqrt(shape[0])), name=name)
def batch_normalization(batch, mean=None, var=None):
    """Standardize `batch` feature-wise: (batch - mean) / sqrt(var + eps).

    When `mean`/`var` are not both supplied, the moments of the batch itself
    (computed over axis 0) are used instead.
    """
    if mean is None or var is None:
        mean, var = tf.nn.moments(batch, axes=[0])
    eps = tf.constant(1e-9)
    return (batch - mean) / tf.sqrt(var + eps)
def update_batch_normalization(batch, l, bn_assigns, running_mean, running_var, ewma):
    """Batch-normalize `batch` for layer `l` and update that layer's running stats.

    Side effects: assigns the freshly computed batch mean/var into
    running_mean[l-1] / running_var[l-1], and appends an EMA-update op for
    them to `bn_assigns` (the caller is expected to run those ops alongside
    the train step).
    """
    # Per-feature moments over the batch axis.
    mean, var = tf.nn.moments(batch, axes=[0])
    assign_mean = running_mean[l - 1].assign(mean)
    assign_var = running_var[l - 1].assign(var)
    bn_assigns.append(ewma.apply([running_mean[l - 1], running_var[l - 1]]))
    # The control dependency forces the running-stat assignments to execute
    # whenever the normalized output is evaluated.
    with tf.control_dependencies([assign_mean, assign_var]):
        # NOTE(review): epsilon here is 1e-10 while batch_normalization()
        # uses 1e-9 -- probably meant to match; confirm before unifying.
        return (batch - mean) / tf.sqrt(var + 1e-10)
def ddqn(s1, a1, r1, s2, discount, learning_rate, layers, q_values_fun_builder):
    """Build the Double-DQN training graph.

    s1/a1/r1/s2 are (state, one-hot action, reward, next state) minibatch
    tensors.  `q_values_fun_builder(states, training[, weights])` must return
    (q_values, unsupervised_loss, bn_assigns, weights, debug).  Returns
    (loss, train_op, best_action_picker, target_updater_fn, training
    placeholder, None).

    NOTE(review): `layers` is accepted but never used in this function.
    """
    training = tf.placeholder(tf.bool)
    n_data = tf.shape(s1)[0]
    # DDQN - Find best value using the up to date Q function, but estimate it's value from our target Q function.
    targets, _, bn_assigns, target_weights, _ = q_values_fun_builder(s2, training)
    best_action = tf.argmax(targets, axis=1)
    # Cases when the second action is picked
    # NOTE(review): casting the argmax index to bool only distinguishes
    # action 0 from "any non-zero action", and the tf.where below only ever
    # reads columns 0 and 1 of the target scores.  That is only correct for
    # a 2-action problem, yet N_DIM_ACTIONS is 9 in this file -- confirm.
    second_action_is_best = tf.cast(best_action, dtype=bool)
    # DDQN Pick action with Q_1, score with Q_target
    ddqn_target_scores, _, _, ddqn_target_weights, _ = q_values_fun_builder(s2, training)
    target_scores = tf.where(
        second_action_is_best,
        discount*ddqn_target_scores[:, 1],
        discount*ddqn_target_scores[:, 0])
    # Remove future score prediction if end of episode
    # (a reward of -1 is used as the terminal-transition sentinel).
    future_score = tf.where(
        tf.equal(r1, -1*tf.ones(tf.shape(r1))),
        tf.zeros(tf.shape(r1)),
        tf.reshape(target_scores, [-1, 1]))
    # Broadcast the scalar TD target across all action columns...
    target_q_valuez = tf.concat([r1 + future_score for _ in range(N_DIM_ACTIONS)], 1)
    all_ones = tf.concat([tf.ones([n_data, 1]) for _ in range(N_DIM_ACTIONS)], 1)
    predicted_q_values, _, _, online_weights, _ = q_values_fun_builder(s1, training)
    # ...then keep the target only in the column where a1 is 1 (the taken
    # action); other columns keep their own prediction, so their TD error
    # is zero and no gradient flows through them.
    target_q_values = tf.where(
        tf.equal(a1, all_ones),
        target_q_valuez,
        predicted_q_values)
    # Re-use the online weights for action selection / unsupervised loss
    # (this call also provides the bn_assigns that get run with train_op).
    best_action_picker, u_loss, bn_assigns, _, tf_debug_var = q_values_fun_builder(s1, training, online_weights)
    # NOTE(review): under Python 2, 1/100 is integer division (== 0) and
    # would silence the unsupervised loss entirely -- confirm Python 3.
    u_loss = (u_loss * tf.constant(1/100))
    supervised_loss = tf.reduce_mean(tf.square(tf.stop_gradient(target_q_values) - predicted_q_values))
    loss = supervised_loss + u_loss
    # Only the online network's variables are trained.
    training_vars = []
    for w_key, weights in online_weights.items():
        training_vars = training_vars + weights
    opt = tf.train.AdamOptimizer(learning_rate=learning_rate)
    train_op = opt.minimize(loss, var_list=training_vars)
    # Ops copying online weights into the argmax-target network (synced
    # automatically on every train step, see network_updates below).
    target_updaters = []
    for w_key, weights in online_weights.items():
        for w_index in range(len(weights)):
            target_updaters.append(
                tf.assign(target_weights[w_key][w_index],
                          online_weights[w_key][w_index]))
    # Ops copying online weights into the DDQN scoring network (synced only
    # when the caller invokes `updater`).
    updaters = []
    for w_key, weights in online_weights.items():
        for w_index in range(len(weights)):
            updaters.append(
                tf.assign(ddqn_target_weights[w_key][w_index],
                          online_weights[w_key][w_index]))
    def updater(sess):
        # Manual, caller-triggered sync of the DDQN scoring network.
        for u in updaters:
            sess.run(u)
    # add the updates of batch normalization statistics to train_step
    network_updates = tf.group(*(bn_assigns + target_updaters))
    with tf.control_dependencies([train_op]):
        train_op = tf.group(network_updates)
    return loss, \
        train_op, \
        best_action_picker, \
        updater, \
        training, \
        None
def ddqn_mlp(s1, a1, r1, s2, discount, learning_rate, layer_sizes):
    """Double-DQN whose Q-function is a plain batch-normalized MLP.

    `layer_sizes` is the full [input, hidden..., output] width list; the
    training-graph wiring is delegated to ddqn() with `q_values` below as
    the network builder.
    """
    n_data = tf.shape(s1)[0]  # NOTE(review): unused in this function
    # Q-Values from a ladder network
    def q_values(state1, training, weights=None):
        """Build (or rebuild, re-using `weights` when given) the MLP Q-net.

        Returns (q_values, constant-zero unsupervised loss, bn_assigns,
        weights, None) to satisfy the builder contract expected by ddqn().
        """
        L = len(layer_sizes) - 1  # number of layers
        shapes = [s for s in zip(layer_sizes[:-1], layer_sizes[1:])]  # shapes of linear layers
        if weights is None:
            weights = {
                'Encoder_w': [weight_init(s, 'Encoder_w') for s in shapes],  # Encoder weights
                'beta': [batch_norm_init(0.0, layer_sizes[l+1], 'beta') for l in range(L)],
                'gamma': [batch_norm_init(1.0, layer_sizes[l+1], 'gamma') for l in range(L)]
            }
        # Per-layer running statistics for evaluation-time batch norm.
        # NOTE(review): fresh running_mean/var variables and a fresh EMA are
        # created on every call, even when `weights` is re-used -- so each
        # network instance keeps separate BN statistics.  Confirm the eval
        # path is only exercised on the instance whose stats get updated.
        running_mean = [tf.Variable(tf.constant(0.0, shape=[l]), name='running_mean', trainable=False)
                        for l in layer_sizes[1:]]
        running_var = [tf.Variable(tf.constant(1.0, shape=[l]), name='running_var', trainable=False)
                       for l in layer_sizes[1:]]
        ewma = tf.train.ExponentialMovingAverage(decay=0.99)  # to calculate the moving averages of mean and variance
        bn_assigns = []  # this list stores the updates to be made to average mean and variance
        # to store the pre-activation, activation, mean and variance for each layer
        d = {'z': {}, 'm': {}, 'v': {}, 'h': {}}
        h = state1
        d['z'][0] = h
        for l in range(1, L + 1):
            print("Layer ", l, ": ", layer_sizes[l - 1], " -> ", layer_sizes[l])
            d['h'][l - 1] = h
            z_pre = tf.matmul(h, weights['Encoder_w'][l - 1])  # pre-activation
            m, v = tf.nn.moments(z_pre, axes=[0])
            # if training:
            def training_batch_norm():
                # Normalize with batch moments and record running-stat updates.
                return update_batch_normalization(z_pre, l, bn_assigns, running_mean, running_var, ewma)
            # else:
            def eval_batch_norm():
                # Normalize with the EMA of the recorded running statistics.
                mean = ewma.average(running_mean[l - 1])
                var = ewma.average(running_var[l - 1])
                z = batch_normalization(z_pre, mean, var)
                return z
            # Choose the BN path at session run time via the `training` bool.
            z = tf.cond(training, training_batch_norm, eval_batch_norm)
            if l == L:
                # Output layer: scaled/shifted softmax over the action values.
                h = tf.nn.softmax(weights['gamma'][l - 1] * (z + weights["beta"][l - 1]))
            else:
                h = tf.nn.relu(z + weights["beta"][l - 1])
            d['z'][l] = z
            d['m'][l], d['v'][l] = m, v
            d['h'][l] = h
        return h, tf.Variable(tf.constant(0.0)), bn_assigns, weights, None
    return ddqn(s1, a1, r1, s2, discount, learning_rate, layer_sizes, q_values)
# https://github.com/rinuboney/ladder/blob/master/ladder.py
def ladder_mlp(s1, a1, r1, s2, discount, learning_rate, layer_sizes, denoising_cost):
# Q-Values from a ladder network
def q_values(state1, training, weights=None):
L = len(layer_sizes) - 1 # number of layers
shapes = [s for s in zip(layer_sizes[:-1], layer_sizes[1:])] # shapes of linear layers
if weights is None:
weights = {
'Encoder_w': [weight_init(s, 'Encoder_w') for s in shapes], # Encoder weights
'Decoder_w': [weight_init(s[::-1], 'Decoder_w') for s in shapes], # Decoder weights
'beta': [batch_norm_init(0.0, layer_sizes[l+1], 'beta') for l in range(L)],
'gamma': [batch_norm_init(1.0, layer_sizes[l+1], 'gamma') for l in range(L)]
}
# Relative importance of each layer
running_mean = [tf.Variable(tf.constant(0.0, shape=[l]), name='running_mean', trainable=False)
for l in layer_sizes[1:]]
running_var = [tf.Variable(tf.constant(1.0, shape=[l]), name='running_var', trainable=False)
for l in layer_sizes[1:]]
ewma = tf.train.ExponentialMovingAverage(decay=0.99) # to calculate the moving averages of mean and variance
bn_assigns = [] # this list stores the updates to be made to average mean and variance
def encoder(inputs, noise_std):
# add noise to input
h = inputs + tf.random_normal(tf.shape(inputs)) * noise_std
# to store the pre-activation, activation, mean and variance for each layer
d = {'z': {}, 'm': {}, 'v': {}, 'h': {}}
d['z'][0] = h
for l in range(1, L + 1):
print("Layer ", l, ": ", layer_sizes[l - 1], " -> ", layer_sizes[l])
d['h'][l - 1] = h
z_pre = tf.matmul(h, weights['Encoder_w'][l - 1]) # pre-activation
m, v = tf.nn.moments(z_pre, axes=[0])
# if training:
def training_batch_norm():
# Training batch normalization
# batch normalization for labeled and unlabeled examples is performed separately
if noise_std > 0:
# Corrupted encoder
# batch normalization + noise
z = batch_normalization(z_pre, m, v)
z += tf.random_normal(tf.shape(z_pre)) * noise_std
else:
# Clean encoder
# batch normalization + update the average mean and variance using batch
# mean and variance of labeled examples
z = update_batch_normalization(z_pre, l, bn_assigns, running_mean, running_var, ewma)
return z
# else:
def eval_batch_norm():
# Evaluation batch normalization
# obtain average mean and variance and use it to normalize the batch
mean = ewma.average(running_mean[l - 1])
var = ewma.average(running_var[l - 1])
z = batch_normalization(z_pre, mean, var)
return z
# perform batch normalization according to value of boolean "training" placeholder:
z = tf.cond(training, training_batch_norm, eval_batch_norm)
if l == L:
# use softmax activation in output layer
h = tf.nn.softmax(weights['gamma'][l - 1] * (z + weights["beta"][l - 1]))
else:
# use ReLU activation in hidden layers
h = tf.nn.relu(z + weights["beta"][l - 1])
d['z'][l] = z
d['m'][l], d['v'][l] = m, v # save mean and variance of unlabeled examples for decoding
d['h'][l] = h
return h, d
print("=== Corrupted Encoder ===")
y_c, corr = encoder(state1, 0.1)
print("=== Clean Encoder ===")
y, clean = encoder(state1, 0.0) # 0.0 -> do not add noise
print("=== Decoder ===")
def g_gauss(z_c, u, size):
wi = lambda inits, name: tf.Variable(inits * tf.ones([size]), name=name)
a1 = wi(0., 'a1')
a2 = wi(1., 'a2')
a3 = wi(0., 'a3')
a4 = wi(0., 'a4')
a5 = wi(0., 'a5')
a6 = wi(0., 'a6')
a7 = wi(1., 'a7')
a8 = wi(0., 'a8')
a9 = wi(0., 'a9')
a10 = wi(0., 'a10')
mu = a1 * tf.sigmoid(a2 * (u + tf.constant(1e-9)) + a3) + a4 * u + a5
v = a6 * tf.sigmoid(a7 * (u + tf.constant(1e-9)) + a8) + a9 * u + a10
z_est = (z_c - mu) * v + mu
return z_est
# Decoder
z_est = {}
d_cost = [] # to store the denoising cost of all layers
for l in range(L, -1, -1):
print("Layer ", l, ": ", layer_sizes[l+1] if l+1 < len(layer_sizes) else None,
" -> ", layer_sizes[l], ", denoising cost: ", denoising_cost[l])
z, z_c = clean['z'][l], corr['z'][l]
m = clean['m'].get(l, 0)
v = clean['v'].get(l, 1-1e-10) + tf.constant(1e-9)
if l == L:
u = y_c
else:
u = tf.matmul(z_est[l+1], weights['Decoder_w'][l])
u = batch_normalization(u)
z_est[l] = g_gauss(z_c, u, layer_sizes[l])
z_est_bn = (z_est[l] - m) / v
# append the cost of this layer to d_cost
d_cost.append((tf.reduce_mean(tf.reduce_sum(tf.square(z_est_bn - z), 1)) / layer_sizes[l]) * denoising_cost[l])
# calculate total unsupervised cost by adding the denoising cost of all layers
unsupervised_cost | |
widgetize(self):
Label(self, text='String:', anchor=W).pack(fill=X)
##Text
info = Frame(self)
vscroll = Scrollbar(info)
self.info = Text(info, yscrollcommand=vscroll.set, width=1, height=1, wrap=WORD)
self.info.insert(1.0, self.string)
self.info.pack(side=LEFT, fill=BOTH, expand=1)
vscroll.config(command=self.info.yview)
vscroll.pack(side=RIGHT, fill=Y)
info.pack(fill=BOTH, expand=1)
##Buttons
buttonframe = Frame(self)
self.okbtn = Button(buttonframe, text='Ok', width=10, command=self.ok)
self.okbtn.pack(side=LEFT, padx=3, pady=3)
Button(buttonframe, text='Cancel', width=10, command=self.cancel).pack(side=LEFT, padx=3, pady=3)
buttonframe.pack()
self.minsize(300,100)
if 'stringeditwindow' in self.parent.parent.settings:
loadsize(self, self.parent.settings, 'stringeditwindow', True)
return self.info
def ok(self):
string = TBL.compile_string(self.info.get(1.0, END)[:-1])
if not string.endswith('\x00'):
string += '\x00'
savesize(self, self.parent.parent.settings, 'stringeditwindow')
self.string = string
PyMSDialog.ok(self)
	def cancel(self):
		# Persist window geometry, then close.  self.string is left untouched
		# so the caller sees the original text.
		savesize(self, self.parent.parent.settings, 'stringeditwindow')
		# NOTE(review): closes via PyMSDialog.ok() rather than
		# PyMSDialog.cancel() -- looks like a deliberate shared close path
		# (same pattern as StringEditor.cancel), but confirm against
		# PyMSDialog before changing.
		PyMSDialog.ok(self)
class StringEditor(PyMSDialog):
def __init__(self, parent, title='String Editor', cancel=False, index=0):
self.result = None
self.cancelbtn = cancel
self.index = index
self.ai = parent.ai
self.tbl = parent.tbl
self.settings = parent.settings
self.edittbl = parent.edittbl
self.stattxt = parent.stattxt
self.strings = parent.strings
self.resort = parent.resort
self.select_file = parent.select_file
PyMSDialog.__init__(self, parent, '%s (%s)' % (title, parent.stattxt()))
def widgetize(self):
self.bind('<Control-o>', self.open)
self.bind('<Control-d>', self.opendefault)
self.bind('<Control-s>', self.save)
self.bind('<Control-Alt-s>', self.saveas)
self.bind('<Insert>', self.add)
self.bind('<Delete>', self.remove)
self.bind('<Control-f>', self.find)
self.bind('<Control-e>', self.edit)
#Toolbar
buttons = [
('open', self.open, 'Open (Ctrl+O)', NORMAL),
('opendefault', self.opendefault, 'Open Default TBL (Ctrl+D)', NORMAL),
('save', self.save, 'Save (Ctrl+S)', NORMAL),
('saveas', self.saveas, 'Save As (Ctrl+Alt+S)', NORMAL),
10,
('add', self.add, 'Add String (Insert)', NORMAL),
('remove', self.remove, 'Remove String (Delete)', NORMAL),
4,
('find', self.find, 'Find String (Ctrl+F)', NORMAL),
10,
('edit', self.edit, 'Edit String (Ctrl+E)', NORMAL),
]
self.buttons = {}
toolbar = Frame(self)
for btn in buttons:
if isinstance(btn, tuple):
image = get_img(btn[0])
button = Button(toolbar, image=image, width=20, height=20, command=btn[1], state=btn[3])
button.image = image
button.tooltip = Tooltip(button, btn[2], couriernew)
button.pack(side=LEFT)
self.buttons[btn[0]] = button
else:
Frame(toolbar, width=btn).pack(side=LEFT)
toolbar.pack(side=TOP, fill=X, padx=2, pady=1)
##Listbox
listframe = Frame(self, bd=2, relief=SUNKEN)
scrollbar = Scrollbar(listframe)
self.listbox = Listbox(listframe, font=couriernew, bd=0, activestyle=DOTBOX, highlightthickness=0, yscrollcommand=scrollbar.set, exportselection=0)
bind = [
('<MouseWheel>', self.scroll),
('<Home>', self.home),
('<End>', self.end),
('<Up>', self.up),
('<Down>', self.down),
('<Prior>', self.pageup),
('<Next>', self.pagedown),
]
for b in bind:
self.bind(*b)
self.listbox.bind('<ButtonRelease-3>', self.popup)
self.listbox.bind('<Double-Button-1>', self.edit)
scrollbar.config(command=self.listbox.yview)
scrollbar.pack(side=RIGHT, fill=Y)
self.listbox.pack(side=LEFT, fill=BOTH, expand=1)
listframe.pack(fill=BOTH, expand=1)
listmenu = [
('Add String', self.add, 4), # 0
('Remove String', self.remove, 0), # 1
None,
('Edit String', self.edit, 8), # 3
]
self.listmenu = Menu(self, tearoff=0)
for m in listmenu:
if m:
l,c,u = m
self.listmenu.add_command(label=l, command=c, underline=u)
else:
self.listmenu.add_separator()
##Buttons
buttons = Frame(self)
ok = Button(buttons, text='Ok', width=10, command=self.ok)
ok.pack(side=LEFT, padx=3, pady=3)
if self.cancelbtn:
Button(buttons, text='Cancel', width=10, command=self.cancel).pack(padx=3, pady=3)
buttons.pack()
##Statusbar
self.status = StringVar()
statusbar = Label(self, textvariable=self.status, bd=1, relief=SUNKEN, anchor=W).pack(side=BOTTOM, fill=X)
self.update()
self.listbox.select_clear(0,END)
self.listbox.select_set(self.index)
self.listbox.see(self.index)
self.minsize(300,300)
if 'stringeditorwindow' in self.parent.settings:
loadsize(self, self.parent.settings, 'stringeditorwindow', True)
return ok
def scroll(self, e):
if e.delta > 0:
self.listbox.yview('scroll', -2, 'units')
else:
self.listbox.yview('scroll', 2, 'units')
def home(self, e):
self.listbox.yview('moveto', 0.0)
def end(self, e):
self.listbox.yview('moveto', 1.0)
def up(self, e):
self.listbox.yview('scroll', -1, 'units')
def down(self, e):
self.listbox.yview('scroll', 1, 'units')
def pageup(self, e):
self.listbox.yview('scroll', -1, 'pages')
def pagedown(self, e):
self.listbox.yview('scroll', 1, 'pages')
def popup(self, e):
if not self.listbox.curselection():
s = DISABLED
else:
s = NORMAL
for i in [1,3]:
self.listmenu.entryconfig(i, state=s)
self.listmenu.post(e.x_root, e.y_root)
def find(self, e=None):
if self.listbox.size():
FindDialog(self, True)
def ok(self):
self.result = int(self.listbox.curselection()[0])
savesize(self, self.parent.settings, 'stringeditorwindow')
PyMSDialog.ok(self)
def cancel(self):
self.result = None
savesize(self, self.parent.settings, 'stringeditorwindow')
PyMSDialog.ok(self)
def update(self):
sel = 0
if self.listbox.size():
sel = self.listbox.curselection()[0]
self.listbox.delete(0, END)
size = len(self.parent.tbl.strings)
pad = len(str(size))
for n,s in enumerate(self.parent.tbl.strings):
self.listbox.insert(END, '%s%s %s' % (' ' * (pad - len(str(n))), n, TBL.decompile_string(s)))
self.listbox.select_set(sel)
self.listbox.see(sel)
self.status.set('Strings: %s' % size)
def open(self, file=None):
if self.parent.edittbl():
save = askquestion(parent=self, title='Save Changes?', message="Save changes to '%s'?" % self.parent.stattxt(), default=YES, type=YESNOCANCEL)
if save != 'no':
if save == 'cancel':
return
if self.tbl:
self.save()
else:
self.saveas()
if file == None:
file = self.parent.select_file('Open stat_txt.tbl', True, '.tbl', [('TBL Files','*.tbl'),('All Files','*')], self)
if file:
tbl = TBL.TBL()
try:
tbl.load_file(file)
except PyMSError, e:
ErrorDialog(self, e)
return
max = len(tbl.strings)
ids = {}
for s,i in self.parent.strings.iteritems():
if s >= max:
ids[s] = i
if ids:
pass
if self.parent.ai:
self.parent.ai.tbl = tbl
self.parent.tbl = tbl
self.parent.stattxt(file)
self.title('String Editor (%s)' % file)
self.update()
self.parent.edittbl(False)
def opendefault(self):
self.open(os.path.join(BASE_DIR, 'Libs', 'MPQ', 'rez' 'stat_txt.tbl'))
def save(self, key=None, file=None):
if key and self.buttons['save']['state'] != NORMAL:
return
if file == None:
file = self.parent.stattxt()
try:
self.tbl.compile(file)
self.parent.stattxt(file)
except PyMSError, e:
ErrorDialog(self, e)
return
self.tbledited = False
def saveas(self, key=None):
if key and self.buttons['saveas']['state'] != NORMAL:
return
file = self.parent.select_file('Save stat_txt.tbl', False, '.tbl', [('TBL Files','*.tbl'),('All Files','*')], self)
if not file:
return
self.save(None, file)
def add(self, key=None):
if key and self.buttons['add']['state'] != NORMAL:
return
e = EditStringDialog(self, '', 'Add String')
if e.string:
self.parent.tbl.strings.append(e.string)
self.update()
self.listbox.select_clear(0, END)
self.listbox.select_set(END)
self.listbox.see(END)
self.parent.edittbl(True)
if self.parent.ai:
self.parent.resort()
def remove(self, key=None):
if key and self.buttons['remove']['state'] != NORMAL:
return
string = int(self.listbox.curselection()[0])
if self.parent.ai:
ids = {}
for s,i in self.parent.strings.iteritems():
if s > string:
ids[s] = i
if ids:
plural = 0
i = ''
e = 0
for s,x in ids.iteritems():
if e < 6:
i += ' '
comma = False
for n in x:
if plural < 2:
plural += 1
if e < 6 and comma:
i += ", "
else:
comma = True
if e < 6:
i += n
if e < 6:
i += ': %s\n' % s
e += 1
if e > 5:
i += 'And %s other scripts. ' % (e-5)
if plural == 2:
plural = 1
if not askquestion(parent=self, title='Remove String?', message="Deleting string '%s' will effect the AI Script%s:\n%sContinue removing string anyway?" % (string, 's' * plural, i), default=YES):
return
end = self.listbox.size()-1
if end in self.parent.strings:
new = self.listbox.size()-2
for id in self.parent.strings[end]:
self.parent.ai.ais[id][1] = new
if not new in self.parent.strings:
self.parent.strings[new] = []
self.parent.strings[new].extend(self.parent.strings[end])
del self.parent.strings[string]
if self.parent.ai:
self.parent.resort()
del self.parent.tbl.strings[string]
if string:
self.listbox.select_set(string-1)
else:
self.listbox.select_set(0)
self.parent.edittbl(True)
self.update()
def edit(self, key=None):
if key and self.buttons['edit']['state'] != NORMAL:
return
id = int(self.listbox.curselection()[0])
string = TBL.decompile_string(self.parent.tbl.strings[id])
e = EditStringDialog(self, string)
if string != e.string:
self.parent.edittbl(True)
self.parent.tbl.strings[id] = e.string
self.update()
if self.parent.ai:
self.parent.resort()
class FlagEditor(PyMSDialog):
	"""Small dialog for toggling the three AI-script flag bits.

	`self.flags` holds the packed bitfield: bit 0 = requires a location,
	bit 1 = invisible in StarEdit, bit 2 = BroodWar only.
	"""
	def __init__(self, parent, flags):
		self.flags = flags
		# One IntVar (0/1) per flag bit, seeded from the packed value.
		self.location = IntVar()
		self.visible = IntVar()
		self.bwonly = IntVar()
		self.location.set(not not flags & 1)
		self.visible.set(not not flags & 2)
		self.bwonly.set(not not flags & 4)
		PyMSDialog.__init__(self, parent, 'Flag Editor')
	def widgetize(self):
		"""Lay out the three checkbuttons and Ok/Cancel; returns the Ok button."""
		self.resizable(False, False)
		checks = Frame(self)
		for caption, variable in (
			('Requires a Location', self.location),
			('Invisible in StarEdit', self.visible),
			('BroodWar Only', self.bwonly),
		):
			Checkbutton(checks, text=caption, variable=variable).grid(sticky=W)
		checks.pack(pady=3, padx=3)
		row = Frame(self)
		accept = Button(row, text='Ok', width=10, command=self.ok)
		accept.pack(side=LEFT, padx=1, pady=3)
		Button(row, text='Cancel', width=10, command=self.cancel).pack(side=LEFT, padx=1, pady=3)
		row.pack(pady=3, padx=3)
		return accept
	def ok(self):
		"""Repack the checkbutton states into the flags bitfield and close."""
		packed = 0
		if self.location.get():
			packed += 1
		if self.visible.get():
			packed += 2
		if self.bwonly.get():
			packed += 4
		self.flags = packed
		PyMSDialog.ok(self)
class ListboxTooltip(Tooltip):
	"""Tooltip for the AI-script listbox showing per-entry details.

	Tracks which listbox row the pointer is over and rebuilds the tip
	whenever the row under the cursor changes.
	"""
	def __init__(self, widget, font=None, delay=750, press=False):
		Tooltip.__init__(self, widget, '', font, delay, press)
		# Listbox row the current tip was built for (None = no tip yet).
		self.index = None
	def enter(self, e):
		# Only show tips when the listbox actually has entries.
		if self.widget.size():
			self.motion(e)
			Tooltip.enter(self,e)
	def leave(self, e=None):
		Tooltip.leave(self,e)
		# Tk event type '4' is a button press: restart the tip immediately.
		if e and e.type == '4':
			self.enter(e)
	def motion(self, e):
		# Rebuild the tip when the pointer has moved onto a different row.
		if self.tip and self.index != self.widget.nearest(e.y):
			self.leave()
			self.enter(e)
		self.pos = (e.x,e.y)
		Tooltip.motion(self, e)
	def showtip(self):
		if self.tip:
			return
		# Borderless toplevel positioned just below the mouse pointer.
		self.tip = Toplevel(self.widget)
		self.tip.maxsize(640,400)
		self.tip.wm_overrideredirect(1)
		pos = list(self.widget.winfo_pointerxy())
		self.index = self.widget.nearest(pos[1] - self.widget.winfo_rooty())
		# Entry layout (inferred from the indexing below): item[0] script id,
		# item[1] in-bwscript 0/1, item[2] flag chars, item[3] string id
		# -- TODO confirm against the listbox's get_entry implementation.
		item = self.widget.get_entry(self.index)
		id = item[0]
		# Build a comma-separated description of the set flag bits.
		flags = ''
		comma = False
		for d,f in zip(['BroodWar Only','Invisible in StarEdit','Requires a Location'],item[2]):
			if f == '1':
				if comma:
					flags += ', '
				else:
					comma = True
				if not flags:
					flags = 'Flags : '
				flags += d
		if flags:
			flags += '\n'
		text = "Script ID : %s\nIn bwscript.bin : %s\n%sString ID : %s\n" % (id, ['No','Yes'][item[1]], flags, item[3])
		ai = self.widget.master.master.ai
		text += fit('String : ', TBL.decompile_string(ai.tbl.strings[ai.ais[id][1]]), end=True)
		if id in ai.aiinfo and ai.aiinfo[id][0]:
			text += 'Extra Information : %s' % ai.aiinfo[id][0].replace('\n','\n ')
		else:
			# No extra info: drop the trailing newline.
			text = text[:-1]
		frame = Frame(self.tip, background='#FFFFC8', relief=SOLID, borderwidth=1)
		Label(frame, text=text, justify=LEFT, font=self.font, background='#FFFFC8', relief=FLAT).pack(padx=1, pady=1)
		frame.pack()
		self.tip.wm_geometry('+%d+%d' % (pos[0],pos[1]+22))
		self.tip.update_idletasks()
		# Keep the tip on screen: clamp to the right edge, and flip above
		# the cursor when it would run off the bottom.
		move = False
		if pos[0] + self.tip.winfo_reqwidth() > self.tip.winfo_screenwidth():
			move = True
			pos[0] = self.tip.winfo_screenwidth() - self.tip.winfo_reqwidth()
		if pos[1] + self.tip.winfo_reqheight() + 22 > self.tip.winfo_screenheight():
			move = True
			pos[1] -= self.tip.winfo_reqheight() + 44
		if move:
			self.tip.wm_geometry('+%d+%d' % (pos[0],pos[1]+22))
class PyAI(Tk):
def __init__(self, guifile=None):
self.settings = loadsettings('PyAI',
{
'stat_txt':os.path.join(BASE_DIR, 'Libs', 'MPQ', 'rez', 'stat_txt.tbl'),
'unitsdat':'MPQ:arr\\units.dat',
'upgradesdat':'MPQ:arr\\upgrades.dat',
'techdatadat':'MPQ:arr\\techdata.dat',
}
)
# Remove sometime (now 2.2)
if 'datdialog' in self.settings:
del self.settings['datdialog']
#Window
Tk.__init__(self)
self.title('No files loaded')
try:
self.icon = os.path.join(BASE_DIR, 'Images','PyAI.ico')
self.wm_iconbitmap(self.icon)
except:
self.icon = '@%s' % os.path.join(BASE_DIR, 'Images','PyAI.xbm')
self.wm_iconbitmap(self.icon)
self.protocol('WM_DELETE_WINDOW', self.exit)
setup_trace(self, 'PyAI')
self.aiscript = None
self.bwscript = None
self.stat_txt = self.settings['stat_txt']
self.tbl = TBL.TBL()
try:
self.tbl.load_file(self.stat_txt)
except:
self.stat_txt = None
self.tbl = None
self.tbledited = False
self.unitsdat = None
self.upgradesdat = None
self.techdat = None
self.ai = None
self.strings = {}
self.edited = False
self.undos = []
self.redos = []
self.imports = []
self.extdefs = []
for t,l in [('imports',self.imports),('extdefs',self.extdefs)]:
if t in self.settings:
for f in self.settings.get(t):
if os.path.exists(f):
l.append(f)
self.highlights = self.settings.get('highlights', None)
self.findhistory = []
self.replacehistory = []
self.sort = StringVar()
self.reference = IntVar()
self.reference.set(self.settings.get('reference', 0))
self.extrainfo = IntVar()
self.extrainfo.set(self.settings.get('extrainfo', 1))
#Menu
menus = odict()
menus['File'] = [
('New', self.new, NORMAL, 'Ctrl+N', 0), # 0
('Open', self.open, NORMAL, 'Ctrl+O', 0), # 1
('Open Default Scripts', self.open_default, NORMAL, 'Ctrl+D', 5), # 2
('Open MPQ', self.open_mpq, [NORMAL,DISABLED][FOLDER], 'Ctrl+Alt+O', 5), # 3
('Save', self.save, DISABLED, 'Ctrl+S', 0), # 4
('Save As...', self.saveas, DISABLED, 'Ctrl+Alt+A', 5), # 5
('Save MPQ', self.savempq, [NORMAL,DISABLED][FOLDER], 'Ctrl+Alt+M', 1), # 6
('Close', self.close, DISABLED, 'Ctrl+W', 0), # 7
None,
('Set as default *.bin editor (Windows Only)', self.register, [DISABLED,NORMAL][win_reg], '', 2),
None,
('Exit', self.exit, NORMAL, 'Alt+F4', 0), # 7
]
menus['Edit'] = [
('Undo', self.undo, DISABLED, 'Ctrl+Z', 0), # 0
('Redo', self.redo, DISABLED, 'Ctrl+Y', 3), # 1
None,
('Select All', self.select_all, DISABLED, 'Ctrl+A', 7), # 3
('Add Blank Script', self.add, DISABLED, 'Insert', 4), # 4
('Remove Scripts', self.remove, DISABLED, 'Delete', 0), # 5
('Find Scripts', self.find, DISABLED, 'Ctrl+F', 0), # 6
None,
('Export Scripts', self.export, DISABLED, 'Ctrl+Alt+E', 0), # 8
('Import Scripts', self.iimport, DISABLED, 'Ctrl+Alt+I', 0), # 9
('Import a List of Files', self.listimport, DISABLED, 'Ctrl+L', 9), # 10
('Print Reference when Decompiling', self.reference, NORMAL, '', 6, True), # 11
('Save Information Comments and Labels', self.extrainfo, NORMAL, '', 0, True), # 12
None,
('Edit AI Script', self.edit, DISABLED, 'Ctrl+E', 0), #14
('Edit AI ID, String, and Extra Info.', self.edit, DISABLED, 'Ctrl+I', 8), # 15
('Edit Flags', self.editflags, DISABLED, 'Ctrl+G', 8), # 16
None,
('Manage External Definition Files', self.extdef, NORMAL, 'Ctrl+X', 8), # 18
('Manage TBL File', self.managetbl, NORMAL, 'Ctrl+T', 7), # 19
('Manage MPQ and DAT Settings', self.managedat, NORMAL, 'Ctrl+U', 7), # 20
]
menus['View'] = [
('File Order', self.order, NORMAL, '', 5, 'order'), # 0
('Sort by ID', self.idsort, NORMAL, '', 8, 'idsort'), # 1
('Sort by BroodWar', self.bwsort, NORMAL, '', 8, 'bwsort'), # 2
('Sort by Flags', self.flagsort, NORMAL, '', 8, 'flagsort'), # 3
('Sort by Strings', self.stringsort, NORMAL, '', 8, 'stringsort'), # 4
]
menus['Help'] = [
('View Help File', self.help, NORMAL, 'F1', 5), # 0
None,
('About PyAI', self.about, NORMAL, '', 0), # 2
]
self.menus = {}
menubar = Menu(self)
self.config(menu=menubar)
for name,menu in menus.iteritems():
self.menus[name] = Menu(menubar, tearoff=0)
for n,m in enumerate(menu):
if m:
if name == 'View':
l,c,s,a,u,v = m
self.menus[name].add_radiobutton(label=l, command=c, state=s, accelerator=a, underline=u, variable=self.sort, value=v)
elif len(m) == 6:
l,v,s,a,u,_ = m
self.menus[name].add_checkbutton(label=l, state=s, accelerator=a, underline=u, variable=v)
else:
l,c,s,a,u = m
self.menus[name].add_command(label=l, command=c, state=s, accelerator=a, underline=u)
if a:
if not a.startswith('F'):
self.bind('<%s%s>' % (a[:-1].replace('Ctrl','Control').replace('+','-'), a[-1].lower()), c)
else:
self.bind('<%s>' % a, c)
else:
self.menus[name].add_separator()
menubar.add_cascade(label=name, menu=self.menus[name], underline=0)
#Toolbar
bars = [
[
('new', self.new, 'New (Ctrl+N)', NORMAL),
('open', self.open, 'Open (Ctrl+O)', NORMAL),
('opendefault', self.open_default, 'Open Default Scripts (Ctrl+D)', NORMAL),
('openmpq', self.open_mpq, 'Open | |
class UMGrid:
"""
Interpolate the ocean mask onto the UM grid.
Creates the land fraction and mask, later used as an input to the UM.
"""
    def __init__(self, um_restart, num_lon_points, num_lat_points, mom_grid,
                 output_dir):
        """Build the regular lat-lon UM grid (t, u, v points and corners).

        :param um_restart: path to the UM restart file to be copied/updated
        :param num_lon_points: number of longitude points of the UM grid
        :param num_lat_points: number of latitude points of the UM grid
        :param mom_grid: the MOM ocean grid object (source of the ocean mask)
        :param output_dir: directory where output files are written
        """
        # Latitudes used in place of +/-90; see the fix_grid() comment below.
        self.SOUTHERN_EXTENT = -89.995002746582031
        self.SOUTHERN_EXTENT_CNR = -89.999496459960938
        self.NORTHERN_EXTENT = 89.995002746582031
        self.NORTHERN_EXTENT_CNR = 89.999496459960938
        self.mom_grid = mom_grid
        self.um_restart = um_restart
        # Output file names: the updated restart, land fraction and mask in
        # both netCDF and UM ancillary formats.
        self.um_restart_output = os.path.join(output_dir,
                                              os.path.basename(um_restart))
        self.lfrac_filename_nc = os.path.join(output_dir, 'lfrac.nc')
        self.lfrac_filename_um = os.path.join(output_dir, 'lfrac')
        self.mask_filename_nc = os.path.join(output_dir, 'qrparm.mask.nc')
        self.mask_filename_um = os.path.join(output_dir, 'qrparm.mask')
        self.num_lon_points = num_lon_points
        self.num_lat_points = num_lat_points
        self.corners = 4
        # Set lats and lons.  Longitude wraps (endpoint=False), latitude
        # includes both poles.
        self.lon = np.linspace(0, 360, num_lon_points, endpoint=False)
        self.lat = np.linspace(-90, 90, num_lat_points)
        # Half a grid cell in each direction, used to offset u/v points and
        # to build cell corners.
        dx_half = 360.0 / num_lon_points / 2.0
        dy_half = (180.0 / (num_lat_points - 1) / 2.0)
        # Similar to lon, lat but specify the coordinate at every grid
        # point. Also it wraps along longitude.
        self.x_t = np.tile(self.lon, (num_lat_points, 1))
        self.y_t = np.tile(self.lat, (num_lon_points, 1))
        self.y_t = self.y_t.transpose()
        # u points shifted East, v points shifted North of the t points.
        # NOTE(review): self.y_u aliases self.y_t (no copy), so fix_grid()
        # below updates the same array twice - confirm this is intended.
        self.x_u = self.x_t + dx_half
        self.y_u = self.y_t
        self.x_v = self.x_t
        self.y_v = self.y_t + dy_half
        def make_corners(x, y, dx, dy):
            # Set grid corners, we do these one corner at a time. Start at the
            # bottom left and go anti-clockwise. This is the OASIS convention.
            # NOTE(review): np.NAN was removed in NumPy 2.0; np.nan is the
            # portable spelling.
            clon = np.empty((self.corners, x.shape[0], x.shape[1]))
            clon[:] = np.NAN
            clon[0, :, :] = x - dx
            clon[1, :, :] = x + dx
            clon[2, :, :] = x + dx
            clon[3, :, :] = x - dx
            assert(not np.isnan(np.sum(clon)))
            clat = np.empty((self.corners, x.shape[0], x.shape[1]))
            clat[:] = np.NAN
            clat[0, :, :] = y[:, :] - dy
            clat[1, :, :] = y[:, :] - dy
            clat[2, :, :] = y[:, :] + dy
            clat[3, :, :] = y[:, :] + dy
            # The bottom latitude band should always be Southern extent, for
            # all t, u, v.
            clat[0, 0, :] = -90
            clat[1, 0, :] = -90
            # The top latitude band should always be Northern extent, for all
            # t, u, v.
            clat[2, -1, :] = 90
            clat[3, -1, :] = 90
            assert(not np.isnan(np.sum(clat)))
            return clon, clat
        self.clon_t, self.clat_t = make_corners(self.x_t, self.y_t,
                                                dx_half, dy_half)
        self.clon_u, self.clat_u = make_corners(self.x_u, self.y_u,
                                                dx_half, dy_half)
        self.clon_v, self.clat_v = make_corners(self.x_v, self.y_v,
                                                dx_half, dy_half)
        # The Northerly v points are going to be beyond the domain. Remove them
        self.y_v = self.y_v[:-1, :]
        self.x_v = self.x_v[:-1, :]
        self.clat_v = self.clat_v[:, :-1, :]
        self.clon_v = self.clon_v[:, :-1, :]
        # self.area_v = self.area_v[:-1, :]
        # Now that the grid is made we fix it up. We don't go from -90 to 90
        # but from self.SOUTHERN_EXTENT to self.NORTHERN_EXTENT. As far as I
        # can tell this is due to the SCRIP remapping library not handling the
        # poles properly and making bad weights. There is a test for this in
        # tests/test_scrip_remapping.py. If the tests don't pass there's no
        # point running the model with those remapping files.
        def fix_grid():
            # Pull the pole rows (and corner rows) slightly off +/-90.
            self.lat[0] = self.SOUTHERN_EXTENT
            self.lat[-1] = self.NORTHERN_EXTENT
            self.y_t[0, :] = self.SOUTHERN_EXTENT
            self.y_t[-1, :] = self.NORTHERN_EXTENT
            self.y_u[0, :] = self.SOUTHERN_EXTENT
            self.y_u[-1, :] = self.NORTHERN_EXTENT
            def fix_corners(clat):
                clat[0, 0, :] = self.SOUTHERN_EXTENT_CNR
                clat[1, 0, :] = self.SOUTHERN_EXTENT_CNR
                clat[2, -1, :] = self.NORTHERN_EXTENT_CNR
                clat[3, -1, :] = self.NORTHERN_EXTENT_CNR
            fix_corners(self.clat_t)
            fix_corners(self.clat_u)
            fix_corners(self.clat_v)
        fix_grid()
        # Use corners to calculate areas.
        self.area_t = self.calc_area(self.clon_t, self.clat_t)
        self.area_u = self.calc_area(self.clon_u, self.clat_u)
        self.area_v = self.calc_area(self.clon_v, self.clat_v)
        # This is defined after a call to make_landfrac.
        self.landfrac = None
        self.mask_t = None
        self.mask_u = None
        self.mask_v = None
def calc_area(self, clons, clats):
"""
Calculate the area of lat-lon polygons.
We project sphere onto a flat surface using an equal area projection
and then calculate the area of flat polygon.
"""
def area_polygon(p):
"""
Calculate the area of a polygon.
Input is a polygon represented as a list of (x,y) vertex
coordinates, implicitly wrapping around from the last vertex to the
first.
See http://stackoverflow.com/questions/451426/
how-do-i-calculate-the-surface-area-of-a-2d-polygon
"""
def segments(v):
return zip(v, v[1:] + [v[0]])
return 0.5 * abs(sum(x0*y1 - x1*y0
for ((x0, y0), (x1, y1)) in segments(p)))
areas = np.zeros_like(clons[0])
areas[:] = np.NAN
m = Basemap(projection='laea', resolution='h',
llcrnrlon=0, llcrnrlat=-90.0,
urcrnrlon=360, urcrnrlat=90.0, lat_0=-90, lon_0=0)
x, y = m(clons, clats)
for i in range(x[0, :].shape[0]):
for j in range(x[0, :].shape[1]):
areas[i, j] = area_polygon(zip(x[:, i, j], y[:, i, j]))
assert(np.sum(areas) is not np.NAN)
assert(np.min(areas) > 0)
assert(abs(1 - np.sum(areas) / EARTH_AREA) < 2e-4)
return areas
def make_antarctic_mask(self, southern_lat, grid_lats):
"""
Create mask on grid_lats to mask out everything South of a particular
lat.
"""
def find_nearest_larger(val, array):
"""
Find the value which is nearest and larger than val in array.
"""
s_array = np.sort(array, axis=None)
r = np.searchsorted(s_array, val, side='right')
return s_array[r]
mask = np.zeros_like(grid_lats, dtype=bool)
# Find Southern latitude of the destination that matches source.
closest = find_nearest_larger(southern_lat, grid_lats)
excluded_row = np.where(closest == grid_lats)[0][0]
# Expect that lower latitudes have lower indices.
assert(all(grid_lats[excluded_row] > grid_lats[excluded_row - 1]))
# Mask out all latitude bands equal to and less than closest.
mask[0:excluded_row, :] = True
return mask
    def make_landfrac(self):
        """
        Regrid the ocean mask to create new land-sea fraction.

        Sets self.landfrac (values in [0, 1], 1 == all land) on the t grid.
        """
        src_clons, src_clats = oasis_to_2d_corners(self.mom_grid.clon,
                                                   self.mom_grid.clat)
        dest_clons, dest_clats = oasis_to_2d_corners(self.clon_t, self.clat_t)
        # The source grid is not defined South of -81. The easiest way to
        # deal with this is to mask out the destination during regridding
        # and then set it all to land.
        ant_mask = self.make_antarctic_mask(np.min(self.mom_grid.y_t),
                                            self.y_t)
        # Set up regridder with source and destination grid defs. All lons are
        # normalised -180, 180
        src_lons = normalise_lons(self.mom_grid.x_t)
        dest_lons = normalise_lons(self.x_t)
        r = Regridder(src_lons, self.mom_grid.y_t, src_clons, src_clats, None,
                      dest_lons, self.y_t, dest_clons, dest_clats, ant_mask)
        # Do regridding of mom ocean mask. This will result in an
        # 'ocean fraction' not a land fraction.
        self.landfrac = r.regrid(self.mom_grid.mask)
        # Check regridding, ensure that src and dest masses are close.
        src_mass = np.sum(self.mom_grid.area_t * self.mom_grid.mask)
        dest_mass = np.sum(self.area_t * self.landfrac)
        # assert(np.isclose(1, src_mass / dest_mass, atol=1e-5))
        # FIXME: this is not very close!
        assert(np.isclose(1, src_mass / dest_mass, atol=1e-3))
        # The destination has been masked out over Antarctica for regridding
        # purposes, set that area to land.  (Still ocean fraction here, so 0
        # becomes full land after the flip below.)
        self.landfrac[np.where(ant_mask)] = 0
        # Flip so that we have land fraction, rather than ocean fraction.
        self.landfrac[:] = abs(1 - self.landfrac[:])
        # Clean up points which have a very small land fraction, and clamp
        # any overshoot from the remapping weights to a full land point.
        self.landfrac[np.where(self.landfrac[:] < 0.01)] = 0
        self.landfrac[np.where(self.landfrac[:] > 1)] = 1
def put_basic_header(self, file):
"""
Put in the basic netcdf header elements: lat, lon, time.
"""
file.createDimension('longitude', self.num_lon_points)
file.createDimension('latitude', self.num_lat_points)
file.createDimension('t')
lon = file.createVariable('longitude', 'f8', dimensions=('longitude'))
lon.long_name = 'longitude'
lon.standard_name = 'longitude'
lon.units = 'degrees_east'
lon.point_spacing = 'even'
lon.module = ''
lat = file.createVariable('latitude', 'f8', dimensions=('latitude'))
lat.long_name = 'latitude'
lat.standard_name = 'latitude'
lat.units = 'degrees_north'
lat.point_spacing = 'even'
t = file.createVariable('t', 'f8', dimensions=('t'))
t.long_name = 't'
t.units = 'days since 0001-01-01 00:00:00'
t.time_origin = '01-JAN-0001:00:00:00'
t[0] = 0
lon[:] = self.lon
lat[:] = self.lat
    def write_landfrac(self, convert_to_um=False):
        """
        Write out the land fraction.

        :param convert_to_um: when True, also run mkancil to convert the
            netCDF file to the UM ancillary format.
        """
        # make_landfrac() must have been called first.
        assert(self.landfrac is not None)
        f = nc.Dataset(self.lfrac_filename_nc, 'w', format='NETCDF3_CLASSIC')
        # Put in basic header elements lat, lon, time etc.
        self.put_basic_header(f)
        f.createDimension('ht', 1)
        ht = f.createVariable('ht', 'f8', dimensions=('ht'))
        ht.long_name = 'Height'
        ht.units = 'm'
        ht.positive = 'up'
        lsm = f.createVariable('lsm', 'f8',
                               dimensions=('t', 'ht', 'latitude', 'longitude'))
        lsm.name = 'lsm'
        # NOTE(review): this first title is immediately overwritten by the
        # next assignment; only 'Land fraction in grid box' survives.  If the
        # stash code should be kept it needs its own attribute name.
        lsm.title = 'Stash code = 505'
        lsm.title = 'Land fraction in grid box'
        lsm.valid_min = 0.0
        lsm.valid_max = 1.0
        lsm[0, 0, :, :] = self.landfrac[:]
        f.close()
        # Convert to UM format.
        if convert_to_um:
            mkancil = Mkancil()
            ret = mkancil.convert_lfrac()
            assert(ret == 0)
            assert(os.path.exists(self.lfrac_filename_um))
def write_mask(self, convert_to_um=False):
"""
Write out mask used by the UM.
This mask is used to differentiate between points that have some land
fraction and those which have none at all.
"""
assert(self.landfrac is not None)
f = nc.Dataset(self.mask_filename_nc, 'w', format='NETCDF3_CLASSIC')
# Put in basic header elements lat, lon, time etc.
self.put_basic_header(f)
f.createDimension('surface', 1)
surface = f.createVariable('surface', 'f8', dimensions=('surface'))
surface.long_name = 'Surface'
surface.units = 'level'
surface.positive = 'up'
lsm = f.createVariable('lsm', 'f8',
dimensions=('t', 'surface', 'latitude',
'longitude'))
lsm.name = 'lsm'
lsm.title = 'LAND MASK (No halo) (LAND=TRUE)'
lsm.valid_min = 0.0
lsm.valid_max | |
#!/usr/bin/env python
#
# Public Domain 2014-present MongoDB, Inc.
# Public Domain 2008-2014 WiredTiger, Inc.
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import re
import wttest
from wtscenario import make_scenarios
class ParseException(Exception):
    """Raised when the join-language input cannot be tokenized or parsed."""

    def __init__(self, msg):
        Exception.__init__(self, msg)
class Token:
    """A single lexical token produced by the Tokenizer.

    Carries its kind, the absolute input position it was found at, and any
    decoded payload (number, string, attribute key/value, regex groups).
    """
    UNKNOWN = '<unknown>'
    NUMBER = 'Number'
    STRING = 'String'
    COLUMN = 'Column'
    LPAREN = '('
    RPAREN = ')'
    LBRACKET = '{'
    RBRACKET = '}'
    COMMA = ','
    OR = '||'
    AND = '&&'
    LT = '<'
    GT = '>'
    LE = '<='
    GE = '>='
    EQ = '=='
    ATTRIBUTE = 'Attribute' # bracketed key value pair

    COMPARE_OPS = [LT, GT, LE, GE, EQ]
    COMPARATORS = [NUMBER, STRING]

    def __init__(self, kind, tokenizer):
        self.kind = kind
        # Absolute position in the original input, for error reporting.
        self.pos = tokenizer.off + tokenizer.pos
        # Payload slots; the tokenizer fills in whichever ones apply.
        self.n = 0
        self.s = ''
        self.index = ''
        self.attr_key = ''
        self.attr_value = ''
        self.groups = None

    def __str__(self):
        return '<Token {0} at char {1}>'.format(self.kind, self.pos)
class Tokenizer:
    """Split an input string into Token objects, with one-token pushback."""
    def __init__(self, s):
        # off + pos gives an absolute character position for error messages.
        self.off = 0
        self.s = s + '?' # add a char that won't match anything
        self.pos = 0
        self.end = len(s)
        # Precompiled patterns for the multi-character token kinds.
        self.re_num = re.compile(r"(\d+)")
        self.re_quote1 = re.compile(r"'([^']*)'")
        self.re_quote2 = re.compile(r"\"([^\"]*)\"")
        self.re_attr = re.compile(r"\[(\w+)=(\w+)\]")
        # Holds at most one token returned via pushback()/peek().
        self.pushed = None
    def newToken(self, kind, sz):
        # Create a token at the current position, then consume sz characters.
        t = Token(kind, self)
        self.pos += sz
        return t
    def error(self, s):
        # Report a scanning error at the current position.
        raise ParseException(str(self.pos) + ': ' + s)
    def matched(self, kind, repat):
        # Match the precompiled pattern repat at the current position and
        # build a token of the given kind from it; error out on no match.
        pos = self.pos
        match = re.match(repat, self.s[pos:])
        if not match:
            # Show up to 10 characters of context in the error message.
            end = pos + 10
            if end > self.end:
                end = self.end
            self.error('matching ' + kind + ' at "' +
                       self.s[pos:end] + '..."')
        t = self.newToken(kind, match.end())
        t.groups = match.groups()
        t.s = self.s[pos:pos + match.end()]
        return t
    def available(self):
        # True when another token can be read (peeks one token ahead).
        if self.pushed == None:
            self.pushback(self.token())
        return (self.pushed != None)
    def pushback(self, token):
        # Return one token to the stream; only a single level is supported.
        if self.pushed != None:
            raise AssertionError('pushback more than once')
        self.pushed = token
    def peek(self):
        # Look at the next token without consuming it.
        token = self.token()
        self.pushback(token)
        return token
    def scan(self):
        # Skip whitespace; return the next character, or '' at end of input.
        while self.pos < self.end and self.s[self.pos].isspace():
            self.pos += 1
        return '' if self.pos >= self.end else self.s[self.pos]
    def token(self):
        # Return the next token, or None at end of input / unknown character.
        if self.pushed != None:
            ret = self.pushed
            self.pushed = None
            return ret
        c = self.scan()
        if self.pos >= self.end:
            return None
        lookahead = '' if self.pos + 1 >= self.end else self.s[self.pos+1]
        #self.tty("Tokenizer.token char=" + c + ", lookahead=" + lookahead)
        if c == "'":
            t = self.matched(Token.STRING, self.re_quote1)
            t.s = t.groups[0]   # store the string without its quotes
            return t
        if c == '"':
            t = self.matched(Token.STRING, self.re_quote2)
            t.s = t.groups[0]
            return t
        if c in "{}(),":
            # Single-character punctuation tokens map to their own kind.
            return self.newToken(c, 1)
        if c == "|":
            if lookahead != "|":
                self.error('matching OR')
            return self.newToken(Token.OR, 2)
        if c == "&":
            if lookahead != "&":
                self.error('matching AND')
            return self.newToken(Token.AND, 2)
        if c in "0123456789":
            t = self.matched(Token.NUMBER, self.re_num)
            t.s = t.groups[0]
            t.n = int(t.s)
            return t
        if c in "ABCDEFGHIJ":
            # Single letters A-J name table columns.
            t = self.newToken(Token.COLUMN, 1)
            t.s = c
            return t
        if c == '<':
            if lookahead == '=':
                return self.newToken(Token.LE, 2)
            else:
                return self.newToken(Token.LT, 1)
        if c == '>':
            if lookahead == '=':
                return self.newToken(Token.GE, 2)
            else:
                return self.newToken(Token.GT, 1)
        if c in "=":
            if lookahead != "=":
                self.error('matching EQ')
            return self.newToken(Token.EQ, 2)
        if c in "[":
            t = self.matched(Token.ATTRIBUTE, self.re_attr)
            t.attr_key = t.groups[0]
            t.attr_value = t.groups[1]
            return t
        return None
    def tty(self, s):
        # Route debug output through the test framework.
        wttest.WiredTigerTestCase.tty(s)
# test_join07.py
# Join interpreter
class test_join07(wttest.WiredTigerTestCase):
reverseop = { '==' : '==', '<=' : '>=', '<' : '>', '>=' : '<=', '>' : '<' }
compareop = { '==' : 'eq', '<=' : 'le', '<' : 'lt', '>=' : 'ge',
'>' : 'gt' }
columnmult = { 'A' : 1, 'B' : 2, 'C' : 3, 'D' : 4, 'E' : 5,
'F' : 6, 'G' : 7, 'H' : 8, 'I' : 9, 'J' : 10 }
extractscen = [
('extractor', dict(extractor=True)),
('noextractor', dict(extractor=False))
]
scenarios = make_scenarios(extractscen)
    def conn_extensions(self, extlist):
        """Load the csv extractor extension; skip the test when it is not
        built rather than failing."""
        extlist.skip_if_missing = True
        extlist.extension('extractors', 'csv')
def expect(self, token, expected):
if token == None or token.kind not in expected:
self.err(token, 'expected one of: ' + str(expected))
return token
def err(self, token, msg):
self.assertTrue(False, 'ERROR at token ' + str(token) + ': ' + msg)
def gen_key(self, i):
if self.keyformat == 'S':
return [ 'key%06d' % i ] # zero pad so it sorts expectedly
else:
return [ i ]
def gen_values(self, i):
s = ""
ret = []
for x in range(1, 11):
v = (i * x) % self.N
if x <= 5:
ret.append(v)
else:
ret.append(str(v))
if s != "":
s += ","
s += str(v)
ret.insert(0, s)
return ret
    def iterate(self, jc, mbr):
        """Walk the join cursor *jc* and verify it yields exactly the row ids
        in *mbr*, each carrying the expected generated values.

        :param jc: an open join cursor, positioned before the first result
        :param mbr: iterable of row ids the join is expected to return
        """
        mbr = set(mbr) # we need a mutable set
        gotkeys = []
        #self.tty('iteration expects ' + str(len(mbr)) +
        #         ' entries: ' + str(mbr))
        while jc.next() == 0:
            [k] = jc.get_keys()
            values = jc.get_values()
            # Recover the integer row id from the key; string keys are
            # 'key%06d'-formatted, so strip the 'key' prefix.
            if self.keyformat == 'S':
                i = int(str(k[3:]))
            else:
                i = k
            #self.tty('GOT key=' + str(k) + ', values=' + str(values))
            # Duplicates may be returned when the disjunctions are used,
            # so we ignore them.
            if not i in gotkeys:
                self.assertEquals(self.gen_values(i), values)
                if not i in mbr:
                    self.tty('ERROR: result ' + str(i) + ' is not in: ' +
                             str(mbr))
                self.assertTrue(i in mbr)
                mbr.remove(i)
                gotkeys.append(i)
        # Every expected member must have been seen exactly once.
        self.assertEquals(0, len(mbr))
def token_literal(self, token):
if token.kind == Token.STRING:
return token.s
elif token.kind == Token.NUMBER:
return token.n
def idx_sim(self, x, mult, isstr):
if isstr:
return str(int(x) * mult % self.N)
else:
return (x * mult % self.N)
def mkmbr(self, expr):
return frozenset([x for x in self.allN if expr(x)])
    def join_one_side(self, jc, coltok, littok, optok, conjunction,
                      isright, mbr):
        """Add one comparison clause (column op literal) to the join cursor
        and return the updated expected-membership set.

        :param jc: the join cursor being built; opened cursors are appended
            to jc.cursors so callers can close them later
        :param coltok: COLUMN token naming the index to join on
        :param littok: literal token (STRING or NUMBER) to compare against
        :param optok: comparison operator token
        :param conjunction: True to AND this clause into *mbr*, else OR
        :param isright: True when the literal was on the right of the op
        :param mbr: set of row ids matching the clauses seen so far
        """
        idxname = 'index:join07:' + coltok.s
        cursor = self.session.open_cursor(idxname, None, None)
        jc.cursors.append(cursor)
        literal = self.token_literal(littok)
        # Position the index cursor on the literal before joining.
        cursor.set_key(literal)
        searchret = cursor.search()
        if searchret != 0:
            self.tty('ERROR: cannot find value ' + str(literal) +
                     ' in ' + idxname)
        self.assertEquals(0, searchret)
        op = optok.kind
        # The join config expects the operator as written with the column on
        # the left, so flip it when the literal appeared on the left.
        if not isright:
            op = self.reverseop[op]
        mult = self.columnmult[coltok.s]
        config = 'compare=' + self.compareop[op] + ',operation=' + \
                 ('and' if conjunction else 'or')
        if hasattr(coltok, 'bloom'):
            config += ',strategy=bloom,count=' + str(coltok.bloom)
        #self.tty('join(jc, cursor=' + str(literal) + ', ' + config)
        self.session.join(jc, cursor, config)
        # Compute the expected membership for this clause by simulating the
        # indexed values.
        isstr = type(literal) is str
        if op == '==':
            tmbr = self.mkmbr(lambda x: self.idx_sim(x, mult, isstr) == literal)
        elif op == '<=':
            tmbr = self.mkmbr(lambda x: self.idx_sim(x, mult, isstr) <= literal)
        elif op == '<':
            tmbr = self.mkmbr(lambda x: self.idx_sim(x, mult, isstr) < literal)
        elif op == '>=':
            tmbr = self.mkmbr(lambda x: self.idx_sim(x, mult, isstr) >= literal)
        elif op == '>':
            tmbr = self.mkmbr(lambda x: self.idx_sim(x, mult, isstr) > literal)
        if conjunction:
            mbr = mbr.intersection(tmbr)
        else:
            mbr = mbr.union(tmbr)
        return mbr
def parse_join(self, jc, tokenizer, conjunction, mbr):
left = None
right = None
leftop = None
rightop = None
col = None
token = tokenizer.token()
if token.kind == Token.LPAREN:
subjc = self.session.open_cursor('join:table:join07', None, None)
jc.cursors.append(subjc)
submbr = self.parse_junction(subjc, tokenizer)
config = 'operation=' + ('and' if conjunction else 'or')
self.session.join(jc, subjc, config)
if conjunction:
mbr = mbr.intersection(submbr)
else:
mbr = mbr.union(submbr)
return mbr
if token.kind in | |
# Source: interoberlin/Wunderbar-Python-SDK
"""
Implementation of the relayr HTTP RESTful API as individual endpoints.
This module contains the API class with one method for each API endpoint.
All method names start with the HTTP method followed by the resource name
used in that endpoint e.g. ``post_user_app`` for the endpoint
``POST /users/<id>/apps/<id>`` with minor modifications.
"""
import os
import time
import json
import platform
import urllib
import warnings
import logging
import datetime
import requests
from relayr import config
from relayr.version import __version__
from relayr.exceptions import RelayrApiException
def create_logger(sender):
    """Create and return a file logger dedicated to the requesting object.

    :param sender: the object requesting logging (usually an ``Api``
        instance); its ``id()`` names both the logger and the log file.
    :rtype: logging.Logger
    """
    # Use a per-sender logger name: reusing one shared name would accumulate
    # a new FileHandler on the same logger for every Api instance, causing
    # every record to be written to all log files.
    logger = logging.getLogger('Relayr API Client.{0}'.format(id(sender)))
    logger.setLevel(logging.DEBUG)
    logfile = "{0}/relayr-api-{1}.log".format(config.LOG_DIR, id(sender))
    h = logging.FileHandler(logfile)
    # h = logging.RotatingFileHandler(logfile,
    #     mode='a', maxBytes=2**14, backupCount=5, encoding=None, delay=0)
    # create formatter and add it to the handler(s)
    fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    # Note: logging date formats go through time.strftime, which does not
    # support '%f' (microseconds), so it must not appear here.
    formatter = logging.Formatter(fmt, '%Y-%m-%d %H:%M:%S %Z')
    # Log timestamps in UTC.
    formatter.converter = time.gmtime
    h.setFormatter(formatter)
    # add the handler(s) to the logger
    logger.addHandler(h)
    return logger
def build_curl_call(method, url, data=None, headers=None):
    """
    Build and return a ``curl`` command line replicating an API request,
    for debugging reuse on the command-line.

    :param method: HTTP request method, ``GET``, ``POST``, etc.
    :type method: string
    :param url: Full HTTP path.
    :type url: string
    :param data: Data to be transmitted, usually *posted*.
    :type data: object serializable as JSON
    :param headers: Additional HTTP request headers.
    :type headers: dictionary
    :rtype: string

    Example:

    .. code-block:: python

        cmd = build_curl_call('POST', 'http://foo.com/bar', data={'x': 42},
                              headers={'SUPER_SECRET_KEY': '123'})
        print(cmd)
        curl -X POST "http://foo.com/bar" -H "SUPER_SECRET_KEY: 123" --data "{\\"x\\": 42}"
    """
    parts = ['curl -X {0} "{1}"'.format(method.upper(), url)]
    for key, value in (headers or {}).items():
        parts.append('-H "{0}: {1}"'.format(key, value))
    if data:
        # Double-encode: the inner dump produces the JSON body, the outer
        # dump quotes and escapes it for the shell.
        parts.append('--data {0}'.format(json.dumps(json.dumps(data))))
    return ' '.join(parts)
class Api(object):
"""
This class provides direct access to the relayr API endpoints.
Examples:
.. code-block:: python
# Create an anonymous client and call simple API endpoints:
from relayr.api import Api
a = Api()
assert a.get_server_status() == {'database': 'ok'}
assert a.get_users_validate('<EMAIL>') == {'exists': False}
assert a.get_public_device_model_meanings() > 0
"""
def __init__(self, token=None):
"""
Object construction.
:param token: A token generated on the relayr platform for a combination of
a relayr user and application.
:type token: string
"""
self.token = token
self.host = config.relayrAPI
self.useragent = config.userAgent
self.headers = {
'User-Agent': self.useragent,
'Content-Type': 'application/json'
}
if self.token:
self.headers['Authorization'] = 'Bearer {0}'.format(self.token)
if config.LOG:
self.logger = create_logger(self)
self.logger.info('started')
# check if the API is available
try:
self.get_server_status()
except:
raise
    def __del__(self):
        """Object destruction."""
        # NOTE(review): during interpreter shutdown module globals may
        # already be cleared, so this logging is best-effort only.
        if config.LOG:
            self.logger.info('terminated')
def perform_request(self, method, url, data=None, headers=None):
"""
Perform an API call and return a JSON result as Python data structure.
:param method: HTTP request method, ``GET``, ``POST``, etc.
:type method: string
:param url: Full HTTP path.
:type url: string
:param data: Data to be transmitted, usually *posted*.
:type data: object serializable as JSON
:param headers: Additional HTTP request headers.
:type headers: dictionary
:rtype: string
Query parameters are expected in the ``url`` parameter.
For returned status codes other than 2XX a ``RelayrApiException``
is raised which contains the API call (method and URL) plus
a ``curl`` command replicating the API call for debugging reuse
on the command-line.
"""
if config.LOG:
command = build_curl_call(method, url, data, headers)
self.logger.info("API request: " + command)
json_data = 'null'
if data is not None:
json_data = json.dumps(data)
try:
json_data = json_data.encode('utf-8')
except (UnicodeDecodeError, AttributeError):
# bytes/str - no need to re-encode
pass
func = getattr(requests, method.lower())
resp = func(url, data=json_data or '', headers=headers or {})
resp.connection.close()
if config.LOG:
hd = dict(resp.headers.items())
self.logger.info("API response headers: " + json.dumps(hd))
self.logger.info("API response content: " + resp.content)
status = resp.status_code
if 200 <= status < 300:
try:
js = resp.json()
except:
js = None
# raise ValueError('Invalid JSON code(?): %r' % resp.content)
if config.DEBUG:
warnings.warn("Replaced suspicious API response (invalid JSON?) %r with 'null'!" % resp.content)
return status, js
else:
args = (resp.json()['message'], method.upper(), url)
msg = "{0} - {1} {2}".format(*args)
command = build_curl_call(method, url, data, headers)
msg = "%s - %s" % (msg, command)
raise RelayrApiException(msg)
# ..............................................................................
# System
# ..............................................................................
def get_users_validate(self, userEmail):
"""
Get a user email address validation.
:param userEmail: The user email address to be validated.
:type userEmail: string
:rtype: A dict with an ``exists`` field and a Boolean result value.
Sample result::
{"exists": True}
"""
# https://api.relayr.io/users/validate?email=<userEmail>
url = '{0}/users/validate?email={1}'.format(self.host, userEmail)
_, data = self.perform_request('GET', url, headers=self.headers)
return data
def get_server_status(self):
"""
Get server status.
:rtype: A dict with certain fields describing the server status.
Sample result::
{"database": "ok"}
"""
# https://api.relayr.io/server-status
url = '{0}/server-status'.format(self.host)
_, data = self.perform_request('GET', url, headers=self.headers)
return data
def post_oauth2_token(self, clientID, clientSecret, code, redirectURI):
"""
Generate and return an OAuth2 access token from supplied parameters.
:param clientID: The client's UUID.
:type clientID: string
:param clientSecret: The OAuth client secret.
:type clientSecret: string
:param code: The OAuth authorization code (valid for five minutes).
:type code: string
:param redirectURI: The redirect URI.
:type redirectURI: string
:rtype: dict with two fields, "access_token" and "token_type" (with string values for both)
"""
data = {
"client_id": clientID,
"client_secret": clientSecret,
"grant_type": "authorization_code",
"code": code,
"redirect_uri": redirectURI
}
# https://api.relayr.io/oauth2/token
url = '{0}/oauth2/token'.format(self.host)
_, data = self.perform_request('POST', url, data=data, headers=self.headers)
return data
def get_oauth2_appdev_token(self, appID):
"""
Get a token representing a specific relayr application and user.
:param appID: The application's UUID.
:type appID: string
:rtype: A dict with fields describing the token.
Sample result (anonymized token value)::
{
"token": "...",
"expiryDate": "2014-10-08T10:14:07.789Z"
}
"""
# https://api.relayr.io/oauth2/appdev-token/<appID>
url = '{0}/oauth2/appdev-token/{1}'.format(self.host, appID)
_, data = self.perform_request('GET', url, headers=self.headers)
return data
def post_oauth2_appdev_token(self, appID):
"""
Generate a new token representing a user and a relayr application.
:param appID: The application's UUID.
:type appID: string
:rtype: A dict with fields describing the token.
"""
# https://api.relayr.io/oauth2/appdev-token/<appID>
url = '{0}/oauth2/appdev-token/{1}'.format(self.host, appID)
_, data = self.perform_request('POST', url, headers=self.headers)
return data
def delete_oauth2_appdev_token(self, appID):
"""
Revoke token for an application with given UUID.
:param appID: The application's UUID.
:type appID: string
"""
# https://api.relayr.io/oauth2/appdev-token/<appID>
url = '{0}/oauth2/appdev-token/{1}'.format(self.host, appID)
_, data = self.perform_request('DELETE', url, headers=self.headers)
return data
def post_client_log(self, log_messages):
"""
Log a list of messages (only for internal use).
:param log_messages: The messages to be looged.
:type log_messages: A list of dicts with fields as in the example below.
:rtype: None
The timestamp field must be formatted according to ISO 8601, but
otherwise there are no restrictions.
Sample data input::
[{
"timestamp" : "1997-07-16T19:20:30.45+01:00",
"message" : "Heavy, unexpected rain shower.",
"connection" : {
"internet" : True,
"netScope" : "WAN",
"netType" : "LTE"
}
},
{
...
}
]
"""
# https://api.relayr.io/client/log
url = '{0}/client/log'.format(self.host)
_, data = self.perform_request('POST', url, data=log_messages, headers=self.headers)
return data
# ..............................................................................
# Users
# ..............................................................................
def get_oauth2_user_info(self):
"""
Return information about the user initiating the request.
:rtype: A dictionary with fields describing the user.
Sample result (partly anonymized values)::
{
"email": "<EMAIL>",
"id": "...",
"name": "joefoo"
}
"""
# https://api.relayr.io/oauth2/user-info
url = '{0}/oauth2/user-info'.format(self.host)
_, data = self.perform_request('GET', url, headers=self.headers)
return data
def patch_user(self, userID, name=None, email=None):
"""
Update user's name or email attribute, or both.
:param userID: the users's UUID
:type userID: string
:param name: the user name to be set
:type name: string
:param email: the user email to be set
:type email: string
:rtype: dict with user info fields
"""
data = {}
if name is not None:
data.update(name=name)
if email is not None:
data.update(email=email)
# https://api.relayr.io/users/%s
url = '{0}/users/{1}'.format(self.host, userID)
_, data = self.perform_request('PATCH', url, data=data, headers=self.headers)
return data
def post_user_app(self, userID, appID):
    """
    Install a new app for a specific user.

    :param userID: the user's UUID
    :type userID: string
    :param appID: the application's UUID
    :type appID: string
    """
    # Endpoint: POST https://api.relayr.io/users/<userID>/apps/<appID>
    endpoint = '/'.join([self.host, 'users', userID, 'apps', appID])
    _, body = self.perform_request('POST', endpoint, headers=self.headers)
    return body
def delete_user_app(self, userID, appID):
    """
    Uninstall an app of a user, given both UUIDs.

    :param userID: the user's UUID
    :type userID: string
    :param appID: the app's UUID
    :type appID: string
    """
    # Endpoint: DELETE https://api.relayr.io/users/<userID>/apps/<appID>
    endpoint = '/'.join([self.host, 'users', userID, 'apps', appID])
    _, body = self.perform_request('DELETE', endpoint, headers=self.headers)
    return body
def get_user_publishers(self, userID):
"""
Get all publishers owned by a user | |
notification queue and notifies
# clients. Task triggers subscription `publish` resolvers.
notifier_task: asyncio.Task
# The callback to invoke when client unsubscribes.
unsubscribed_callback: typing.Callable[[], None]
def __init__(self, *args, **kwargs):
    """Set up per-connection state before Channels consumer init.

    Requires a subclass to provide the `schema` attribute: the GraphQL
    schema used to process subscription queries.
    """
    assert self.schema is not None, (
        "An attribute 'schema' is not set! Subclasses must specify "
        "the schema which processes GraphQL subscription queries."
    )
    # Registry of active (subscribed) subscriptions.
    self._subscriptions = {}  # {'<sid>': '<SubInf>', ...}
    self._sids_by_group = {}  # {'<grp>': ['<sid0>', '<sid1>', ...], ...}
    # Task that sends keepalive messages periodically.
    self._keepalive_task = None
    # Background tasks to clean up when a client disconnects.
    # We use a weak collection so finished tasks are autoremoved.
    self._background_tasks = weakref.WeakSet()
    # Remember the current event loop so we can later verify in
    # `_assert_thread` that handlers run on the same loop/thread.
    self._eventloop = asyncio.get_running_loop()
    # Crafty weak collection with per-operation locks. It holds a
    # mapping from the operation id (protocol message id) to the
    # `asyncio.Lock` used to serialize processing of start & stop
    # requests. Since the collection is weak, it automatically
    # throws away items when locks are garbage collected.
    self._operation_locks = weakref.WeakValueDictionary()
    super().__init__(*args, **kwargs)
# ---------------------------------------------------------- CONSUMER EVENT HANDLERS
async def connect(self):
    """Accept a new WebSocket connection using the GraphQL subprotocol."""
    # Make sure the handler runs in the expected event loop/thread.
    self._assert_thread()
    # The client must have requested the GraphQL subprotocol.
    #
    # NOTE: In Python 3.6 `scope["subprotocols"]` held strings, but
    # starting with Python 3.7 entries may be bytes. This can be a
    # proper change or just a bug in Channels to be fixed, so we
    # normalize and accept both variants until it becomes clear.
    requested = {
        sp.decode() if isinstance(sp, bytes) else sp
        for sp in self.scope["subprotocols"]
    }
    assert GRAPHQL_WS_SUBPROTOCOL in requested, (
        f"WebSocket client does not request for the subprotocol "
        f"{GRAPHQL_WS_SUBPROTOCOL}!"
    )
    # Confirm the connection, advertising the GraphQL subprotocol.
    await self.accept(subprotocol=GRAPHQL_WS_SUBPROTOCOL)
async def disconnect(self, code):
    """WebSocket disconnection handler.

    Removes itself from the Channels groups, cancels background and
    keepalive tasks, and clears the subscription registries.

    Args:
        code: The WebSocket close code (may be falsy when the
            connection closed without one).
    """
    # Assert we run in a proper thread.
    self._assert_thread()
    # Print debug or warning message depending on the value of the
    # connection close code. We consider all reserved codes (<999),
    # 1000 "Normal Closure", and 1001 "Going Away" as OK.
    # See: https://developer.mozilla.org/en-US/docs/Web/API/CloseEvent
    if not code:
        log.warning("WebSocket connection closed without a code")
    elif code <= 1001:
        log.debug("WebSocket connection closed with code: %s.", code)
    else:
        log.warning("WebSocket connection closed with code: %s!", code)
    # The list of awaitables to simultaneously wait on at the end.
    # NOTE: `asyncio.wait` rejects bare coroutines since Python 3.11
    # (deprecated since 3.8), so wrap each coroutine into a task with
    # `asyncio.ensure_future` (already-running tasks pass through).
    waitlist = [
        asyncio.ensure_future(
            self.channel_layer.group_discard(group, self.channel_name)
        )
        for group in self._sids_by_group
    ]
    # Cancel all currently running background tasks.
    for bg_task in self._background_tasks:
        bg_task.cancel()
    waitlist += list(self._background_tasks)
    # Stop sending keepalive messages (if enabled).
    if self._keepalive_task is not None:
        self._keepalive_task.cancel()
        waitlist.append(self._keepalive_task)
    if waitlist:
        await asyncio.wait(waitlist)
    self._subscriptions.clear()
    self._sids_by_group.clear()
    self._background_tasks.clear()
async def receive_json(self, content):  # pylint: disable=arguments-differ
    """Process a WebSocket message received from the client.

    Dispatches on the GraphQL-WS protocol message type
    (CONNECTION_INIT, CONNECTION_TERMINATE, START, STOP) and either
    awaits the handler (strict ordering) or offloads it to a
    background task.

    NOTE: We force 'STOP' message processing to wait until 'START'
    with the same operation id finishes (if it is running). This
    protects us from a race condition which may happen when a
    client stops an operation immediately after starting it. An
    illustrative example is a subscribe-unsubscribe pair. If we
    spawned processing of both messages concurrently we could deliver
    the subscription confirmation after the unsubscription one.
    """
    # Disable this check cause the current version of PyLint
    # improperly complains when we assign a coroutine object to
    # a local variable `task` below.
    # pylint: disable=assignment-from-no-return
    # Assert we run in a proper thread.
    self._assert_thread()
    # Extract message type based on which we select how to proceed.
    # NOTE(review): messages missing "type"/"id"/"payload" raise
    # KeyError here — presumably acceptable for protocol violations;
    # confirm against the graphql-ws protocol handling elsewhere.
    msg_type = content["type"].upper()
    if msg_type == "CONNECTION_INIT":
        task = self._on_gql_connection_init(payload=content["payload"])
    elif msg_type == "CONNECTION_TERMINATE":
        task = self._on_gql_connection_terminate()
    elif msg_type == "START":
        op_id = content["id"]
        # Create and lock a mutex for this particular operation id,
        # so STOP processing for the same operation id will wait
        # until START processing finishes. Locks are stored in a
        # weak collection so we do not have to manually clean it up.
        if op_id in self._operation_locks:
            raise graphql.error.GraphQLError(
                f"Operation with id={op_id} is already running!"
            )
        op_lock = asyncio.Lock()
        self._operation_locks[op_id] = op_lock
        await op_lock.acquire()

        async def on_start():
            # Release the lock even on failure so a pending STOP for
            # the same operation id can proceed.
            try:
                await self._on_gql_start(
                    operation_id=op_id, payload=content["payload"]
                )
            finally:
                op_lock.release()

        task = on_start()
    elif msg_type == "STOP":
        op_id = content["id"]

        async def on_stop():
            # Wait until START message processing finishes, if any.
            async with self._operation_locks.setdefault(op_id, asyncio.Lock()):
                await self._on_gql_stop(operation_id=op_id)

        task = on_stop()
    else:
        task = self._send_gql_error(
            content["id"], f"Message of unknown type '{msg_type}' received!"
        )
    # If strict ordering is required then simply wait until the
    # message processing is finished. Otherwise spawn a task so
    # Channels may continue calling `receive_json` while requests
    # (i.e. GraphQL documents) are being processed.
    if self.strict_ordering:
        await task
    else:
        self._spawn_background_task(task)
async def broadcast(self, message):
    """The broadcast message handler.

    Invoked when a new `broadcast` message arrives from the Channels
    group (sent by `Subscription.broadcast`). Here we figure out which
    group the message was received from and trigger the observable,
    which makes the subscription process the query and notify the
    client.

    NOTE: There is an issue in the `channels_redis` implementation
    which can lead to broadcast messages being received in the wrong
    order: https://github.com/django/channels_redis/issues/151
    Currently we recommend monkey-patching `channels_redis` to avoid
    this.
    """
    # Make sure the handler runs in the expected event loop/thread.
    self._assert_thread()
    processing = self._process_broadcast(message)
    # With strict ordering we block until the broadcast is fully
    # processed; otherwise it is offloaded to a background task so
    # this consumer can keep receiving messages.
    if self.strict_ordering:
        await processing
    else:
        self._spawn_background_task(processing)
async def _process_broadcast(self, message):
    """Process the broadcast message.

    Pushes the payload onto the notification queue of every
    subscription in the target group, dropping the oldest payload
    when a queue is full.

    NOTE: Depending on the value of the `strict_ordering` setting
    this method is either awaited directly or offloaded to an async
    task by the `broadcast` method (message handler).
    """
    # Assert we run in a proper thread. In particular, we can access
    # the `_subscriptions` and `_sids_by_group` without any locks.
    self._assert_thread()
    group = message["group"]
    # Do nothing if group does not exist. It is quite possible for
    # a client and a backend to concurrently unsubscribe and send
    # notification. And these events do not need to be synchronized.
    if group not in self._sids_by_group:
        return
    payload = message["payload"]
    # Put the payload to the notification queues of subscriptions
    # belonging to the subscription group. Drop the oldest payloads
    # if the `notification_queue` is full.
    for sid in self._sids_by_group[group]:
        subinf = self._subscriptions[sid]
        while True:
            try:
                subinf.notification_queue.put_nowait(payload)
                break
            except asyncio.QueueFull:
                # The queue is full - issue a warning and throw away
                # the oldest item from the queue.
                log.warning(
                    "Subscription notification dropped!"
                    " Subscription operation id: %s.",
                    sid,
                )
                subinf.notification_queue.get_nowait()
async def unsubscribe(self, message):
    """The unsubscribe message handler.

    Called when a new `unsubscribe` message is received from the
    Channels group, typically sent by `Subscription.unsubscribe`.
    Here we figure out which group the message was received from and
    stop all the subscriptions in this group.

    Args:
        message: The Channels message dict; must contain "group".
    """
    # Assert we run in a proper thread.
    self._assert_thread()
    group = message["group"]
    # Do nothing if group does not exist. It is quite possible for
    # a client and a backend to unsubscribe from a subscription
    # concurrently. And these events do not need to be synchronized.
    if group not in self._sids_by_group:
        return
    # Send messages which look like the user unsubscribed from all
    # subscriptions in the subscription group. This saves us from
    # thinking about the race condition between subscription and
    # unsubscription.
    # NOTE: `asyncio.wait` rejects bare coroutines since Python 3.11
    # (deprecated since 3.8), so wrap each `receive_json` call into a
    # task with `asyncio.ensure_future` first.
    await asyncio.wait(
        [
            asyncio.ensure_future(
                self.receive_json({"type": "stop", "id": sid})
            )
            for sid in self._sids_by_group[group]
        ]
    )
# ---------------------------------------------------------- GRAPHQL PROTOCOL EVENTS
async def _on_gql_connection_init(self, payload):
"""Process the CONNECTION_INIT message.
Start sending keepalive messages if `send_keepalive_every` set.
Respond with either CONNECTION_ACK or CONNECTION_ERROR message.
NOTE: Depending on the value of the `strict_ordering` setting
this method is either awaited directly or offloaded to an async
task. See the `receive_json` handler.
"""
# Assert we run in a proper thread.
| |
# Repository: felipeek/bullet3
# Lint as: python3
"""The base class for all quadrupeds."""
from typing import Any, Callable, Dict, Sequence, Tuple, Text, Union
import gin
import gym
import numpy as np
from pybullet_utils import bullet_client
from pybullet_envs.minitaur.envs_v2.sensors import sensor as sensor_lib
from pybullet_envs.minitaur.robots import hybrid_motor_model
from pybullet_envs.minitaur.robots import robot_base
from pybullet_envs.minitaur.robots import robot_config
from pybullet_envs.minitaur.robots import robot_urdf_loader
from pybullet_envs.minitaur.robots.safety import data_types as safety_data_types
from pybullet_envs.minitaur.robots.utilities import kinematics_utils
_UNIT_QUATERNION = (0, 0, 0, 1)
_GRAVITY_ACCELERATION_OFFSET = (0, 0, 10)
@gin.configurable
class QuadrupedBase(robot_base.RobotBase):
"""The basic quadruped class for both sim and real robots."""
def __init__(
    self,
    pybullet_client: bullet_client.BulletClient,
    clock: Callable[..., float],
    motor_control_mode: robot_config.MotorControlMode,
    motor_limits: robot_config.MotorLimits,
    motor_model_class: Any = hybrid_motor_model.HybridMotorModel,
    action_filter: Any = None,
    sensors: Sequence[sensor_lib.Sensor] = (),
    safety_config: safety_data_types.SafetyConfig = None,
    **kwargs,
):
  """Initializes the class.

  Args:
    pybullet_client: The PyBullet client.
    clock: The sim or real clock. The clock function is typically provided by
      the gym environment.
    motor_control_mode: Specifies in which mode the motor operates.
    motor_limits: The motor limits of the robot. Used by the motor_model_class
      and action space building.
    motor_model_class: The motor model to use. Not needed for real robots.
    action_filter: The filter to smooth and/or regulate the actions.
    sensors: All sensors mounted on the robot.
    safety_config: The safety setting for the robot.
    **kwargs: Additional args.
  """
  self._pybullet_client = pybullet_client
  self._clock = clock
  self._motor_control_mode = motor_control_mode
  self._motor_model_class = motor_model_class
  self._motor_limits = motor_limits
  # Both are populated by `_build_action_space` during `load`.
  self._action_space = None
  self._action_names = None
  self._action_filter = action_filter
  self._sensors = sensors
  self._safety_config = safety_config
  # Created lazily on the first `load` call below.
  self._urdf_loader = None
  # State used to estimate base acceleration by finite differences
  # between observations.
  self._last_base_velocity = np.zeros(3)
  self._last_observation_time = self._clock()
  self._last_base_acceleration_world = np.zeros(3)
  self._last_base_acceleration_accelerometer = np.zeros(3)
  # Load the URDF and run the robot-specific setup immediately.
  self.load()
def load(
    self,
    base_position: Tuple[float] = None,
    base_orientation_quaternion: Tuple[float] = None,
    joint_angles: Union[Dict[Text, float], Tuple[float]] = None,
):
  """Loads the URDF with the configured pose.

  Args:
    base_position: The base position after URDF loading. Will use the
      configured pose in gin if None.
    base_orientation_quaternion: The base orientation after URDF loading. Will
      use the configured values in gin if not specified.
    joint_angles: The desired joint angles after loading. Will use the
      configured values if None.

  NOTE(review): none of the three arguments is referenced in this body —
  presumably they are meant to be consumed by the gin-configured URDF
  loader; confirm against `RobotUrdfLoader`.

  Raises:
    ValueError: If the URDF provides no motor joints.
  """
  # A robot specific pre loading routine (subclass hook).
  self._pre_load()
  if not self._urdf_loader:
    self._urdf_loader = robot_urdf_loader.RobotUrdfLoader(
        pybullet_client=self._pybullet_client)
  # Record the urdf pose at loading, which will be used as the rotation
  # reference for base rotation computation.
  self._init_urdf_position, self._init_orientation_quat = (
      self._pybullet_client.getBasePositionAndOrientation(
          self._urdf_loader.robot_id))
  unused_position, self._init_orientation_inv_quat = (
      self._pybullet_client.invertTransform(
          position=(0, 0, 0), orientation=self._init_orientation_quat))
  # Joint ids may be different from the motor ids.
  self._joint_id_dict = self._urdf_loader.get_joint_id_dict()
  for joint_id in self._joint_id_dict.values():
    # Disables the default motors in PyBullet (zero-force velocity
    # control leaves the joints free).
    self._pybullet_client.setJointMotorControl2(
        bodyIndex=self._urdf_loader.robot_id,
        jointIndex=joint_id,
        controlMode=self._pybullet_client.VELOCITY_CONTROL,
        targetVelocity=0,
        force=0)
    # Removes the default joint damping in PyBullet.
    self._pybullet_client.changeDynamics(
        self._urdf_loader.robot_id,
        joint_id,
        linearDamping=0,
        angularDamping=0)
  # We expect that this is non-empty for all quadrupeds, and should be an
  # OrderedDict.
  self._motor_id_dict = self._urdf_loader.get_motor_id_dict()
  if not self._motor_id_dict:
    raise ValueError("Motor id dict cannot be empty for quadrupeds.")
  self._motor_ids = self._motor_id_dict.values()
  self._num_motors = len(self._motor_id_dict)
  self._build_action_space()
  # Not needed for real robots.
  if self._motor_model_class:
    # TODO(b/151664871): Also supports position/velocity limits in the motor
    # model.
    self._motor_model = self._motor_model_class(
        num_motors=self._num_motors,
        motor_control_mode=self._motor_control_mode,
        torque_lower_limits=self._motor_limits.torque_lower_limits,
        torque_upper_limits=self._motor_limits.torque_upper_limits,
    )
  # Caches the variable for faster computation during stepping.
  self._motor_direction_dict = self._urdf_loader.get_joint_direction_dict(
      self._motor_id_dict.keys())
  self._motor_directions = np.array(list(self._motor_direction_dict.values()))
  self._motor_offset_dict = self._urdf_loader.get_joint_offset_dict(
      self._motor_id_dict.keys())
  self._motor_offsets = np.array(list(self._motor_offset_dict.values()))
  # A robot specific routine post loading (subclass hook).
  self._on_load()
  # Robot sensors may use information from the class. So we initialize them
  # after the loading is done.
  for sensor in self._sensors:
    sensor.set_robot(self)
def _build_action_space(self):
  """Builds the action space of the robot using the motor limits.

  Populates `self._action_space` (a `gym.spaces.Box`) and
  `self._action_names` (one name per motor) according to
  `self._motor_control_mode`.

  Raises:
    NotImplementedError: For control modes other than POSITION, TORQUE
      or HYBRID.
  """
  if self._motor_control_mode == robot_config.MotorControlMode.POSITION:
    self._action_space = gym.spaces.Box(
        low=self._motor_limits.angle_lower_limits,
        high=self._motor_limits.angle_upper_limits,
        shape=(self._num_motors,),
        dtype=np.float32)  # TODO(b/159160184) Make dtype configurable.
    self._action_names = tuple(
        "POSITION_{}".format(motor) for motor in self._motor_id_dict.keys())
  elif self._motor_control_mode == robot_config.MotorControlMode.TORQUE:
    self._action_space = gym.spaces.Box(
        low=self._motor_limits.torque_lower_limits,
        high=self._motor_limits.torque_upper_limits,
        shape=(self._num_motors,),
        dtype=np.float32)
    self._action_names = tuple(
        "TORQUE_{}".format(motor) for motor in self._motor_id_dict.keys())
  elif self._motor_control_mode == robot_config.MotorControlMode.HYBRID:
    # Per-motor hybrid command layout is (q, q_dot, kp, kd, tau); the
    # gains kp/kd are non-negative and unbounded above.
    hybrid_action_limits_low = [
        self._motor_limits.angle_lower_limits,  # q
        # q_dot
        self._motor_limits.velocity_lower_limits,
        0,  # kp
        0,  # kd
        self._motor_limits.torque_lower_limits
    ]  # tau
    hybrid_action_limits_high = [
        self._motor_limits.angle_upper_limits,
        self._motor_limits.velocity_upper_limits, np.inf, np.inf,
        self._motor_limits.torque_upper_limits
    ]
    # NOTE(review): `np.full` broadcasts each per-dimension limit across
    # all motors — assumes the motor limits here are scalars (or shapes
    # broadcastable to (num_motors,)); confirm against MotorLimits.
    space_low = np.full(
        (self._num_motors, robot_config.HYBRID_ACTION_DIMENSION),
        hybrid_action_limits_low).ravel()
    space_high = np.full(
        (self._num_motors, robot_config.HYBRID_ACTION_DIMENSION),
        hybrid_action_limits_high).ravel()
    self._action_space = gym.spaces.Box(
        low=space_low, high=space_high, dtype=np.float32)
    self._action_names = tuple(
        "HYBRID_{}".format(motor) for motor in self._motor_id_dict.keys())
  else:
    raise NotImplementedError("Not yet implemented!")
def _pre_load(self):
  """Hook invoked before the URDF is loaded.

  Subclasses may override this, for example to configure the URDF
  loader. The base implementation does nothing.
  """
  pass
def _on_load(self):
  """Hook invoked after the URDF is loaded.

  Subclasses may override this; for example, Minitaur adds additional
  hinge constraints to its leg components after loading. The base
  implementation does nothing.
  """
  pass
@gin.configurable
def reset(
    self,
    base_position: Tuple[float] = None,
    base_orientation_quaternion: Tuple[float] = None,
    joint_angles: Union[Dict[Text, float], Tuple[float]] = None,
    save_base_pose: bool = False,
    **kwargs,
):
  """Resets the robot base and joint pose without reloading the URDF.

  Base pose resetting only works for simulated robots or visualization of real
  robots. This routine also updates the initial observation dict.

  Args:
    base_position: The desired base position. Will use the configured pose in
      gin if None. Does not affect the position of the real robots in general.
    base_orientation_quaternion: The base orientation after resetting. Will
      use the configured values in gin if not specified.
    joint_angles: The desired joint angles after resetting. Will use the
      configured values if None.
    save_base_pose: Save the base position and orientation as the default pose
      after resetting.
    **kwargs: Other args for backward compatibility. TODO(b/151975607): Remove
      after migration.
  """
  # Reset the robot's motor model.
  # NOTE(review): assumes `load` ran with a motor model class (sim);
  # for real robots with motor_model_class=None this would raise.
  self._motor_model.reset()
  # Reset the quantities for computing base acceleration.
  self._last_base_velocity = np.zeros(3)
  self._last_observation_time = self._clock()
  self._last_base_acceleration_world = np.zeros(3)
  self._last_base_acceleration_accelerometer = np.zeros(3)
  # Solves chicken and egg problem. We need to run a control step to obtain
  # the first motor torques.
  self._motor_torques = np.zeros(self._num_motors)
  # Receives a set of observation from the robot in case the reset function
  # needs to use them.
  self.receive_observation()
  self._reset_base_pose(base_position, base_orientation_quaternion)
  self._reset_joint_angles(joint_angles)
  if save_base_pose:
    # Records the base pose at resetting again, in case Reset is called with a
    # different base orientation. This base pose will be used as zero
    # rotation reference for base rotation computation.
    self._init_urdf_position, self._init_orientation_quat = (
        self._pybullet_client.getBasePositionAndOrientation(
            self._urdf_loader.robot_id))
    unused_position, self._init_orientation_inv_quat = (
        self._pybullet_client.invertTransform(
            position=(0, 0, 0), orientation=self._init_orientation_quat))
  # Updates the observation at the end of resetting.
  self.receive_observation()
  self._time_at_reset = self._clock()
def GetTimeSinceReset(self):
  """Returns the elapsed time (in clock units) since the last `reset`.

  NOTE: CamelCase name kept for backward compatibility with the old
  robot API.
  """
  return self._clock() - self._time_at_reset
def _reset_base_pose(self, position=None, orientation_quat=None):
  """Moves the robot base to the given pose.

  Base pose resetting only works for simulated robots or visualization
  of real robots.

  Args:
    position: The desired base position; falls back to the
      gin-configured pose when None.
    orientation_quat: The desired base rotation; falls back to the
      gin-configured default when None.
  """
  self._urdf_loader.reset_base_pose(position, orientation_quat)
def _reset_joint_angles(self,
                        joint_angles: Union[Tuple[float],
                                            Dict[Text, float]] = None):
  """Moves the joints to the given pose.

  Real robots need to specify their own routine to send joint angles.
  Simulated Minitaur robots also need to use dynamics to drive the
  motor joints, due to the additional hinge joints not present in the
  URDF.

  Args:
    joint_angles: The joint pose, if provided; otherwise the robot's
      default pose from the configuration is used.
  """
  # TODO(b/148897311): Supports tuple as the input.
  self._urdf_loader.reset_joint_angles(joint_angles)
def terminate(self):
  """Safely shuts the robot down.

  Only implemented for real robots; the base implementation is a
  no-op.
  """
  pass
def step(self, action: Any, num_sub_steps: int = 1):
  """Steps the simulation.

  This is maintained for backward compatibility with the old robot
  class.

  Args:
    action: The control command to be executed by the robot.
    num_sub_steps: Each action can be applied (possibly with
      interpolation) over multiple timesteps, to simulate the elapsed
      time between two consecutive commands on real robots.
  """
  processed_action = self.pre_control_step(action)
  # TODO(b/149252003): Add sub sampling.
  for _ in range(num_sub_steps):
    self.apply_action(processed_action)
    # The timestep was pre-determined at simulation setup.
    self._pybullet_client.stepSimulation()
    self.receive_observation()
  self.post_control_step()
def pre_control_step(self, action: Any, control_timestep: float = None):
  """Processes the action and updates per control step quantities.

  Args:
    action: The input control command.
    control_timestep: The control time step in the environment.
      TODO(b/153835005): remove once the env is passed to the robot.

  Returns:
    The filtered action (or the action unchanged when no filter is
    configured).
  """
  if not self._action_filter:
    return action
  # The filter is expected to produce a set of interpolated results.
  return self._action_filter.filter(action)
def apply_action(self, motor_commands, motor_control_mode=None):
# TODO(b/148897311): Supports dict in the future.
motor_commands = np.asarray(motor_commands)
# We always use torque based control at the lowest level for quadrupeds.
unused_observed_torques, actual_torques | |
# GitHub stars: 1-10
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
import numpy as np
import pandas as pd
import random
import h5py
from skimage import io
from skimage import feature
from skimage.draw import circle
from scipy.ndimage.morphology import binary_fill_holes
from skimage.measure import label
from skimage.measure import regionprops
from skimage.morphology import remove_small_objects
from scipy.signal import find_peaks
from scipy.interpolate import interp1d
from scipy.signal import filtfilt
from scipy.interpolate import BSpline
from irtemp import centikelvin_to_celsius
import data_encoding as de
# Function to load the input file
def input_file(file_name):
    '''
    To load the input file as an array.

    Parameters
    -----------
    file_name : String
        Name of the Tiff or HDF5 file to be loaded
        as it is saved on the disk.
        Provide file path if it is not in the same directory as
        the jupyter notebook.

    Returns
    --------
    frames : Array
        In case of a video, returns an array for each frame
        in the video.
        In case of an image, return an array.

    Raises
    -------
    ValueError
        If the file extension is neither HDF5 nor TIFF. (The original
        fell through and crashed with UnboundLocalError instead.)
    '''
    # Case-insensitive extension check; also accept the common short
    # forms '.h5' and '.tif' (the original compared only the last four
    # characters against 'HDF5'/'tiff' case-sensitively).
    lowered = file_name.lower()
    if lowered.endswith(('hdf5', '.h5')):
        file = h5py.File(file_name, 'r')
        frames = []
        # Frames are stored as datasets named 'image1', 'image2', ...
        for i in range(1, len(file.keys()) + 1):
            frames.append(file['image' + str(i)])
    elif lowered.endswith(('tiff', '.tif')):
        frames = io.imread(file_name)
    else:
        raise ValueError(
            'Unsupported file type for %r: expected a TIFF or HDF5 file.'
            % file_name)
    return frames
# Function to flip the frames horizontally and vertically to correct
# for the mirroring during recording.
def flip_frame(frames):
    '''
    To flip all the loaded frames horizontally and vertically
    to correct for the mirroring during recording.

    Parameters
    -----------
    frames : Array
        An array containing an array for each frame
        in the video or just a single array in case of an image.

    Returns
    --------
    flip_frames : Array
        Flipped frames that can be processed to get temperature data.
    '''
    # Mirror left-right, then top-bottom, for every frame.
    return [np.flipud(np.fliplr(frame)) for frame in frames]
# Function to detect edges, fill and label the samples.
def edge_detection(frames, n_samples):
    '''
    To detect the edges of the wells, fill and label them to
    determine their centroids.

    The Canny intensity divisor and the minimum object size are swept
    from strict to permissive until exactly `n_samples` regions are
    detected in the first frame.

    Parameters
    -----------
    frames : Array
        The frames to be processed and determine the
        sample temperature from. Only frames[0] is used for detection.
    n_samples : Int
        The number of samples in the input video.

    Returns
    --------
    labeled_samples : Array
        All the samples in the frame are labeled
        so that they can be used as props to get pixel data.
        NOTE(review): if no (size, threshold) combination yields exactly
        n_samples regions, the labels from the last attempted combination
        are returned after printing a warning.
    '''
    # Sweep minimum object size 15 -> 10, then threshold 1500 -> 1000.
    for size in range(15, 9, -1):
        for thres in range(1500, 900, -100):
            edges = feature.canny(frames[0]/thres)
            filled_samples = binary_fill_holes(edges)
            cl_samples = remove_small_objects(filled_samples, min_size=size)
            labeled_samples = label(cl_samples)
            props = regionprops(labeled_samples, intensity_image=frames[0])
            if len(props) == n_samples:
                break
        if len(props) == n_samples:
            break
    # Both sweeps exhausted (size/thres at their final values) without
    # matching the expected sample count.
    if size == 10 and thres == 1000 and len(props) != n_samples:
        print('Not all the samples are being recognized with the set \
minimum size and threshold range')
    return labeled_samples
# Function to determine centroids of all the samples
def regprop(labeled_samples, frames, n_rows, n_columns):
    '''
    Determines the area and centroid of all samples.

    Parameters
    -----------
    labeled_samples: Array
        An array with labeled samples.
    frames : Array
        Original intensity image to determine
        the intensity at sample centroids.
    n_rows: Int
        Number of rows of sample
    n_columns: Int
        Number of columns of sample

    Returns
    --------
    regprops: Dict
        A dictionary of dataframes with information about samples in every
        frame of the video.
    '''
    regprops = {}
    n_samples = n_rows * n_columns
    # One random (per-run stable) id per sample; used later by
    # `sort_regprops` to reorder every frame consistently.
    unique_index = random.sample(range(100), n_samples)
    for i in range(len(frames)):
        props = regionprops(labeled_samples, intensity_image=frames[i])
        # Initializing arrays for all sample properties obtained from regprops.
        row = np.zeros(len(props)).astype(int)
        column = np.zeros(len(props)).astype(int)
        area = np.zeros(len(props))
        radius = np.zeros(len(props))
        perim = np.zeros(len(props))
        intensity = np.zeros(len(props), dtype=np.float64)
        plate = np.zeros(len(props), dtype=np.float64)
        plate_coord = np.zeros(len(props))
        c = 0
        for prop in props:
            row[c] = int(prop.centroid[0])
            column[c] = int(prop.centroid[1])
            area[c] = prop.area
            perim[c] = prop.perimeter
            radius[c] = prop.equivalent_diameter/2
            # Sample reading: mean intensity over a disc of one third of
            # the sample radius around the centroid.
            # NOTE(review): `skimage.draw.circle` was removed in
            # scikit-image 0.19 in favour of `skimage.draw.disk`; confirm
            # the pinned scikit-image version.
            rr, cc = circle(row[c], column[c], radius = radius[c]/3)
            intensity[c] = np.mean(frames[i][rr,cc])
            # Plate reading: a single pixel 3 px to the right of the
            # sample's rightmost extent, on the centroid's row.
            plate[c] = frames[i][row[c]][column[c]+int(radius[c])+3]
            plate_coord[c] = column[c]+radius[c]+3
            c = c + 1
        regprops[i] = pd.DataFrame({'Row': row, 'Column': column,
                                    'Plate_temp(cK)': plate,
                                    'Radius': radius,
                                    'Plate_coord': plate_coord,
                                    'Area': area, 'Perim': perim,
                                    'Sample_temp(cK)': intensity,
                                    'unique_index': unique_index},
                                   dtype=np.float64)
        if len(regprops[i]) != n_samples:
            print('Wrong number of samples are being detected in frame %d' % i)
        regprops[i].sort_values(['Column', 'Row'], inplace=True)
    return regprops
def sort_regprops(regprops, n_columns, n_rows):
    '''
    Function to sort the regprops to match the order in which the samples
    are pipetted.

    The samples are pipetted out top to bottom, left to right, so within
    each column block the rows are sorted by their row coordinate, and
    every frame's dataframe is then reindexed to that same order via the
    per-sample `unique_index` ids.

    Parameters
    ------------
    regprops : Dict
        A dictionary of dataframes containing information about the sample.
        NOTE: mutated in place (frame 0 is reordered, and every frame is
        re-indexed by 'unique_index').
    n_columns : Int
        Number of columns of samples
    n_rows : Int
        Number of rows of samples

    Returns
    --------
    sorted_regprops : Dict
        A dictionary of dataframe with information about samples in every
        frame of the video, ordered top-to-bottom then left-to-right.
    '''
    # Sort each column block of the first frame by the row coordinate.
    blocks = [
        regprops[0][col * n_rows:(col + 1) * n_rows].sort_values(['Row'])
        for col in range(n_columns)
    ]
    regprops[0] = pd.concat(blocks)
    # The resulting id order is reused to reorder every other frame.
    pipette_order = regprops[0].unique_index
    sorted_regprops = {}
    for frame_idx in range(len(regprops)):
        regprops[frame_idx].set_index('unique_index', inplace=True)
        sorted_regprops[frame_idx] = regprops[frame_idx].reindex(pipette_order)
    return sorted_regprops
# Function to obtain temperature of samples and plate temp
def sample_temp(sorted_regprops, frames):
    '''
    Function to concatenate all the obtained temperature data
    from the pixel values into lists.

    Parameters
    ----------
    sorted_regprops : Dict
        The dictionary of sorted dataframes containing temperature data
        (centikelvin columns 'Sample_temp(cK)' and 'Plate_temp(cK)').
    frames : Array
        The array of frames to be processed to obtain temperature data
        (only its length is used here).

    Returns
    -------
    temp : List
        Temperature (deg C) of all the samples in every frame of the video.
    plate_temp : List
        Temperature (deg C) of the plate next to every sample in every
        frame of the video.
    '''
    temp = []
    plate_temp = []
    # Use frame 0 as the reference for the number of samples: the
    # original indexed frame 1, which crashes on single-frame inputs,
    # and every frame holds the same set of samples.
    n_samples = len(sorted_regprops[0])
    for j in range(n_samples):
        sample_profile = []
        plate_profile = []
        for i in range(len(frames)):
            frame_props = sorted_regprops[i]
            # `.iloc` reads one cell directly instead of materializing
            # the whole column as a list per (sample, frame) pair.
            sample_profile.append(centikelvin_to_celsius(
                frame_props['Sample_temp(cK)'].iloc[j]))
            plate_profile.append(centikelvin_to_celsius(
                frame_props['Plate_temp(cK)'].iloc[j]))
        temp.append(sample_profile)
        plate_temp.append(plate_profile)
    return temp, plate_temp
# # Function to obtain melting point by extracting the inflection point
# def peak_detection(sample_temp, plate_temp, material):
# '''
# Function to determine inflection point in the sample temperature
# profile(melting point)
# Parameters
# -----------
# sample_temp : List
# Temperature of all the samples in every frame of the video.
# plate_temp : List
# Temperature profiles of all the plate locations
# material : String
# Can be 'Plate' or 'Sample'
# Returns
# --------
# peaks : List
# List of two highest peak(inflection points) indices in the
# given temperature profiles.
# infl : List
# List of temperature at inflection points for
# given temperature profiles.
# '''
# infl = []
# peak_indices = []
# for i in range(len(sample_temp)):
# frames = np.linspace(1,len(sample_temp[i]),len(sample_temp[i]))
# # Fitting a spline to the temperature profile of the samples.
# if material == 'Plate':
# bspl = BSpline(frames,plate_temp[i],k=3)
# # Stacking x and y to calculate gradient.
# gradient_array = np.column_stack((frames,bspl(frames)))
# else:
# f = interp1d(plate_temp[i], sample_temp[i],bounds_error=False)
# gradient_array = np.column_stack((plate_temp[i],f(plate_temp[i])))
# # Calculating gradient
# gradient = np.gradient(gradient_array,axis=0)
# # Calculating derivative
# derivative = gradient[:,1]/gradient[:,0]
# # Finding peaks in the derivative plot.
# peaks, properties = find_peaks(derivative, height=0)
# # Peak heights
# peak_heights = properties['peak_heights']
# a = list(peak_heights)
# max_height1 = np.max(a)
# a.remove(max_height1)
# max_height2 = np.max(a)
# # Appending the index of the two highest peaks to lists.
# inf_index1 = list(peak_heights).index(max_height1)
# inf_index2 = list(peak_heights).index(max_height2)
# # Appending the frame number in which these peaks occur to a list
# peak_indices.append([peaks[inf_index1],peaks[inf_index2]])
# # Appending the temperature at the peaks.
# if material == 'Plate':
# infl.append([plate_temp[i][peaks[inf_index1]],
# plate_temp[i][peaks[inf_index2]]])
# else:
# infl.append([sample_temp[i][peaks[inf_index1]],
# sample_temp[i][peaks[inf_index2]]])
# return peak_indices, infl
# Function to obtain melting point by extracting the inflection point
def peak_detection(sample_temp, plate_temp, material):
'''
Function to determine inflection point in the sample temperature
profile(melting point)
Parameters
-----------
sample_temp : List
Temperature of all the samples in every frame of the video.
plate_temp : List
Temperature profiles of all the plate locations
material : String
Can be 'Plate' or 'Sample'
Returns
--------
peaks : List
List of two highest peak(inflection points) indices in the
given temperature profiles.
infl : List
List of temperature at inflection points for
given temperature profiles.
'''
| |
import glob
import pickle
font = {"family": "normal", "weight": "bold", "size": 16}
import matplotlib
matplotlib.rc("font", **font)
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib.gridspec import GridSpec
import numpy as np
import imageio
from handcam.ltt.util.Utils import softmax
imageio.plugins.ffmpeg.download()
from moviepy.video.io.bindings import mplfig_to_npimage
import moviepy.editor as mpy
import cv2
import os
import tensorflow as tf
#
flags = tf.app.flags
# State your dataset directory
# flags.DEFINE_string('dataset_dir', '/local/home/luke/datasets/handcam/', 'String: Your dataset directory')
flags.DEFINE_string(
"dataset_dir",
"/local/home/luke/datasets/handcam/tfrecords",
"String: Your dataset directory",
)
flags.DEFINE_string(
"model_path",
"/media/luke/hdd-3tb/models/handcam/split8/sequence_resnet-18/rgbd/train/*/*/",
"path to model to use the results for plotting",
)
flags.DEFINE_bool(
"load_new_from_tfrecord",
True,
"Bool: Should load new or try to load the pickle file.",
)
FLAGS = flags.FLAGS
# model_list = glob.glob(os.path.join(FLAGS.model_path, "results_long_vid.pckl"))
model_list = glob.glob(os.path.join(FLAGS.model_path, "results_probabilities.pckl"))
sample_id = 0
# print(results['preds'][sample_id])
with open(model_list[0], "rb") as f:
results = pickle.load(f)
sample_name = results["sample_names"][sample_id] # "20180406/165402-grasp_5"
# for pred in results['preds'][sample_id]:
# print(pred)
class_probabilities = []
for class_id in range(7):
class_probabilities.append([])
for pred in results["preds"][sample_id]:
temp = softmax(pred)
class_probabilities[class_id].append(temp[class_id])
class_probabilities = np.array([np.array(i) for i in class_probabilities])
print(len(class_probabilities[0]))
labels = np.array([np.array(np.argmax(i)) for i in results["labels"][sample_id]])
# labels = np.argmax(labels, axis=1)
print(labels)
# object_touched_time = np.argwhere(labels != 6)[-1]
object_touched_time = 10
object_touched_frame = object_touched_time / 30.0
# arm_ready_time = np.argwhere(labels != 6)[0]
arm_ready_time = 100
arm_ready_frame = arm_ready_time / 30.0
#
# for frame_id in range(len(results['labels'][sample_id])):
# frame = results['labels'][frame_id]
# print(frame)
# if np.argmax(frame, axis=1) != 6:
# arm_ready_time = frame_id
#
# for frame_id in reversed(range(len(results['labels']))):
# frame = results['labels'][frame_id]
# if np.argmax(frame) != 6:
# object_touched_time = frame_id
print("Arm ready: %d" % arm_ready_time)
print("Object touched: %d" % object_touched_time)
for class_ in class_probabilities:
print(class_)
grasp_icons = []
for name in [
"grasp0.png",
"grasp1.png",
"grasp2.png",
"grasp3.png",
"grasp4.png",
"grasp5.png",
"grasp_none.png",
]:
img = cv2.imread(os.path.join("plot_icons", name))
img = cv2.resize(img, (0, 0), fx=0.25, fy=0.25)
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
print(img.shape)
grasp_icons.append(img)
# load the sample as tfrecord using sample_name
sample_path = os.path.join(FLAGS.dataset_dir, sample_name + ".tfrecord")
if FLAGS.load_new_from_tfrecord:
context_features = {
"vid_length": tf.FixedLenFeature((), tf.int64),
"first_grasp_frame": tf.FixedLenFeature((), tf.int64),
"last_grasp_frame": tf.FixedLenFeature((), tf.int64),
"sample_name": tf.FixedLenFeature((), tf.string),
}
sequence_features = {
"vid": tf.FixedLenSequenceFeature([], dtype=tf.string),
"frame_labels": tf.FixedLenSequenceFeature([], dtype=tf.int64),
}
def _parse_function_sequence(example_proto):
    """Parse one serialized tf.SequenceExample into decoded tensors.

    Uses the module-level `context_features` / `sequence_features` specs
    (defined under the `load_new_from_tfrecord` branch above).

    Returns a tuple (img, one_hot, seq_len, first_grasp_frame,
    last_grasp_frame, sample_name) where `img` is a float32
    [time, 240, 320, 4] tensor (RGB-D frames) and `one_hot` is the
    per-frame label as a 7-way one-hot int64 tensor.
    """
    # parsed_features = tf.parse_single_example(example_proto, features)
    print("next")  # NOTE(review): leftover debug print; fires at graph-build time
    context_parsed, sequence_parsed = tf.parse_single_sequence_example(
        example_proto, context_features, sequence_features
    )
    # Scalars from the context part of the example.
    seq_len = tf.to_int32(context_parsed["vid_length"])
    first_grasp_frame = tf.to_int32(context_parsed["first_grasp_frame"])
    last_grasp_frame = tf.to_int32(context_parsed["last_grasp_frame"])
    sample_name = tf.decode_raw(context_parsed["sample_name"], tf.uint8)
    # Video frames are stored as raw uint16 bytes; reshape to RGB-D frames.
    img = tf.decode_raw(sequence_parsed["vid"], tf.uint16)
    img = tf.reshape(img, [-1, 240, 320, 4])
    img = tf.cast(img, tf.float32)
    one_hot = tf.one_hot(sequence_parsed["frame_labels"], 7, dtype=tf.int64)
    return img, one_hot, seq_len, first_grasp_frame, last_grasp_frame, sample_name
def preprocessing_op_sequence(image_op):
    """Per-frame preprocessing: pin the static frame shape to 240x320x4.

    Cropping/normalization hooks were considered here but are disabled;
    this op currently only annotates the tensor's static shape so that
    downstream graph ops know the frame geometry.
    """
    frame_shape = [240, 320, 4]
    with tf.name_scope("preprocessing"):
        # Former crop (disabled): image_op = image_op[8:232, 48:272, :]
        image_op.set_shape(frame_shape)
        return image_op
def read_and_decode_sequence(filename_queue):
    """Decode one serialized sequence example and preprocess every frame.

    NOTE(review): despite the parameter name, `filename_queue` receives the
    serialized example proto here (this function is used from a tf.data map
    below, not from an actual queue) -- confirm against callers.

    Returns [seq_img, seq_labels, seq_len, sample_name].
    """
    # reader = tf.TFRecordReader()
    # _, serialized_example = reader.read(filename_queue)
    with tf.name_scope("TFRecordDecoding"):
        # parse sequence
        (
            seq_img,
            seq_labels,
            seq_len,
            first_grasp_frame,
            last_grasp_frame,
            sample_name,
        ) = _parse_function_sequence(filename_queue)
        # preprocessing each frame (only fixes the static shape; see
        # preprocessing_op_sequence)
        seq_img = tf.map_fn(
            lambda x: preprocessing_op_sequence(x),
            elems=seq_img,
            dtype=tf.float32,
            back_prop=False,
        )
        return [seq_img, seq_labels, seq_len, sample_name]
def input_pipeline_sequence(filenames, name="input_pipeline", shuffle=True):
    """Queue-runner based input pipeline producing padded sequence batches.

    NOTE(review): relies on FLAGS.num_epochs, FLAGS.ip_num_read_threads and
    FLAGS.ip_queue_capacity, none of which are registered among the flags
    visible in this file -- confirm they are defined elsewhere before calling
    this function (it appears unused by the tf.data path below).
    """
    with tf.name_scope(name):
        # Create a queue of TFRecord input files.
        filename_queue = tf.train.string_input_producer(
            filenames, num_epochs=FLAGS.num_epochs, shuffle=shuffle
        )
        # Read the data from TFRecord files, decode and create a list of data samples by using threads.
        sample_list = [
            read_and_decode_sequence(filename_queue)
            for _ in range(FLAGS.ip_num_read_threads)
        ]
        # Create batches.
        # Since the data consists of variable-length sequences, allow padding by setting dynamic_pad parameter.
        # "batch_join" creates batches of samples and pads the sequences w.r.t the max-length sequence in the batch.
        # Hence, the padded sequence length can be different for different batches.
        batch_rgb, batch_labels, batch_lens, sample_names = tf.train.batch_join(
            sample_list,
            batch_size=1,
            capacity=FLAGS.ip_queue_capacity,
            enqueue_many=False,
            dynamic_pad=True,
            allow_smaller_final_batch=False,
            name="batch_join_and_pad",
        )
        return batch_rgb, batch_labels, batch_lens, sample_names
with tf.variable_scope("preprocessing"):
filenames_placeholder = tf.placeholder(tf.string, shape=[None])
dataset = tf.data.TFRecordDataset(
filenames_placeholder, compression_type="GZIP"
)
dataset = dataset.map(
lambda x: read_and_decode_sequence(x), num_parallel_calls=4
)
dataset = dataset.repeat(1)
# dataset = dataset.shuffle(buffer_size=1000)
dataset = dataset.apply(tf.contrib.data.batch_and_drop_remainder(1))
dataset = dataset.prefetch(1)
iterator = tf.data.Iterator.from_structure(
dataset.output_types, dataset.output_shapes
)
initializer = iterator.make_initializer(dataset)
(
test_batch_samples_op,
test_batch_labels_op,
test_batch_seq_len_op,
sample_names,
) = iterator.get_next()
test_feed_dict = {filenames_placeholder: [sample_path]}
sess = tf.Session()
# init_op = tf.group(tf.global_variables_initializer(),tf.local_variables_initializer())
sess.run(tf.local_variables_initializer())
sess.run(tf.global_variables_initializer())
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
sess.run(initializer, feed_dict=test_feed_dict)
vid = sess.run(test_batch_samples_op)
with open("vid.pckl", "wb") as f:
pickle.dump(vid, f)
sess.close()
else:
with open("vid.pckl", "rb") as f:
vid = pickle.load(f)
# TODO: res[0] is shape [105,240,320,4] and ready to be used in the graphs below.
# some functions for processing the images
def rotate_for_display(vid):
    """Return a copy of *vid* with each frame rotated 90 deg CCW then mirrored.

    Applies np.rot90 followed by np.fliplr per frame; the input dtype is
    preserved in the returned array.
    """
    transformed = [np.fliplr(np.rot90(frame)) for frame in vid]
    return np.asarray(transformed, dtype=vid.dtype)
font = cv2.FONT_HERSHEY_SIMPLEX
text_color = (255, 255, 255)
def label_vid(vid):
    """Overlay grasp-prediction icons and captions on every frame, in place.

    Relies on module-level globals: `grasp_icons`, `class_probabilities`,
    `labels`, `font`, `text_color` and cv2. Mutates `vid` and also returns it.
    """
    for frame_id in range(vid.shape[0]):
        label_style = "only_pred"  # hard-coded: the else branch below is dead code
        if label_style == "only_pred":
            # Ground Truth
            cv2.putText(
                vid[frame_id],
                "Prediction:",
                (5, 255),
                font,
                0.6,
                text_color,
                1,
                cv2.LINE_AA,
            )
            # cv2.putText(vid[frame_id], '%d' % labels[frame_id], (40, 310), font, 0.6, text_color, 2, cv2.LINE_AA)
            # Paste the 48x44 icon of the argmax-predicted class near the bottom.
            vid[frame_id][320 - 48 - 10 : 320 - 10, 30:74] = grasp_icons[
                int(np.argmax(class_probabilities[:, frame_id]))
            ]
        else:
            # Ground Truth
            cv2.putText(
                vid[frame_id], "Label:", (5, 255), font, 0.6, text_color, 1, cv2.LINE_AA
            )
            # cv2.putText(vid[frame_id], '%d' % labels[frame_id], (40, 310), font, 0.6, text_color, 2, cv2.LINE_AA)
            vid[frame_id][320 - 48 - 10 : 320 - 10, 10:54] = grasp_icons[
                labels[frame_id]
            ]
            # Pred
            cv2.putText(
                vid[frame_id], "Pred:", (80, 255), font, 0.6, text_color, 1, cv2.LINE_AA
            )
            # cv2.putText(vid[frame_id], '%d' % np.argmax(class_probabilities[:,frame_id]), (100, 310), font, 0.6, text_color, 2, cv2.LINE_AA)
            vid[frame_id][320 - 48 - 10 : 320 - 10, 82 : 82 + 44] = grasp_icons[
                int(np.argmax(class_probabilities[:, frame_id]))
            ]
    return vid
# accel = data_handler.get_sample_accel(sample_index)
# gyro = data_handler.get_sample_gyro(sample_index)
# pose = data_handler.get_sample_pose(sample_index)
# arm_ready_time = data_handler.get_sample_arm_ready_time(sample_index) / 1000.0
# object_touched_time = data_handler.get_sample_object_touched_time(sample_index) / 1000.0
depth_vid = rotate_for_display(vid[0, :, :, :, 3:])
rgb_vid = rotate_for_display(np.asarray(vid[0, :, :, :, 0:3], dtype=np.uint8))
rgb_vid = label_vid(rgb_vid)
start_num = 220
rgb_vid = rgb_vid[start_num:]
class_probabilities = class_probabilities[:, start_num:]
# accel = self.get_sample_accel(sample_index)
# gyro = self.get_sample_gyro(sample_index)
# pose = self.get_sample_pose(sample_index)
# arm_ready_time = self.get_sample_arm_ready_time(sample_index) / 1000.0
# object_touched_time = self.get_sample_object_touched_time(sample_index) / 1000.0
# depth_vid = self.get_sample_depth(sample_index)
# rgb_vid = self.get_sample_rgb(sample_index)
# time_acc = accel[:, 0] / 1000.0
# acc_x = accel[:, 1]
# acc_y = accel[:, 2]
# acc_z = accel[:, 3]
#
# time_gyro = gyro[:, 0] / 1000.0
# gyro_x = gyro[:, 1]
# gyro_y = gyro[:, 2]
# gyro_z = gyro[:, 3]
#
# time_pose = pose[:, 0] / 1000.0
# pose_x = pose[:, 1]
# pose_y = pose[:, 2]
# pose_z = pose[:, 3]
# pose_w = pose[:, 4]
# Plotting
# num of horiz grids
horiz_grid_num = 20
border_of_lr = int(horiz_grid_num / 2)
prob_plot_grid_start = 3
gs = GridSpec(7, horiz_grid_num)
gs.update(left=0.08, right=0.95, wspace=0.4, top=0.9, bottom=0.1, hspace=0.13)
fig = plt.figure(figsize=(20, 10))
# fig.subplots_adjust(hspace=0.0025)
# plt.suptitle(sample_path)
gs.tight_layout(fig)
ax_image = plt.subplot(gs[:, border_of_lr:])
ax_image.axis("off")
# No grasp
ax_no_grasp = plt.subplot(gs[0, prob_plot_grid_start:border_of_lr])
ax_no_grasp_yaxis_image = plt.subplot(gs[0, 0:prob_plot_grid_start])
ax_no_grasp_yaxis_image.imshow(grasp_icons[6])
ax_no_grasp_yaxis_image.get_xaxis().set_ticks([])
ax_no_grasp_yaxis_image.get_yaxis().set_ticks([])
ax_no_grasp_yaxis_image.axis("off")
# ax_no_grasp_yaxis_image.patch.set_visible(False)
# ax_no_grasp.set_ylabel("No Grasp", rotation='horizontal')
# ax_no_grasp.set_ylabel("No Grasp")
ax_no_grasp.get_xaxis().set_visible(False)
# Grasp0
ax_grasp0 = plt.subplot(gs[1, prob_plot_grid_start:border_of_lr])
ax_grasp0_yaxis_image = plt.subplot(gs[1, 0:prob_plot_grid_start])
ax_grasp0_yaxis_image.imshow(grasp_icons[0])
ax_grasp0_yaxis_image.get_xaxis().set_ticks([])
ax_grasp0_yaxis_image.get_yaxis().set_ticks([])
ax_grasp0_yaxis_image.axis("off")
# ax_grasp0.set_ylabel('Power Sphere')
ax_grasp0.get_xaxis().set_visible(False)
# Grasp1
ax_grasp1 = plt.subplot(gs[2, prob_plot_grid_start:border_of_lr])
ax_grasp1_yaxis_image = plt.subplot(gs[2, 0:prob_plot_grid_start])
ax_grasp1_yaxis_image.imshow(grasp_icons[1])
ax_grasp1_yaxis_image.get_xaxis().set_ticks([])
ax_grasp1_yaxis_image.get_yaxis().set_ticks([])
ax_grasp1_yaxis_image.axis("off")
# ax_grasp1.set_ylabel('Medium Wrap')
ax_grasp1.get_xaxis().set_visible(False)
# Grasp2
ax_grasp2 = plt.subplot(gs[3, prob_plot_grid_start:border_of_lr])
ax_grasp2_yaxis_image = plt.subplot(gs[3, 0:prob_plot_grid_start])
ax_grasp2_yaxis_image.imshow(grasp_icons[2])
ax_grasp2_yaxis_image.get_xaxis().set_ticks([])
ax_grasp2_yaxis_image.get_yaxis().set_ticks([])
ax_grasp2_yaxis_image.axis("off")
# ax_grasp2.set_ylabel('Tip Pinch')
ax_grasp2.get_xaxis().set_visible(False)
# Grasp3
ax_grasp3 = plt.subplot(gs[4, prob_plot_grid_start:border_of_lr])
ax_grasp3_yaxis_image = plt.subplot(gs[4, 0:prob_plot_grid_start])
ax_grasp3_yaxis_image.imshow(grasp_icons[3])
ax_grasp3_yaxis_image.get_xaxis().set_ticks([])
ax_grasp3_yaxis_image.get_yaxis().set_ticks([])
ax_grasp3_yaxis_image.axis("off")
# ax_grasp3.set_ylabel('Precision Disc')
ax_grasp3.get_xaxis().set_visible(False)
# Grasp4
ax_grasp4 = plt.subplot(gs[5, prob_plot_grid_start:border_of_lr])
ax_grasp4_yaxis_image = plt.subplot(gs[5, 0:prob_plot_grid_start])
ax_grasp4_yaxis_image.imshow(grasp_icons[4])
ax_grasp4_yaxis_image.get_xaxis().set_ticks([])
ax_grasp4_yaxis_image.get_yaxis().set_ticks([])
ax_grasp4_yaxis_image.axis("off")
# ax_grasp4.set_ylabel('Lateral Pinch')
ax_grasp4.get_xaxis().set_visible(False)
# grasp5
ax_grasp5 = plt.subplot(gs[6, prob_plot_grid_start:border_of_lr])
ax_grasp5_yaxis_image = plt.subplot(gs[6, 0:prob_plot_grid_start])
ax_grasp5_yaxis_image.imshow(grasp_icons[5])
ax_grasp5_yaxis_image.get_xaxis().set_ticks([])
ax_grasp5_yaxis_image.get_yaxis().set_ticks([])
ax_grasp5_yaxis_image.axis("off")
# ax_grasp5.set_ylabel('Writing Tripod')
ax_grasp5.set_xlabel("time (s)")
# Axis sharing
# ax_acc.get_shared_x_axes().join(ax_acc, ax_gyro, ax_pose)
im = ax_image.imshow(rgb_vid[0], animated=True)
time = np.asarray(range(rgb_vid.shape[0]), dtype=np.float) / 30.0
line_width = 3
# No grasp lines
# ax_no_grasp.axvline(arm_ready_frame, color='black', linestyle="--", alpha=0.5)
# ax_no_grasp.axvline(object_touched_frame, color='black', linestyle='--', alpha=0.5)
ax_no_grasp.plot(
time, class_probabilities[6], alpha=0.25, color="C0", linewidth=line_width
) # Faded line
ax_no_grasp.set_ylim(-0.05, 1.05)
ax_no_grasp.set_xlim(0, time[-1])
(line_no_grasp,) = ax_no_grasp.plot(
time, class_probabilities[6], color="C0", linewidth=line_width
) # actual current line
# Grasp0 lines
# ax_grasp0.axvline(arm_ready_frame, color='black', linestyle="--", alpha=0.5)
# ax_grasp0.axvline(object_touched_frame, color='black', linestyle='--', alpha=0.5)
ax_grasp0.plot(
time, class_probabilities[0], alpha=0.25, color="C0", linewidth=line_width
) # Fade line
ax_grasp0.set_ylim(-0.05, 1.05)
ax_grasp0.set_xlim(0, time[-1])
(line_grasp0,) = ax_grasp0.plot(
time, class_probabilities[0], color="C0", linewidth=line_width
) # actual current line
# Grasp1 lines
# ax_grasp1.axvline(arm_ready_frame, color='black', linestyle="--", alpha=0.5)
# ax_grasp1.axvline(object_touched_frame, color='black', linestyle='--', alpha=0.5)
ax_grasp1.plot(
time, class_probabilities[1], alpha=0.25, color="C0", linewidth=line_width
) # Fade line
ax_grasp1.set_ylim(-0.05, 1.05)
ax_grasp1.set_xlim(0, time[-1])
(line_grasp1,) = ax_grasp1.plot(
time, class_probabilities[1], color="C0", linewidth=line_width
) # actual current line
# Grasp2 lines
# ax_grasp2.axvline(arm_ready_frame, color='black', linestyle="--", alpha=0.5)
# ax_grasp2.axvline(object_touched_frame, color='black', linestyle='--', alpha=0.5)
ax_grasp2.plot(
time, class_probabilities[2], alpha=0.25, color="C0", linewidth=line_width
) # Fade line
ax_grasp2.set_ylim(-0.05, 1.05)
ax_grasp2.set_xlim(0, time[-1])
(line_grasp2,) = ax_grasp2.plot(
time, class_probabilities[2], color="C0", linewidth=line_width
) # actual current line
# Grasp3 lines
# ax_grasp3.axvline(arm_ready_frame, color='black', linestyle="--", alpha=0.5)
# ax_grasp3.axvline(object_touched_frame, color='black', linestyle='--', alpha=0.5)
ax_grasp3.plot(
time, class_probabilities[3], alpha=0.25, color="C0", linewidth=line_width
) # Fade line
ax_grasp3.set_ylim(-0.05, 1.05)
ax_grasp3.set_xlim(0, time[-1])
(line_grasp3,) = ax_grasp3.plot(
time, class_probabilities[3], color="C0", linewidth=line_width
) # actual current line
# Grasp4 lines
# ax_grasp4.axvline(arm_ready_frame, color='black', linestyle="--", alpha=0.5)
# ax_grasp4.axvline(object_touched_frame, color='black', linestyle='--', alpha=0.5)
ax_grasp4.plot(
time, class_probabilities[4], alpha=0.25, color="C0", linewidth=line_width
) # Fade line
ax_grasp4.set_ylim(-0.05, 1.05)
ax_grasp4.set_xlim(0, time[-1])
(line_grasp4,) = ax_grasp4.plot(
time, class_probabilities[4], color="C0", linewidth=line_width
) # actual current line
# Grasp5 lines
# ax_grasp5.axvline(arm_ready_frame, color='black', linestyle="--", alpha=0.5)
# ax_grasp5.axvline(object_touched_frame, color='black', linestyle='--', alpha=0.5)
ax_grasp5.plot(
time, class_probabilities[5], alpha=0.25, color="C0", linewidth=line_width
) # Fade line
ax_grasp5.set_ylim(-0.05, 1.05)
ax_grasp5.set_xlim(0, time[-1])
(line_grasp5,) = ax_grasp5.plot(
time, class_probabilities[5], color="C0", linewidth=line_width
) # actual current line
# Handle the plot backgrounds for ground truth
bad_color = "xkcd:apricot"
good_color = "xkcd:dirty blue"
good_alpha = | |
<filename>support/unclefuncs.py
#!/usr/bin/python
import os,random
from math import pi,sqrt
def crossprod(vec1, vec2):
    """Return the cross product vec1 x vec2 of two 3-vectors as a list."""
    x = vec1[1] * vec2[2] - vec1[2] * vec2[1]
    y = vec1[2] * vec2[0] - vec1[0] * vec2[2]
    z = vec1[0] * vec2[1] - vec1[1] * vec2[0]
    return [x, y, z]
def dotprod(vec1, vec2):
    """Return the scalar (dot) product of two 3-vectors."""
    total = 0
    for axis in range(3):
        total += vec1[axis] * vec2[axis]
    return total
def magnitude(vector):
    """Return the Euclidean length of a 3-vector."""
    squared = vector[0] * vector[0] + vector[1] * vector[1] + vector[2] * vector[2]
    return sqrt(squared)
def magnitude2d(vector):
    """Return the Euclidean length of a 2-vector."""
    squared = vector[0] * vector[0] + vector[1] * vector[1]
    return sqrt(squared)
def recip(latvecs):
    """Return the reciprocal cell of three lattice vectors (no 2*pi factor).

    b_i = (a_j x a_k) / (a_i . (a_j x a_k)) for cyclic (i, j, k).
    """
    def _cross(u, v):
        return [u[1] * v[2] - u[2] * v[1],
                u[2] * v[0] - u[0] * v[2],
                u[0] * v[1] - u[1] * v[0]]

    def _dot(u, v):
        return u[0] * v[0] + u[1] * v[1] + u[2] * v[2]

    cell = []
    for i, j, k in ((0, 1, 2), (1, 2, 0), (2, 0, 1)):
        normal = _cross(latvecs[j], latvecs[k])
        volume = _dot(latvecs[i], normal)
        cell.append([component / volume for component in normal])
    return cell
def kmesh(recipunitcell, resolution):
    """Format a k-point mesh: each reciprocal vector divided by *resolution*.

    Returns three 'x y z' strings, one per reciprocal lattice vector.
    """
    rows = []
    for row in range(3):
        scaled = [recipunitcell[row][axis] / resolution for axis in range(3)]
        rows.append(str(scaled[0]) + ' ' + str(scaled[1]) + ' ' + str(scaled[2]))
    return rows
def findenergy():
    """Scan ./OUTCAR for the last TOTEN line and return its energy.

    Returns the energy as a float (5th whitespace token of the last line
    whose tokens contain 'TOTEN'), or the placeholder string '????????'
    when OUTCAR is absent or contains no TOTEN line.
    """
    rows = []
    if os.path.isfile('OUTCAR'):
        handle = open('OUTCAR', 'r')
        rows = handle.readlines()
        handle.close()
    toten_indices = [idx for idx, row in enumerate(rows) if 'TOTEN' in row.split()]
    if toten_indices:
        return float(rows[toten_indices[-1]].split()[4])
    return '????????'
def formationenthalpy(mixture, a, b, aatoms, batoms, totalatoms):
    """Formation enthalpy per atom of a mixture relative to its end members.

    `mixture`, `a`, `b` are total energies; `aatoms`/`batoms` count atoms of
    each species in the mixture cell and `totalatoms` atoms in the pure cells.
    """
    combined = aatoms + batoms
    # Weighted reference energy of the two pure phases (same association
    # order as the original expression to keep floating point identical).
    reference = a / totalatoms * aatoms / combined + b / totalatoms * batoms / combined
    return mixture / combined - reference
def updatestructure(structurenumber, formationenthalpy):
    """Replace the 'formation enthalpy<N>' marker line in structures.in.

    Scans structures.in for the line exactly equal to
    'formation enthalpy<structurenumber>' and overwrites it with the numeric
    value. If no marker is found (number stays 0) the file is left untouched.
    NOTE(review): a marker on line 0 would also be treated as "not found".
    """
    structuresread = open('structures.in','r')
    number = 0
    count = 0
    for i in structuresread:
        if i == ('formation enthalpy' + str(structurenumber) + '\n'):
            number = count
        count = count + 1
    structuresread.close()
    # Re-read the whole file so the marker line can be replaced in place.
    readlines = readfile('structures.in')
    if number != 0:
        readlines[number] = str(formationenthalpy) + '\n'
        writefile('structures.in',readlines)
def vegardslaw(constone, consttwo, largelatpar, smalllatpar):
    """Interpolate a lattice parameter linearly between two end members.

    The fraction constone/(constone+consttwo) moves the result from
    `largelatpar` toward `smalllatpar` (Vegard's law).
    """
    weight = float(constone)
    spread = largelatpar - smalllatpar
    # Same association order as the original to keep float results identical.
    return largelatpar - spread * weight / (weight + float(consttwo))
def getscore():
    """Return the final cross-validation score parsed from finalcvs.out.

    Looks for the line exactly equal to ' Final Cross Validation Score:' and
    returns the following line as a float. Returns the placeholder string
    '???????????' when the file is absent or the marker is not found.

    Fixes the original implementation, which called readfile('finalcvs.out')
    unconditionally and raised IOError when the file did not exist, and which
    opened the file twice without closing the first handle cleanly.
    """
    if not os.path.isfile('finalcvs.out'):
        return '???????????'
    handle = open('finalcvs.out', 'r')
    lines = handle.readlines()
    handle.close()
    found = 0
    for idx, row in enumerate(lines):
        if row == " Final Cross Validation Score:\n":
            # Score is on the line after the (last) marker.
            found = idx + 1
    if found != 0:
        return float(lines[found])
    return '???????????'
def conv(file, keyword):
    """Return every line of *file* whose whitespace-split tokens contain *keyword*.

    Lines are returned verbatim (trailing newlines kept), in file order.

    Fixes the original implementation, which opened the file twice, closed the
    first handle twice, and never closed the second handle (leak).
    """
    handle = open(file, 'r')
    lines = handle.readlines()
    handle.close()
    matches = []
    for row in lines:
        if keyword in row.split():
            matches.append(row)
    return matches
def randomize(numofstructs, path):
    """Write structures.in with the structure blocks of structures.orig shuffled.

    Draws a random permutation of block indices 2..numofstructs+1, then
    reassembles <path>/structures.in as: the 'peratom' header, the first
    (pure element) block, the shuffled middle blocks, and the final block.
    Python 2 only (print statements). The print calls are debug output.
    """
    print numofstructs
    # Candidate block indices; blocks 0 and 1 (header + first element) and the
    # trailing element block are kept in place.
    data = range(2,numofstructs + 2)
    ranlist = []
    print data
    for i in range(1,numofstructs + 1):
        print i
        # Sample without replacement to build a random permutation.
        ran = random.choice(data)
        ranlist.append(ran)
        data.remove(ran)
    structlines = readfile(path + '/structures.orig')
    # Indices of the '#-----' separator lines delimiting structure blocks.
    list = structuresindices(structlines)
    structlist = ['peratom\n']
    constonelines = ''.join(structlines[1:list[1]])
    structlist.append(constonelines)
    for i in ranlist:
        lines = ''.join(structlines[list[i-1]:list[i]])
        structlist.append(lines)
    print structlist
    print '\n'
    consttwolines = ''.join(structlines[list[-1]:])
    structlist.append(consttwolines)
    writefile(path +'/structures.in',structlist)
def readfile(file):
    """Return all lines of *file* as a list (trailing newlines kept)."""
    with open(file, 'r') as handle:
        return handle.readlines()
def writefile(file, lines):
    """Write the sequence *lines* to *file*, replacing any existing content."""
    with open(file, 'w') as handle:
        handle.writelines(lines)
def structuresindices(structlines):
    """Return indices of the lines that contain the '#-----' block separator."""
    return [idx for idx, row in enumerate(structlines) if '#-----' in row]
def getindices(file, whattolookfor):
    """Return the indices of lines in *file* that contain *whattolookfor*."""
    return [idx for idx, row in enumerate(readfile(file)) if whattolookfor in row]
def placeunclefiles(homedir):
    """Copy every recognised UNCLE input/output file from *homedir* into cwd.

    Each file is copied only if it exists in *homedir*; missing files are
    silently skipped. Replaces sixteen copy-pasted if-blocks with one
    data-driven loop; the file order matches the original implementation.
    """
    wanted = [
        'GApar.in', 'lat.in', 'fitpar.in', 'control.in', 'bulkpar.in',
        'groundstatesearch.in', 'MCpar.in', 'finalecis.out', 'figures.out',
        'finalcvs.out', 'struct_enum.out', 'genalgsummary.dat',
        'listchildren.dat', 'listgeneration.dat', 'population.out', 'gss.out',
    ]
    for name in wanted:
        src = homedir + '/' + name
        if os.path.isfile(src):
            # NOTE(review): shell cp breaks on paths containing spaces; kept
            # for behavioural compatibility with the rest of this module.
            os.system('cp ' + src + ' .')
def concentrations(list):
    """Group structure lines by concentration into concentrations/ files.

    *list* holds whitespace-separated records whose second token is the
    concentration. Recreates the 'concentrations' directory from scratch,
    then writes one sorted file 'concentration<value>' per unique value
    (only records with more than two tokens are included).
    NOTE(review): changes cwd and restores it with a relative '../', so the
    caller must not hold a relative cwd assumption across this call.
    """
    uniqueconclist = uniquelist(list,1)
    # Start from an empty concentrations/ directory every time.
    if os.path.isdir('concentrations') == False:
        os.mkdir('concentrations')
    else:
        os.system('rm -rf concentrations')
        os.mkdir('concentrations')
    os.chdir('concentrations')
    for i in uniqueconclist:
        specificconcs = []
        for j in list:
            if j.split()[1] == i and len(j.split()) > 2:
                specificconcs.append(j)
        specificconcs = sorted(specificconcs)
        writefile('concentration' + str(i),specificconcs)
    os.chdir('../')
def numberofstructures(list):
    """Tally structure occurrences per concentration into 'plot<conc>' files.

    For each concentration found in *list*, reads the matching
    concentrations/concentration<conc> file, counts how often each structure
    id (first token) occurs and writes a 'plot<conc>' summary with a total.
    NOTE(review): chdir's into concentrations/ and never returns to the
    previous directory -- confirm callers expect that.
    """
    uniqueconclist = uniquelist(list,1)
    os.chdir('concentrations')
    for l in uniqueconclist:
        total = 0
        struct = ['structure # # of occcurences\n--------------------------------------------\n']
        plot = []
        lines = readfile('concentration' + l)
        uniquestructlist = uniquelist(lines,0)
        for j in lines:
            struct.append(j.split()[0])
        for i in uniquestructlist:
            # Counting in `struct` is safe: the header line never matches an id.
            number = struct.count(i)
            total = total + number
            plot.append(i.rjust(5) + ' ' + str(number).rjust(3) + '\n')
        plot.append('total --> ' + str(total) + '\n')
        writefile('plot' + l,plot)
def uniquelist(list, index):
    """Return the distinct values of token *index* across the lines of *list*.

    Order of first appearance is preserved; each line is whitespace-split.
    """
    seen = []
    for entry in list:
        value = entry.split()[index]
        if value not in seen:
            seen.append(value)
    return seen
def getdirs(path, tag, line):
    """Collect name suffixes of entries in *path* whose names contain *tag*.

    For each matching entry, returns everything after the first occurrence of
    '<line>_' in the entry name (mirrors the original find()+offset logic,
    including its behaviour when the prefix is absent).
    """
    prefix = line + '_'
    suffixes = []
    for entry in os.listdir(path):
        if tag not in entry:
            continue
        start = entry.find(prefix) + len(prefix)
        suffixes.append(entry[start:])
    return suffixes
def mkposcars(file):
    """Generate POSCAR-style vasp.* files for enumerated superstructures.

    *file* is a template whose first line selects structures ('all', a
    'start - end' range, or an explicit list); it also embeds the two
    element blocks and lattice parameters. For every selected structure id
    the external 'makestr.new' tool is invoked, the produced vasp.NNNNNN
    file is re-read, and its lattice-parameter line is replaced with a
    Vegard's-law interpolation based on the atom counts on line 5.
    Finally the two per-element blocks are written to vasp.<element>.
    """
    input = readfile(file)
    # Decide which structure ids to process from the first template line.
    if input[0].split()[0] == 'all':
        superstruct = getdirs('vaspruns','str','str')
    elif '-' in input[0].split():
        # Inclusive 'start - end' range.
        superstruct = []
        for i in range(int(input[0].split()[0]),int(input[0].split()[2])+1):
            superstruct.append(str(i))
    else:
        superstruct = input[0].split()
    parentatoms = int(input[6])
    # The second element's block starts after the first block (8 fixed lines
    # plus one line per parent-cell atom).
    element2 = 8 + parentatoms
    elementone = input[1].split()[0]
    elementtwo = input[element2].split()[0]
    totatoms = int(input[6]) #find total number of atoms in parent unit cell
    latparindex=9+totatoms #find location of lattice parameter
    firstlatpar = float(input[2])
    secondlatpar = float(input[latparindex])
    # Track which element has the larger lattice parameter; n/p select the
    # matching atom-count columns for the Vegard's-law interpolation below.
    if firstlatpar > secondlatpar:
        largelatpar=firstlatpar
        smalllatpar=secondlatpar
        n=1
        p=0
    else:
        largelatpar=secondlatpar
        smalllatpar=firstlatpar
        n=0
        p=1
    for i in superstruct:
        # External enumeration tool writes a zero-padded vasp.NNNNNN file.
        os.system('makestr.new struct_enum.out ' + i)
        # Padding width depends on the magnitude of the structure id.
        if int(i) > 9999:
            poscarread = open('vasp.0' + i, 'r')
        elif int(i) > 999:
            poscarread = open('vasp.00' + i, 'r')
        elif int(i) > 99:
            poscarread = open('vasp.000' + i, 'r')
        elif int(i) > 9:
            poscarread = open('vasp.0000' + i, 'r')
        else:
            poscarread = open('vasp.00000' + i, 'r')
        lines = poscarread.readlines()
        poscarread.close()
        # Line 5 holds the per-element atom counts of this superstructure.
        atoms = lines[5].split()
        lines[1] = str(vegardslaw(atoms[n],atoms[p],largelatpar,smalllatpar)) + '\n'
        totalatoms = int(atoms[0]) + int(atoms[1])
        # Rewrite the same zero-padded file with the interpolated lattice parameter.
        if int(i) > 9999:
            poscarwrite = open('vasp.0' + i, 'w')
        elif int(i) > 999:
            poscarwrite = open('vasp.00' + i, 'w')
        elif int(i) > 99:
            poscarwrite = open('vasp.000' + i, 'w')
        elif int(i) > 9:
            poscarwrite = open('vasp.0000' + i, 'w')
        else:
            poscarwrite = open('vasp.00000' + i, 'w')
        poscarwrite.writelines(lines)
        poscarwrite.close()
    indexone = 8+totatoms
    indextwo = input.index('kpoints XxXxX\n')
    writefile('vasp.' + elementone,input[1:indexone])
    writefile('vasp.' + elementtwo,input[indexone:indextwo])
def strtocartesian(basisvecs, latticevecs):
    """Convert fractional 'x y z' string coordinates to cartesian triples.

    *latticevecs* are three 'x y z' strings (lattice rows); each basis
    vector is expanded as frac . lattice and rounded to 8 decimal places.
    """
    lat = [[float(token) for token in latticevecs[row].split()] for row in range(3)]
    cart = []
    for entry in basisvecs:
        frac = [float(token) for token in entry.split()]
        cart.append([
            round(frac[0] * lat[0][axis]
                  + frac[1] * lat[1][axis]
                  + frac[2] * lat[2][axis], 8)
            for axis in range(3)
        ])
    return cart
def strtocartesian2d(basisvecs, latticevecs):
    """2-D variant of strtocartesian: 'x y' strings, rounded to 2 decimals."""
    lat = [[float(token) for token in latticevecs[row].split()] for row in range(2)]
    cart = []
    for entry in basisvecs:
        frac = [float(token) for token in entry.split()]
        cart.append([
            round(frac[0] * lat[0][axis] + frac[1] * lat[1][axis], 2)
            for axis in range(2)
        ])
    return cart
def xcombinations(items, n):
    """Yield every ordered selection of *n* distinct elements of *items*.

    Each result is a list; the enumeration order matches the original
    recursive implementation. Uses range() instead of the Python-2-only
    xrange() -- behaviour is identical under Python 2 and this now also
    runs under Python 3.
    """
    if n == 0:
        yield []
    else:
        for i in range(len(items)):
            # Recurse on the remaining items with element i removed.
            for tail in xcombinations(items[:i] + items[i + 1:], n - 1):
                yield [items[i]] + tail
def xuniqueCombinations(items, n):
    """Yield every *n*-element combination of *items* (no reordering, no reuse).

    Each result is a list; order matches the original recursive
    implementation. Uses range() instead of the Python-2-only xrange() --
    behaviour is identical under Python 2 and this now also runs under
    Python 3.
    """
    if n == 0:
        yield []
    else:
        for i in range(len(items)):
            # Only elements after position i may follow, keeping combinations unique.
            for tail in xuniqueCombinations(items[i + 1:], n - 1):
                yield [items[i]] + tail
def xselections(items, n):
    """Yield every length-*n* selection of *items* with repetition allowed.

    Each result is a list; order matches the original recursive
    implementation. Uses range() instead of the Python-2-only xrange() --
    behaviour is identical under Python 2 and this now also runs under
    Python 3.
    """
    if n == 0:
        yield []
    else:
        for i in range(len(items)):
            for tail in xselections(items, n - 1):
                yield [items[i]] + tail
def xpermutations(items):
    """Yield every permutation of *items* (each as a list).

    Thin wrapper: delegates to xcombinations with n == len(items).
    """
    return xcombinations(items, len(items))
def cubicsymops():
    """Enumerate the 48 point-symmetry operations of the cubic lattice.

    Builds every signed permutation matrix: all permutations of the identity
    rows (via xpermutations) combined with every per-row sign choice
    (via xselections over {1, -1}). Returns a list of 3x3 matrices
    (lists of row lists) with duplicates removed.
    """
    temp = []
    latveccombs = []
    tuples = []
    # All 8 sign patterns for the three rows.
    for z in xselections([1,-1],3):
        tuples.append(z)
    # The three distinct unit row vectors (permutations of [1,0,0]).
    for x in xpermutations([1,0,0]):
        if x not in temp:
            temp.append(x)
    # The 6 row orderings, i.e. the permutation matrices.
    for y in xpermutations(temp):
        if y not in latveccombs:
            latveccombs.append(y)
    list = []
    for i in tuples:
        for j in latveccombs:
            # Apply the sign pattern i row-wise to permutation matrix j.
            vec = [[j[x][0]*i[x],j[x][1]*i[x],j[x][2]*i[x]] for x in range(0,3)]
            if vec not in list:
                list.append(vec)
    return list
def squaresymops():
temp = []
latveccombs = []
tuples = []
for z in xselections([1,-1],2):
tuples.append(z)
for x in xpermutations([1,0]):
if x not in temp:
temp.append(x)
for y in xpermutations(temp):
if y not in latveccombs:
latveccombs.append(y)
list = []
for i | |
<filename>ceph_deploy/lib/vendor/remoto/lib/vendor/execnet/gateway_base.py<gh_stars>0
"""
base execnet gateway code sent to the other side for bootstrapping.
NOTE: aims to be compatible to Python 2.5-3.X, Jython and IronPython
(C) 2004-2013 <NAME>, <NAME>, <NAME>, <NAME> and others
"""
from __future__ import with_statement
import sys, os, weakref
import traceback, struct
# NOTE that we want to avoid try/except style importing
# to avoid setting sys.exc_info() during import
#
ISPY3 = sys.version_info >= (3, 0)
if ISPY3:
from io import BytesIO
exec("def do_exec(co, loc): exec(co, loc)\n"
"def reraise(cls, val, tb): raise val\n")
unicode = str
_long_type = int
from _thread import interrupt_main
else:
from StringIO import StringIO as BytesIO
exec("def do_exec(co, loc): exec co in loc\n"
"def reraise(cls, val, tb): raise cls, val, tb\n")
bytes = str
_long_type = long
try:
from thread import interrupt_main
except ImportError:
interrupt_main = None
#f = open("/tmp/execnet-%s" % os.getpid(), "w")
#def log_extra(*msg):
# f.write(" ".join([str(x) for x in msg]) + "\n")
class EmptySemaphore:
    """No-op stand-in used when a semaphore with no size limit is requested."""

    def acquire(self):
        return None

    def release(self):
        return None
def get_execmodel(backend):
    """Resolve *backend* into an ExecModel providing concurrency primitives.

    *backend* is either an already-built model (passed through) or one of
    the names 'thread', 'eventlet', 'gevent'. The returned ExecModel lazily
    imports the primitives listed in the chosen `importdef` table on first
    attribute access, so unused backends never get imported.

    Raises ValueError for an unknown backend name.
    """
    if hasattr(backend, "backend"):
        # Already an execmodel instance - pass it through unchanged.
        return backend
    # Each importdef maps an attribute name to candidate import locations;
    # 'module::name' means "attribute *name* of *module*". Candidates are
    # tried in order (e.g. Python 2 'thread' before Python 3 '_thread').
    if backend == "thread":
        importdef = {
            'get_ident': ['thread::get_ident', '_thread::get_ident'],
            '_start_new_thread': ['thread::start_new_thread',
                                  '_thread::start_new_thread'],
            'threading': ["threading",],
            'queue': ["queue", "Queue"],
            'sleep': ['time::sleep'],
            'subprocess': ['subprocess'],
            'socket': ['socket'],
            '_fdopen': ['os::fdopen'],
            '_lock': ['threading'],
            '_event': ['threading'],
        }
        def exec_start(self, func, args=()):
            self._start_new_thread(func, args)
    elif backend == "eventlet":
        importdef = {
            'get_ident': ['eventlet.green.thread::get_ident'],
            '_spawn_n': ['eventlet::spawn_n'],
            'threading': ['eventlet.green.threading'],
            'queue': ["eventlet.queue"],
            'sleep': ['eventlet::sleep'],
            'subprocess': ['eventlet.green.subprocess'],
            'socket': ['eventlet.green.socket'],
            '_fdopen': ['eventlet.green.os::fdopen'],
            '_lock': ['eventlet.green.threading'],
            '_event': ['eventlet.green.threading'],
        }
        def exec_start(self, func, args=()):
            self._spawn_n(func, *args)
    elif backend == "gevent":
        importdef = {
            'get_ident': ['gevent.thread::get_ident'],
            '_spawn_n': ['gevent::spawn'],
            'threading': ['threading'],
            'queue': ["gevent.queue"],
            'sleep': ['gevent::sleep'],
            'subprocess': ['gevent.subprocess'],
            'socket': ['gevent.socket'],
            # XXX
            '_fdopen': ['gevent.fileobject::FileObjectThread'],
            '_lock': ['gevent.lock'],
            '_event': ['gevent.event'],
        }
        def exec_start(self, func, args=()):
            self._spawn_n(func, *args)
    else:
        raise ValueError("unknown execmodel %r" %(backend,))
    # The class is defined inside the function so it closes over the
    # backend-specific `importdef` and `exec_start` chosen above.
    class ExecModel:
        def __init__(self, name):
            self._importdef = importdef
            self.backend = name
            self._count = 0
        def __repr__(self):
            return "<ExecModel %r>" % self.backend
        def __getattr__(self, name):
            # Lazy import: resolve *name* via the importdef table, cache the
            # result with setattr so __getattr__ only runs once per name.
            locs = self._importdef.get(name)
            if locs is None:
                raise AttributeError(name)
            for loc in locs:
                parts = loc.split("::")
                loc = parts.pop(0)
                try:
                    mod = __import__(loc, None, None, "__doc__")
                except ImportError:
                    pass
                else:
                    if parts:
                        mod = getattr(mod, parts[0])
                    setattr(self, name, mod)
                    return mod
            raise AttributeError(name)
        start = exec_start
        def fdopen(self, fd, mode, bufsize=1):
            return self._fdopen(fd, mode, bufsize)
        def WorkerPool(self, hasprimary=False):
            return WorkerPool(self, hasprimary=hasprimary)
        def Semaphore(self, size=None):
            # size=None means "no limit": return the shared no-op semaphore.
            if size is None:
                return EmptySemaphore()
            return self._lock.Semaphore(size)
        def Lock(self):
            return self._lock.RLock()
        def RLock(self):
            return self._lock.RLock()
        def Event(self):
            event = self._event.Event()
            if sys.version_info < (2,7):
                # patch wait function to return event state instead of None
                real_wait = event.wait
                def wait(timeout=None):
                    real_wait(timeout=timeout)
                    return event.isSet()
                event.wait = wait
            return event
        def PopenPiped(self, args):
            PIPE = self.subprocess.PIPE
            return self.subprocess.Popen(args, stdout=PIPE, stdin=PIPE)
    return ExecModel(backend)
class Reply(object):
    """ reply instances provide access to the result
    of a function execution that got dispatched
    through WorkerPool.spawn()
    """
    def __init__(self, task, threadmodel):
        # task is the (func, args, kwargs) triple executed later by run().
        self.task = task
        # Signalled once run() has finished, successfully or not.
        self._result_ready = threadmodel.Event()
        self.running = True

    def get(self, timeout=None):
        """ get the result object from an asynchronous function execution.
        if the function execution raised an exception,
        then calling get() will reraise that exception
        including its traceback.
        """
        self.waitfinish(timeout)
        try:
            return self._result
        except AttributeError:
            # run() stored exception info instead of a result; re-raise it
            # with the original traceback.
            reraise(*(self._excinfo[:3]))  # noqa

    def waitfinish(self, timeout=None):
        # Block until run() signals completion; IOError if the timeout elapses.
        if not self._result_ready.wait(timeout):
            raise IOError("timeout waiting for %r" %(self.task, ))

    def run(self):
        # Execute the stored task, capturing either the result or the
        # exception info, then signal completion in all cases.
        func, args, kwargs = self.task
        try:
            try:
                self._result = func(*args, **kwargs)
            except:
                # sys may be already None when shutting down the interpreter
                if sys is not None:
                    self._excinfo = sys.exc_info()
        finally:
            self._result_ready.set()
            self.running = False
class WorkerPool(object):
    """ A WorkerPool allows to spawn function executions
    to threads, returning a reply object on which you
    can ask for the result (and get exceptions reraised).

    This implementation allows the main thread to integrate
    itself into performing function execution through
    calling integrate_as_primary_thread() which will return
    when the pool received a trigger_shutdown().
    """
    def __init__(self, execmodel, hasprimary=False):
        """ by default allow unlimited number of spawns. """
        self.execmodel = execmodel
        # Guards _running, _shuttingdown, _waitall_events and the primary
        # thread hand-off below.
        self._running_lock = self.execmodel.Lock()
        # Reply objects currently executing.
        self._running = set()
        self._shuttingdown = False
        # Events to signal when _running drains; see waitall().
        self._waitall_events = []
        if hasprimary:
            # Only the thread backend can donate the calling thread.
            if self.execmodel.backend != "thread":
                raise ValueError("hasprimary=True requires thread model")
            self._primary_thread_task_ready = self.execmodel.Event()
        else:
            self._primary_thread_task_ready = None

    def integrate_as_primary_thread(self):
        """ integrate the thread with which we are called as a primary
        thread for executing functions triggered with spawn().
        """
        assert self.execmodel.backend == "thread", self.execmodel
        primary_thread_task_ready = self._primary_thread_task_ready
        # interacts with code at REF1
        while 1:
            primary_thread_task_ready.wait()
            reply = self._primary_thread_task
            if reply is None:  # trigger_shutdown() woke us up
                break
            self._perform_spawn(reply)
            # we are concurrent with trigger_shutdown and spawn
            with self._running_lock:
                if self._shuttingdown:
                    break
                # Mark ourselves idle again so spawn() can hand us more work.
                primary_thread_task_ready.clear()

    def trigger_shutdown(self):
        # Flag the pool as shutting down and wake the primary thread (if any)
        # with a sentinel None task so it exits its loop.
        with self._running_lock:
            self._shuttingdown = True
            if self._primary_thread_task_ready is not None:
                self._primary_thread_task = None
                self._primary_thread_task_ready.set()

    def active_count(self):
        # Number of currently executing spawns.
        return len(self._running)

    def _perform_spawn(self, reply):
        # Run the task, then unregister it and wake any waitall() waiters
        # once the pool has fully drained.
        reply.run()
        with self._running_lock:
            self._running.remove(reply)
            if not self._running:
                while self._waitall_events:
                    waitall_event = self._waitall_events.pop()
                    waitall_event.set()

    def _try_send_to_primary_thread(self, reply):
        # REF1 in 'thread' model we give priority to running in main thread
        # note that we should be called with _running_lock hold
        primary_thread_task_ready = self._primary_thread_task_ready
        if primary_thread_task_ready is not None:
            # Only hand off if the primary thread is idle (event cleared).
            if not primary_thread_task_ready.isSet():
                self._primary_thread_task = reply
                # wake up primary thread
                primary_thread_task_ready.set()
                return True
        return False

    def spawn(self, func, *args, **kwargs):
        """ return Reply object for the asynchronous dispatch
        of the given func(*args, **kwargs).
        """
        reply = Reply((func, args, kwargs), self.execmodel)
        with self._running_lock:
            if self._shuttingdown:
                raise ValueError("pool is shutting down")
            self._running.add(reply)
            # Prefer the primary thread; otherwise spawn via the backend.
            if not self._try_send_to_primary_thread(reply):
                self.execmodel.start(self._perform_spawn, (reply,))
        return reply

    def terminate(self, timeout=None):
        """ trigger shutdown and wait for completion of all executions. """
        self.trigger_shutdown()
        return self.waitall(timeout=timeout)

    def waitall(self, timeout=None):
        """ wait until all active spawns have finished executing. """
        with self._running_lock:
            if not self._running:
                return True
            # if a Reply still runs, we let run_and_release
            # signal us -- note that we are still holding the
            # _running_lock to avoid race conditions
            my_waitall_event = self.execmodel.Event()
            self._waitall_events.append(my_waitall_event)
        return my_waitall_event.wait(timeout=timeout)
# Exceptions that should terminate dispatch loops rather than be swallowed.
sysex = (KeyboardInterrupt, SystemExit)

DEBUG = os.environ.get('EXECNET_DEBUG')
pid = os.getpid()
if DEBUG == '2':
    # EXECNET_DEBUG=2: trace to stderr, each line tagged with our pid.
    def trace(*msg):
        try:
            line = " ".join(map(str, msg))
            sys.stderr.write("[%s] %s\n" % (pid, line))
            sys.stderr.flush()
        except Exception:
            pass # nothing we can do, likely interpreter-shutdown
elif DEBUG:
    # Any other truthy EXECNET_DEBUG value: trace to a per-pid file in the
    # system temp directory.
    import tempfile, os.path
    fn = os.path.join(tempfile.gettempdir(), 'execnet-debug-%d' % pid)
    #sys.stderr.write("execnet-debug at %r" %(fn,))
    debugfile = open(fn, 'w')
    def trace(*msg):
        try:
            line = " ".join(map(str, msg))
            debugfile.write(line + "\n")
            debugfile.flush()
        except Exception:
            # Best effort: report the tracing failure itself to stderr.
            try:
                v = sys.exc_info()[1]
                sys.stderr.write(
                    "[%s] exception during tracing: %r\n" % (pid, v))
            except Exception:
                pass # nothing we can do, likely interpreter-shutdown
else:
    # Tracing disabled: no-op.
    notrace = trace = lambda *msg: None
class Popen2IO:
    """Raw byte-stream wrapper around a paired output/input file object,
    as used for talking over a subprocess stdin/stdout pipe."""

    error = (IOError, OSError, EOFError)

    def __init__(self, outfile, infile, execmodel):
        # we need raw byte streams
        self.outfile, self.infile = outfile, infile
        if sys.platform == "win32":
            # Windows defaults to text mode; force binary so byte counts match.
            import msvcrt
            try:
                msvcrt.setmode(infile.fileno(), os.O_BINARY)
                msvcrt.setmode(outfile.fileno(), os.O_BINARY)
            except (AttributeError, IOError):
                pass
        # Prefer the underlying binary buffer when the stream exposes one.
        self._read = getattr(infile, "buffer", infile).read
        self._write = getattr(outfile, "buffer", outfile).write
        self.execmodel = execmodel

    def read(self, numbytes):
        """Read exactly 'numbytes' bytes from the pipe. """
        # A file in non-blocking mode may return fewer bytes, so keep
        # collecting chunks until we have the requested amount.
        chunks = []
        remaining = numbytes
        while remaining > 0:
            data = self._read(remaining)
            if not data:
                raise EOFError("expected %d bytes, got %d" %(numbytes, numbytes - remaining))
            chunks.append(data)
            remaining -= len(data)
        return b"".join(chunks)

    def write(self, data):
        """write out all data bytes. """
        assert isinstance(data, bytes)
        self._write(data)
        self.outfile.flush()

    def close_read(self):
        self.infile.close()

    def close_write(self):
        self.outfile.close()
class Message:
    """ encapsulates Messages and their wire protocol. """

    # Handler table indexed by message code; populated elsewhere.
    _types = []

    def __init__(self, msgcode, channelid=0, data=''):
        self.msgcode = msgcode
        self.channelid = channelid
        self.data = data

    @staticmethod
    def from_io(io):
        """Read one message off *io*, raising EOFError on a broken header."""
        try:
            header = io.read(9)  # type 1, channel 4, payload 4
            if not header:
                raise EOFError("empty read")
        except EOFError:
            err = sys.exc_info()[1]
            raise EOFError('couldnt load message header, ' + err.args[0])
        msgcode, channelid, payload_length = struct.unpack('!bii', header)
        return Message(msgcode, channelid, io.read(payload_length))

    def to_io(self, io):
        """Serialize this message onto *io* (header followed by payload)."""
        # struct may already be torn down (None) during interpreter shutdown.
        if struct.pack is not None:
            header = struct.pack('!bii', self.msgcode, self.channelid,
                                 len(self.data))
            io.write(header+self.data)

    def received(self, gateway):
        # Dispatch to the handler registered for this message code.
        self._types[self.msgcode](self, gateway)

    def __repr__(self):
        handler_name = self._types[self.msgcode].__name__.upper()
        return "<Message %s channel=%s lendata=%s>" %(
            handler_name, self.channelid, len(self.data))
class GatewayReceivedTerminate(Exception):
    """Raised when the receiver thread gets a termination message."""
def _setupmessages():
def status(message, gateway):
# we use the channelid to send back information
# but don't instantiate a channel object
d = {'numchannels': len(gateway._channelfactory._channels),
'numexecuting': gateway._execpool.active_count(),
'execmodel': gateway.execmodel.backend,
}
gateway._send(Message.CHANNEL_DATA, message.channelid,
dumps_internal(d))
gateway._send(Message.CHANNEL_CLOSE, message.channelid)
def channel_exec(message, gateway):
channel = gateway._channelfactory.new(message.channelid)
gateway._local_schedulexec(channel=channel,sourcetask=message.data)
def channel_data(message, gateway):
gateway._channelfactory._local_receive(message.channelid, message.data)
def channel_close(message, gateway):
gateway._channelfactory._local_close(message.channelid)
def channel_close_error(message, gateway):
remote_error = RemoteError(loads_internal(message.data))
gateway._channelfactory._local_close(message.channelid, remote_error)
def | |
to represent data to.
Defaults to None.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
outputs = {}
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"generator_Fuel_Cost",self.Scenarios),
(True,"generator_VO&M_Cost",self.Scenarios),
(True,"generator_Start_&_Shutdown_Cost",self.Scenarios),
(False,"generator_Emissions_Cost",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
# Checks if all data required by plot is available, if 1 in list required data is missing
if 1 in check_input_data:
return MissingInputData()
for zone_input in self.Zones:
self.logger.info(f"Zone = {zone_input}")
gen_cost_out_chunks = []
for scenario in self.Scenarios:
self.logger.info(f"Scenario = {scenario}")
Fuel_Cost = self["generator_Fuel_Cost"].get(scenario)
# Check if Fuel_cost contains zone_input, skips if not
try:
Fuel_Cost = Fuel_Cost.xs(zone_input,level=self.AGG_BY)
except KeyError:
self.logger.warning(f"No Generators found for: {zone_input}")
continue
Fuel_Cost = Fuel_Cost.sum(axis=0)
Fuel_Cost.rename("Fuel_Cost", inplace=True)
VOM_Cost = self["generator_VO&M_Cost"].get(scenario)
VOM_Cost = VOM_Cost.xs(zone_input,level=self.AGG_BY)
VOM_Cost[0].values[VOM_Cost[0].values < 0] = 0
VOM_Cost = VOM_Cost.sum(axis=0)
VOM_Cost.rename("VO&M_Cost", inplace=True)
Start_Shutdown_Cost = self["generator_Start_&_Shutdown_Cost"].get(scenario)
Start_Shutdown_Cost = Start_Shutdown_Cost.xs(zone_input,level=self.AGG_BY)
Start_Shutdown_Cost = Start_Shutdown_Cost.sum(axis=0)
Start_Shutdown_Cost.rename("Start_&_Shutdown_Cost", inplace=True)
Emissions_Cost = self["generator_Emissions_Cost"][scenario]
if Emissions_Cost.empty:
self.logger.warning(f"generator_Emissions_Cost not included in {scenario} results, Emissions_Cost will not be included in plot")
Emissions_Cost = self["generator_Start_&_Shutdown_Cost"][scenario].copy()
Emissions_Cost.iloc[:,0] = 0
Emissions_Cost = Emissions_Cost.xs(zone_input,level=self.AGG_BY)
Emissions_Cost = Emissions_Cost.sum(axis=0)
Emissions_Cost.rename("Emissions_Cost", inplace=True)
Detailed_Gen_Cost = pd.concat([Fuel_Cost, VOM_Cost, Start_Shutdown_Cost, Emissions_Cost], axis=1, sort=False)
Detailed_Gen_Cost.columns = Detailed_Gen_Cost.columns.str.replace('_',' ')
Detailed_Gen_Cost = Detailed_Gen_Cost.sum(axis=0)
Detailed_Gen_Cost = Detailed_Gen_Cost.rename(scenario)
gen_cost_out_chunks.append(Detailed_Gen_Cost)
# Checks if gen_cost_out_chunks contains data, if not skips zone and does not return a plot
if not gen_cost_out_chunks:
outputs[zone_input] = MissingZoneData()
continue
Detailed_Gen_Cost_Out = pd.concat(gen_cost_out_chunks, axis=1, sort=False)
Detailed_Gen_Cost_Out = Detailed_Gen_Cost_Out.T/1000000 #Convert cost to millions
Detailed_Gen_Cost_Out.index = Detailed_Gen_Cost_Out.index.str.replace('_',' ')
# Deletes columns that are all 0
Detailed_Gen_Cost_Out = Detailed_Gen_Cost_Out.loc[:, (Detailed_Gen_Cost_Out != 0).any(axis=0)]
# Checks if Detailed_Gen_Cost_Out contains data, if not skips zone and does not return a plot
if Detailed_Gen_Cost_Out.empty:
outputs[zone_input] = MissingZoneData()
continue
# Data table of values to return to main program
Data_Table_Out = Detailed_Gen_Cost_Out.add_suffix(" (Million $)")
fig3, ax = plt.subplots(figsize=(self.x,self.y))
Detailed_Gen_Cost_Out.plot.bar(stacked=True, edgecolor='black', linewidth='0.1', ax=ax)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.axhline(y = 0)
ax.set_ylabel('Total Generation Cost (Million $)', color='black', rotation='vertical')
# Set x-tick labels
if len(self.custom_xticklabels) > 1:
tick_labels = self.custom_xticklabels
else:
tick_labels = Detailed_Gen_Cost_Out.index
PlotDataHelper.set_barplot_xticklabels(tick_labels, ax=ax)
ax.tick_params(axis='y', which='major', length=5, width=1)
ax.tick_params(axis='x', which='major', length=5, width=1)
ax.yaxis.set_major_formatter(mpl.ticker.FuncFormatter(lambda x, p: format(x, f',.{self.y_axes_decimalpt}f')))
ax.margins(x=0.01)
handles, labels = ax.get_legend_handles_labels()
ax.legend(reversed(handles), reversed(labels), loc='lower left',bbox_to_anchor=(1,0),
facecolor='inherit', frameon=True)
if mconfig.parser("plot_title_as_region"):
ax.set_title(zone_input)
cost_totals = Detailed_Gen_Cost_Out.sum(axis=1) #holds total of each bar
#inserts values into bar stacks
for patch in ax.patches:
width, height = patch.get_width(), patch.get_height()
if height<=2:
continue
x, y = patch.get_xy()
ax.text(x+width/2,
y+height/2,
'{:,.0f}'.format(height),
horizontalalignment='center',
verticalalignment='center', fontsize=12)
#inserts total bar value above each bar
for k, patch in enumerate(ax.patches):
height = cost_totals[k]
width = patch.get_width()
x, y = patch.get_xy()
ax.text(x+width/2,
y+height + 0.05*max(ax.get_ylim()),
'{:,.0f}'.format(height),
horizontalalignment='center',
verticalalignment='center', fontsize=15, color='red')
if k>=len(cost_totals)-1:
break
outputs[zone_input] = {'fig': fig3, 'data_table': Data_Table_Out}
return outputs
def sys_cost_type(self, start_date_range: str = None,
                  end_date_range: str = None, **_):
    """Creates stacked bar plot of total generation cost by generator technology type.

    Another way to represent total generation cost, this time by tech type,
    i.e Coal, Gas, Hydro etc.
    Each scenario is plotted as a separate bar.

    Args:
        start_date_range (str, optional): Defines a start date at which to represent data from.
            Defaults to None.
        end_date_range (str, optional): Defines a end date at which to represent data to.
            Defaults to None.

    Returns:
        dict: Dictionary keyed by zone, containing either the created plot and
        its data table, or a MissingZoneData marker.
    """
    # Create Dictionary to hold Dataframes for each scenario
    outputs = {}

    # List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
    # required True/False, property name and scenarios required, scenarios must be a list.
    properties = [(True,"generator_Total_Generation_Cost",self.Scenarios)]

    # Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
    # with all required properties, returns a 1 if required data is missing
    check_input_data = self.get_formatted_data(properties)

    # Checks if all data required by plot is available, if 1 in list required data is missing
    if 1 in check_input_data:
        return MissingInputData()

    for zone_input in self.Zones:
        gen_cost_out_chunks = []
        self.logger.info(f"Zone = {zone_input}")

        for scenario in self.Scenarios:
            self.logger.info(f"Scenario = {scenario}")

            Total_Gen_Stack = self["generator_Total_Generation_Cost"].get(scenario)
            # Check if Total_Gen_Stack contains zone_input, skips if not
            try:
                Total_Gen_Stack = Total_Gen_Stack.xs(zone_input,level=self.AGG_BY)
            except KeyError:
                self.logger.warning(f"No Generators found for : {zone_input}")
                continue

            # Map generators to technology categories, then total per tech.
            Total_Gen_Stack = self.df_process_gen_inputs(Total_Gen_Stack)
            Total_Gen_Stack = Total_Gen_Stack.sum(axis=0)
            Total_Gen_Stack.rename(scenario, inplace=True)
            gen_cost_out_chunks.append(Total_Gen_Stack)

        # Checks if gen_cost_out_chunks contains data, if not skips zone and does not return a plot
        if not gen_cost_out_chunks:
            outputs[zone_input] = MissingZoneData()
            continue

        # One column per scenario; techs missing from a scenario become 0.
        Total_Generation_Stack_Out = pd.concat(gen_cost_out_chunks, axis=1, sort=False).fillna(0)
        Total_Generation_Stack_Out = self.create_categorical_tech_index(Total_Generation_Stack_Out)
        Total_Generation_Stack_Out = Total_Generation_Stack_Out.T/1000000 #Convert to millions
        # Deletes tech columns that are all 0.
        Total_Generation_Stack_Out = Total_Generation_Stack_Out.loc[:, (Total_Generation_Stack_Out != 0).any(axis=0)]

        # Checks if Total_Generation_Stack_Out contains data, if not skips zone and does not return a plot
        if Total_Generation_Stack_Out.empty:
            outputs[zone_input] = MissingZoneData()
            continue

        # Data table of values to return to main program
        Data_Table_Out = Total_Generation_Stack_Out.add_suffix(" (Million $)")

        Total_Generation_Stack_Out.index = Total_Generation_Stack_Out.index.str.replace('_',' ')

        fig1, ax = plt.subplots(figsize=(self.x,self.y))
        # NOTE(review): linewidth is passed as the string '0.1'; newer matplotlib
        # versions expect a float here -- confirm against the pinned version.
        Total_Generation_Stack_Out.plot.bar(stacked=True,
                color=[self.PLEXOS_color_dict.get(x, '#333333') for x in Total_Generation_Stack_Out.columns],
                edgecolor='black', linewidth='0.1', ax=ax)

        ax.spines['right'].set_visible(False)
        ax.spines['top'].set_visible(False)
        ax.set_ylabel('Total System Cost (Million $)', color='black', rotation='vertical')
        # Set x-tick labels
        if len(self.custom_xticklabels) > 1:
            tick_labels = self.custom_xticklabels
        else:
            tick_labels = Total_Generation_Stack_Out.index
        PlotDataHelper.set_barplot_xticklabels(tick_labels, ax=ax)

        ax.tick_params(axis='y', which='major', length=5, width=1)
        ax.tick_params(axis='x', which='major', length=5, width=1)
        ax.yaxis.set_major_formatter(mpl.ticker.FuncFormatter(lambda x, p: format(x, f',.{self.y_axes_decimalpt}f')))
        ax.margins(x=0.01)

        # Reverse legend order so it matches the visual stacking of the bars.
        handles, labels = ax.get_legend_handles_labels()
        ax.legend(reversed(handles), reversed(labels), loc='lower left',bbox_to_anchor=(1,0),
                  facecolor='inherit', frameon=True)
        if mconfig.parser("plot_title_as_region"):
            ax.set_title(zone_input)

        outputs[zone_input] = {'fig': fig1, 'data_table': Data_Table_Out}
    return outputs
def sys_cost_diff(self, start_date_range: str = None,
end_date_range: str = None, **_):
"""Creates stacked barplots of Total Generation Cost and Cost of Unserved Energy relative to a base scenario.
Barplots show the change in total total generation cost relative to a base scenario.
The default is to comapre against the first scenario provided in the inputs list.
Plot only shows totals and is NOT broken down into technology or cost type specific values.
Each sceanrio is plotted as a separate bar.
Args:
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
end_date_range (str, optional): Defines a end date at which to represent data to.
Defaults to None.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
if self.AGG_BY == 'zone':
agg = 'zone'
else:
agg = 'region'
outputs = {}
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True, "generator_Total_Generation_Cost", self.Scenarios),
(False, f"{agg}_Cost_Unserved_Energy", self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
# Checks if all data required by plot is available, if 1 in list required data is missing
if 1 in check_input_data:
return MissingInputData()
for zone_input in self.Zones:
total_cost_chunk = []
self.logger.info(f"Zone = {zone_input}")
for scenario in self.Scenarios:
self.logger.info(f"Scenario = {scenario}")
Total_Systems_Cost = pd.DataFrame()
Total_Gen_Cost = self["generator_Total_Generation_Cost"].get(scenario)
try:
Total_Gen_Cost = Total_Gen_Cost.xs(zone_input,level=self.AGG_BY)
except KeyError:
self.logger.warning(f"No Generators found for : {zone_input}")
continue
Total_Gen_Cost = Total_Gen_Cost.sum(axis=0)
Total_Gen_Cost.rename("Total_Gen_Cost", inplace=True)
Cost_Unserved_Energy = self[f"{agg}_Cost_Unserved_Energy"][scenario]
if Cost_Unserved_Energy.empty:
Cost_Unserved_Energy = self["generator_Total_Generation_Cost"][scenario].copy()
Cost_Unserved_Energy.iloc[:,0] = 0
Cost_Unserved_Energy = Cost_Unserved_Energy.xs(zone_input,level=self.AGG_BY)
Cost_Unserved_Energy = Cost_Unserved_Energy.sum(axis=0)
Cost_Unserved_Energy.rename("Cost_Unserved_Energy", inplace=True)
Total_Systems_Cost = pd.concat([Total_Systems_Cost, Total_Gen_Cost, Cost_Unserved_Energy], axis=1, sort=False)
Total_Systems_Cost.columns = Total_Systems_Cost.columns.str.replace('_',' ')
Total_Systems_Cost.rename({0:scenario}, axis='index', inplace=True)
total_cost_chunk.append(Total_Systems_Cost)
# Checks if total_cost_chunk contains data, if not skips zone and does not return a plot
if not total_cost_chunk:
outputs[zone_input] = MissingZoneData()
continue
Total_Systems_Cost_Out = pd.concat(total_cost_chunk, axis=0, sort=False)
Total_Systems_Cost_Out = Total_Systems_Cost_Out/1000000 #Convert cost to millions
#Ensures region has generation, else skips
try:
Total_Systems_Cost_Out = Total_Systems_Cost_Out-Total_Systems_Cost_Out.xs(self.Scenarios[0]) #Change to a diff on first scenario
except KeyError:
outputs[zone_input] = MissingZoneData()
continue
Total_Systems_Cost_Out.drop(self.Scenarios[0],inplace=True) #Drop base entry
# Checks if Total_Systems_Cost_Out contains data, if not skips zone and | |
import glob, logging, os, sys, time, traceback
from pprint import pprint, pformat
from functools import partial
from fnmatch import fnmatch
import maya_helpers as mh
import prefs, util
import maya.OpenMayaUI as omui
from maya.OpenMaya import MGlobal
from dson import DSON, content
import Qt
from dsonimport import DSONImporter, DSONImporterConfig
# Module-wide logger for the importer UI.
log = logging.getLogger('DSONImporter')

# The directory we store settings, cached, and other persistent files. Is there a better
# place to put this?
_storage_path = '%s/dsonimport' % os.environ['MAYA_APP_DIR']

# Item-data role used to hold a stable sort tiebreaker (used by Filter.lessThan).
UserSortRole = Qt.Qt.UserRole
class UIState(object):
    """Holds the persistent preferences and per-session selections of the
    importer UI (which modifiers are enabled, which are dynamic)."""

    def __init__(self):
        # These are the modifier URLs that have been explicitly enabled by the user. This doesn't
        # include modifiers that are enabled due to dependencies.
        self.active_modifiers = set()

        # These are the modifier URLs that have been marked dynamic by the user.
        self.dynamic_modifiers = set()

        self.prefs = prefs.load_prefs()
        # Result of asset_cache_resolver.get_modifier_info(); refreshed by
        # update_modifier_info().
        self.current_modifier_info = None
        # NOTE(review): self.asset_config and self.asset_cache_resolver are read
        # by the methods below but never assigned here -- presumably set by the
        # owning UI before these methods are called; confirm.

    def save_prefs(self):
        """Write self.prefs back to persistent storage."""
        prefs.save_prefs(self.prefs)

    @property
    def enforce_requirements(self):
        """If true, we'll force modifiers enabled if we think they're required
        by another modifier."""
        return self.prefs.get('enforce_requirements', True)

    @enforce_requirements.setter
    def enforce_requirements(self, value):
        self.prefs['enforce_requirements'] = value
        self.save_prefs()

    @property
    def hide_unused_figures(self):
        """If true, the figure list hides figures that aren't referenced."""
        return self.prefs.get('hide_unused_figures', True)

    @hide_unused_figures.setter
    def hide_unused_figures(self, value):
        self.prefs['hide_unused_figures'] = value
        self.save_prefs()

    def update_dynamic(self, modifier):
        """Sync *modifier*'s dynamic flag in asset_config with the UI state.

        Returns True if the stored flag changed, False if it was already correct.
        """
        dynamic_check_state = self.modifier_url_dynamic_state(modifier.asset_url)
        dynamic_checked = dynamic_check_state != Qt.Qt.Unchecked

        modifier_check_state = self.modifier_url_checked_state(modifier.asset_url)
        enabled_checked = modifier_check_state != Qt.Qt.Unchecked

        # Configure a modifier as dynamic if it's both configured as dynamic and enabled.
        dynamic = dynamic_checked and enabled_checked
        if self.asset_config.get_dynamic(modifier) == dynamic:
            return False
        self.asset_config.set_dynamic(modifier, dynamic)
        return True

    def update_modifier_info(self):
        # Set the dynamic state of all modifiers to what they're set to in the UI. This marks
        # modifiers as dynamic that were partially checked because they had to be dynamic.
        for modifier in self.asset_cache_resolver.all_modifiers.itervalues():
            self.update_dynamic(modifier)

        self.current_modifier_info = self.asset_cache_resolver.get_modifier_info(self.asset_config)

    def modifier_url_checked_state(self, modifier_url, stack=None):
        """
        If a modifier is enabled by the user, return Qt.Qt.Checked.
        If a modifier is enabled because another modifier depends on it, return Qt.PartiallyChecked.
        If a modifier is disabled, return Qt.Qt.Unchecked.
        """
        if stack is None:
            stack = set()

        enabled_by_user = modifier_url in self.active_modifiers
        if enabled_by_user:
            return Qt.Qt.Checked

        if modifier_url not in stack and self.enforce_requirements:
            try:
                # Make sure we don't recurse endlessly if there are circular dependencies.
                stack.add(modifier_url)

                # See if any modifiers which depend on this modifier are active.
                required_by_urls = self.asset_cache_resolver.modifiers_required_by.get(modifier_url, set())
                for required_by_url in required_by_urls:
                    if self.modifier_url_checked_state(required_by_url, stack=stack) != Qt.Qt.Unchecked:
                        return Qt.Qt.PartiallyChecked
            finally:
                stack.remove(modifier_url)

        return Qt.Qt.Unchecked

    def modifier_url_dynamic_state(self, modifier_url):
        """
        If a modifier is dynamic because the user enabled it, return Qt.Qt.Checked.
        If a modifier is dynamic because it would have no effect if it was static, return Qt.PartiallyChecked.
        If a modifier is static, return Qt.Qt.Unchecked.
        """
        enabled_by_user = modifier_url in self.dynamic_modifiers
        if enabled_by_user:
            return Qt.Qt.Checked

        if self.current_modifier_info is not None:
            if modifier_url in self.current_modifier_info.available_for_dynamic or modifier_url in self.current_modifier_info.unused_modifiers:
                return Qt.Qt.PartiallyChecked

        return Qt.Qt.Unchecked
def coalesce_messages(target, handler):
    """
    If target is called more than once per update, collect the calls and call handler
    with all of the targets.

    *target* is a signal; *handler* receives the list of distinct items signalled
    since the last event-loop turn.  Duplicate items (by identity) are coalesced.
    """
    pending_updates = []
    pending_update_ids = set()

    def proxy(item):
        # Schedule exactly one flush per batch: only when the queue transitions
        # from empty to non-empty.
        if not pending_updates:
            def process_updates():
                # Snapshot and clear the queue before calling the handler, so
                # re-entrant signals start a fresh batch.
                updates = list(pending_updates)
                pending_updates[:] = []
                pending_update_ids.clear()
                try:
                    handler(updates)
                except Exception:
                    # Exceptions out of here can hard lock the UI, so catch them and log them.
                    log.error('%s', traceback.format_exc())
            Qt.QTimer.singleShot(0, process_updates)

        # Track identity, not equality: QStandardItem is unhashable.
        if id(item) in pending_update_ids:
            return
        pending_update_ids.add(id(item))
        pending_updates.append(item)

    target.connect(proxy)
class Filter(Qt.QSortFilterProxyModel):
"""
This handles a few things:
- We receive a list of items to always filter out. This hides items that aren't currently
available in the view. This allows us to update the displayed list without rebuilding it.
- Checking an item with children checks all of its visible children. Hidden children won't
be changed, so you can enter a search filter, then check the parent to check only the visible
children.
- A parent with only some visible children checked will be partially checked. Hidden entries
don't affect this. An item with one child checked can be partially checked, then change to
fully checked if the filter changes and only displays the one child.
"""
def __init__(self):
    super(Filter, self).__init__()
    # Modifier URLs currently eligible for display; None means "show all".
    self.modifier_urls_to_display = None
    # Current substring filter entered by the user, or None for no filter.
    self.filterString = None

    # We need to know if there are visible children of a group to figure out if the group
    # is visible, but filterAcceptsRow won't be called in the right order. Cache the results,
    # so we don't evaluate nodes repeatedly.
    self.filter_cache = {}
    self.last_seen_checked = {}
def setSourceModel(self, source):
    # We don't expect to be set twice.
    assert self.sourceModel() is None
    super(Filter, self).setSourceModel(source)
    # Batch itemChanged notifications so _changed sees each item once per
    # event-loop turn.
    coalesce_messages(self.sourceModel().itemChanged, self._changed)
def invalidateFilter(self):
    # Drop the memoized filterAcceptsRow results before re-filtering.
    self.filter_cache = {}
    return Qt.QSortFilterProxyModel.invalidateFilter(self)
def setFilterFixedString(self, s, *args, **kwargs):
    # A new search string invalidates all cached filtering decisions.
    self.filter_cache = {}
    # Remember the string; _filterAcceptsRow matches modifiers against it.
    self.filterString = s
    return Qt.QSortFilterProxyModel.setFilterFixedString(self, s, *args, **kwargs)
def _filterAcceptsRow(self, sourceRow, sourceParent):
    # Uncached filtering decision; called only through the caching wrapper
    # filterAcceptsRow() below.
    src_index = self.sourceModel().index(sourceRow, 0, sourceParent)
    item = self.sourceModel().itemFromIndex(src_index)
    if getattr(item, 'is_group', False):
        # Show groups if there are any visible children.
        num_children = self.sourceModel().rowCount(src_index)
        has_any_visible_children = False
        for row in xrange(num_children):
            child = src_index.child(row, 0)
            child_row = child.row()
            # Recurse through the cached entry point so child results are memoized.
            if self.filterAcceptsRow(child_row, src_index):
                has_any_visible_children = True
                break
        if not has_any_visible_children:
            return False
        return True

    modifier = getattr(item, 'modifier', None)
    if modifier is not None:
        # Hide modifiers that are explicitly hidden via self.modifier_urls_to_display. These
        # are modifiers that aren't available with the current configuration, but which might
        # become available if dynamic flags are changed.
        if self.modifier_urls_to_display is not None and modifier.asset_url not in self.modifier_urls_to_display:
            return False

        if self.filterString is not None:
            return modifier.substring_matches(self.filterString)

    # Neither a group nor a modifier row: fall back to the default behavior.
    return super(Filter, self).filterAcceptsRow(sourceRow, sourceParent)
def filterAcceptsRow(self, sourceRow, sourceParent):
    """Memoizing wrapper around _filterAcceptsRow (see filter_cache)."""
    cache = self.filter_cache
    cache_key = (sourceRow, sourceParent)
    if cache_key in cache:
        return cache[cache_key]
    accepted = self._filterAcceptsRow(sourceRow, sourceParent)
    cache[cache_key] = accepted
    return accepted
def _changed(self, items):
    # This is received when a source item changes. If an item changes, all of its parent items
    # may change, so signal them too.
    parents = {}
    for item in items:
        parent = item.parent()
        while parent is not None:
            # Only tristate parents derive their displayed check state from
            # their children, so only they need a change notification.
            if not (parent.flags() & Qt.Qt.ItemIsTristate):
                break
            # Key by id(): QStandardItem is unhashable.
            parents[id(parent)] = parent
            parent = parent.parent()

    for parent in parents.itervalues():
        # XXX: What's the third parameter that QT5 added?
        if MGlobal.apiVersion() >= 201700:
            self.dataChanged.emit(parent, parent, [])
        else:
            self.dataChanged.emit(parent, parent)
def setData(self, index, value, role):
    result = Qt.QSortFilterProxyModel.setData(self, index, value, role)
    if role == Qt.Qt.CheckStateRole:
        # If an entry is checked or unchecked and it has children, propagate that state to its children.
        # Note that we're working on the proxied indexes, so we'll only change children who are visible
        # and not filtered out.
        for row in xrange(self.rowCount(index)):
            child = index.child(row, 0)
            # Calls the base implementation past QSortFilterProxyModel in the
            # MRO, bypassing this override so children don't re-propagate
            # recursively -- NOTE(review): confirm that's the intent.
            super(Qt.QSortFilterProxyModel, self).setData(child, value, Qt.Qt.CheckStateRole)
    return result
def _show_partially_checked(self, index):
    """Return the tristate check state derived from *index*'s visible
    children, or None if the item should just display its own stored state."""
    if not (self.flags(index) & Qt.Qt.ItemIsTristate):
        return None

    # If we're a leaf, we just use our own state.
    if self.rowCount(index) == 0:
        return None

    any_child_active = False
    all_checked = True
    for row in xrange(self.rowCount(index)):
        # Partially checked children mean different things depending on if they're leaves or
        # not. If a child has children of its own, it works like us: partially checked means
        # something inside it is checked, so we should show ourselves as partially checked
        # too. If a child is a leaf and it's partially checked, it actually means it's
        # checked due to a dependency on another item, so we treat it as fully checked.
        child = index.child(row, 0)
        state = self.data(child, Qt.Qt.CheckStateRole)
        has_children = self.rowCount(child) > 0
        if not has_children and state == Qt.Qt.PartiallyChecked:
            state = Qt.Qt.Checked

        # Bug fix: a partially checked *group* child previously counted only
        # towards "not all checked", so a parent whose only activity was inside
        # such a group displayed as Unchecked instead of PartiallyChecked.
        if state != Qt.Qt.Unchecked:
            any_child_active = True
        if state != Qt.Qt.Checked:
            all_checked = False

    if all_checked:
        return Qt.Qt.Checked
    if any_child_active:
        return Qt.Qt.PartiallyChecked
    return Qt.Qt.Unchecked
def data(self, index, role):
    """Report data for *index*, overriding CheckStateRole with the aggregated tristate value."""
    if role == Qt.Qt.CheckStateRole:
        aggregated = self._show_partially_checked(index)
        if aggregated is not None:
            return aggregated
    return Qt.QSortFilterProxyModel.data(self, index, role)
def lessThan(self, index1, index2):
def data(item, col):
return item.sibling(item.row(), col).data(Qt.Qt.DisplayRole)
# Sort by group first, name second. Add an arbitrary tiebreaker last: for some reason
# this isn't a stable sort, so if we have ties items will shift around as they're changed.
data1 = [data(index1, col) for col in (2, 0)]
data1.append(index1.sibling(index1.row(), 0).data(UserSortRole))
data2 = [data(index2, | |
res_2[mask], res_1[mask]
steps_taken = 0
success = np.all(np.abs(phi_1 - phi_2) < SOLVE_TOL)
while steps_taken < 50 and not success:
outputs['phi'][:] = 0.5 * (phi_1 + phi_2)
self.apply_nonlinear(inputs, outputs, residuals)
new_res = residuals['phi']
mask_1 = new_res < 0
mask_2 = new_res > 0
phi_1[mask_1] = outputs['phi'][mask_1]
res_1[mask_1] = new_res[mask_1]
phi_2[mask_2] = outputs['phi'][mask_2]
res_2[mask_2] = new_res[mask_2]
steps_taken += 1
success = np.all(np.abs(phi_1 - phi_2) < SOLVE_TOL)
# only need to do this to get into the ballpark
if DEBUG_PRINT:
res_norm = np.linalg.norm(new_res)
print(f"{steps_taken} solve_nonlinear res_norm: {res_norm}")
# Fix up the other outputs.
out_names = ('Np', 'Tp', 'a', 'ap', 'u', 'v', 'alpha', 'W', 'cl', 'cd', 'cn', 'ct', 'F', 'G')
for name in out_names:
if np.all(np.logical_not(np.isnan(residuals[name]))):
outputs[name] += residuals[name]
# Fix up the other residuals.
self.apply_nonlinear(inputs, outputs, residuals)
if not success:
raise om.AnalysisError(
"CCBlade _solve_nonlinear_bracketing failed")
def _solve_nonlinear_brent(self, inputs, outputs):
    """Drive the phi residual to zero with brentv, then update the remaining outputs."""
    tol = 1e-10
    debug = self.options['debug_print']
    # Bracket the phi residual at every node/radial location.
    bracket_ok, phi_lo, phi_hi = self._first_bracket(
        inputs, outputs, self._residuals)
    if not np.all(bracket_ok):
        # print(f"bracket_found =\n{bracket_ok}")
        raise om.AnalysisError("CCBlade bracketing failed")

    def residual_of(x):
        # brentv-compatible wrapper: evaluate the phi residual at phi = x.
        outputs['phi'][:, :] = x
        self.apply_nonlinear(inputs, outputs, self._residuals)
        return np.copy(self._residuals['phi'])

    # Find the root.
    phi, n_steps, converged = brentv(residual_of, phi_lo, phi_hi, tolerance=tol)
    # Nudge the other explicit outputs by their residuals where those are finite.
    for name in ('Np', 'Tp', 'a', 'ap', 'u', 'v', 'alpha', 'W',
                 'cl', 'cd', 'cn', 'ct', 'F', 'G'):
        if np.all(np.logical_not(np.isnan(self._residuals[name]))):
            outputs[name] += self._residuals[name]
    # Re-evaluate so the stored residuals are consistent with the new outputs.
    self.apply_nonlinear(inputs, outputs, self._residuals)
    if debug:
        res_norm = np.linalg.norm(self._residuals['phi'])
        print(f"CCBlade brentv steps taken: {n_steps}, residual norm = {res_norm}")
    if not converged:
        raise om.AnalysisError(
            "CCBlade _solve_nonlinear_brent failed")
def _first_bracket(self, inputs, outputs, residuals):
    """Locate a sign-change bracket of the phi residual for every node/radial point.

    Returns (success mask, lower bracket, upper bracket), each shaped
    (num_nodes, num_radial).
    """
    num_nodes = self.options['num_nodes']
    num_radial = self.options['num_radial']
    # Number of discretization points used when scanning each quadrant.
    npts = 20
    Vx = inputs['Vx']
    Vy = inputs['Vy']
    # Candidate phi quadrants; epsilon keeps the scan off the endpoints.
    eps = 1e-6
    quad1 = [eps, np.pi/2]
    quad2 = [-np.pi/2, -eps]
    quad3 = [np.pi/2, np.pi - eps]
    quad4 = [-np.pi + eps, -np.pi/2]
    # ---- determine quadrants based on case -----
    # Vx_is_zero = np.isclose(Vx, 0.0, atol=1e-6)
    # Vy_is_zero = np.isclose(Vy, 0.0, atol=1e-6)
    # I'll just be lame for now and use loops.
    phi_lo = np.zeros((num_nodes, num_radial))
    phi_hi = np.zeros((num_nodes, num_radial))
    found = np.tile(False, (num_nodes, num_radial))
    for i in range(num_nodes):
        for j in range(num_radial):
            # Pick the quadrant search order from the local inflow signs; in
            # most cases the root lives in the first quadrant searched.
            if Vx[i, j] > 0 and Vy[i, j] > 0:
                search_order = (quad1, quad2, quad3, quad4)
            elif Vx[i, j] < 0 and Vy[i, j] > 0:
                search_order = (quad2, quad1, quad4, quad3)
            elif Vx[i, j] > 0 and Vy[i, j] < 0:
                search_order = (quad3, quad4, quad1, quad2)
            else:
                # Vx[i, j] < 0 and Vy[i, j] < 0 (zero velocities also land here)
                search_order = (quad4, quad3, quad2, quad1)
            for phimin, phimax in search_order:
                # Quadrants 2 and 4 are scanned from the high end downward.
                backwards = (np.isclose(phimin, -np.pi/2)
                             or np.isclose(phimax, -np.pi/2))
                # find bracket
                ok, lo, hi = self._first_bracket_search(
                    inputs, outputs, residuals, i, j,
                    phimin, phimax, npts, backwards)
                found[i, j], phi_lo[i, j], phi_hi[i, j] = ok, lo, hi
                # once a bracket is found, stop searching this point.
                if ok:
                    break
    return found, phi_lo, phi_hi
def _first_bracket_search(self, inputs, outputs, residuals,
                          i, j,
                          xmin, xmax, n, backwardsearch):
    """Scan [xmin, xmax] with n samples for a sign change of the phi residual
    at node/radial point (i, j).

    Returns (found, lower, upper); (False, 0.0, 0.0) when no sign change occurs.
    """
    samples = np.linspace(xmin, xmax, n)
    if backwardsearch:
        # sweep from xmax down to xmin
        samples = samples[::-1]
    outputs['phi'][i, j] = samples[0]
    self.apply_nonlinear(inputs, outputs, residuals)
    f_left = residuals['phi'][i, j]
    for k in range(1, n):
        outputs['phi'][i, j] = samples[k]
        self.apply_nonlinear(inputs, outputs, residuals)
        f_right = residuals['phi'][i, j]
        if f_left * f_right < 0:
            # Report the bracket in ascending order regardless of sweep direction.
            if backwardsearch:
                return True, samples[k], samples[k - 1]
            return True, samples[k - 1], samples[k]
        f_left = f_right
    return False, 0.0, 0.0
class FunctionalsComp(om.ExplicitComponent):
def initialize(self):
    """Declare the options accepted by this component."""
    for option_name in ('num_nodes', 'num_radial', 'num_blades'):
        self.options.declare(option_name, types=int)
    self.options.declare('dynamic_coloring', types=bool, default=False)
def setup(self):
    """Declare the component's inputs, outputs and partial-derivative structure."""
    num_nodes = self.options['num_nodes']
    num_radial = self.options['num_radial']
    # Per-node scalar boundary radii plus the radially distributed quantities.
    self.add_input('hub_radius', shape=(num_nodes, 1), units='m')
    self.add_input('prop_radius', shape=(num_nodes, 1), units='m')
    self.add_input('radii', shape=(num_nodes, num_radial), units='m')
    self.add_input('Np', shape=(num_nodes, num_radial), units='N/m')
    self.add_input('Tp', shape=(num_nodes, num_radial), units='N/m')
    self.add_input('omega', shape=num_nodes, units='rad/s')
    self.add_input('v', shape=num_nodes, units='m/s')
    self.add_output('thrust', shape=num_nodes, units='N')
    self.add_output('torque', shape=num_nodes, units='N*m')
    self.add_output('power', shape=num_nodes, units='W')
    self.add_output('efficiency', shape=num_nodes, val=10.)
    if self.options['dynamic_coloring']:
        self.declare_partials('*', '*', method='fd')
        # turn on dynamic partial coloring
        self.declare_coloring(wrt='*', method='cs', perturb_size=1e-5,
                              num_full_jacs=2, tol=1e-20, orders=20,
                              show_summary=True, show_sparsity=False)
        return
    ss_sizes = {'i': num_nodes, 'j': num_radial}
    # Partials with respect to the radially distributed inputs.
    rows, cols = get_rows_cols(ss_sizes=ss_sizes, of_ss='i', wrt_ss='ij')
    for of_name, wrt_name in (('thrust', 'Np'), ('thrust', 'radii'),
                              ('torque', 'Tp'), ('torque', 'radii'),
                              ('power', 'Tp'), ('power', 'radii'),
                              ('efficiency', 'Np'), ('efficiency', 'Tp'),
                              ('efficiency', 'radii')):
        self.declare_partials(of_name, wrt_name, rows=rows, cols=cols)
    # Partials with respect to the per-node scalar inputs.
    rows, cols = get_rows_cols(ss_sizes=ss_sizes, of_ss='i', wrt_ss='i')
    for of_name, wrt_name in (('thrust', 'hub_radius'), ('thrust', 'prop_radius'),
                              ('torque', 'hub_radius'), ('torque', 'prop_radius'),
                              ('power', 'hub_radius'), ('power', 'prop_radius'),
                              ('power', 'omega'),
                              ('efficiency', 'hub_radius'),
                              ('efficiency', 'prop_radius'),
                              ('efficiency', 'omega'), ('efficiency', 'v')):
        self.declare_partials(of_name, wrt_name, rows=rows, cols=cols)
def compute(self, inputs, outputs):
    """Integrate the blade loading distributions into thrust, torque, power
    and propulsive efficiency (trapezoidal rule over the padded radius grid)."""
    num_nodes = self.options['num_nodes']
    num_radial = self.options['num_radial']
    num_blades = self.options['num_blades']
    v = inputs['v']
    omega = inputs['omega']
    dtype = omega.dtype

    def padded(name, left, right):
        # Place the radial distribution between its hub/tip boundary values.
        full = np.empty((num_nodes, num_radial + 2), dtype=dtype)
        full[:, 0] = left
        full[:, 1:-1] = inputs[name]
        full[:, -1] = right
        return full

    r = padded('radii', inputs['hub_radius'][:, 0], inputs['prop_radius'][:, 0])
    # Normal loading is taken as zero at both hub and tip.
    normal = padded('Np', 0.0, 0.0)
    thrust = outputs['thrust'][:] = num_blades*np.sum(
        (r[:, 1:] - r[:, :-1])*0.5*(normal[:, :-1] + normal[:, 1:]), axis=1)
    tangential = padded('Tp', 0.0, 0.0)
    # The torque integrand is r * Tp.
    tangential *= r
    torque = outputs['torque'][:] = num_blades*np.sum(
        (r[:, 1:] - r[:, :-1])*0.5*(tangential[:, :-1] + tangential[:, 1:]), axis=1)
    outputs['power'][:] = torque*omega
    outputs['efficiency'][:] = (thrust*v)/outputs['power']
def compute_partials(self, inputs, partials):
num_nodes = self.options['num_nodes']
num_radial = self.options['num_radial']
B = self.options['num_blades']
v = inputs['v']
omega = inputs['omega']
dtype = omega.dtype
radii = np.empty((num_nodes, num_radial+2), dtype=dtype)
radii[:, 0] = inputs['hub_radius'][:, 0]
radii[:, 1:-1] = inputs['radii']
radii[:, -1] = inputs['prop_radius'][:, 0]
Np = np.empty((num_nodes, num_radial+2), dtype=dtype)
Np[:, 0] = 0.0
Np[:, 1:-1] = inputs['Np']
Np[:, -1] = 0.0
thrust = B*np.sum((radii[:, 1:] - radii[:, :-1])*0.5*(Np[:, :-1] + Np[:, 1:]), axis=1)
dthrust_dNp = partials['thrust', 'Np']
dthrust_dNp.shape = (num_nodes, num_radial)
dthrust_dNp[:, :] = B*(radii[:, 1:-1] - radii[:, :-2])*0.5 + B*(radii[:, 2:] - radii[:, 1:-1])*0.5
dthrust_dradii = partials['thrust', 'radii']
dthrust_dradii.shape = (num_nodes, num_radial)
dthrust_dradii[:, :] = B*0.5*(Np[:, :-2] + Np[:, 1:-1]) - B*0.5*(Np[:, 1:-1] + Np[:, 2:])
dthrust_dhub_radius = partials['thrust', 'hub_radius']
dthrust_dhub_radius.shape = (num_nodes,)
dthrust_dhub_radius[:] = B*(-0.5)*(Np[:, 0] + Np[:, 1])
dthrust_dprop_radius = partials['thrust', 'prop_radius']
dthrust_dprop_radius.shape = (num_nodes,)
dthrust_dprop_radius[:] = B*0.5*(Np[:, -2] + Np[:, -1])
Tp = np.empty((num_nodes, num_radial+2), dtype=dtype)
Tp[:, 0] = 0.0
Tp[:, 1:-1] = inputs['Tp']
Tp[:, -1] = 0.0
torque = B*np.sum((radii[:, 1:] - radii[:, :-1])*0.5*(Tp[:, :-1]*radii[:, :-1] + Tp[:, 1:]*radii[:, 1:]), axis=1)
dtorque_dTp = partials['torque', 'Tp']
dtorque_dTp.shape = (num_nodes, num_radial)
dtorque_dTp[:, :] = B*(radii[:, 1:-1] - radii[:, :-2])*0.5*radii[:, 1:-1] + B*(radii[:, 2:] - radii[:, 1:-1])*0.5*radii[:, 1:-1]
dtorque_dradii = partials['torque', 'radii']
dtorque_dradii.shape = (num_nodes, num_radial)
dtorque_dradii[:, :] = B*(0.5)*(Tp[:, :-2]*radii[:, :-2] + Tp[:, 1:-1]*radii[:, 1:-1])
dtorque_dradii[:, :] += B*(-0.5)*(Tp[:, 1:-1]*radii[:, 1:-1] + Tp[:, 2:]*radii[:, 2:])
dtorque_dradii[:, :] += B*(radii[:, 2:] - radii[:, 1:-1])*0.5*(Tp[:, 1:-1])
dtorque_dradii[:, :] += B*(radii[:, 1:-1] - radii[:, :-2])*0.5*(Tp[:, 1:-1])
dtorque_dhub_radius = partials['torque', 'hub_radius']
dtorque_dhub_radius.shape = (num_nodes,)
dtorque_dhub_radius[:] = B*(-0.5)*(Tp[:, 0]*radii[:, 0] + Tp[:, 1]*radii[:, 1])
dtorque_dhub_radius[:] += B*(radii[:, 1] - radii[:, 0])*0.5*(Tp[:, 0])
dtorque_dprop_radius = partials['torque', 'prop_radius']
dtorque_dprop_radius.shape = (num_nodes,)
dtorque_dprop_radius[:] = B*(0.5)*(Tp[:, -2]*radii[:, -2] + Tp[:, -1]*radii[:, -1])
dtorque_dprop_radius[:] += B*(radii[:, -1] - radii[:, -2])*0.5*(Tp[:, -1])
power = torque*omega
dpower_dTp = partials['power', 'Tp']
dpower_dTp.shape = (num_nodes, num_radial)
dpower_dTp[:, :] = dtorque_dTp*omega[:, np.newaxis]
dpower_dradii = partials['power', 'radii']
dpower_dradii.shape = (num_nodes, num_radial)
dpower_dradii[:, :] = dtorque_dradii*omega[:, np.newaxis]
dpower_dhub_radius = partials['power', 'hub_radius']
dpower_dhub_radius.shape = (num_nodes,)
dpower_dhub_radius[:] = dtorque_dhub_radius*omega
dpower_dprop_radius = partials['power', 'prop_radius']
dpower_dprop_radius.shape = (num_nodes,)
dpower_dprop_radius[:] = dtorque_dprop_radius*omega
dpower_domega = partials['power', 'omega']
dpower_domega.shape = (num_nodes,)
dpower_domega[:] = torque
# efficiency = (thrust*v)/power
defficiency_dNp = partials['efficiency', 'Np']
defficiency_dNp.shape = (num_nodes, num_radial)
defficiency_dNp[:, :] = dthrust_dNp*v[:, np.newaxis]/power[:, np.newaxis]
defficiency_dTp = partials['efficiency', 'Tp']
defficiency_dTp.shape = (num_nodes, num_radial)
defficiency_dTp[:, :] = -(thrust[:, np.newaxis]*v[:, np.newaxis])/(power[:, np.newaxis]*power[:, np.newaxis])*dpower_dTp
defficiency_dradii = partials['efficiency', 'radii']
defficiency_dradii.shape = (num_nodes, num_radial)
defficiency_dradii[:, :] = (power[:, np.newaxis]) * (dthrust_dradii*v[:, np.newaxis])
defficiency_dradii[:, :] -= | |
<filename>scripts/ccpp_prebuild.py
#!/usr/bin/env python
# Standard modules
import argparse
import collections
import logging
import os
import sys
# DH* TODO
# CONSISTENCY CHECK BETWEEN OPTIONAL ARGUMENTS IN THE METADATA TABLE AND IN
# THE ACTUAL ARGUMENT LIST / FORTRAN VARIABLE DECLARATIONS (RANKS, TYPE, INTENT).
# *DH
# Local modules
from common import encode_container, execute
from metadata_parser import merge_metadata_dicts, parse_scheme_tables, parse_variable_tables
from mkcap import Cap, CapsMakefile, SchemesMakefile
from mkdoc import metadata_to_html, metadata_to_latex
###############################################################################
# User definitions                                                            #
###############################################################################
# List of configured host models
HOST_MODELS = ["FV3", "SCM"]
###############################################################################
# Set up the command line argument parser and other global variables          #
###############################################################################
# Module-level parser shared by parse_arguments(); --model is restricted to
# the host models configured above.
parser = argparse.ArgumentParser()
parser.add_argument('--model', action='store', choices=HOST_MODELS, help='host model (case-sensitive)', required=True)
parser.add_argument('--debug', action='store_true', help='enable debugging output', default=False)
# BASEDIR is the current directory where this script is executed
BASEDIR = os.getcwd()
def parse_arguments():
    """Parse command line arguments.

    :return: (success flag, host model name, debug flag)
    """
    args = parser.parse_args()
    # argparse exits on a parse error, so reaching this point is a success.
    return (True, args.model, args.debug)
def import_config(host_model):
    """Import the configuration file for a given host model.

    :return: (success flag, dict of configuration values)
    """
    # Import the host-model specific CCPP prebuild config
    cfg = __import__("ccpp_prebuild_config_{0}".format(host_model))
    config = {
        # Definitions in host-model dependent CCPP prebuild config script
        'variable_definition_files': cfg.VARIABLE_DEFINITION_FILES,
        'scheme_files': cfg.SCHEME_FILES,
        'schemes_makefile': cfg.SCHEMES_MAKEFILE,
        'target_files': cfg.TARGET_FILES,
        'caps_makefile': cfg.CAPS_MAKEFILE,
        'caps_dir': cfg.CAPS_DIR,
        'optional_arguments': cfg.OPTIONAL_ARGUMENTS,
        'module_include_file': cfg.MODULE_INCLUDE_FILE,
        'fields_include_file': cfg.FIELDS_INCLUDE_FILE,
        'html_vartable_file': cfg.HTML_VARTABLE_FILE,
        'latex_vartable_file': cfg.LATEX_VARTABLE_FILE,
        # Template code in host-model dependent CCPP prebuild config script
        'module_use_template_host_cap': cfg.MODULE_USE_TEMPLATE_HOST_CAP,
        'ccpp_data_structure': cfg.CCPP_DATA_STRUCTURE,
        'module_use_template_scheme_cap': cfg.MODULE_USE_TEMPLATE_SCHEME_CAP,
    }
    return (True, config)
def setup_logging(debug):
    """Configure the root logger: DEBUG level when *debug* is set, INFO otherwise.

    :return: True on success
    """
    # Fix: the original tested `debug` twice with duplicated branches; derive
    # the level once and report it via logging.getLevelName.
    level = logging.DEBUG if debug else logging.INFO
    logging.basicConfig(format='%(levelname)s: %(message)s', level=level)
    logging.info('Logging level set to {0}'.format(logging.getLevelName(level)))
    return True
def gather_variable_definitions(variable_definition_files):
    """Scan all Fortran source files with variable definitions on the host model side.

    :return: (success flag, merged metadata dict of host-provided variables)
    """
    logging.info('Parsing metadata tables for variables provided by host model ...')
    metadata_define = {}
    for definition_file in variable_definition_files:
        filedir, filename = os.path.split(definition_file)
        # Parse each file from inside its own directory.
        os.chdir(os.path.join(BASEDIR, filedir))
        metadata_define = merge_metadata_dicts(metadata_define,
                                               parse_variable_tables(filename))
    # Return to BASEDIR
    os.chdir(BASEDIR)
    return (True, metadata_define)
def collect_physics_subroutines(scheme_files):
    """Scan all Fortran source files in scheme_files for subroutines with argument tables.

    :return: (success flag, merged scheme metadata, dict of subroutine arguments)
    """
    logging.info('Parsing metadata tables in physics scheme files ...')
    success = True
    # Parse all scheme files
    metadata_request = {}
    arguments_request = {}
    for scheme_file in scheme_files:
        (scheme_filepath, scheme_filename) = os.path.split(os.path.abspath(scheme_file))
        # Parse from inside the scheme's directory, then return to BASEDIR so
        # that relative paths in scheme_files keep resolving on the next pass.
        os.chdir(scheme_filepath)
        (metadata, arguments) = parse_scheme_tables(scheme_filename)
        metadata_request = merge_metadata_dicts(metadata_request, metadata)
        arguments_request.update(arguments)
        os.chdir(BASEDIR)
    # Fix: the original repeated os.chdir(BASEDIR) after the loop even though
    # the last iteration already returned there; the duplicate is removed.
    return (success, metadata_request, arguments_request)
def check_optional_arguments(metadata, arguments, optional_arguments):
    """Check if for each subroutine with optional arguments, an entry exists in the
    optional_arguments dictionary. This is required to generate the caps correctly
    and to assess whether the variable is required from the host model. Optional
    arguments that are not requested by the model as specified in the dictionary
    optional_arguments are deleted from the list of requested data individually
    for each subroutine.

    :param metadata: dict mapping standard_name -> list of requested variable objects
    :param arguments: dict mapping module -> scheme -> subroutine -> list of argument names
    :param optional_arguments: dict mapping module -> subroutine -> list of names or 'all'
    :return: (success flag, filtered metadata, filtered arguments)
    """
    logging.info('Checking optional arguments in physics schemes ...')
    success = True
    for var_name in sorted(metadata.keys()):
        # The notation metadata[var_name][:] is a convenient way to make a copy
        # of the metadata[var_name] list, which allows removing items as we go
        for var in metadata[var_name][:]:
            if var.optional in ['t', 'T']:
                # var.container is a space-separated list of PREFIX_name tokens
                # identifying where the request originated (module/scheme/subroutine).
                for item in var.container.split(' '):
                    subitems = item.split('_')
                    if subitems[0] == 'MODULE':
                        module_name = '_'.join(subitems[1:])
                    elif subitems[0] == 'SCHEME':
                        scheme_name = '_'.join(subitems[1:])
                    elif subitems[0] == 'SUBROUTINE':
                        subroutine_name = '_'.join(subitems[1:])
                    else:
                        success = False
                        logging.error('Invalid identifier {0} in container value {1} of requested variable {2}'.format(
                            subitems[0], var.container, var_name))
                # NOTE(review): if the container was malformed above, module_name /
                # subroutine_name may be unbound (or stale from a previous iteration)
                # here -- confirm whether a 'continue' is intended on that error path.
                if not module_name in optional_arguments.keys() or not \
                    subroutine_name in optional_arguments[module_name].keys():
                    success = False
                    logging.error('No entry found in optional_arguments dictionary for optional argument ' + \
                        '{0} to subroutine {1} in module {2}'.format(var_name, subroutine_name, module_name))
                # NOTE(review): when the entry is missing, the lookup below raises
                # KeyError instead of returning the failure gracefully -- verify intent.
                if type(optional_arguments[module_name][subroutine_name]) is list:
                    if var_name in optional_arguments[module_name][subroutine_name]:
                        logging.debug('Optional argument {0} to subroutine {1} in module {2} is required, keep in list'.format(
                            var_name, subroutine_name, module_name))
                    else:
                        logging.debug('Optional argument {0} to subroutine {1} in module {2} is not required, remove from list'.format(
                            var_name, subroutine_name, module_name))
                        # Remove this var instance from list of var instances for this var_name
                        metadata[var_name].remove(var)
                        # Remove var_name from list of calling arguments for that subroutine
                        arguments[module_name][scheme_name][subroutine_name].remove(var_name)
                elif optional_arguments[module_name][subroutine_name] == 'all':
                    logging.debug('optional argument {0} to subroutine {1} in module {2} is required, keep in list'.format(
                        var_name, subroutine_name, module_name))
        # If metadata[var_name] is now empty, i.e. the variable is not
        # requested at all by the model, remove the entry from metadata
        if not metadata[var_name]:
            del metadata[var_name]
    return (success, metadata, arguments)
def compare_metadata(metadata_define, metadata_request):
    """Compare the requested metadata to the defined one. For each requested entry, a
    single (i.e. non-ambiguous entry) must be present in the defined entries. All optional
    arguments that are still in the list of required variables for a scheme are needed,
    since they were checked in the routine check_optional_arguments beforehand.

    :return: (success flag, sorted list of host-model modules to use, matched metadata)
    """
    logging.info('Comparing metadata for requested and provided variables ...')
    success = True
    modules = []
    metadata = {}
    for var_name in sorted(metadata_request.keys()):
        # Check that variable is provided by the model
        if not var_name in metadata_define.keys():
            requested_by = ' & '.join(var.container for var in metadata_request[var_name])
            success = False
            logging.error('Variable {0} requested by {1} not provided by the model'.format(var_name, requested_by))
            continue
        # Check that an unambiguous target exists for this variable
        if len(metadata_define[var_name]) > 1:
            success = False
            requested_by = ' & '.join(var.container for var in metadata_request[var_name])
            provided_by = ' & '.join(var.container for var in metadata_define[var_name])
            error_message = ' error, variable {0} requested by {1} cannot be identified unambiguously.'.format(var_name, requested_by) +\
                ' Multiple definitions in {0}'.format(provided_by)
            logging.error(error_message)
            continue
        # Check that the variable properties are compatible between the model and the schemes
        if not metadata_request[var_name][0].compatible(metadata_define[var_name][0]):
            success = False
            error_message = ' incompatible entries in metadata for variable {0}:\n'.format(var_name) +\
                ' provided: {0}\n'.format(metadata_define[var_name][0].print_debug()) +\
                ' requested: {0}'.format(metadata_request[var_name][0].print_debug())
            logging.error(error_message)
            continue
        # Construct the actual target variable and list of modules to use from the information in 'container'
        var = metadata_define[var_name][0]
        target = ''
        for item in var.container.split(' '):
            subitems = item.split('_')
            if subitems[0] == 'MODULE':
                modules.append('_'.join(subitems[1:]))
            elif subitems[0] == 'TYPE':
                pass
            else:
                logging.error('Unknown identifier {0} in container value of defined variable {1}'.format(subitems[0], var_name))
        target += var.local_name
        # Copy the length kind from the variable definition to update len=* in the variable requests
        # NOTE(review): 'kind' is only bound when the defined variable is a character;
        # its later use assumes the compatible() check above guarantees matching
        # types between request and definition -- confirm.
        if var.type == 'character':
            kind = var.kind
        metadata[var_name] = metadata_request[var_name]
        # Set target and kind (if applicable)
        for var in metadata[var_name]:
            var.target = target
            # modules[-1] is the module appended for this variable's container above.
            logging.debug('Requested variable {0} in {1} matched to target {2} in module {3}'.format(var_name, var.container, target, modules[-1]))
            # Update len=* for character variables
            if var.type == 'character' and var.kind == 'len=*':
                logging.debug('Update kind information for requested variable {0} in {1} from {2} to {3}'.format(var_name, var.container, var.kind, kind))
                var.kind = kind
    # Remove duplicated from list of modules
    modules = sorted(list(set(modules)))
    return (success, modules, metadata)
def create_module_use_statements(modules, module_use_template_host_cap):
    """Append a Fortran 'use' statement for each module to the host-cap template.

    module_use_template_host_cap must include the required modules
    for error handling of the ccpp_field_add statments.

    :return: (success flag, generated module use statements string)
    """
    logging.info('Generating module use statements ...')
    module_use_statements = module_use_template_host_cap
    # Bug fix: the counter previously started at 1, so the log line reported
    # one more module than was actually generated.
    cnt = 0
    for module in modules:
        module_use_statements += 'use {0}\n'.format(module)
        cnt += 1
    logging.info('Generated module use statements for {0} module(s)'.format(cnt))
    return (True, module_use_statements)
def create_ccpp_field_add_statements(metadata, ccpp_data_structure):
    """Generate one ccpp_field_add statement per requested standard_name.

    The metadata container may contain multiple entries for the same variable
    standard_name, but for different "callers" (i.e. subroutines using it)
    with potentially different local_name. Only the first entry is emitted,
    since the target (the original variable defined by the model) is the same.
    """
    logging.info('Generating ccpp_field_add statements ...')
    statements = ''.join(
        metadata[var_name][0].print_add(ccpp_data_structure)
        for var_name in sorted(metadata.keys()))
    logging.info('Generated ccpp_field_add statements for {0} variable(s)'.format(len(metadata)))
    return (True, statements)
def generate_include_files(module_use_statements, ccpp_field_add_statements,
target_files, module_include_file, fields_include_file):
logging.info('Generating include files for host model cap {0} ...'.format(', '.join(target_files)))
success = True
target_dirs = []
for target_file in target_files:
target_dirs.append(os.path.split(target_file)[0])
target_dirs = sorted(list(set(target_dirs)))
for target_dir in target_dirs:
# module use statements
includefile = os.path.join(target_dir, module_include_file)
logging.info('Generated module-use include file {0}'.format(includefile))
with open(includefile, "w") as f:
f.write(module_use_statements)
# ccpp_field_add statements
includefile = os.path.join(target_dir, fields_include_file)
logging.info('Generated fields-add | |
<gh_stars>1-10
#!/usr/bin/python
# ================
# databaseTools.py
# ================
#
# See README.md
#
# ===============
# Based on RadioDBTools.py by <NAME>.
# ===============
#
# This code is heavily based upon an open source code developed by <NAME>:
#
# --------------------------------------------------------------------------
# Name: RadioDBTools.py
# Purpose: To allow easy interfacing (uploading, downloading)
# with a CouchDB instance.
#
# Creator: <NAME>
# Email: <EMAIL>
#
# Created: 27/06/2013
# Copyright: (c) <NAME> 2013
# Licence: GNU General Public License
# Version: 1.0
# --------------------------------------------------------------------------
import sys
import os
import getpass
import re
import requests
import json
import codecs
# Script name used in all usage/help messages below.
name = "RadioDBTools.py"
# NOTE(review): presumably the MADF format version; not referenced anywhere in
# the code visible here -- confirm its consumer before changing or removing.
MADFversion = "2.01"
def help():
    """Print the usage menu for this script."""
    # Bug fix: the original concatenations lacked spaces around `name`
    # (producing e.g. "pythonRadioDBTools.py-u"), misspelled "Usage" as
    # "Useage", and broke ".Extension" as ". Extension".
    print("< help > subroutine called:"
          "\nWelcome to " + name +
          "\nUsage: python " + name + " [-u|-d|-h]"
          "\n\nOptions:\n"
          "-u : Uploads .json files to a couchdb instance of your choice.\n"
          "     Usage: python " + name + " -u [URL] [.Extension | File(s) to Upload].\n"
          "     Optional parameter URL accepts full URL's and automatically\n"
          "     uploads all documents in current directory.\n"
          "     Optional parameter .Extension tells program to ignore\n"
          "     all files without a certain extension, i.e. .json files.\n"
          "     (Note: The \".\" is a required character.)\n"
          "     Optional parameter [File(s) to Upload], allows you to give " + name + "\n"
          "     a list of files to upload.\n"
          "-d : Downloads .json files from a couchdb instance of\n"
          "     your choice.\n"
          "     Usage: python " + name + " -d [URL].\n"
          "     Optional parameter URL accepts full URL's and automatically\n"
          "     downloads all documents in specified database.\n"
          "-p : Prunes (Deletes) all files of type \"measurement\" from a\n"
          "     couchdb instance of your choice.\n"
          "     Usage: python " + name + " -p.\n\n"
          "URL Format: http[s]://[username:password@]CouchDBName.com[:port]/DBName\n"
          "ex.: http://localhost:5984/database1\n"
          "     https://ben:mypassword@radiopurity.org/database1")
def main():
    """Dispatch to the upload/download/prune subroutines based on sys.argv."""
    if len(sys.argv) < 2:
        help()
        print("\n*******\nERROR\nNo argument specified.\n"
              "Please try again.\n*******\n")
        return
    option = sys.argv[1]
    if option == "-h":
        help()
    elif option == "-u":
        # -u accepts any number of extra arguments (URL, extension, files).
        # Fix: the original re-tested len(sys.argv) > 1 here, which is always
        # true once sys.argv[1] has been read, leaving a dead error branch.
        upload_json()
    elif option == "-d":
        # -d takes at most one extra argument (the URL).
        if len(sys.argv) < 4:
            download_json()
        else:
            help()
            print("\n*******\nERROR:\n"
                  "Incorrect number of arguments.\n*******\n")
    elif option == "-p":
        # -p takes no extra arguments.
        if len(sys.argv) == 2:
            prune_db()
        else:
            help()
            print("\n*******\nERROR:\n"
                  "Incorrect number of arguments.\n*******\n")
    else:
        help()
        print("\n*******\nERROR:\nUnknown or incorrect argument. "
              "Could not parse <" + option + ">. \nPlease try again.\n*******\n")
def get_uuid(db):
    """Fetch a fresh document UUID from the CouchDB instance at *db*.

    :param db: base URL of the CouchDB server (no trailing slash)
    :return: a single UUID string from the /_uuids endpoint
    """
    # Fix: removed the redundant function-local `import requests` (already
    # imported at module level) and replaced the fragile fixed-offset string
    # slicing of the response body with proper JSON parsing of
    # {"uuids": ["..."]}.
    response = requests.get(db + "/_uuids")
    return response.json()["uuids"][0]
def upload_json():
    """Upload JSON documents to a CouchDB.

    Connection details come either from the command line (a URL with embedded
    credentials) or from interactive prompts. Each file is optionally
    validated with validateJSON before being PUT to the database under a
    server-generated UUID.
    """
    # Note: the docstring was previously placed after the try block below,
    # where it was a plain (non-docstring) expression statement.
    try:  # Fix Python 2.x.
        if sys.version_info >= (3, 0):
            modified_input = input
            print("Python 3.x Compatible!")
        else:
            modified_input = raw_input
    except NameError:
        pass
    validate = True
    try:
        import validateJSON
    except:
        print("validateJSON.py not found. Skipping Validation.")
        validate = False
    # Bug fix: restored from a redacted token (`pwd = <PASSWORD>()`); the
    # variable is used as a directory with os.listdir below, so the current
    # working directory is the intended value.
    pwd = os.getcwd()
    uploadListing = []
    filesSpecified = False
    # Any command-line argument naming a file in the current directory is an
    # explicit upload request.
    for i in sys.argv:
        if not i == sys.argv[0]:
            if i in os.listdir(pwd):
                filesSpecified = True
                uploadListing.append(i)
    command_line_override = False
    use_extension = False
    extension = ""
    if len(sys.argv) > 2 and sys.argv[2][0] != ".":
        try:
            # Pull username, password, server URL and database name out of a
            # URL of the form http[s]://user:pass@host[:port]/dbname.
            temp = re.findall("/.*?:", sys.argv[2])
            if len(temp) > 0:
                username = temp[0].replace("/", "").replace(":", "")
                print("Using user name : ", username)
            else:
                username = ""
                print("No user name found. Continuing without user name.")
            temp = []
            temp = re.findall(":[^/]*?@", sys.argv[2])
            if len(temp) > 0:
                password = temp[0].replace("@", "").replace(":", "")
                print("Using password : ", password)
            else:
                password = ""
                print("No password found. Continuing without password.")
            try:
                url = re.findall("@.*",
                                 sys.argv[2])[0].replace("@", "").replace("://", "")
            except:
                url = re.findall("://.*",
                                 sys.argv[2])[0].replace("@", "").replace("://", "")
            couchdb, temp, db = url.rpartition("/")
            print("Using CouchDB URL : ", couchdb)
            print("Using Database Name: ", db)
            if couchdb == "" or db == "":
                raise
            couchdb = "http://" + couchdb
            command_line_override = True
        except:
            print("\n\nFailed to find username/password/CouchDB "
                  "URL/Database Name.\n"
                  "OR Failed to recognize listed file(s) as valid.\n"
                  "Proceeding with prompt based input.\n\n")
    if not filesSpecified:
        # No explicit files given: select by extension and/or prompt per file.
        if len(sys.argv) > 2:
            if sys.argv[2][0] == ".":
                extension = sys.argv[2]
                use_extension = True
            elif len(sys.argv) > 3 and sys.argv[3][0] == ".":
                extension = sys.argv[3]
                use_extension = True
        msg = "Upload all files in\n" + pwd + "\n(y/N) ? "
        if command_line_override:
            uploadAll = "y"
        else:
            uploadAll = modified_input(msg).lower()
        dirListing = []
        if use_extension:
            for i in os.listdir(pwd):
                if extension in i:
                    dirListing.append(i)
        else:
            dirListing = os.listdir(pwd)
        if uploadAll == "y":
            uploadListing = dirListing
        else:
            print("\n")
            uploadListing = []
            for i in dirListing:
                msg = "Would you like to upload " + i + " (Y/N) ? "
                upload = modified_input(msg).lower()
                if upload == "y":
                    uploadListing.append(i)
    if uploadListing == []:
        print("No applicable files found. Aborting Upload.")
        exit()
    if filesSpecified:
        print("\nProceeding to upload " + repr(len(uploadListing)) +
              " files found on the command line.\n")
        print("Files about to be uploaded are:")
        for i in uploadListing:
            print("--> " + i)
        print()
    if not command_line_override:
        couchdb = modified_input("CouchDB URL (no username or password,"
                                 " enter for localhost) : ")
        if len(couchdb) < 3:
            couchdb = "http://localhost:5984"
        if couchdb.endswith('/'):
            couchdb = couchdb[:-1]
        db = modified_input("Database name : ")
        if db.endswith('/'):
            db = db[:-1]
        username = modified_input("User-name (if applicable) : ")
        # Bug fix: restored from a redacted token; download_json uses
        # getpass.getpass for the identical prompt.
        password = getpass.getpass(prompt="Password (if applicable, "
                                   "will not display) : ")
    successes = []
    failures = []
    invalid = []
    print("\n Document Name HTML Status Code (201 is Success)")
    for i in uploadListing:
        if validate:
            if validateJSON.is_valid_JSON(i):
                f = open(i, 'r')
                data = f.read()
                # Each document is stored under a fresh server-generated UUID.
                doc_url = couchdb + "/" + db + "/" + get_uuid(couchdb)
                r = requests.put(doc_url, data=data,
                                 auth=(username, password))
                print(' ' + i.ljust(31) + repr(r).rjust(26))
                if r.status_code == requests.codes.created:
                    successes.append(i)
                else:
                    failures.append(i)
            else:
                invalid.append(i)
        else:
            f = open(i, 'r')
            data = f.read()
            doc_url = couchdb + "/" + db + "/" + get_uuid(couchdb)
            r = requests.put(doc_url, data=data,
                             auth=(username, password))
            print(' ' + i.ljust(31) + repr(r).rjust(26))
            if r.status_code == requests.codes.created:
                successes.append(i)
            else:
                failures.append(i)
    print("\nSuccessful Uploads : " + repr(len(successes)))
    print("Failed Uploads : " + repr(len(failures)))
    print("Failed due to invalid JSON : " + repr(len(invalid)))
    if len(failures) > 0:
        print("\nThese files failed to upload:")
        for i in failures:
            print(" " + i)
    if len(invalid) > 0:
        print("\nThese files failed to upload due to invalid JSON :")
        for i in invalid:
            print(" " + i)
    print("")
def download_json():
    """Download every JSON document from a CouchDB database.

    Connection details (CouchDB URL, database name, credentials) are taken
    from ``sys.argv[2]`` when it is a full URL of the form
    ``http://user:password@host:port/dbname``; otherwise the user is
    prompted interactively.  Document ids are listed via the ``_all_docs``
    view and each document is written to ``<db>_<n>.json`` in the current
    directory.
    """
    command_line_override = False
    try:  # Fix Python 2.x: raw_input() became input() in Python 3.
        if sys.version_info >= (3, 0):
            modified_input = input
            print("Python 3.x Compatible!")
        else:
            modified_input = raw_input
    except NameError:
        pass
    if len(sys.argv) == 3:
        try:
            # Extract the user name from "scheme://user:password@host/db".
            temp = re.findall(r"/.*?:", sys.argv[2])
            if len(temp) > 0:
                username = temp[0].replace("/", "").replace(":", "")
                print("Using user name : ", username)
            else:
                username = ""
                print("No user name found. Continuing without user name.")
            temp = re.findall(r":[^/]*?@", sys.argv[2])
            if len(temp) > 0:
                password = temp[0].replace("@", "").replace(":", "")
                # Security: never echo the cleartext password to the terminal.
                print("Using password : ", "*" * len(password))
            else:
                password = ""
                print("No password found. Continuing without password.")
            try:
                url = re.findall(r"@.*",
                    sys.argv[2])[0].replace("@", "").replace("://", "")
            except IndexError:
                url = re.findall(r"://.*",
                    sys.argv[2])[0].replace("@", "").replace("://", "")
            couchdb, temp, db = url.rpartition("/")
            print("Using CouchDB URL : ", couchdb)
            print("Using Database Name: ", db)
            if couchdb == "" or db == "":
                raise ValueError("missing CouchDB URL or database name")
            couchdb = "http://" + couchdb
            command_line_override = True
        except Exception:
            print("\n\nFailed to find "
                "username/password/CouchDB URL/Database Name. "
                "Proceeding with prompt based input.\n\n")
    if not command_line_override:
        couchdb = modified_input("CouchDB URL (no username "
            "or password, enter for localhost) : ")
        if len(couchdb) < 3:
            couchdb = "http://localhost:5984"
        if couchdb.endswith('/'):
            couchdb = couchdb[:-1]
        db = modified_input("Database name : ")
        if db.endswith('/'):
            db = db[:-1]
        username = modified_input("User-name (if applicable) : ")
        password = getpass.getpass(
            prompt="Password (if applicable, will not display) : ")
    db_url = couchdb + "/" + db + "/"
    # BUG FIX: the _all_docs listing must be authenticated too, otherwise a
    # protected database returns an error body containing no document ids.
    all_docs = requests.get(db_url + "_all_docs", auth=(username, password))
    # Parse the response as JSON instead of scraping it with a regex.
    doc_ids = [row["id"] for row in all_docs.json().get("rows", [])]
    count = 0
    for doc_id in doc_ids:
        count += 1
        r = requests.get(db_url + doc_id, auth=(username, password))
        outFileName = db + "_" + repr(count) + ".json"
        out = codecs.open(outFileName, 'w', 'utf-8')
        out.write(json.dumps(r.json(), indent=2))
        out.close()
        if count % 10 == 0:
            print(" downloading ... " + repr(count).rjust(4) +
                " / " + repr(len(doc_ids)).rjust(4))
    print("\nNumber of Downloads Completed: " + repr(count) + '\n')
def prune_db():
"""Delete all documents of type 'measurement' from a CouchDB"""
try: # Fix Python 2.x.
if sys.version_info>=(3,0):
modified_input = input
print("Python 3.x Compatible!")
else:
modified_input=raw_input
except NameError:
pass
print("Warning: This | |
# gh_stars: 10-100
#!/usr/bin/env python
#-*- coding: utf-8 -*-
##---------------------------------------------------------------------------------------
# Main Screen
#
##---------------------------------------------------------------------------------------
from imports import *
import config
class MainScreen(Screen):
def __init__(self,**kwargs):
super (MainScreen, self).__init__(**kwargs)
##---------------------------------------------------------------------------------------
# general
##---------------------------------------------------------------------------------------
# set background; to have no background, delete or move the background images
#try:
# self.texture = Image(source='resources/bg_main/' + styles.curr_palette["name"].replace (" ", "_") + '_5.png').texture
# self.texture.wrap = 'repeat'
# self.texture.uvsize = (4, 4)
# with self.canvas:
# Rectangle(pos=(0,0), size=Window.size, texture=self.texture)
#except:
# pass
self.mainBox = BoxLayout(orientation="horizontal")
self.add_widget(self.mainBox)
# comment this out to return to default text input behavior, ie, enter while focused on main textinput does nothing
Window.bind(on_key_down=self.key_action)
#Button.background_down=""
##---------------------------------------------------------------------------------------
# SIDE PANEL - right horizontal stack
##---------------------------------------------------------------------------------------
self.leftAccordion = Accordion(orientation='horizontal', size_hint=(.6, 1), min_space = config.aiheight)
self.mainBox.add_widget(self.leftAccordion)
##---------------------------------------------------------------------------------------
# Center status across top
##---------------------------------------------------------------------------------------
self.centerBox = BoxLayout(orientation='vertical', padding=(10,10))
self.statusBox = BoxLayout(orientation='horizontal', size_hint=(1,.10), padding=(10,10))
self.trackBox = BoxLayout(orientation="horizontal", size_hint=(.25,1))
self.trackDownButton = Button(text="-", size_hint=(.3,1))
self.trackDownButton.bind(on_press=self.pressGenericButton)
self.trackDownButton.bind(on_release=self.releaseTrackerDown)
self.trackBox.add_widget(self.trackDownButton)
self.trackLabel = Label(text="0", size_hint=(.3,1))
self.trackBox.add_widget(self.trackLabel)
self.trackUpButton = Button(text="+", size_hint=(.3,1))
self.trackUpButton.bind(on_press=self.pressGenericButton)
self.trackUpButton.bind(on_release=self.releaseTrackerUp)
self.trackBox.add_widget(self.trackUpButton)
self.statusBox.add_widget(self.trackBox)
self.bookmarkBox = BoxLayout(orientation="horizontal", size_hint=(.75,1))
for i in range(0,5):
btn = ToggleButton(text="-", group='bookmarks', font_size=config.basefont, size_hint=(1,1), background_color=neutral, font_name='maintextfont', allow_no_selection=True)
btn.bind(on_press=self.toggledBookmark)
btn.value = i
btn.index = -9
self.bookmarkBox.add_widget(btn)
self.clearBookmarkButton = ToggleButton(text="Clear", group='clear', font_size=config.basefont90, size_hint=(1,1), background_color=neutral, font_name='maintextfont', allow_no_selection=True)
self.bookmarkBox.add_widget(self.clearBookmarkButton)
self.clearBookmarkButton.bind(on_press=self.pressGenericButton)
self.statusBox.add_widget(self.bookmarkBox)
self.mechanicsButton = Button(text="PROSE", font_size=config.basefont90, size_hint=(.15,1), background_color=neutral, font_name='maintextfont')
self.statusBox.add_widget(self.mechanicsButton)
self.mechanicsButton.bind(on_press=self.pressGenericButton)
self.mechanicsButton.bind(on_release=hideMechanicsBlocks)
self.mechanicsButton.self = self
try:
enterBehaviorList = config.formats['status_tags']
except:
enterBehaviorList = ['plain', 'aside', 'oracle', 'result', 'query', 'mechanic1', 'mechanic2', 'ephemeral']
enterBehaviorList = enterBehaviorList + ["None"]
self.enterSpinner = Spinner(
# default value shown
text='plain',
# available values
values=enterBehaviorList,
background_normal='',
background_color=accent1,
background_down='',
background_color_down=accent2,
font_size=config.basefont90,
size_hint=(.2, 1),
)
self.enterSpinner.bind(text=self.toggleEnterBehavior)
self.statusBox.add_widget(self.enterSpinner)
self.centerBox.add_widget(self.statusBox)
##---------------------------------------------------------------------------------------
# Center text display
##---------------------------------------------------------------------------------------
#self.centerBox.add_widget(Label(text="---------------------", color=styles.textcolor, size_hint=(1,.04), font_name="maintextfont", font_size=config.basefont ))
self.threadButtonBox = GridLayout(cols=2, spacing=5, size_hint=(1,.05))
self.button = Button(text="copy to main window", size_hint=(1,.03), font_size=config.basefont75)
self.button.bind(on_press=self.pressGenericButton)
self.button.bind(on_release=self.copyThreadsToMain)
self.threadButtonBox.add_widget(self.button)
self.randomThreadButton = Button(text="random thread", halign='center', font_size=config.basefont75)
self.randomThreadButton.bind(on_press=self.pressGenericButton)
self.randomThreadButton.bind(on_release=self.releaseRandomThread)
self.threadButtonBox.add_widget(self.randomThreadButton)
self.centerBox.add_widget(self.threadButtonBox)
self.threadDisplay = ScrollView(size_hint=(1,.30))
self.threadDisplayGrid = GridLayout(cols=2, spacing=10, size_hint_y=None, size_hint_x=1, padding=(10,10))
self.threadDisplayGrid.bind(minimum_height = self.threadDisplayGrid.setter('height'))
self.threadDisplay.add_widget(self.threadDisplayGrid)
self.centerBox.add_widget(self.threadDisplay)
#self.centerBox.add_widget(Label(text="---------------------", color=styles.textcolor, size_hint=(1,.04), font_name="maintextfont", font_size=config.basefont ))
self.titleBarBox = BoxLayout(orientation='horizontal', size_hint=(1,.05))
self.jumpButton = Button(text="top", size_hint=(1,1), font_size=config.basefont75)
self.jumpButton.bind(on_press=self.pressGenericButton)
self.jumpButton.bind(on_release=self.navJump)
self.titleBarBox.add_widget(self.jumpButton)
self.findButton = Button(text="find", size_hint=(1,1), font_size=config.basefont75)
self.findButton.bind(on_press=self.pressGenericButton)
self.findButton.bind(on_release=self.navFind)
self.titleBarBox.add_widget(self.findButton)
self.nextButton = Button(text="next", size_hint=(1,1), font_size=config.basefont75)
self.nextButton.bind(on_press=self.pressGenericButton)
self.nextButton.bind(on_release=self.navNext)
self.titleBarBox.add_widget(self.nextButton)
self.centerBox.add_widget(self.titleBarBox)
self.centerDisplay = ScrollView(size_hint=(1,1))
self.centerDisplayGrid = GridLayout(cols=1, size_hint_y=None, padding=20, spacing="20dp")
self.centerDisplayGrid.bind(minimum_height = self.centerDisplayGrid.setter('height'))
self.centerDisplay.add_widget(self.centerDisplayGrid)
self.centerBox.add_widget(self.centerDisplay)
##---------------------------------------------------------------------------------------
# main text input & control panel
##---------------------------------------------------------------------------------------
self.controlBox = BoxLayout(orientation='horizontal', size_hint=(1,.45))
self.textInputMainBox = BoxLayout(orientation='vertical')
self.textInput = TextInput(text='', hint_text="", size_hint=(1,1), font_size=config.maintextinputfont)
#self.textInput.bind(on_text_validate=self.text_entered)
self.textInputMainBox.add_widget(self.textInput)
##---------------------------------------------------------------------------------------
# center footerself.box
##---------------------------------------------------------------------------------------
self.footerBox = BoxLayout(orientation="horizontal")
# flags & toggles
self.oracleButton = Button(text=config.oracle)
self.oracleButton.bind(on_release=self.cycleOracle)
self.qualitiesButton = ToggleButton(text="DQ")
self.qualitiesButton.bind(on_release=self.toggleResolutionMode)
self.saveButton = Button(text="Save")
self.saveButton.bind(on_press=self.pressGenericButton)
self.saveButton.bind(on_release=self.releaseSave)
#self.box for adding threads & actors
self.threadSubmitButton = Button(text="Add\nThread", halign='center', size_hint=(1,1), font_size=config.basefont80)
self.threadSubmitButton.bind(on_press=self.pressGenericButton)
self.threadSubmitButton.bind(on_release=self.releaseThread)
self.addActorButton = Button(text="Add\nActor", halign='center', size_hint=(1,1), font_size=config.basefont80)
self.addActorButton.bind(on_press=self.pressGenericButton)
self.addActorButton.bind(on_release=self.releaseAddActor)
# pick one from a list
self.listButton1 = Button(text="Pick\nOne", halign="center", font_size=config.basefont75, size_hint=(1,1), )
self.listButton1.bind(on_press=self.pressGenericButton)
self.listButton1.bind(on_release=self.chooseFromList)
self.listButton1.value = 0
self.listButton2 = Button(text="Pick\n2d4", halign="center", font_size=config.basefont75, size_hint=(1,1), )
self.listButton2.bind(on_press=self.pressGenericButton)
self.listButton2.bind(on_release=self.chooseFromList)
self.listButton2.value = 1
self.listButton3 = Button(text="Pick\n2d6", halign="center", font_size=config.basefont75, size_hint=(1,1), )
self.listButton3.bind(on_press=self.pressGenericButton)
self.listButton3.bind(on_release=self.chooseFromList)
self.listButton3.value = 2
self.listButton4 = Button(text="Pick\n3:2:1", halign="center", font_size=config.basefont75, size_hint=(1,1), )
self.listButton4.bind(on_press=self.pressGenericButton)
self.listButton4.bind(on_release=self.chooseFromList)
self.listButton4.value = 3
self.listButton5 = Button(text="Pick\nTwo", halign="center", font_size=config.basefont75, size_hint=(1,1), )
self.listButton5.bind(on_press=self.pressGenericButton)
self.listButton5.bind(on_release=self.chooseFromList)
self.listButton5.value = 4
self.listButton6 = Button(text="Pick\nThree", halign="center", font_size=config.basefont75, size_hint=(1,1), )
self.listButton6.bind(on_press=self.pressGenericButton)
self.listButton6.bind(on_release=self.chooseFromList)
self.listButton6.value = 5
self.listButton7 = Button(text="Pick\nFour", halign="center", font_size=config.basefont75, size_hint=(1,1), )
self.listButton7.bind(on_press=self.pressGenericButton)
self.listButton7.bind(on_release=self.chooseFromList)
self.listButton7.value = 6
self.listButton8 = Button(text="Pick\nFive", halign="center", font_size=config.basefont75, size_hint=(1,1), )
self.listButton8.bind(on_press=self.pressGenericButton)
self.listButton8.bind(on_release=self.chooseFromList)
self.listButton8.value = 7
# dice presets
self.diceButtonsList = []
for preset in config.dice_presets:
if len(preset) > 4:
self.button = Button(text=preset, font_size=config.basefont80)
else:
self.button = Button(text=preset)
self.button.bind(on_press=self.pressGenericButton)
self.button.bind(on_release=self.releasePresetDice)
self.diceButtonsList.append(self.button)
#diceList = ["4", "6", "8", "10", "12", "20", "30", "100"]
diceSpinnersList = []
for item in config.dice_spinner_list:
diceValueList = []
for i in range(1,11):
diceValueList.append( str(i) + "d" + item )
self.spinner = Spinner(
# default value shown
text=diceValueList[0],
# available values
values=diceValueList,
background_normal='',
background_color=neutral,
background_down='',
background_color_down=accent2,
font_size=config.basefont90,
)
self.spinner.bind(text=self.releasePresetDice)
diceSpinnersList.append(self.spinner)
diceAltButtonsList = []
self.button = Button(text="ORE")
self.button.bind(on_press=self.pressGenericButton)
self.button.bind(on_release=self.releaseORERoll)
diceAltButtonsList.append(self.button)
self.button = Button(text="FATE")
self.button.bind(on_press=self.pressGenericButton)
self.button.bind(on_release=self.releaseFateRoll)
diceAltButtonsList.append(self.button)
self.flagsBox = BoxLayout(orientation='vertical', size_hint=(.1,1))
self.flagsBox.add_widget(self.oracleButton)
self.flagsBox.add_widget(self.qualitiesButton)
self.flagsBox.add_widget(self.saveButton)
self.threadBox = BoxLayout(orientation='vertical', size_hint=(.1,1))
self.threadBox.add_widget(self.threadSubmitButton)
self.threadBox.add_widget(self.addActorButton)
self.weightedBox = GridLayout(cols=4, size_hint=(.3,1))
self.weightedBox.add_widget(self.listButton1)
self.weightedBox.add_widget(self.listButton5)
self.weightedBox.add_widget(self.listButton6)
self.weightedBox.add_widget(self.listButton7)
self.weightedBox.add_widget(self.listButton8)
self.weightedBox.add_widget(self.listButton2)
self.weightedBox.add_widget(self.listButton3)
self.weightedBox.add_widget(self.listButton4)
self.dicePresetsBox = GridLayout(cols=4, size_hint=(.3,1))
for dice in self.diceButtonsList:
self.dicePresetsBox.add_widget(dice)
self.diceSpinnersBox = GridLayout(cols=1, size_hint=(.1,1))
for spinner in diceSpinnersList:
self.diceSpinnersBox.add_widget(spinner)
self.diceAltBox = GridLayout(cols=1, size_hint=(.1,1))
for alt in diceAltButtonsList:
self.diceAltBox.add_widget(alt)
self.footerBox.add_widget(self.flagsBox)
self.footerBox.add_widget(self.weightedBox)
self.footerBox.add_widget(self.dicePresetsBox)
self.footerBox.add_widget(self.diceSpinnersBox)
self.footerBox.add_widget(self.diceAltBox)
self.footerBox.add_widget(self.threadBox)
self.textInputMainBox.add_widget(self.footerBox)
self.controlBox.add_widget(self.textInputMainBox)
##---------------------------------------------------------------------------------------
# Center text submit buttons
##---------------------------------------------------------------------------------------
self.submitButtonsBox = BoxLayout(orientation='vertical', size_hint=(.23,1))
self.questionSubmitButton = Button(text="???")
self.questionSubmitButton.bind(on_press=self.pressGenericButton)
self.questionSubmitButton.bind(on_release=self.releaseQuestion)
self.submitButtonsBox.add_widget(self.questionSubmitButton)
self.playerSubmitButton = Button(text="Direct")
self.playerSubmitButton.bind(on_press=self.pressGenericButton)
self.playerSubmitButton.bind(on_release=self.releasePlayer)
self.submitButtonsBox.add_widget(self.playerSubmitButton)
self.dmSubmitButton = Button(text="Aside")
self.dmSubmitButton.bind(on_press=self.pressGenericButton)
self.dmSubmitButton.bind(on_release=self.releaseDM)
self.submitButtonsBox.add_widget(self.dmSubmitButton)
self.rollSubmitButton = Button(text="Roll Dice")
self.rollSubmitButton.bind(on_press=self.pressGenericButton)
self.rollSubmitButton.bind(on_release=self.releaseRoll)
self.submitButtonsBox.add_widget(self.rollSubmitButton)
self.seedButtonsBox = BoxLayout(orientation='vertical', size_hint_y=2)
self.seedButton = Button(text="Seed")
self.seedButton.bind(on_press=self.pressGenericButton)
self.seedButton.bind(on_release=self.getSeed)
self.seedButtonsBox.add_widget(self.seedButton)
self.seedAlternateButton = Button(text="Action")
self.seedAlternateButton.bind(on_press=self.pressGenericButton)
self.seedAlternateButton.bind(on_release=self.getSeedAlternate)
self.seedButtonsBox.add_widget(self.seedAlternateButton)
self.submitButtonsBox.add_widget(self.seedButtonsBox)
# scenario buttons go here, if a scenario is loaded
self.scenarioButtonList = []
button = Button(text="show scene")
button.self = self
button.bind(on_press=self.pressGenericButton)
button.bind(on_release=self.showBlock)
self.scenarioButtonList.append(button)
button = Button(text="show exits")
button.self = self
button.bind(on_press=self.pressGenericButton)
button.bind(on_release=self.showExits)
self.scenarioButtonList.append(button)
self.controlBox.add_widget(self.submitButtonsBox)
self.centerBox.add_widget(self.controlBox)
self.mainBox.add_widget(self.centerBox)
##---------------------------------------------------------------------------------------
# SIDE PANEL - right horizontal stack for trackers
##---------------------------------------------------------------------------------------
self.rightAccordion = Accordion(orientation='horizontal', size_hint=(.6, 1), min_space = config.aiheight)
self.mainBox.add_widget(self.rightAccordion)
##---------------------------------------------------------------------------------------
# PC panel
##---------------------------------------------------------------------------------------
self.pcAccordionItem = AccordionItem(title='Character Sheets', background_normal='resources' + os.sep + 'bg_bars' + os.sep + styles.curr_palette["name"].replace (" ", "_") + '_5.png', background_selected='resources' + os.sep + 'bg_bars' + os.sep + styles.curr_palette["name"].replace (" ", "_") + '_5.png', min_space = config.aiheight)
self.pcStackAccordion = Accordion(orientation='vertical', size_hint=(1,1), min_space = config.aiheight)
self.pcAccordionItem.add_widget(self.pcStackAccordion)
self.rightAccordion.add_widget(self.pcAccordionItem)
# let's not get too fancy/custom with this; just add fixed panels
self.pcPanelsList = []
self.topgrid = []
self.halfgrid = []
self.bottomgrid = []
for i in range(config.general['max_character_sheets']):
config.pcKeyLabelArray.append([])
config.pcValueLabelArray.append([])
self.pcPanelsList.append(AccordionItem(title='Character ' + str(i), background_normal='resources' + os.sep + 'bg_bars' + os.sep + styles.curr_palette["name"].replace (" ", "_") + '_5.png', background_selected='resources' + os.sep + 'bg_bars' + os.sep + styles.curr_palette["name"].replace (" ", "_") + '_5.png', min_space = config.aiheight))
self.box = BoxLayout(orientation='vertical')
self.buttonbox = GridLayout(cols=2, spacing=5, size_hint=(1,.05))
self.button = Button(text="copy to main window", font_size=config.basefont75)
self.button.bind(on_press=self.pressGenericButton)
self.button.bind(on_release=self.copyPCsToMain)
self.button.sheet = i
self.buttonbox.add_widget(self.button)
self.button = Button(text="random major", halign='center', font_size=config.basefont75)
self.button.bind(on_press=self.pressGenericButton)
self.button.bind(on_release=self.releaseRandomPC)
self.buttonbox.add_widget(self.button)
self.box.add_widget(self.buttonbox)
self.display = ScrollView(size_hint=(1, 1))
self.displaygrid = GridLayout(cols=1, spacing=5, size_hint_y=None, size_hint_x=1)
self.displaygrid.bind(minimum_height = self.displaygrid.setter('height'))
self.display.add_widget(self.displaygrid)
self.topgrid.append(GridLayout(cols=2, size_hint_y=None))
self.topgrid[-1].bind(minimum_height = self.topgrid[-1].setter('height'))
self.halfgrid.append(GridLayout(cols=4, size_hint_y=None))
self.halfgrid[-1].bind(minimum_height = self.halfgrid[-1].setter('height'))
self.bottomgrid.append(GridLayout(cols=2, size_hint_y=None))
self.bottomgrid[-1].bind(minimum_height = self.bottomgrid[-1].setter('height'))
for x in range(0,39):
if x <= 26:
ml = False
ht = config.tallheight
fs = config.basefont90
else:
ml = True
ht = config.doubleheight
fs = config.basefont90
if x >= 4 and x <= 26:
xhint = .25
else:
xhint = .15
label = TextInput(text="", multiline=ml, size_hint_y=None, size_hint_x=xhint, height=ht, font_size=fs, font_name='maintextfont', background_color=neutral, foreground_color=styles.textcolor)
label.self = self
label.value = x
config.pcKeyLabelArray[i].append(label)
label.bind(focus=focusChangePC)
label = TextInput(text="", multiline=ml, size_hint_y=None, size_hint_x=1.0-xhint, height=ht, font_size=fs, font_name='maintextfont', background_color=neutral, foreground_color=styles.textcolor)
label.text_size = (self.displaygrid.width, None)
label.self = self
label.value = x
config.pcValueLabelArray[i].append(label)
label.bind(focus=focusChangePC)
if x >= 4 and x <= 26:
self.halfgrid[-1].add_widget(config.pcKeyLabelArray[i][-1])
self.halfgrid[-1].add_widget(config.pcValueLabelArray[i][-1])
elif x <= 3:
self.topgrid[-1].add_widget(config.pcKeyLabelArray[i][-1])
self.topgrid[-1].add_widget(config.pcValueLabelArray[i][-1])
else:
self.bottomgrid[-1].add_widget(config.pcKeyLabelArray[i][-1])
self.bottomgrid[-1].add_widget(config.pcValueLabelArray[i][-1])
self.displaygrid.add_widget(self.topgrid[-1])
self.displaygrid.add_widget(self.halfgrid[-1])
self.displaygrid.add_widget(self.bottomgrid[-1])
self.box.add_widget(self.display)
self.pcPanelsList[-1].add_widget(self.box)
# add the actual PC panels later
##---------------------------------------------------------------------------------------
# actor panel
##---------------------------------------------------------------------------------------
self.actorAItem = AccordionItem(title='Actor Tracker', background_normal='resources' + os.sep + 'bg_bars' + os.sep + styles.curr_palette["name"].replace (" ", "_") + '_5.png', background_selected='resources' + os.sep + 'bg_bars' + os.sep + styles.curr_palette["name"].replace (" ", "_") + '_5.png', min_space = config.aiheight)
self.actorMainBox = BoxLayout(orientation='vertical')
self.actorButtonBox = GridLayout(cols=2, spacing=5, size_hint=(1,.05))
self.button = Button(text="copy to main window", size_hint=(1,.05), font_size=config.basefont75)
self.button.bind(on_press=self.pressGenericButton)
self.button.bind(on_release=self.copyActorToMain)
self.actorButtonBox.add_widget(self.button)
self.randomActorButton = Button(text="random actor", halign='center', font_size=config.basefont75)
self.randomActorButton.bind(on_press=self.pressGenericButton)
self.randomActorButton.bind(on_release=self.releaseRandomActor)
self.actorButtonBox.add_widget(self.randomActorButton)
self.actorMainBox.add_widget(self.actorButtonBox)
self.actorMainBox.add_widget(Label(text="Actors", halign="center", size_hint=(1,.05), font_size=config.basefont90))
self.actorDisplay = ScrollView(size_hint=(1, .80))
self.actorDisplayGrid = GridLayout(cols=1, spacing=5, size_hint_y=None, size_hint_x=1)
self.actorDisplayGrid.bind(minimum_height = self.actorDisplayGrid.setter('height'))
self.actorDisplay.add_widget(self.actorDisplayGrid)
self.actorMainBox.add_widget(self.actorDisplay)
self.actorIndexToggle = Button(text="Actor Index", halign="center", height=config.tallheight, size_hint=(1,None), font_size=config.basefont90)
self.actorIndexToggle.value = config.general['actor_index_state']
self.actorIndexToggle.bind(on_press=self.pressGenericButton)
self.actorIndexToggle.bind(on_release=self.toggleActorIndexSize)
self.actorMainBox.add_widget(self.actorIndexToggle)
self.actorIndexDisplay = ScrollView(size_hint=(1,.20))
self.actorIndexDisplayGrid = GridLayout(cols=1, spacing=5, size_hint_y=None, size_hint_x=1)
self.actorIndexDisplayGrid.bind(minimum_height = self.actorIndexDisplayGrid.setter('height'))
self.actorIndexDisplay.add_widget(self.actorIndexDisplayGrid)
self.actorMainBox.add_widget(self.actorIndexDisplay)
self.actorAItem.add_widget(self.actorMainBox)
self.rightAccordion.add_widget(self.actorAItem)
#---------------------------------------------------------------------------------------
# tracks & scratchpad panel
#---------------------------------------------------------------------------------------
self.tracksAItem = AccordionItem(title='Tracks, Status, Notes', background_normal='resources' + os.sep + 'bg_bars' + os.sep + styles.curr_palette["name"].replace (" ", "_") + '_5.png', background_selected='resources' + os.sep + 'bg_bars' + os.sep + styles.curr_palette["name"].replace (" ", "_") + '_5.png', min_space = config.aiheight)
self.tracksMainBox = BoxLayout(orientation='vertical')
self.trackButtonBox = GridLayout(cols=2, spacing=5, size_hint=(1,.05))
self.button = Button(text="copy to main window", size_hint=(1,.05), font_size=config.basefont75)
self.button.bind(on_press=self.pressGenericButton)
self.button.bind(on_release=self.copyTracksToMain)
self.trackButtonBox.add_widget(self.button)
self.randomTrackButton = Button(text="random track", halign='center', font_size=config.basefont75)
self.randomTrackButton.bind(on_press=self.pressGenericButton)
self.randomTrackButton.bind(on_release=self.releaseRandomTrack)
self.trackButtonBox.add_widget(self.randomTrackButton)
self.tracksMainBox.add_widget(self.trackButtonBox)
self.trackTitleGrid = GridLayout(cols=2, spacing=5, size_hint=(1,.10))
label = Label(text="Status/Condition/Track", size_hint_x=.90, font_size=config.basefont90, font_name='maintextfont', background_color=neutral, foreground_color=styles.textcolor)
label.bind(width=lambda instance, value: setattr(instance, 'text_size', (value, None)))
self.trackTitleGrid.add_widget(label)
label = Label(text="On?", size_hint_x=.10, font_size=config.basefont90, font_name='maintextfont', background_color=neutral, foreground_color=styles.textcolor)
self.trackTitleGrid.add_widget(label)
self.tracksMainBox.add_widget(self.trackTitleGrid)
self.trackDisplay = ScrollView(size_hint=(1, 1))
self.trackDisplayGrid = GridLayout(cols=2, spacing=5, size_hint_y=None, size_hint_x=1)
self.trackDisplayGrid.bind(minimum_height = self.trackDisplayGrid.setter('height'))
for i in range(1,30):
label = TextInput(text="", multiline=False, size_hint_y=None, size_hint_x=.90, height=config.tallheight, font_size=config.basefont90, font_name='maintextfont', | |
"""
slice_oper_unset = -1
slice_oper_down = 0
slice_oper_up = 1
slice_oper_na = 2
@staticmethod
def _meta_info():
    """Return the generated meta-schema table entry for SliceStateEnum."""
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_dnx_driver_oper as meta_module
    return meta_module._meta_table['SliceStateEnum']
class Fia(object):
"""
FIA driver operational data
.. attribute:: nodes
FIA driver operational data for available nodes
**type**\: :py:class:`Nodes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dnx_driver_oper.Fia.Nodes>`
"""
_prefix = 'dnx-driver-oper'
_revision = '2015-11-09'
def __init__(self):
    """Create the per-node container and link it back to this object."""
    nodes_container = Fia.Nodes()
    nodes_container.parent = self
    self.nodes = nodes_container
class Nodes(object):
"""
FIA driver operational data for available nodes
.. attribute:: node
FIA operational data for a particular node
**type**\: list of :py:class:`Node <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dnx_driver_oper.Fia.Nodes.Node>`
"""
_prefix = 'dnx-driver-oper'
_revision = '2015-11-09'
def __init__(self):
    """Initialise an empty, parent-linked YList of Node entries."""
    self.parent = None
    node_list = YList()
    node_list.parent = self
    node_list.name = 'node'
    self.node = node_list
class Node(object):
"""
FIA operational data for a particular node
.. attribute:: node_name <key>
Node ID
**type**\: str
**pattern:** ([a\-zA\-Z0\-9\_]\*\\d+/){1,2}([a\-zA\-Z0\-9\_]\*\\d+)
.. attribute:: asic_statistics
FIA asic statistics information
**type**\: :py:class:`AsicStatistics <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dnx_driver_oper.Fia.Nodes.Node.AsicStatistics>`
.. attribute:: clear_statistics
Clear statistics information
**type**\: :py:class:`ClearStatistics <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dnx_driver_oper.Fia.Nodes.Node.ClearStatistics>`
.. attribute:: diag_shell
FIA diag shell information
**type**\: :py:class:`DiagShell <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dnx_driver_oper.Fia.Nodes.Node.DiagShell>`
.. attribute:: driver_information
FIA driver information
**type**\: :py:class:`DriverInformation <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dnx_driver_oper.Fia.Nodes.Node.DriverInformation>`
.. attribute:: oir_history
FIA operational data of oir history
**type**\: :py:class:`OirHistory <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dnx_driver_oper.Fia.Nodes.Node.OirHistory>`
.. attribute:: register_dump
FIA register dump information
**type**\: :py:class:`RegisterDump <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dnx_driver_oper.Fia.Nodes.Node.RegisterDump>`
.. attribute:: rx_link_information
FIA link rx information
**type**\: :py:class:`RxLinkInformation <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dnx_driver_oper.Fia.Nodes.Node.RxLinkInformation>`
.. attribute:: tx_link_information
FIA link TX information
**type**\: :py:class:`TxLinkInformation <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dnx_driver_oper.Fia.Nodes.Node.TxLinkInformation>`
"""
_prefix = 'dnx-driver-oper'
_revision = '2015-11-09'
def __init__(self):
    """Create every child container and wire its parent pointer to self."""
    self.parent = None
    self.node_name = None
    # One (attribute name, container class) pair per child; kept in the
    # same order the attributes were originally assigned.
    child_containers = (
        ('asic_statistics', Fia.Nodes.Node.AsicStatistics),
        ('clear_statistics', Fia.Nodes.Node.ClearStatistics),
        ('diag_shell', Fia.Nodes.Node.DiagShell),
        ('driver_information', Fia.Nodes.Node.DriverInformation),
        ('oir_history', Fia.Nodes.Node.OirHistory),
        ('register_dump', Fia.Nodes.Node.RegisterDump),
        ('rx_link_information', Fia.Nodes.Node.RxLinkInformation),
        ('tx_link_information', Fia.Nodes.Node.TxLinkInformation),
    )
    for attribute_name, container_cls in child_containers:
        container = container_cls()
        container.parent = self
        setattr(self, attribute_name, container)
class RxLinkInformation(object):
"""
FIA link rx information
.. attribute:: link_options
Option table for link rx information
**type**\: :py:class:`LinkOptions <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dnx_driver_oper.Fia.Nodes.Node.RxLinkInformation.LinkOptions>`
"""
_prefix = 'dnx-driver-oper'
_revision = '2015-11-09'
def __init__(self):
    """Create the link-options table and link it back to this object."""
    self.parent = None
    options_table = Fia.Nodes.Node.RxLinkInformation.LinkOptions()
    options_table.parent = self
    self.link_options = options_table
class LinkOptions(object):
"""
Option table for link rx information
.. attribute:: link_option
Option \: topo , flag , stats
**type**\: list of :py:class:`LinkOption <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dnx_driver_oper.Fia.Nodes.Node.RxLinkInformation.LinkOptions.LinkOption>`
"""
_prefix = 'dnx-driver-oper'
_revision = '2015-11-09'
def __init__(self):
    """Initialise an empty, parent-linked YList of LinkOption entries."""
    self.parent = None
    option_list = YList()
    option_list.parent = self
    option_list.name = 'link_option'
    self.link_option = option_list
class LinkOption(object):
"""
Option \: topo , flag , stats
.. attribute:: option <key>
Link option
**type**\: str
**pattern:** (flap)\|(topo)
.. attribute:: rx_asic_instances
Instance table for rx information
**type**\: :py:class:`RxAsicInstances <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dnx_driver_oper.Fia.Nodes.Node.RxLinkInformation.LinkOptions.LinkOption.RxAsicInstances>`
"""
_prefix = 'dnx-driver-oper'
_revision = '2015-11-09'
def __init__(self):
    """Init the key leaf (option) and the rx_asic_instances child table."""
    self.parent = None
    # Key leaf; per the class docstring the value matches (flap)|(topo).
    self.option = None
    self.rx_asic_instances = Fia.Nodes.Node.RxLinkInformation.LinkOptions.LinkOption.RxAsicInstances()
    self.rx_asic_instances.parent = self
class RxAsicInstances(object):
"""
Instance table for rx information
.. attribute:: rx_asic_instance
Instance number for rx link information
**type**\: list of :py:class:`RxAsicInstance <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dnx_driver_oper.Fia.Nodes.Node.RxLinkInformation.LinkOptions.LinkOption.RxAsicInstances.RxAsicInstance>`
"""
_prefix = 'dnx-driver-oper'
_revision = '2015-11-09'
def __init__(self):
    """Initialise the YList that will hold RxAsicInstance entries."""
    self.parent = None
    self.rx_asic_instance = YList()
    self.rx_asic_instance.parent = self
    self.rx_asic_instance.name = 'rx_asic_instance'
class RxAsicInstance(object):
"""
Instance number for rx link information
.. attribute:: instance <key>
Receive instance
**type**\: int
**range:** 0..255
.. attribute:: rx_links
Link table class for rx information
**type**\: :py:class:`RxLinks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dnx_driver_oper.Fia.Nodes.Node.RxLinkInformation.LinkOptions.LinkOption.RxAsicInstances.RxAsicInstance.RxLinks>`
"""
_prefix = 'dnx-driver-oper'
_revision = '2015-11-09'
def __init__(self):
    """Init the key leaf (instance) and the rx_links child table."""
    self.parent = None
    # Key leaf; the class docstring bounds it to 0..255.
    self.instance = None
    self.rx_links = Fia.Nodes.Node.RxLinkInformation.LinkOptions.LinkOption.RxAsicInstances.RxAsicInstance.RxLinks()
    self.rx_links.parent = self
class RxLinks(object):
"""
Link table class for rx information
.. attribute:: rx_link
Link number for rx link information
**type**\: list of :py:class:`RxLink <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dnx_driver_oper.Fia.Nodes.Node.RxLinkInformation.LinkOptions.LinkOption.RxAsicInstances.RxAsicInstance.RxLinks.RxLink>`
"""
_prefix = 'dnx-driver-oper'
_revision = '2015-11-09'
def __init__(self):
    """Initialise the YList that will hold RxLink entries."""
    self.parent = None
    self.rx_link = YList()
    self.rx_link.parent = self
    self.rx_link.name = 'rx_link'
class RxLink(object):
"""
Link number for rx link information
.. attribute:: end_number
End number
**type**\: int
**range:** 0..35
.. attribute:: rx_link
Single link information
**type**\: list of :py:class:`RxLink_ <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dnx_driver_oper.Fia.Nodes.Node.RxLinkInformation.LinkOptions.LinkOption.RxAsicInstances.RxAsicInstance.RxLinks.RxLink.RxLink_>`
.. attribute:: start_number
Start number
**type**\: int
**range:** 0..35
.. attribute:: status_option
RX link status option
**type**\: str
**pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
"""
_prefix = 'dnx-driver-oper'
_revision = '2015-11-09'
def __init__(self):
    """Init the start/end range leaves, the status option, and the nested rx_link list."""
    self.parent = None
    self.end_number = None
    self.rx_link = YList()
    self.rx_link.parent = self
    self.rx_link.name = 'rx_link'
    self.start_number = None
    self.status_option = None
class RxLink_(object):
"""
Single link information
.. attribute:: link <key>
Single link
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: admin_state
Admin State
**type**\: :py:class:`AdminStateEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dnx_driver_oper.AdminStateEnum>`
.. attribute:: error_state
Error State
**type**\: :py:class:`LinkErrorStateEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dnx_driver_oper.LinkErrorStateEnum>`
.. attribute:: far_end_link
far end link
**type**\: :py:class:`FarEndLink <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dnx_driver_oper.Fia.Nodes.Node.RxLinkInformation.LinkOptions.LinkOption.RxAsicInstances.RxAsicInstance.RxLinks.RxLink.RxLink_.FarEndLink>`
.. attribute:: far_end_link_in_hw
far end link in hw
**type**\: :py:class:`FarEndLinkInHw <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dnx_driver_oper.Fia.Nodes.Node.RxLinkInformation.LinkOptions.LinkOption.RxAsicInstances.RxAsicInstance.RxLinks.RxLink.RxLink_.FarEndLinkInHw>`
.. attribute:: flags
flags
**type**\: str
.. attribute:: flap_cnt
flap cnt
**type**\: int
**range:** 0..4294967295
.. attribute:: history
history
**type**\: :py:class:`History <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dnx_driver_oper.Fia.Nodes.Node.RxLinkInformation.LinkOptions.LinkOption.RxAsicInstances.RxAsicInstance.RxLinks.RxLink.RxLink_.History>`
.. attribute:: is_conf_pending
is conf pending
**type**\: bool
.. attribute:: is_link_valid
is link valid
**type**\: bool
.. attribute:: num_admin_shuts
num admin shuts
**type**\: int
**range:** 0..4294967295
.. attribute:: oper_state
Oper State
**type**\: :py:class:`OperStateEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dnx_driver_oper.OperStateEnum>`
.. attribute:: speed
speed
**type**\: int
**range:** 0..4294967295
.. attribute:: stage
Stage
**type**\: :py:class:`LinkStageEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dnx_driver_oper.LinkStageEnum>`
.. attribute:: this_link
this link
**type**\: :py:class:`ThisLink <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dnx_driver_oper.Fia.Nodes.Node.RxLinkInformation.LinkOptions.LinkOption.RxAsicInstances.RxAsicInstance.RxLinks.RxLink.RxLink_.ThisLink>`
"""
_prefix = 'dnx-driver-oper'
_revision = '2015-11-09'
def __init__(self):
    """Init all leaves to None and construct the nested child containers."""
    self.parent = None
    # Key leaf: the link index.
    self.link = None
    self.admin_state = None
    self.error_state = None
    self.far_end_link = Fia.Nodes.Node.RxLinkInformation.LinkOptions.LinkOption.RxAsicInstances.RxAsicInstance.RxLinks.RxLink.RxLink_.FarEndLink()
    self.far_end_link.parent = self
    self.far_end_link_in_hw = Fia.Nodes.Node.RxLinkInformation.LinkOptions.LinkOption.RxAsicInstances.RxAsicInstance.RxLinks.RxLink.RxLink_.FarEndLinkInHw()
    self.far_end_link_in_hw.parent = self
    self.flags = None
    self.flap_cnt = None
    self.history = Fia.Nodes.Node.RxLinkInformation.LinkOptions.LinkOption.RxAsicInstances.RxAsicInstance.RxLinks.RxLink.RxLink_.History()
    self.history.parent = self
    self.is_conf_pending = None
    self.is_link_valid = None
    self.num_admin_shuts = None
    self.oper_state = None
    self.speed = None
    self.stage = None
    self.this_link = Fia.Nodes.Node.RxLinkInformation.LinkOptions.LinkOption.RxAsicInstances.RxAsicInstance.RxLinks.RxLink.RxLink_.ThisLink()
    self.this_link.parent = self
class ThisLink(object):
"""
this link
.. attribute:: asic_id
asic id
**type**\: :py:class:`AsicId <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dnx_driver_oper.Fia.Nodes.Node.RxLinkInformation.LinkOptions.LinkOption.RxAsicInstances.RxAsicInstance.RxLinks.RxLink.RxLink_.ThisLink.AsicId>`
.. attribute:: link_num
link num
**type**\: int
**range:** 0..4294967295
.. attribute:: link_stage
Link Stage
**type**\: :py:class:`LinkStageEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dnx_driver_oper.LinkStageEnum>`
.. attribute:: link_type
Link Type
**type**\: :py:class:`LinkEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dnx_driver_oper.LinkEnum>`
.. attribute:: phy_link_num
phy link num
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'dnx-driver-oper'
_revision = '2015-11-09'
def __init__(self):
    """Init the asic_id child container and the plain link leaves."""
    self.parent = None
    self.asic_id = Fia.Nodes.Node.RxLinkInformation.LinkOptions.LinkOption.RxAsicInstances.RxAsicInstance.RxLinks.RxLink.RxLink_.ThisLink.AsicId()
    self.asic_id.parent = self
    self.link_num = None
    self.link_stage = None
    self.link_type = None
    self.phy_link_num = None
class AsicId(object):
    """
    asic id
    .. attribute:: asic_instance
        asic instance
        **type**\: int
        **range:** 0..4294967295
    .. attribute:: asic_type
        Asic Type
        **type**\: :py:class:`AsicEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dnx_driver_oper.AsicEnum>`
    .. attribute:: rack_num
        rack num
        **type**\: int
        **range:** 0..4294967295
    .. attribute:: rack_type
        Rack Type
        **type**\: :py:class:`RackEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dnx_driver_oper.RackEnum>`
    .. attribute:: slot_num
        slot num
        **type**\: int
        **range:** 0..4294967295
    """
    _prefix = 'dnx-driver-oper'
    _revision = '2015-11-09'

    def __init__(self):
        # All leaves start unset; they are filled in when operational data is read.
        self.parent = None
        self.asic_instance = None
        self.asic_type = None
        self.rack_num = None
        self.rack_type = None
        self.slot_num = None

    @property
    def _common_path(self):
        # XPath of this container, derived from the parent container's path.
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return self.parent._common_path +'/Cisco-IOS-XR-dnx-driver-oper:asic-id'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return False

    def _has_data(self):
        # NOTE(review): is_config() is always False here, so this guard makes
        # _has_data always return False; this matches the generator's emitted
        # pattern — verify against ydk-gen before changing.
        if not self.is_config():
            return False
        if self.asic_instance is not None:
            return True
        if self.asic_type is not None:
            return True
        if self.rack_num is not None:
            return True
        if self.rack_type is not None:
            return True
        if self.slot_num is not None:
            return True
        return False

    @staticmethod
    def _meta_info():
        # Lazy import avoids a circular dependency with the meta module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_dnx_driver_oper as meta
        return meta._meta_table['Fia.Nodes.Node.RxLinkInformation.LinkOptions.LinkOption.RxAsicInstances.RxAsicInstance.RxLinks.RxLink.RxLink_.ThisLink.AsicId']['meta_info']
@property
def _common_path(self):
    # XPath of the this-link container, derived from the parent's path.
    if self.parent is None:
        raise YPYModelError('parent is not set . Cannot derive path.')
    return self.parent._common_path +'/Cisco-IOS-XR-dnx-driver-oper:this-link'
def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    # Operational (oper) model: never config data.
    return False
def _has_data(self):
if not self.is_config():
return False
if self.asic_id is not None and self.asic_id._has_data():
return True
if self.link_num is not None:
return True
if self.link_stage is not None:
return True
if self.link_type is not None:
return True
if self.phy_link_num is not None:
return True
| |
str(y2) + ", " + str(self.isRaw()) + "]"
class RawMBR(MBR):
    """A leaf ("raw") minimum bounding rectangle wrapping one stored item."""

    def __init__(self, upper_left, lower_right, contained_item):
        MBR.__init__(self, upper_left, lower_right)
        self.contained_item = contained_item

    def isRaw(self):
        # Leaf MBRs are raw by definition.
        return True

    @staticmethod
    def makeMBRFromPoint(point):
        # A point is a degenerate rectangle: both corners coincide.
        return RawMBR(point, point, point)

    def getContainedItem(self):
        return self.contained_item

    def getMBRList(self):
        # A raw MBR stands alone in its own list.
        return [self]
# mbr_list is a list of mbr's that can be either all raw or all composite
class CompositeMBR(MBR):
    """An internal MBR bounding a list of child MBRs (all raw or all composite)."""

    def __init__(self, upper_left, lower_right, mbr_list):
        MBR.__init__(self, upper_left, lower_right)
        self.mbr_list = mbr_list

    def getMBRList(self):
        return self.mbr_list

    def isComposite(self):
        return True

    @staticmethod
    def makeMBR(component_mbr_list):
        # Gather every corner of every component, then take the envelope.
        corners = [m.getUpperLeft() for m in component_mbr_list]
        corners = corners + [m.getLowerRight() for m in component_mbr_list]
        xs = [c[0] for c in corners]
        ys = [c[1] for c in corners]
        envelope_upper_left = (min(xs), min(ys))
        envelope_lower_right = (max(xs), max(ys))
        return CompositeMBR(envelope_upper_left, envelope_lower_right, component_mbr_list)
class Point:
    """A 2-d point carrying an identifier; the payload stored at R-tree leaves."""

    def __init__(self, x, y, id_value):
        self.x = x
        self.y = y
        self.id_value = id_value

    @staticmethod
    def toPoint(mbr):
        # Only a degenerate (zero-extent) MBR corresponds to a point.
        corner = mbr.getUpperLeft()
        if corner != mbr.getLowerRight():
            raise Exception("attempted to turn a non-point mbr to a point")
        return corner

    def getX(self):
        return self.x

    def getY(self):
        return self.y

    def getIDValue(self):
        return self.id_value
import string
class RTree:
def __init__(self):
    """Create an empty tree: a leaf root node under a degenerate (None) MBR."""
    root_node = RTreeNode(None, [], True)
    # The root's bounding box starts undefined; it grows as entries are inserted.
    root_mbr = CompositeMBR(None, None, None)
    root_entry = RTreeEntry(root_mbr, root_node)
    self.setRootEntry(root_entry)
# return an in-order string
def toString(self):
    """Return a parenthesized, in-order rendering of the whole tree."""
    return self.toStringHelper(self.getRootEntry().getChild())
def toStringHelper(self, node):
    """Recursively render node's MBR followed by its children, parenthesized.

    Fixes: ``string.join`` (removed in Python 3) replaced by the equivalent
    ``str.join``; dead locals (``children``, ``have_node_str``) removed.
    Output is byte-identical to the original.
    """
    if node is None:
        return ""
    if node == self.getRootEntry().getChild():
        # The root's MBR lives on the root entry itself; an empty root has none.
        overall_str_list = [] if node.getNumChildren() == 0 else [self.getRootEntry().getMBR().toString()]
    else:
        # A non-root node's MBR is stored in its parent's entry for it.
        overall_str_list = [node.getParent().retrieveEntryForChild(node).getMBR().toString()]
    for entry in node.getEntries():
        overall_str_list.append(self.toStringHelper(entry.getChild()))
    return "(" + " ".join(overall_str_list) + ")"
"""
def setRoot(self, node):
self.root = node
def getRoot(self):
return self.root
"""
def getRootEntry(self):
    """Return the entry wrapping the root node and its MBR."""
    return self.root_entry
def setRootEntry(self, root_entry):
    """Replace the entry wrapping the root node and its MBR."""
    self.root_entry = root_entry
# in case of ties, return multiple candidates
def chooseEntriesWithMinimalAreaEnlargement(self, entries, entry):
    """Return every entry whose MBR needs the least area enlargement to
    absorb entry's MBR; all ties are returned for later resolution."""
    mbr_to_entry_dict = {}
    for candidate in entries:
        mbr_to_entry_dict[candidate.getMBR()] = candidate
    target_mbr = entry.getMBR()
    tagged_enlargements = [(MBR.getAreaEnlargement(candidate.getMBR(), target_mbr), candidate.getMBR())
                           for candidate in entries]
    smallest_growth = min(growth for growth, _ in tagged_enlargements)
    return [mbr_to_entry_dict[box] for growth, box in tagged_enlargements if growth == smallest_growth]
# chosen_tagged_enlargement_value = candidate_tagged_enlargement_values[0]
# chosen_enlargement, chosen_mbr = chosen_tagged_enlargement_value
# tagged_mbr_list = [(children[x], entries[x].getMBR()) for x in range(len(children))]
# candidate_tagged_mbr_list = [x for x in tagged_mbr_list if x[1] == chosen_mbr]
# chosen_tagged_mbr = canddiate_tagged_mbr_list[0]
# chosen_child, chosen_mbr = chosen_tagged_mbr
# return chosen_child
def resolveEnlargementTie(self, entries, entry):
    """Among equally-enlarging entries, keep the one(s) whose enlarged MBR
    ends up with the smallest total area."""
    target_mbr = entry.getMBR()
    tagged_areas = []
    for candidate in entries:
        enlarged = MBR.getEnlargedMBR(candidate.getMBR(), target_mbr)
        tagged_areas.append((enlarged.getArea(), candidate))
    smallest_area = min(area for area, _ in tagged_areas)
    return [candidate for area, candidate in tagged_areas if area == smallest_area]
# we assume that >= 2 entries are provided
# we take special precautions to make the two returned entries be different
@staticmethod
def linearPickSeeds(entries):
    """Guttman's linear pick-seeds: choose two entries whose rectangles are
    most separated (normalized by the spread on each axis) to seed a split.

    Assumes >= 2 entries; the second pick on each axis is explicitly filtered
    to differ from the first so the two returned entries are distinct.
    """
    mbr_list = [x.getMBR() for x in entries]
    # largest dead space along any dimension
    upper_left_points = [x.getUpperLeft() for x in mbr_list]
    lower_right_points = [x.getLowerRight() for x in mbr_list]
    points = upper_left_points + lower_right_points
    x_values = [x[0] for x in points]
    y_values = [x[1] for x in points]
    min_x = min(x_values)
    max_x = max(x_values)
    min_y = min(y_values)
    max_y = max(y_values)
    # Overall spread on each axis, used to normalize the separations below.
    x_size = max_x - min_x
    y_size = max_y - min_y
    x_values_upper_left = [x[0] for x in upper_left_points]
    y_values_upper_left = [x[1] for x in upper_left_points]
    x_values_lower_right = [x[0] for x in lower_right_points]
    y_values_lower_right = [x[1] for x in lower_right_points]
    highest_low_side_x = max(x_values_upper_left)
    lowest_high_side_x = min(x_values_lower_right)
    highest_low_side_y = max(y_values_upper_left)
    lowest_high_side_y = min(y_values_lower_right)
    # Separation can be negative when rectangles overlap on an axis.
    x_separation = highest_low_side_x - lowest_high_side_x
    y_separation = highest_low_side_y - lowest_high_side_y
    # +1 in the denominator guards against a zero spread (all points colinear on the axis).
    normalized_x_separation = x_separation / (1.0 * x_size + 1)
    normalized_y_separation = y_separation / (1.0 * y_size + 1)
    x_responsible_mbr_candidates1 = [x for x in mbr_list if x.getUpperLeft()[0] == highest_low_side_x]
    chosen_x_responsible_mbr_candidate1 = x_responsible_mbr_candidates1[0]
    x_responsible_mbr_candidates2 = [x for x in mbr_list if x.getLowerRight()[0] == lowest_high_side_x]
    # NOTE(review): if the only MBR attaining this extreme is the first pick,
    # the winnowed list is empty and the [0] below raises IndexError — confirm
    # callers guarantee this cannot happen.
    winnowed_x_responsible_mbr_candidates2 = [x for x in x_responsible_mbr_candidates2 if x != chosen_x_responsible_mbr_candidate1]
    chosen_x_responsible_mbr_candidate2 = winnowed_x_responsible_mbr_candidates2[0]
    y_responsible_mbr_candidates1 = [x for x in mbr_list if x.getUpperLeft()[1] == highest_low_side_y]
    chosen_y_responsible_mbr_candidate1 = y_responsible_mbr_candidates1[0]
    y_responsible_mbr_candidates2 = [x for x in mbr_list if x.getLowerRight()[1] == lowest_high_side_y]
    winnowed_y_responsible_mbr_candidates2 = [x for x in y_responsible_mbr_candidates2 if x != chosen_y_responsible_mbr_candidate1]
    chosen_y_responsible_mbr_candidate2 = winnowed_y_responsible_mbr_candidates2[0]
    # Map the winning MBRs back to their entries.
    chosen_x_responsible_entry_candidates1 = [x for x in entries if x.getMBR() == chosen_x_responsible_mbr_candidate1]
    chosen_x_responsible_entry_candidates2 = [x for x in entries if x.getMBR() == chosen_x_responsible_mbr_candidate2]
    chosen_y_responsible_entry_candidates1 = [x for x in entries if x.getMBR() == chosen_y_responsible_mbr_candidate1]
    chosen_y_responsible_entry_candidates2 = [x for x in entries if x.getMBR() == chosen_y_responsible_mbr_candidate2]
    chosen_x_entry1 = chosen_x_responsible_entry_candidates1[0]
    chosen_x_entry2 = chosen_x_responsible_entry_candidates2[0]
    chosen_y_entry1 = chosen_y_responsible_entry_candidates1[0]
    chosen_y_entry2 = chosen_y_responsible_entry_candidates2[0]
    # The two branches are exhaustive: >= covers the tie, so one always fires.
    if normalized_y_separation >= normalized_x_separation:
        return (chosen_y_entry1, chosen_y_entry2)
    elif normalized_x_separation > normalized_y_separation:
        # there was an error here
        return (chosen_x_entry1, chosen_x_entry2)
# choose non-traditional leaf
def chooseLeaf(self, entry):
    """Pick the node that should receive entry, starting the descent at the root."""
    return self.chooseLeafHelper(entry, self.getRootEntry().getChild())
def chooseLeafHelper(self, entry, node):
    """Descend from node toward the subtree needing least MBR enlargement.

    Returns the leaf's parent (the "non-traditional leaf") unless the leaf
    is the root itself, in which case the root is returned.
    """
    if node.isLeafNode() == True:
        if node == self.getRootEntry().getChild():
            return node
        else:
            return node.getParent()
    else:
        entries = node.getEntries()
        candidate_entries = self.chooseEntriesWithMinimalAreaEnlargement(entries, entry)
        if len(candidate_entries) != 1:
            # resolve a tie: prefer the smallest resulting area
            candidate_entries = self.resolveEnlargementTie(candidate_entries, entry)
        # If still tied, the first candidate wins arbitrarily.
        chosen_entry = candidate_entries[0]
        chosen_child = chosen_entry.getChild()
        return self.chooseLeafHelper(entry, chosen_child)
@staticmethod
def quadraticPickSeeds(entries):
    """Guttman's quadratic pick-seeds: return the pair of entries that wastes
    the most "dead" area when boxed together.

    Bug fixed: the result was read from the loop variable ``tagged_pair``
    (i.e. whichever pair the nested loops happened to visit last) instead of
    from ``chosen_tagged_pair``, so the max-dead-area winner was discarded.
    """
    tagged_pairs = []
    for entry1 in entries:
        for entry2 in entries:
            if entry1 == entry2:
                continue
            curr_pair = (entry1, entry2)
            combined_mbr = CompositeMBR.makeMBR([entry1.getMBR(), entry2.getMBR()])
            # Dead area: covered by the combined box but by neither member.
            dead_area = combined_mbr.getArea() - (entry1.getMBR().getArea() + entry2.getMBR().getArea())
            tagged_pairs.append((dead_area, curr_pair))
    dead_area_values = [x[0] for x in tagged_pairs]
    max_dead_area_value = max(dead_area_values)
    candidate_tagged_pairs = [x for x in tagged_pairs if x[0] == max_dead_area_value]
    chosen_tagged_pair = candidate_tagged_pairs[0]
    return chosen_tagged_pair[1]
# return a tuple (x_distributions1, x_distributions2, y_distributions1, y_distributions2)
# where each element is a list of distributions, with each distribution being a pair
# (left_entries, right_entries)
def insert(self, entry):
    """Insert entry: descend to a leaf, split when full, and propagate
    MBR adjustments (and any split) up to the root, growing the tree by one
    level if the root itself must split."""
    # retrieve a non-traditional leaf node
    leaf_node = self.chooseLeaf(entry)
    adjust_result = None
    """
    if leaf_node == self.getRootEntry().getChild() and self.getRootEntry().getMBR().getUpperLeft() == None:
        self.setRootEntry(entry)
        return
    """
    if leaf_node.isFull() == False:
        # do not have to split node
        leaf_node.addEntry(entry)
        # this is necessary
        entry.getChild().setParent(leaf_node)
        # call adjustTree to resize bounding boxes of ancestors and propagate splits
        adjust_result = RTree.adjustTree(self, leaf_node, [entry], False, True)
    else:
        # split node
        # print "leaf node:", leaf_node
        split_result = self.splitNode(leaf_node, entry)
        # l and ll are internal nodes
        l, ll, e, ee = split_result
        # print l, ll, leaf_node
        # we might be able to handle propagating the first split manually,
        # and we would continue as if we currently have no split to propagate
        # e and ee are for entries for the two children that result from split of l
        adjust_result = RTree.adjustTree(self, l, [e, ee], True, True)
    # check result of tree-adjust to see whether we plan on splitting root
    # in case the root has to be split, create a new root
    # increase the height of the tree by one
    # grow tree taller
    ended_with_split2, resulting_entries_from_split = adjust_result
    # print "ended with split:", ended_with_split2
    # we ended adjust-tree by requiring a split of root
    if ended_with_split2 == True:
        # raise Exception()
        # resulting_entries_from_split takes on form (ended_with_split, [resulting_first_entry_from_split, resulting_second_entry_from_split?]) tuple
        e, ee = resulting_entries_from_split
        l = e.getChild()
        ll = ee.getChild()
        if (self.getRootEntry().getChild().getNumEntries() + 1) <= self.getRootEntry().getChild().getMaximumNumEntriesPerNode():
            # there is space at root
            self.getRootEntry().getChild().addEntry(ee)
            ll.setParent(self.getRootEntry().getChild())
        else:
            # NOTE(review): indentation reconstructed — the new-root creation is
            # taken to belong to this else branch (root had no spare capacity).
            split_result = self.splitNode(self.getRootEntry().getChild(), ee)
            l, ll, e, ee = split_result
            resulting_entries_from_split = [e, ee]
            next_root = RTreeNode(None, resulting_entries_from_split, False)
            l.setParent(next_root)
            ll.setParent(next_root)
            self.getRootEntry().setChild(next_root)
# split a node associated with many entries while | |
directions.
verticalFlag = self.splitVerticalFlag = not self.splitVerticalFlag
orientation = g.choose(verticalFlag,"vertical","horizontal")
g.app.config.setWindowPref("initial_splitter_orientation",orientation)
# Reconfigure the bars.
bar1.place_forget()
bar2.place_forget()
self.configureBar(bar1,verticalFlag)
self.configureBar(bar2,not verticalFlag)
# Make the initial placements again.
self.placeSplitter(bar1,split1Pane1,split1Pane2,verticalFlag)
self.placeSplitter(bar2,split2Pane1,split2Pane2,not verticalFlag)
# Adjust the log and body panes to give more room around the bars.
self.reconfigurePanes()
# Redraw with an appropriate ratio.
vflag,ratio,secondary_ratio = frame.initialRatios()
self.resizePanesToRatio(ratio,secondary_ratio)
#@+node:ekr.20090126093408.243: *5* Help Menu...
#@+node:ekr.20090126093408.244: *6* leoHelp
def leoHelp (self,event=None):
    """Open Leo's help file (sbooks.chm) or offer to download it.

    NOTE(review): the early ``return`` below makes everything after it dead
    code — apparently disabled on purpose ("not ready yet").
    """
    g.es("leoHelp not ready yet")
    return ##
    file = os.path.join(g.app.loadDir,"..","doc","sbooks.chm")
    file = g.toUnicode(file)
    if os.path.exists(file):
        os.startfile(file) # Windows-only API.
    else:
        # NOTE(review): `c` is not bound in this scope — this branch would
        # raise NameError if ever reached; presumably self.c was intended.
        answer = g.app.gui.runAskYesNoDialog(c,
            "Download Tutorial?",
            "Download tutorial (sbooks.chm) from SourceForge?")
        if answer == "yes":
            try:
                if 0: # Download directly. (showProgressBar needs a lot of work)
                    url = "http://umn.dl.sourceforge.net/sourceforge/leo/sbooks.chm"
                    import urllib
                    self.scale = None
                    urllib.urlretrieve(url,file,self.showProgressBar)
                    if self.scale:
                        self.scale.destroy()
                        self.scale = None
                else:
                    url = "http://prdownloads.sourceforge.net/leo/sbooks.chm?download"
                    import webbrowser
                    os.chdir(g.app.loadDir)
                    webbrowser.open_new(url)
            except:
                g.es("exception dowloading sbooks.chm")
                g.es_exception()
#@+node:ekr.20090126093408.245: *7* showProgressBar
def showProgressBar (self,count,size,total):
    """urllib.urlretrieve reporthook: lazily create the progress Scale on the
    first call, then advance it to count*size bytes.

    Fix: compare to None with ``is`` rather than ``==`` (PEP 8; ``==`` can be
    hijacked by a rich comparison). Behavior is otherwise unchanged.
    """
    # g.trace("count,size,total:" + count + "," + size + "," + total)
    if self.scale is None:
        #@+<< create the scale widget >>
        #@+node:ekr.20090126093408.246: *8* << create the scale widget >>
        top = Tk.Toplevel()
        top.title("Download progress")
        self.scale = scale = Tk.Scale(top,state="normal",orient="horizontal",from_=0,to=total)
        scale.pack()
        top.lift()
        #@-<< create the scale widget >>
    self.scale.set(count*size)
    self.scale.update_idletasks()
#@+node:ekr.20090126093408.247: *4* updateAllMenus (wxFrame)
def updateAllMenus(self,event):
    """Called whenever any menu is pulled down."""
    # We define this routine to strip off the event param.
    self.menu.updateAllMenus()
#@-others
#@+node:ekr.20090126093408.248: *3* wxLeoIconBar class
class wxLeoIconBar:
    '''An adaptor class that uses a wx.ToolBar for Leo's icon area.'''
    #@+others
    #@+node:ekr.20090126093408.249: *4* __init__ wxLeoIconBar
    def __init__ (self,c,parentFrame): # wxLeoIconBar
        """Create the toolbar on parentFrame and publish it as c.frame.iconFrame."""
        self.c = c
        self.widgets = [] # Buttons added via add(); cleared by clear().
        self.toolbar = toolbar = self.iconFrame = parentFrame.CreateToolBar() # A wxFrame method
        # self.toolbar.SetToolPacking(5)
        # Insert a spacer to increase the height of the bar.
        if wx.Platform == "__WXMSW__":
            tsize = (32,32)
            path = os.path.join(g.app.loadDir,"..","Icons","LeoApp.ico")
            bitmap = wx.Bitmap(path,wx.BITMAP_TYPE_ICO)
            toolbar.SetToolBitmapSize(tsize)
            toolbar.AddLabelTool(-1,'',bitmap)
        # Set the official ivar.
        c.frame.iconFrame = self.iconFrame
    #@+node:ekr.20090126093408.250: *4* add
    def add(self,*args,**keys):
        """Add a button containing text or a picture to the icon bar.
        Pictures take precedence over text"""
        toolbar = self.toolbar
        text = keys.get('text') or ''
        bg = keys.get('bg')
        command = keys.get('command')
        # Create the button with a unique id.
        id = wx.NewId()
        b = wx.Button(toolbar,id,label=text,size=wx.Size(-1,24))
        b.SetBackgroundColour('leo blue')
        self.widgets.append(b)
        # Right-clicks delete the button.
        def onRClickCallback(event,self=self,b=b):
            self.deleteButton(b)
        b.Bind(wx.EVT_RIGHT_UP,onRClickCallback)
        self.setCommandForButton(b,command)
        tool = toolbar.AddControl(b)
        toolbar.Realize()
        return b
    #@+node:ekr.20090126093408.252: *4* clear
    def clear(self):
        """Destroy all the widgets in the icon bar"""
        for w in self.widgets:
            self.toolbar.RemoveTool(w.GetId())
        self.widgets = []
    #@+node:ekr.20090126093408.253: *4* deleteButton
    def deleteButton (self,w):
        # Remove a single button from the toolbar by its wx id.
        self.toolbar.RemoveTool(w.GetId())
    #@+node:ekr.20090126093408.254: *4* getFrame
    def getFrame (self):
        return self.iconFrame
    #@+node:ekr.20090126093408.255: *4* setCommandForButton
    def setCommandForButton(self,b,command):
        """Bind command (if any) to clicks on button b, flushing Leo updates after."""
        c = self.c
        if command:
            def onClickCallback(event=None,c=c,command=command):
                command(event=event)
                c.outerUpdate()
            self.toolbar.Bind(wx.EVT_BUTTON,onClickCallback,b)
    #@+node:ekr.20090126093408.256: *4* show/hide (do nothings)
    def pack (self): pass
    def unpack (self): pass
    show = pack
    hide = unpack
    #@-others
#@+node:ekr.20090126093408.257: *3* wxLeoLog class (leoLog)
class wxLeoLog (leoFrame.leoLog):
"""The base class for the log pane in Leo windows."""
#@+others
#@+node:ekr.20090126093408.258: *4* leoLog.__init__
def __init__ (self,c,nb):
    """Init the log pane around a wx notebook and build the initial tabs."""
    self.c = c
    self.nb = nb # wx notebook hosting one page per log tab.
    self.isNull = False
    self.logCtrl = None # Text control of the active tab (None for non-text tabs).
    self.newlines = 0
    self.frameDict = {} # Keys are log names, values are None or wx.Frames.
    self.textDict = {} # Keys are log names, values are None or Text controls.
    self.createInitialTabs()
    self.setFontFromConfig()
#@+node:ekr.20090126093408.259: *5* leoLog.createInitialTabs
def createInitialTabs (self):
    """Create the Log, Find and Spell tabs and leave Log selected."""
    c = self.c ; nb = self.nb
    # Create the Log tab.
    self.logCtrl = self.selectTab('Log')
    # Create the Find tab.
    win = self.createTab('Find',createText=False)
    color = name2color('leo blue')
    win.SetBackgroundColour(color)
    self.findTabHandler = g.app.gui.createFindTab(c,parentFrame=win)
    # Create the Spell tab.
    win = self.createTab('Spell',createText=False)
    color = name2color('leo pink')
    win.SetBackgroundColour(color)
    self.spellTabHandler = g.app.gui.createSpellTab(c,parentFrame=win)
    # Make sure the Log is selected.
    self.selectTab('Log')
#@+node:ekr.20090126093408.260: *5* leoLog.setTabBindings
def setTabBindings (self,tag=None):
    """No-op stub in the wx gui."""
    pass # g.trace('wxLeoLog')
def bind (self,*args,**keys):
    """No-op stub in the wx gui."""
    # No need to do this: we can set the master binding by hand.
    pass # g.trace('wxLeoLog',args,keys)
#@+node:ekr.20090126093408.261: *4* Config
#@+node:ekr.20090126093408.262: *5* leoLog.configure
def configure (self,*args,**keys):
    """Debug stub: only traces its arguments."""
    g.trace(args,keys)
#@+node:ekr.20090126093408.263: *5* leoLog.configureBorder
def configureBorder(self,border):
    """Debug stub: only traces the requested border."""
    g.trace(border)
#@+node:ekr.20090126093408.264: *5* leoLog.setLogFontFromConfig
def setFontFromConfig (self):
    """No-op stub in the wx gui."""
    pass # g.trace()
#@+node:ekr.20090126093408.265: *4* wxLog.put & putnl
# All output to the log stream eventually comes here.
def put (self,s,color=None,tabName=None):
    """Append s to the active log tab, selecting tabName first if given.

    NOTE(review): the color argument is accepted but unused here.
    """
    if tabName: self.selectTab(tabName)
    if self.logCtrl:
        self.logCtrl.appendText(s)
def putnl (self,tabName=None):
    """Append a newline to the active log tab and scroll it into view."""
    if tabName: self.selectTab(tabName)
    if self.logCtrl:
        self.logCtrl.appendText('\n')
        self.logCtrl.scrollLines(1)
#@+node:ekr.20090126093408.266: *4* Tab (wxLog)
#@+node:ekr.20090126093408.267: *5* createTab
def createTab (self,tabName,createText=True,wrap='none'): # wxLog.
    """Create a notebook page named tabName; add a text widget unless
    createText is False. Returns the text widget or the bare panel."""
    nb = self.nb
    # g.trace(tabName)
    if createText:
        win = logFrame = wx.Panel(nb)
        nb.AddPage(win,tabName)
        w = plainTextWidget(self.c,win,
            name='text tab:%s' % tabName)
        w.setBackgroundColor(name2color('leo blue'))
        sizer = wx.BoxSizer(wx.HORIZONTAL)
        sizer.Add(w.widget,1,wx.EXPAND)
        win.SetSizer(sizer)
        sizer.Fit(win)
        self.textDict [tabName] = w
        self.frameDict [tabName] = win
        return w
    else:
        # Non-text tab: the caller populates the panel (e.g. Find/Spell).
        win = wx.Panel(nb,name='tab:%s' % tabName)
        self.textDict [tabName] = None
        self.frameDict [tabName] = win
        nb.AddPage(win,tabName)
        return win
#@+node:ekr.20090126093408.268: *5* selectTab
def selectTab (self,tabName,createText=True,wrap='none'):
    '''Create the tab if necessary and make it active.'''
    tabFrame = self.frameDict.get(tabName)
    if not tabFrame:
        self.createTab(tabName,createText=createText)
    # Update the status vars.
    self.tabName = tabName
    self.logCtrl = self.textDict.get(tabName)
    self.tabFrame = self.frameDict.get(tabName)
    nb = self.nb
    # Find the notebook page matching the tab name and select it.
    for i in range(nb.GetPageCount()):
        s = nb.GetPageText(i)
        if s == tabName:
            nb.SetSelection(i)
            assert nb.GetPage(i) == self.tabFrame
            return self.tabFrame
    # NOTE(review): falls through (returns None) if no page matches tabName.
#@+node:ekr.20090126093408.269: *5* clearTab
def clearTab (self,tabName,wrap='none'):
    """Select tabName and empty its text widget (if it has one)."""
    self.selectTab(tabName,wrap=wrap)
    w = self.logCtrl
    w and w.setAllText('')
#@+node:ekr.20090126093408.270: *5* deleteTab
def deleteTab (self,tabName):
    """Delete tabName's page unless it is a permanent tab, then reselect Log."""
    c = self.c ; nb = self.nb
    # Log, Find and Spell are permanent and may never be deleted.
    if tabName not in ('Log','Find','Spell'):
        for i in range(nb.GetPageCount()):
            s = nb.GetPageText(i)
            if s == tabName:
                nb.DeletePage(i)
                self.textDict [tabName] = None
                self.frameDict [tabName] = False # A bit of a kludge.
                self.tabName = None
                break
    self.selectTab('Log')
    c.invalidateFocus()
    c.bodyWantsFocus()
#@+node:ekr.20090126093408.271: *5* getSelectedTab
def getSelectedTab (self):
    """Return the name of the currently selected tab."""
    return self.tabName
#@+node:ekr.20090126093408.272: *5* hideTab
def hideTab (self,tabName):
    """"Hide" a tab by simply reselecting the Log tab."""
    self.selectTab('Log')
#@+node:ekr.20090126093408.273: *5* numberOfVisibleTabs
def numberOfVisibleTabs (self):
    """Return the number of pages in the log notebook."""
    return self.nb.GetPageCount()
#@+node:ekr.20090126093408.274: *5* Not used yet
# NOTE(review): everything under this `if 0:` is deliberately disabled dead
# code -- Tk log-pane helpers kept as a reference while porting to wx.
if 0:
    #@+others
    #@+node:ekr.20090126093408.275: *6* cycleTabFocus
    def cycleTabFocus (self,event=None,stop_w = None):
        '''Cycle keyboard focus between the tabs in the log pane.'''
        c = self.c ; d = self.frameDict # Keys are page names. Values are Tk.Frames.
        w = d.get(self.tabName)
        # g.trace(self.tabName,w)
        values = d.values()
        if self.numberOfVisibleTabs() > 1:
            # Advance to the tab after the current one, wrapping to the first.
            # NOTE(review): relies on Python 2 dicts where keys() and values()
            # return parallel lists -- confirm before reviving this code.
            i = i2 = values.index(w) + 1
            if i == len(values): i = 0
            tabName = d.keys()[i]
            self.selectTab(tabName)
            return
    #@+node:ekr.20090126093408.276: *6* lower/raiseTab
    def lowerTab (self,tabName):
        '''Dim the tab button for tabName, then give focus back to the body.'''
        if tabName:
            b = self.nb.tab(tabName) # b is a Tk.Button.
            b.config(bg='grey80')
        self.c.invalidateFocus()
        self.c.bodyWantsFocus()
    def raiseTab (self,tabName):
        '''Highlight the tab button for tabName, then give focus back to the body.'''
        if tabName:
            b = self.nb.tab(tabName) # b is a Tk.Button.
            b.config(bg='LightSteelBlue1')
        self.c.invalidateFocus()
        self.c.bodyWantsFocus()
    #@+node:ekr.20090126093408.277: *6* renameTab
    def renameTab (self,oldName,newName):
        '''Change the text shown on an existing tab's label.'''
        label = self.nb.tab(oldName)
        label.configure(text=newName)
    #@+node:ekr.20090126093408.278: *6* setTabBindings
    def setTabBindings (self,tabName):
        '''Route events in the tab's text widget to the master key/click
        handlers and bind clicks on the tab label to the tab-menu callbacks.'''
        c = self.c ; k = c.k
        tab = self.nb.tab(tabName)
        w = self.textDict.get(tabName)
        # Send all event in the text area to the master handlers.
        for kind,handler in (
            ('<Key>', k.masterKeyHandler),
            ('<Button-1>', k.masterClickHandler),
            ('<Button-3>', k.masterClick3Handler),
        ):
            w.bind(kind,handler)
        # Clicks in the tab area are harmless: use the old code.
        # Default arguments bind the current values at definition time.
        def tabMenuRightClickCallback(event,menu=self.menu):
            return self.onRightClick(event,menu)
        def tabMenuClickCallback(event,tabName=tabName):
            return self.onClick(event,tabName)
        tab.bind('<Button-1>',tabMenuClickCallback)
        tab.bind('<Button-3>',tabMenuRightClickCallback)
        k.completeAllBindingsForWidget(w)
    #@+node:ekr.20090126093408.279: *6* Tab menu callbacks & helpers (not ready yet)
    if 0:
        #@+others
        #@+node:ekr.20090126093408.280: *7* onRightClick & onClick
        def onRightClick (self,event,menu):
            '''Post the tab context menu at the mouse position.'''
            c = self.c
            menu.post(event.x_root,event.y_root)
        def onClick (self,event,tabName):
            '''Select the clicked tab.'''
            self.selectTab(tabName)
        #@+node:ekr.20090126093408.281: *7* newTabFromMenu
        def newTabFromMenu (self,tabName='Log'):
            '''Select tabName, then prompt the user for a new tab's name.'''
            self.selectTab(tabName)
            # This is called by getTabName.
            def selectTabCallback (newName):
                return self.selectTab(newName)
            self.getTabName(selectTabCallback)
        #@+node:ekr.20090126093408.282: *7* renameTabFromMenu
        def renameTabFromMenu (self,tabName):
            '''Prompt for a new name and rename tabName; the built-in Log and
            Completions tabs may not be renamed.'''
            if tabName in ('Log','Completions'):
                g.es('can not rename %s tab' % (tabName),color='blue')
            else:
                def renameTabCallback (newName):
                    return self.renameTab(tabName,newName)
                self.getTabName(renameTabCallback)
        #@+node:ekr.20090126093408.283: *7* getTabName
        def getTabName (self,exitCallback):
            '''Overlay a small name-entry form on the notebook and call
            exitCallback(name) when the user confirms a non-empty name.'''
            canvas = self.nb.component('hull')
            # Overlay what is there!
            f = Tk.Frame(canvas)
            f.pack(side='top',fill='both',expand=1)
            row1 = Tk.Frame(f)
            row1.pack(side='top',expand=0,fill='x',pady=10)
            row2 = Tk.Frame(f)
            row2.pack(side='top',expand=0,fill='x')
            Tk.Label(row1,text='Tab name').pack(side='left')
            e = Tk.Entry(row1,background='white')
            e.pack(side='left')
            # Both callbacks dismiss the overlay; only Ok forwards the name.
            def getNameCallback (event=None):
                s = e.get().strip()
                f.pack_forget()
                if s: exitCallback(s)
            def closeTabNameCallback (event=None):
                f.pack_forget()
            b = Tk.Button(row2,text='Ok',width=6,command=getNameCallback)
            b.pack(side='left',padx=10)
            b = Tk.Button(row2,text='Cancel',width=6,command=closeTabNameCallback)
            b.pack(side='left')
            e.focus_force()
            e.bind('<Return>',getNameCallback)
        #@-others
    #@-others
#@-others
#@+node:ekr.20090126093408.284: *3* wxLeoMenu class (leoMenu)
class wxLeoMenu (leoMenu.leoMenu):
#@+others
#@+node:ekr.20090126093408.285: *4* wxLeoMenu.__init__
def __init__ (self,frame):
    '''Ctor for wxLeoMenu: init the leoMenu base class, then set the ivars.'''
    # Base-class init runs first so the assignments below take precedence.
    leoMenu.leoMenu.__init__(self,frame)
    self.frame = frame
    self.c = frame.c
    # Keys are menus, values are list of tuples used to create wx accelerator tables.
    self.acceleratorDict = {}
    self.menuDict = {}
#@+node:ekr.20090126093408.286: *4* Accelerators
#@+at
# Accelerators are NOT SHOWN when the user opens the menu with the mouse!
# This is a wx bug.
#@+node:ekr.20090126093408.287: *5* createAccelLabel
def createAccelLabel (self,keys):
'''Create the menu label by inserting '&' at the underline spot.'''
label = keys.get('label')
underline = keys.get('underline')
accel = keys.get('accelerator')
ch | |
<reponame>Joyoe/Magisk-nosbin_magisk-nohide
# Author: <NAME> <<EMAIL>>
# Author: <NAME> <<EMAIL>>
# Author: <NAME> <<EMAIL>>
import errno
import selinux
import setools
import glob
import sepolgen.defaults as defaults
import sepolgen.interfaces as interfaces
import sys
import os
import re
import gzip
PROGNAME = "policycoreutils"  # gettext translation domain for this module
# Install _() as a builtin translator.  Python 2 gettext needs unicode=True
# to return unicode strings; if installation fails for any reason, fall back
# to an identity translator so the module still imports.
try:
    import gettext
    kwargs = {}
    if sys.version_info < (3,):
        kwargs['unicode'] = True
    gettext.install(PROGNAME,
                    localedir="/usr/share/locale",
                    codeset='utf-8',
                    **kwargs)
except:
    try:
        import builtins
        builtins.__dict__['_'] = str
    except ImportError:
        # Python 2: builtins lives in __builtin__ and strings are unicode
        import __builtin__
        __builtin__.__dict__['_'] = unicode
# Query kinds accepted by info()
TYPE = 1
ROLE = 2
ATTRIBUTE = 3
PORT = 4
USER = 5
BOOLEAN = 6
TCLASS = 7

# Rule kinds and seinfo dictionary keys accepted by search()
ALLOW = 'allow'
AUDITALLOW = 'auditallow'
NEVERALLOW = 'neverallow'
DONTAUDIT = 'dontaudit'
SOURCE = 'source'
TARGET = 'target'
PERMS = 'permlist'
CLASS = 'class'
TRANSITION = 'transition'
ROLE_ALLOW = 'role_allow'

# Autofill for adding files *************************
# Default directory -> SELinux file type used when adding file contexts.
DEFAULT_DIRS = {}
DEFAULT_DIRS["/etc"] = "etc_t"
DEFAULT_DIRS["/tmp"] = "tmp_t"
DEFAULT_DIRS["/usr/lib/systemd/system"] = "unit_file_t"
DEFAULT_DIRS["/lib/systemd/system"] = "unit_file_t"
DEFAULT_DIRS["/etc/systemd/system"] = "unit_file_t"
DEFAULT_DIRS["/var/cache"] = "var_cache_t"
DEFAULT_DIRS["/var/lib"] = "var_lib_t"
DEFAULT_DIRS["/var/log"] = "log_t"
DEFAULT_DIRS["/var/run"] = "var_run_t"
DEFAULT_DIRS["/run"] = "var_run_t"
DEFAULT_DIRS["/run/lock"] = "var_lock_t"
DEFAULT_DIRS["/var/run/lock"] = "var_lock_t"
DEFAULT_DIRS["/var/spool"] = "var_spool_t"
DEFAULT_DIRS["/var/www"] = "content_t"

# One-letter file-kind code -> translated human-readable description
file_type_str = {}
file_type_str["a"] = _("all files")
file_type_str["f"] = _("regular file")
file_type_str["d"] = _("directory")
file_type_str["c"] = _("character device")
file_type_str["b"] = _("block device")
file_type_str["s"] = _("socket file")
file_type_str["l"] = _("symbolic link")
file_type_str["p"] = _("named pipe")

# file-context flag (as written in file_contexts) -> one-letter kind code
trans_file_type_str = {}
trans_file_type_str[""] = "a"
trans_file_type_str["--"] = "f"
trans_file_type_str["-d"] = "d"
trans_file_type_str["-c"] = "c"
trans_file_type_str["-b"] = "b"
trans_file_type_str["-s"] = "s"
trans_file_type_str["-l"] = "l"
trans_file_type_str["-p"] = "p"

# the setools policy handle
_pol = None

# cache the lookup results
# All of the following are lazily-populated module-level caches; policy()
# resets (some of) them when a new policy file is loaded.
file_equiv_modified = None
file_equiv = None
local_files = None
fcdict = None
methods = []
all_types = None
all_types_info = None
user_types = None
role_allows = None
portrecs = None
portrecsbynum = None
all_domains = None
roles = None
selinux_user_list = None
login_mappings = None
file_types = None
port_types = None
bools = None
all_attributes = None
booleans = None
booleans_dict = None
all_allow_rules = None
all_transitions = None
def policy_sortkey(policy_path):
    '''Sort key for binary policy paths like .../policy/policy.31.

    Returns (version, path); paths whose extension is not an integer sort
    first with version 0.
    '''
    version_str = policy_path.rsplit('/policy.', 1)[1]
    try:
        version = int(version_str)
    except ValueError:
        # Fallback with sorting on the full path
        return 0, policy_path
    return version, policy_path
def get_installed_policy(root="/"):
    '''Return the path of the newest installed binary policy under *root*.

    Raises ValueError when no policy file can be found.
    '''
    try:
        base = root + selinux.selinux_binary_policy_path()
        candidates = sorted(glob.glob("%s.*" % base), key=policy_sortkey)
        return candidates[-1]
    except:  # any failure (no matches, bad path) is treated as "not installed"
        pass
    raise ValueError(_("No SELinux Policy installed"))
def get_store_policy(store):
    """Get the path to the policy file located in the given store name"""
    pattern = "%s%s/policy/policy.*" % (selinux.selinux_path(), store)
    policies = glob.glob(pattern)
    if not policies:
        return None
    # Return the policy with the higher version number; paths are unique so
    # max() with the same key picks the same file as sort()[-1].
    return max(policies, key=policy_sortkey)
def policy(policy_file):
    '''Load *policy_file* into the module-level setools handle (_pol).

    Resets the module-level query caches so they are recomputed lazily
    against the new policy.  Raises ValueError if the file cannot be read.
    '''
    global all_domains
    global all_attributes
    global bools
    global all_types
    global role_allows
    global users
    global roles
    global file_types
    global port_types
    # invalidate all cached query results
    all_domains = None
    all_attributes = None
    bools = None
    all_types = None
    role_allows = None
    users = None
    roles = None
    file_types = None
    port_types = None
    global _pol

    try:
        _pol = setools.SELinuxPolicy(policy_file)
    except:
        raise ValueError(_("Failed to read %s policy file") % policy_file)
def load_store_policy(store):
    '''Load the policy belonging to the named store.

    Returns None (and loads nothing) when the store has no policy file.
    '''
    store_policy = get_store_policy(store)
    if store_policy:
        policy(store_policy)
    else:
        return None
# Load the system's installed policy at import time.  A missing policy is
# tolerated when SELinux is disabled; otherwise the error is re-raised.
try:
    policy_file = get_installed_policy()
    policy(policy_file)
except ValueError as e:
    if selinux.is_selinux_enabled() == 1:
        raise e
def info(setype, name=None):
    '''Query the loaded policy (_pol) for objects of kind *setype*.

    setype is one of TYPE, ROLE, ATTRIBUTE, PORT, USER, BOOLEAN, TCLASS;
    *name* optionally restricts the query to one object (for PORT it is a
    port number or "low-high" range string).  Returns a generator of dicts
    describing each match; raises ValueError for an unknown setype.
    '''
    if setype == TYPE:
        q = setools.TypeQuery(_pol)
        q.name = name
        results = list(q.results())

        if name and len(results) < 1:
            # type not found, try alias
            q.name = None
            q.alias = name
            results = list(q.results())

        return ({
            'aliases': list(map(str, x.aliases())),
            'name': str(x),
            'permissive': bool(x.ispermissive),
            'attributes': list(map(str, x.attributes()))
        } for x in results)
    elif setype == ROLE:
        q = setools.RoleQuery(_pol)
        if name:
            q.name = name

        return ({
            'name': str(x),
            'roles': list(map(str, x.expand())),
            'types': list(map(str, x.types())),
        } for x in q.results())
    elif setype == ATTRIBUTE:
        q = setools.TypeAttributeQuery(_pol)
        if name:
            q.name = name

        return ({
            'name': str(x),
            'types': list(map(str, x.expand())),
        } for x in q.results())
    elif setype == PORT:
        q = setools.PortconQuery(_pol)
        if name:
            # "low-high" range or single port
            ports = [int(i) for i in name.split("-")]

            if len(ports) == 2:
                q.ports = ports
            elif len(ports) == 1:
                q.ports = (ports[0], ports[0])

        if _pol.mls:
            # MLS policies additionally expose the context's range
            return ({
                'high': x.ports.high,
                'protocol': str(x.protocol),
                'range': str(x.context.range_),
                'type': str(x.context.type_),
                'low': x.ports.low,
            } for x in q.results())
        return ({
            'high': x.ports.high,
            'protocol': str(x.protocol),
            'type': str(x.context.type_),
            'low': x.ports.low,
        } for x in q.results())
    elif setype == USER:
        q = setools.UserQuery(_pol)
        if name:
            q.name = name

        if _pol.mls:
            return ({
                'range': str(x.mls_range),
                'name': str(x),
                'roles': list(map(str, x.roles)),
                'level': str(x.mls_level),
            } for x in q.results())
        return ({
            'name': str(x),
            'roles': list(map(str, x.roles)),
        } for x in q.results())
    elif setype == BOOLEAN:
        q = setools.BoolQuery(_pol)
        if name:
            q.name = name

        return ({
            'name': str(x),
            'state': x.state,
        } for x in q.results())
    elif setype == TCLASS:
        q = setools.ObjClassQuery(_pol)
        if name:
            q.name = name

        return ({
            'name': str(x),
            'permlist': list(x.perms),
        } for x in q.results())
    else:
        raise ValueError("Invalid type")
def _setools_rule_to_dict(rule):
    '''Convert a setools rule object into a plain dict.

    Always contains type/source/target/class/enabled; permlist, transtype,
    boolean and filename are added only when the rule kind provides them
    (probed via AttributeError rather than isinstance checks).
    '''
    d = {
        'type': str(rule.ruletype),
        'source': str(rule.source),
        'target': str(rule.target),
        'class': str(rule.tclass),
    }

    # Evaluate boolean expression associated with given rule (if there is any)
    try:
        # Get state of all booleans in the conditional expression
        boolstate = {}
        for boolean in rule.conditional.booleans:
            boolstate[str(boolean)] = boolean.state
        # evaluate if the rule is enabled
        enabled = rule.conditional.evaluate(**boolstate) == rule.conditional_block
    except AttributeError:
        # non-conditional rules are always enabled
        enabled = True

    d['enabled'] = enabled

    try:
        d['permlist'] = list(map(str, rule.perms))
    except AttributeError:
        pass

    try:
        d['transtype'] = str(rule.default)
    except AttributeError:
        pass

    try:
        d['boolean'] = [(str(rule.conditional), enabled)]
    except AttributeError:
        pass

    try:
        d['filename'] = rule.filename
    except AttributeError:
        pass

    return d
def search(types, seinfo=None):
    '''Search the loaded policy for rules.

    *types* is a list of rule kinds (ALLOW, AUDITALLOW, NEVERALLOW,
    DONTAUDIT, TRANSITION, ROLE_ALLOW); *seinfo* optionally filters by
    SOURCE/TARGET/CLASS/PERMS.  Returns a list of rule dicts (see
    _setools_rule_to_dict); raises ValueError for an unknown rule kind.
    '''
    if not seinfo:
        seinfo = {}
    valid_types = set([ALLOW, AUDITALLOW, NEVERALLOW, DONTAUDIT, TRANSITION, ROLE_ALLOW])
    for setype in types:
        if setype not in valid_types:
            raise ValueError("Type has to be in %s" % " ".join(valid_types))

    # extract the optional filters from seinfo
    source = None
    if SOURCE in seinfo:
        source = str(seinfo[SOURCE])

    target = None
    if TARGET in seinfo:
        target = str(seinfo[TARGET])

    tclass = None
    if CLASS in seinfo:
        tclass = str(seinfo[CLASS]).split(',')

    toret = []

    # type-enforcement rule kinds are queried together in one pass
    tertypes = []
    if ALLOW in types:
        tertypes.append(ALLOW)
    if NEVERALLOW in types:
        tertypes.append(NEVERALLOW)
    if AUDITALLOW in types:
        tertypes.append(AUDITALLOW)
    if DONTAUDIT in types:
        tertypes.append(DONTAUDIT)

    if len(tertypes) > 0:
        q = setools.TERuleQuery(_pol,
                                ruletype=tertypes,
                                source=source,
                                target=target,
                                tclass=tclass)

        if PERMS in seinfo:
            q.perms = seinfo[PERMS]

        toret += [_setools_rule_to_dict(x) for x in q.results()]

    if TRANSITION in types:
        rtypes = ['type_transition', 'type_change', 'type_member']
        q = setools.TERuleQuery(_pol,
                                ruletype=rtypes,
                                source=source,
                                target=target,
                                tclass=tclass)

        if PERMS in seinfo:
            q.perms = seinfo[PERMS]

        toret += [_setools_rule_to_dict(x) for x in q.results()]

    if ROLE_ALLOW in types:
        ratypes = ['allow']
        q = setools.RBACRuleQuery(_pol,
                                  ruletype=ratypes,
                                  source=source,
                                  target=target,
                                  tclass=tclass)

        for r in q.results():
            toret.append({'source': str(r.source),
                          'target': str(r.target)})

    return toret
def get_conditionals(src, dest, tclass, perm):
    '''Return the boolean conditions attached to allow rules that grant
    *src* (or any of its attributes) the permissions *perm* on *dest* (or
    any of its attributes).

    Result is a list of {'source': ..., 'boolean': [(name, state)]} dicts.
    NOTE(review): *tclass* is accepted but never used in the body -- confirm
    whether filtering by class was intended.
    '''
    tdict = {}
    tlist = []
    src_list = [src]
    dest_list = [dest]
    # add assigned attributes
    try:
        src_list += list(filter(lambda x: x['name'] == src, get_all_types_info()))[0]['attributes']
    except:
        pass
    try:
        dest_list += list(filter(lambda x: x['name'] == dest, get_all_types_info()))[0]['attributes']
    except:
        pass

    # conditional allow rules whose source/target match (directly or via an
    # attribute) and that grant every permission in perm
    allows = map(lambda y: y, filter(lambda x:
                 x['source'] in src_list and
                 x['target'] in dest_list and
                 set(perm).issubset(x[PERMS]) and
                 'boolean' in x,
                 get_all_allow_rules()))

    try:
        for i in allows:
            tdict.update({'source': i['source'], 'boolean': i['boolean']})
            if tdict not in tlist:
                tlist.append(tdict)
                tdict = {}
    except KeyError:
        return(tlist)

    return (tlist)
def get_conditionals_format_text(cond):
    '''Render a list of conditional-rule records as one human-readable line.

    *cond* is a list of dicts whose 'boolean' entry looks like
    [(name, state)]; the rule counts as allowed when any state is true.
    '''
    enabled = any(entry['boolean'][0][1] for entry in cond)
    settings = set("%s=%d" % (entry['boolean'][0][0], entry['boolean'][0][1])
                   for entry in cond)
    return _("-- Allowed %s [ %s ]") % (enabled, " || ".join(settings))
def get_types_from_attribute(attribute):
    # Expand an attribute name into the list of types assigned to it.
    return list(info(ATTRIBUTE, attribute))[0]["types"]
def get_file_types(setype):
    '''Map each file type whose name starts with setype's short name to its
    (regex list, human-readable file kind) pair from the file-context
    dictionary; types without an entry map to [].
    '''
    prefix = gen_short_name(setype)
    matching = [t for t in get_all_file_types() if t.startswith(prefix)]
    fcdict = get_fcdict()
    mpaths = {}
    for t in matching:
        try:
            entry = fcdict[t]
            mpaths[t] = (entry["regex"], file_type_str[entry["ftype"]])
        except KeyError:
            mpaths[t] = []
    return mpaths
def get_real_type_name(name):
    """Return the real name of a type

    * If 'name' refers to a type alias, return the corresponding type name.
    * Otherwise return the original name (even if the type does not exist).
    """
    if name:
        try:
            resolved = next(info(TYPE, name))
        except (RuntimeError, StopIteration):
            # unknown type / failed query: fall back to the given name
            return name
        return resolved["name"]
    return name
def get_writable_files(setype):
    '''Return {file_type: (regex list, file kind)} for every file type the
    domain *setype* can open and write, excluding proc/sysctl targets and
    the domain's own type.'''
    file_types = get_all_file_types()
    all_writes = []
    mpaths = {}
    permlist = search([ALLOW], {'source': setype, 'permlist': ['open', 'write'], 'class': 'file'})
    if permlist is None or len(permlist) == 0:
        return mpaths

    fcdict = get_fcdict()

    attributes = ["proc_type", "sysctl_type"]
    for i in permlist:
        if i['target'] in attributes:
            continue
        # skip rules disabled by their conditional booleans
        if "enabled" in i:
            if not i["enabled"]:
                continue
        if i['target'].endswith("_t"):
            # concrete type: keep only known file types other than setype itself
            if i['target'] not in file_types:
                continue
            if i['target'] not in all_writes:
                if i['target'] != setype:
                    all_writes.append(i['target'])
        else:
            # attribute: expand into its member types
            for t in get_types_from_attribute(i['target']):
                if t not in all_writes:
                    all_writes.append(t)

    for f in all_writes:
        try:
            mpaths[f] = (fcdict[f]["regex"], file_type_str[fcdict[f]["ftype"]])
        except KeyError:
            mpaths[f] = []  # {"regex":[],"paths":[]}
    return mpaths
def find_file(reg):
if os.path.exists(reg):
return [reg]
try:
pat = re.compile(r"%s$" % reg)
except:
print("bad reg:", reg)
return []
p = reg
if p.endswith("(/.*)?"):
p = p[:-6] + "/"
path = os.path.dirname(p)
try: # Bug fix: when "all files on system"
| |
6, 5, 20, 7, 5, 15, 5, 15, 7, 7, 0, 0],
[ 7, 16, 9, 8, 2, 4, 5, 15, 2, 5, 2, 4, 5, 15, 2, 4, 5, 15,
2, 4, 5, 6, 5, 20, 7, 5, 15, 5, 15, 7, 7, 0, 0],
[ 7, 16, 9, 8, 2, 4, 5, 15, 2, 5, 2, 4, 5, 15, 2, 4, 5, 15,
2, 4, 5, 6, 5, 20, 7, 5, 15, 5, 15, 7, 7, 0, 0],
[ 7, 16, 9, 8, 2, 4, 5, 15, 2, 5, 2, 4, 5, 15, 2, 4, 5, 15,
2, 4, 5, 6, 5, 20, 7, 5, 15, 5, 15, 7, 7, 0, 0],
[ 9, 18, 17, 27, 8, 5, 5, 2, 5, 5, 15, 2, 5, 2, 4, 20, 12, 5,
5, 2, 5, 24, 20, 2, 4, 5, 7, 0, 0, 0, 0, 0, 0],
[ 9, 18, 17, 27, 8, 5, 5, 2, 5, 5, 15, 2, 5, 2, 4, 20, 12, 5,
5, 2, 5, 24, 20, 2, 4, 5, 7, 0, 0, 0, 0, 0, 0],
[ 9, 18, 17, 27, 8, 5, 5, 2, 5, 5, 15, 2, 5, 2, 4, 20, 12, 5,
5, 2, 5, 24, 20, 2, 4, 5, 7, 0, 0, 0, 0, 0, 0],
[ 9, 8, 2, 5, 15, 15, 5, 24, 17, 8, 13, 25, 22, 17, 28, 26, 8, 15,
6, 20, 12, 10, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 9, 8, 2, 5, 15, 15, 5, 24, 17, 8, 13, 25, 22, 17, 28, 26, 8, 15,
6, 20, 12, 10, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 7, 9, 8, 17, 8, 5, 5, 10, 10, 2, 4, 5, 5, 2, 13, 2, 17, 2,
5, 2, 4, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 7, 9, 8, 17, 8, 5, 5, 10, 10, 2, 4, 5, 5, 2, 13, 2, 17, 2,
5, 2, 4, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 7, 9, 8, 17, 8, 5, 5, 10, 10, 2, 4, 5, 5, 2, 13, 2, 17, 2,
5, 2, 4, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 7, 9, 8, 17, 8, 5, 5, 10, 10, 2, 4, 5, 5, 2, 13, 2, 17, 2,
5, 2, 4, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 9, 8, 5, 5, 2, 5, 2, 4, 5, 15, 2, 4, 5, 5, 15, 2, 5, 2,
4, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 9, 8, 5, 5, 2, 5, 2, 4, 5, 15, 2, 4, 5, 5, 15, 2, 5, 2,
4, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 9, 8, 5, 5, 2, 5, 2, 4, 5, 15, 2, 4, 5, 5, 15, 2, 5, 2,
4, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 9, 8, 5, 5, 2, 5, 2, 4, 5, 15, 2, 4, 5, 5, 15, 2, 5, 2,
4, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 9, 8, 10, 2, 4, 15, 15, 2, 4, 2, 5, 15, 2, 5, 2, 15, 20, 7,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 9, 8, 10, 2, 4, 15, 15, 2, 4, 2, 5, 15, 2, 5, 2, 15, 20, 7,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 9, 8, 5, 5, 5, 2, 4, 2, 5, 17, 8, 22, 17, 8, 15, 10, 10, 5,
5, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 9, 8, 5, 5, 5, 2, 4, 2, 5, 17, 8, 22, 17, 8, 15, 10, 10, 5,
5, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 9, 8, 2, 15, 2, 6, 5, 2, 4, 2, 5, 5, 2, 4, 14, 10, 2, 5,
5, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 9, 8, 2, 15, 2, 6, 5, 2, 4, 2, 5, 5, 2, 4, 14, 10, 2, 5,
5, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 9, 8, 2, 15, 2, 6, 5, 2, 4, 2, 5, 5, 2, 4, 14, 10, 2, 5,
5, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 9, 8, 2, 15, 2, 6, 5, 2, 4, 2, 5, 5, 2, 4, 14, 10, 2, 5,
5, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 9, 8, 5, 15, 17, 5, 10, 7, 10, 5, 7, 2, 5, 5, 2, 5, 15, 2,
4, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 9, 8, 5, 15, 17, 5, 10, 7, 10, 5, 7, 2, 5, 5, 2, 5, 15, 2,
4, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
device='cuda:0')
new_inputs['ner_ids']= tensor([[2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2],
[2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2],
[2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2],
[2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 0, 0],
[2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 0, 0],
[2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 0, 0],
[2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 0, 0],
[2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 0, 0, 0, 0, 0, 0],
[2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 0, 0, 0, 0, 0, 0],
[2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 0, 0, 0, 0, 0, 0],
[2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0],
[2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, | |
<gh_stars>1-10
from __future__ import division, print_function
import argparse
import sys, os, time, gzip, glob
from collections import defaultdict
from base.config import combine_configs
from base.io_util import make_dir, remove_dir, tree_to_json, write_json, myopen
from base.sequences_process import sequence_set
from base.utils import num_date, save_as_nexus, parse_date
from base.tree import tree
# from base.fitness_model import fitness_model
from base.frequencies import alignment_frequencies, tree_frequencies, make_pivots
from base.auspice_export import export_metadata_json, export_frequency_json, export_tip_frequency_json
import numpy as np
from datetime import datetime
import json
from pdb import set_trace
from base.logger import logger
from Bio import SeqIO
from Bio import AlignIO
import cPickle as pickle
def collect_args():
    """Build and return the command-line parser for the process step."""
    cli = argparse.ArgumentParser(
        description="Process (prepared) JSON(s)",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    cli.add_argument('-j', '--json',
                     help="prepared JSON to process")
    cli.add_argument('--clean', default=False, action='store_true',
                     help="clean build (remove previous checkpoints)")
    cli.add_argument('--tree_method', type=str, default='raxml',
                     choices=["fasttree", "raxml", "iqtree"],
                     help="specify the method used to build the tree")
    cli.add_argument('--no_tree', action='store_true',
                     help="do not build a tree")
    return cli
class process(object):
"""process influenza virus sequences in mutliple steps to allow visualization in browser
* filtering and parsing of sequences
* alignment
* tree building
* frequency estimation of clades and mutations
* export as json
"""
def __init__(self, config):
    """ check config file, make necessary directories, set up logger """
    super(process, self).__init__()
    self.config = combine_configs("process", config)

    # try:
    #     assert(os.path.basename(os.getcwd()) == self.config["dir"])
    # except AssertionError:
    #     print("Run this script from within the {} directory".format(self.config["dir"]))
    #     sys.exit(2)

    # make sure every configured output directory exists
    for p in self.config["output"].values():
        if not os.path.isdir(p):
            os.makedirs(p)

    self.log = logger(self.config["output"]["data"], False)

    # parse the JSON into different data bits
    try:
        with open(self.config["in"], 'r') as fh:
            data = json.load(fh)
    except Exception as e:
        # NOTE(review): presumably log.fatal() exits the process; otherwise
        # `data` below would be unbound -- confirm in base.logger.
        self.log.fatal("Error loading JSON. Error: {}".format(e))

    self.info = data["info"]
    if "time_interval" in data["info"]:
        # stored as ISO date strings; convert to datetime.date objects
        self.info["time_interval"] = [datetime.strptime(x, '%Y-%m-%d').date()
                                      for x in data["info"]["time_interval"]]
    self.info["lineage"] = data["info"]["lineage"]
    if 'leaves' in data:
        self.tree_leaves = data['leaves']

    # optional display metadata; fall back to False when absent
    try:
        self.colors = data["colors"]
    except KeyError:
        self.log.notify("* colours have not been set")
        self.colors = False
    try:
        self.lat_longs = data["lat_longs"]
    except KeyError:
        self.log.notify("* latitude & longitudes have not been set")
        self.lat_longs = False

    # backwards compatability - set up file_dumps (need to rewrite sometime)
    # self.sequence_fname = self.input_data_path+'.fasta'
    self.file_dumps = {}
    self.output_path = os.path.join(self.config["output"]["data"], self.info["prefix"])
    self.file_dumps['seqs'] = self.output_path + '_sequences.pkl.gz'
    self.file_dumps['tree'] = self.output_path + '_tree.newick'
    self.file_dumps['nodes'] = self.output_path + '_nodes.pkl.gz'

    if self.config["clean"] == True:
        self.log.notify("Removing intermediate files for a clean build")
        for f in glob.glob(self.output_path+"*"):
            os.remove(f)

    if "reference" in data:
        self.seqs = sequence_set(self.log, data["sequences"], data["reference"], self.info["date_format"])
    else:
        self.log.fatal("No reference provided. Cannot continue.")
        # self.seqs = sequence_set(self.log, data["sequences"], False, self.info["date_format"])

    # backward compatability
    self.reference_seq = self.seqs.reference_seq
    self.proteins = self.seqs.proteins

    for trait in self.info["traits_are_dates"]:
        self.seqs.convert_trait_to_numerical_date(trait, self.info["date_format"])

    # Prepare titers if they are available.
    if "titers" in data:
        self.log.debug("Loaded %i titer measurements" % len(data["titers"]))

        # Convert titer dictionary indices from JSON-compatible strings back
        # to tuples.
        self.titers = {eval(key): value
                       for key, value in data["titers"].iteritems()}

    ## usefull flag to set (from pathogen run file) to disable restoring
    self.try_to_restore = True
def dump(self):
    '''
    write the current state to file

    Each attribute registered in self.file_dumps is pickled to its file;
    the tree is written as newick + a separate node-info file instead.
    '''
    self.log.warn("unsure if dump() works")
    from cPickle import dump
    from Bio import Phylo
    for attr_name, fname in self.file_dumps.iteritems():
        if hasattr(self,attr_name):
            print("dumping",attr_name)
            #if attr_name=='seqs': self.seqs.all_seqs = None
            with myopen(fname, 'wb') as ofile:
                if attr_name=='nodes':
                    # nodes are written as a side effect of dumping the tree
                    continue
                elif attr_name=='tree':
                    #biopython trees don't pickle well, write as newick + node info
                    self.tree.dump(fname, self.file_dumps['nodes'])
                else:
                    dump(getattr(self,attr_name), ofile, -1)
def load(self, debug=False):
    '''
    reconstruct instance from files

    Unpickles every attribute registered in self.file_dumps; the tree is
    restored separately (and rebuilt if no tree file is available).
    '''
    self.log.warn("unsure if load() works")
    from cPickle import load
    for attr_name, fname in self.file_dumps.iteritems():
        if attr_name=='tree':
            # handled after the loop via build_tree()
            continue
        if os.path.isfile(fname):
            with myopen(fname, 'r') as ifile:
                print('loading',attr_name,'from file',fname)
                setattr(self, attr_name, load(ifile))

    tree_name = self.file_dumps['tree']
    if os.path.isfile(tree_name):
        if os.path.isfile(self.file_dumps['nodes']):
            node_file = self.file_dumps['nodes']
        else:
            node_file = None
        # load tree, build if no tree file available
        self.build_tree(tree_name, node_file, root='none', debug=debug)
def align(self, codon_align=False, debug=False, fill_gaps=False):
    '''
    (1) Align sequences, remove non-reference insertions
    NB step 1 is skipped if a valid aln file is found
    (2) Translate
    (3) Write to multi-fasta
    CODON ALIGNMENT IS NOT IMPLEMENTED
    '''
    fnameStripped = self.output_path + "_aligned_stripped.mfa"
    if self.try_to_restore:
        # may set self.seqs.aln from the on-disk checkpoint
        self.seqs.try_restore_align_from_disk(fnameStripped)
    if not hasattr(self.seqs, "aln"):
        if codon_align:
            self.seqs.codon_align()
        else:
            self.seqs.align(self.config["subprocess_verbosity_level"], debug=debug)
        # need to redo everything
        self.try_to_restore = False
        # strip insertions relative to the reference, normalise gaps, and
        # checkpoint the stripped alignment for future restores
        self.seqs.strip_non_reference()
        if fill_gaps:
            self.seqs.make_gaps_ambiguous()
        else:
            self.seqs.make_terminal_gaps_ambiguous()
        AlignIO.write(self.seqs.aln, fnameStripped, 'fasta')
    if not self.seqs.reference_in_dataset:
        self.seqs.remove_reference_from_alignment()
    # if outgroup is not None:
    #     self.seqs.clock_filter(n_iqd=3, plot=False, max_gaps=0.05, root_seq=outgroup)
    self.seqs.translate()  # creates self.seqs.translations
    # save additional translations - disabled for now
    # for name, msa in self.seqs.translations.iteritems():
    #     SeqIO.write(msa, self.output_path + "_aligned_" + name + ".mfa", "fasta")
def get_pivots_via_spacing(self):
    '''Return pivot time points spaced config["pivot_spacing"] apart across
    the prepared time interval, as decimal years.

    NOTE(review): np.arange runs from time_interval[1] towards
    time_interval[0] -- presumably the interval is stored newest-first or
    pivot_spacing is negative; confirm against the prepare step.
    '''
    try:
        time_interval = self.info["time_interval"]
        assert("pivot_spacing" in self.config)
    except AssertionError:
        self.log.fatal("Cannot space pivots without prividing \"pivot_spacing\" in the config")
    except KeyError:
        self.log.fatal("Cannot space pivots without a time interval in the prepared JSON")
    return np.arange(time_interval[1].year+(time_interval[1].month-1)/12.0,
                     time_interval[0].year+time_interval[0].month/12.0,
                     self.config["pivot_spacing"])
def restore_mutation_frequencies(self):
    '''Restore cached mutation-frequency results from the _mut_freqs.pickle
    checkpoint when the sequence set is unchanged; otherwise initialise the
    three caches as empty dicts.'''
    if self.try_to_restore:
        try:
            with open(self.output_path + "_mut_freqs.pickle", 'rb') as fh:
                # the file holds two sequential pickles: (1) the set of
                # sequence names the cache was built from, (2) a 3-tuple of
                # (frequencies, confidence, counts)
                pickle_seqs = pickle.load(fh)
                assert(pickle_seqs == set(self.seqs.seqs.keys()))
                pickled = pickle.load(fh)
                assert(len(pickled) == 3)
                self.mutation_frequencies = pickled[0]
                self.mutation_frequency_confidence = pickled[1]
                self.mutation_frequency_counts = pickled[2]
                self.log.notify("Successfully restored mutation frequencies")
                return
        except IOError:
            pass
        except AssertionError as err:
            self.log.notify("Tried to restore mutation frequencies but failed: {}".format(err))
            #no need to remove - we'll overwrite it shortly
    self.mutation_frequencies = {}
    self.mutation_frequency_confidence = {}
    self.mutation_frequency_counts = {}
def estimate_mutation_frequencies(self,
                                  inertia=0.0,
                                  min_freq=0.01,
                                  stiffness=20.0,
                                  pivots=24,
                                  region="global",
                                  include_set=None):
    '''
    calculate the frequencies of mutation in a particular region
    currently the global frequencies should be estimated first
    because this defines the set of positions at which frequencies in
    other regions are estimated.

    :param inertia: damping carried between pivot points
    :param min_freq: minimum frequency for a mutation to be reported
    :param pivots: number of pivot points (used only if self.pivots unset)
    :param region: region name, or a (display_name, match_value) tuple
    :param include_set: optional {protein: positions} that must always be
        estimated; default None means empty (avoids a shared mutable default)
    '''
    if include_set is None:
        include_set = {}
    if not hasattr(self.seqs, 'aln'):
        self.log.warn("Align sequences first")
        return

    def filter_alignment(aln, region=None, lower_tp=None, upper_tp=None):
        # Restrict aln to samples from `region` (a name or list of names)
        # collected within [lower_tp, upper_tp).
        from Bio.Align import MultipleSeqAlignment
        tmp = aln
        if region is not None:
            if type(region) == str:
                tmp = [s for s in tmp if s.attributes['region'] == region]
            elif type(region) == list:
                tmp = [s for s in tmp if s.attributes['region'] in region]
            else:
                self.log.warn("region must be string or list")
                return
        if lower_tp is not None:
            tmp = [s for s in tmp if np.mean(s.attributes['num_date']) >= lower_tp]
        if upper_tp is not None:
            tmp = [s for s in tmp if np.mean(s.attributes['num_date']) < upper_tp]
        return MultipleSeqAlignment(tmp)

    if not hasattr(self, 'pivots'):
        tps = np.array([np.mean(x.attributes['num_date']) for x in self.seqs.seqs.values()])
        self.pivots = make_pivots(pivots, tps)
    # else:
    #     self.log.notify('estimate_mutation_frequencies: using self.pivots')

    if not hasattr(self, 'mutation_frequencies'):
        self.restore_mutation_frequencies()

    # region may be a plain name or a (display name, match value) pair
    if type(region) == str:
        region_name = region
        region_match = region
    elif type(region) == tuple:
        region_name = region[0]
        region_match = region[1]
    else:
        self.log.warn("region must be string or tuple")
        return

    # loop over the nucleotide alignment and every protein translation and
    # calculate region-specific frequencies of mutations above threshold
    for prot, aln in [('nuc', self.seqs.aln)] + self.seqs.translations.items():
        if (region_name, prot) in self.mutation_frequencies:
            self.log.notify("Skipping Frequency Estimation for region \"{}\", protein \"{}\"".format(region_name, prot))
            continue
        self.log.notify("Starting Frequency Estimation for region \"{}\", protein \"{}\"".format(region_name, prot))

        # determine set of positions that have to have a frequency calculated
        if prot in include_set:
            tmp_include_set = [x for x in include_set[prot]]
        else:
            tmp_include_set = []

        tmp_aln = filter_alignment(aln, region=None if region == 'global' else region_match,
                                   lower_tp=self.pivots[0], upper_tp=self.pivots[-1])
        if ('global', prot) in self.mutation_frequencies:
            # regional runs must cover every position estimated globally
            tmp_include_set += set([pos for (pos, mut) in self.mutation_frequencies[('global', prot)]])
        time_points = [np.mean(x.attributes['num_date']) for x in tmp_aln]
        if len(time_points) == 0:
            self.log.notify('no samples in region {} (protein: {})'.format(region_name, prot))
            self.mutation_frequency_counts[region_name] = np.zeros_like(self.pivots)
            continue

        # instantiate alignment frequency
        aln_frequencies = alignment_frequencies(tmp_aln, time_points, self.pivots,
                                                ws=max(2, len(time_points)//10),
                                                inertia=inertia,
                                                stiffness=stiffness, method='SLSQP')
        if prot == 'nuc':  # if this is a nucleotide alignment, set all non-canonical states to N
            A = aln_frequencies.aln
            # BUG FIX: previous code tested ('A'=='-') -- a constant-False
            # string comparison -- so alignment gaps were clobbered to 'N'.
            # Compare the array itself so '-' survives as a deletion state.
            A[~((A == 'A') | (A == 'C') | (A == 'G') | (A == 'T') | (A == '-'))] = 'N'
        aln_frequencies.mutation_frequencies(min_freq=min_freq, include_set=tmp_include_set,
                                             ignore_char='N' if prot == 'nuc' else 'X')
        self.mutation_frequencies[(region_name, prot)] = aln_frequencies.frequencies
        self.mutation_frequency_confidence[(region_name, prot)] = aln_frequencies.calc_confidence()
        self.mutation_frequency_counts[region_name] = aln_frequencies.counts

    # checkpoint: first the sequence-name set, then the three result dicts
    # (must match the layout expected by restore_mutation_frequencies)
    self.log.notify("Saving mutation frequencies (pickle)")
    with open(self.output_path + "_mut_freqs.pickle", 'wb') as fh:
        pickle.dump(set(self.seqs.seqs.keys()), fh, protocol=pickle.HIGHEST_PROTOCOL)
        pickle.dump((self.mutation_frequencies,
                     self.mutation_frequency_confidence,
                     self.mutation_frequency_counts), fh, protocol=pickle.HIGHEST_PROTOCOL)
def global_frequencies(self, min_freq, average_global=False, inertia=2.0/12, stiffness=2.0*12):
# set pivots and define groups of larger regions for frequency display
pivots = self.get_pivots_via_spacing()
acronyms = set([x[1] for x in self.info["regions"] if x[1]!=""])
region_groups = {str(x):[str(y[0]) for y in self.info["regions"] if y[1] == x] for x in acronyms}
pop_sizes = {str(x):np.sum([y[-1] for y in self.info["regions"] if y[1] == x]) for x in acronyms}
total_popsize = np.sum(pop_sizes.values())
# if global frequencies are to be calculated from the set of sequences, do the following
if average_global==False:
self.estimate_mutation_frequencies(pivots=pivots, min_freq=min_freq,
inertia=np.exp(-inertia), stiffness=stiffness)
for region in region_groups.iteritems():
self.estimate_mutation_frequencies(region=region, min_freq=min_freq,
inertia=np.exp(-inertia), stiffness=stiffness)
return
# ELSE:
# if global frequences are to be calculated from a weighted average of regional ones
# the following applies:
# determine sites whose frequencies need to be computed in all regions
self.seqs.diversity_statistics()
include_set = {}
| |
Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.400316,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 2.8585,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0348477,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.23006,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.246033,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.0547716,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.0883445,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0445933,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.187709,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.0249212,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.2718,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.046481,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00229737,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.027307,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0169904,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.073788,
'Execution Unit/Register Files/Runtime Dynamic': 0.0192878,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.0662421,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.151905,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 0.99434,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 3.50704e-05,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 3.50704e-05,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 3.03448e-05,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 1.16368e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000244069,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.000344554,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.000343448,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0163333,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 1.03894,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.032963,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.0554753,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 3.30788,
'Instruction Fetch Unit/Runtime Dynamic': 0.10546,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0425415,
'L2/Runtime Dynamic': 0.0262001,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 1.60967,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.241818,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0120528,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0120528,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 1.66658,
'Load Store Unit/Runtime Dynamic': 0.313311,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.02972,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.0594401,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0105477,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.011183,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.0645975,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.00541485,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.238826,
'Memory Management Unit/Runtime Dynamic': 0.0165979,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 13.1171,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.12227,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00395915,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0249395,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': | |
import pandas as pd
from .classes import Dependencies
def normalize(dependencies, df):
    """
    Normalizes the dependency relationships in dependencies into new
    groups by breaking up all partial and transitive dependencies.

    Arguments:
        dependencies (Dependencies) : the dependencies to be split up
        df (pd.DataFrame) : dataframe the dependencies describe

    Returns:
        new_groups (list[Dependencies]) : list of new dependencies objects
        representing the new groups
    """
    dependencies.remove_implied_extroneous()
    # First split away partial dependencies, then split each resulting
    # group further until no transitive dependencies remain.
    return [
        group
        for part_free in remove_part_deps(dependencies, df)
        for group in remove_trans_deps(part_free, df)
    ]
class DepDF(object):
    """
    Represents a dataframe plus the functional dependencies between its
    columns; one node of the tree built during normalization.

    Attributes:
        deps     -- Dependencies among the df's columns
        df       -- the dataframe itself
        parent   -- parent DepDF node, or None for the root
        children -- list of DepDF nodes split off this one
        index    -- index (key) columns of the dataframe
    """

    def __init__(self, deps, df, index, parent=None):
        """
        Creates a DepDF.

        Arguments:
            deps (Dependencies) : dependencies among the df
            df (pd.DataFrame) : dataframe for the object
            index (list[str]) : index columns for dataframe
            parent (DepDF, optional) : parent DepDF object
        """
        self.deps = deps
        self.df = df
        self.index = index
        self.parent = parent
        self.children = []

    def return_dfs(self):
        """
        Returns the dataframes stored in self and all its descendents.

        Returns:
            dfs (list[pd.DataFrame]) : dataframes
        """
        collected = [self.df]
        for child in self.children:
            collected.extend(child.return_dfs())
        return collected
def make_indexes(depdf):
    """
    Goes through depdf, and all of its descendents, and if any have primary keys
    of more than one attribute, creates a new index column, and replaces the
    old primary key columns with the new column in the parent df.

    Arguments:
        depdf (DepDF) : depDF to make indexes for
    """
    prim_key = depdf.deps.get_prim_key()
    if len(prim_key) > 1:
        # Surrogate key: a fresh integer column named after the joined key,
        # inserted as the left-most column.
        depdf.df.insert(0, '_'.join(prim_key), range(0, len(depdf.df)))
        depdf.index = ['_'.join(prim_key)]
        # now need to replace it in the parent df...
        if depdf.parent is not None:
            add = [None] * len(depdf.parent.df)
            # Map each distinct key tuple in the parent to its row positions.
            indices = depdf.parent.df.groupby(prim_key).indices
            for name in indices:
                # Build a boolean mask selecting the child row whose key
                # columns equal this group's key tuple.
                mask = None
                for i in range(len(prim_key)):
                    m = depdf.df[prim_key[i]] == name[i]
                    if mask is None:
                        mask = m
                    else:
                        mask = mask & m
                # The key is unique within the child, so exactly one row
                # matches; .item() would raise otherwise.
                new_val = depdf.df[mask]['_'.join(prim_key)].item()
                for index in indices[name]:
                    add[index] = new_val
            # Swap the composite key columns for the surrogate key column
            # in the parent (appended as the right-most column).
            depdf.parent.df.drop(columns=prim_key, inplace=True)
            depdf.parent.df.insert(len(depdf.parent.df.columns), '_'.join(prim_key), add)
    for child in depdf.children:
        make_indexes(child)
def normalize_dataframe(depdf):
    """
    Normalizes the dataframe represented by depdf, creating descendents
    as needed.

    Arguments:
        depdf (DepDF) : depdf to normalize
    """
    # Partial dependencies are split off first; transitive dependencies are
    # only considered once no partial dependency remains. Splitting recurses
    # via split_up, so at most one split happens per call.
    for find_deps in (depdf.deps.find_partial_deps, depdf.deps.find_trans_deps):
        offending = find_deps()
        filter(offending, depdf.df)  # module-level filter(): prunes unusable attribute types in place
        if offending != []:
            split_on = find_most_comm(offending, depdf.deps, depdf.df)
            split_up(split_on, depdf)
            return
def split_up(split_on, depdf):
    """
    Breaks off a depdf and forms its child. Recursively calls normalize on
    the original depdf, and its newly formed child.

    Arguments:
        split_on (list[str]) : attributes to split the dataframe on
        depdf (DepDF) : the depdf to split
    """
    remaining_deps, new_deps = split_on_dep(split_on, depdf.deps)
    child = DepDF(new_deps, form_child(depdf.df, new_deps), split_on, depdf)
    depdf.deps = remaining_deps
    # Keep only the columns still covered by the remaining dependencies.
    extra_cols = set(depdf.df.columns).difference(remaining_deps.all_attrs())
    depdf.df = depdf.df.drop(columns=list(extra_cols))
    depdf.children.append(child)
    normalize_dataframe(depdf)
    normalize_dataframe(child)
def form_child(df, deps):
    """
    Returns a new dataframe based off of the dependencies in deps.

    Arguments:
        df (pd.DataFrame) : dataframe to create new dataframe from
        deps (Dependencies) : dependencies to base new dataframe off of

    Returns:
        new_df (pd.DataFrame) : dataframe restricted to deps' attributes,
        with duplicate primary-key rows collapsed
    """
    keep = deps.all_attrs()
    child_df = df.drop(columns=[col for col in df.columns if col not in keep])
    return drop_primary_dups(child_df, deps.get_prim_key())
def remove_part_deps(dependencies, df):
    """
    Breaks up the dependency relations in dependencies into new groups of
    relations so that there are no more partial dependencies.

    Arguments:
        dependencies (Dependencies) : the dependencies to be split up
        df (pd.DataFrame) : dataframe the dependencies describe; used to
            filter attribute types and to break ties between candidate keys

    Returns:
        new_groups (list[Dependencies]) : list of new dependencies objects
        representing the new groups with no partial dependencies
    """
    part_deps = dependencies.find_partial_deps()
    filter(part_deps, df)
    if part_deps == []:
        return [dependencies]
    # Fix: pass df through to find_most_comm so choose_index can use column
    # order/names for tie-breaking, consistent with normalize_dataframe.
    new_deps = split_on_dep(find_most_comm(part_deps, dependencies, df), dependencies)
    return remove_part_deps(new_deps[0], df) + remove_part_deps(new_deps[1], df)
def remove_trans_deps(dependencies, df):
    """
    Breaks up the dependency relations in dependencies into new groups of
    relations so that there are no more transitive dependencies.

    Arguments:
        dependencies (Dependencies) : the dependencies to be split up
        df (pd.DataFrame) : dataframe the dependencies describe; used to
            filter attribute types and to break ties between candidate keys

    Returns:
        new_groups (list[Dependencies]): list of new dependencies objects
        representing the new groups with no transitive dependencies
    """
    trans_deps = dependencies.find_trans_deps()
    filter(trans_deps, df)
    if trans_deps == []:
        return [dependencies]
    # Fix: pass df through to find_most_comm so choose_index can use column
    # order/names for tie-breaking, consistent with normalize_dataframe.
    new_deps = split_on_dep(find_most_comm(trans_deps, dependencies, df), dependencies)
    return remove_trans_deps(new_deps[0], df) + remove_trans_deps(new_deps[1], df)
def find_most_comm(deps, dependencies, df=None):
    """
    Given a list of dependency relations, finds the most common set of
    LHS attributes. If more than one LHS set occurs the same amount of
    times, chooses the set with the least number of attributes.

    Arguments:
        deps (list[(set[str], str)]) : list of tuples representing relations
        where the lhs is a set of attribute names, and the rhs is an attribute.
        dependencies (Dependencies) : full dependency set; used to swap chosen
        attributes for equivalent primary-key attributes.
        df (pd.DataFrame, optional) : dataframe passed to choose_index for
        tie-breaking.

    Returns:
        most_comm (set[str]) : the most common lhs set of attributes
    """
    # priority_lst holds (count, lhs) pairs ordered by count, descending;
    # positions maps frozenset(lhs) -> that lhs's index in priority_lst.
    positions = {}
    priority_lst = []
    for lhs, rhs in deps:
        if frozenset(lhs) in positions:
            ind = positions[frozenset(lhs)]
            score = priority_lst[ind][0] + 1
            # Bubble the incremented entry leftwards until counts are
            # ordered again, keeping positions in sync.
            while ind != 0 and priority_lst[ind - 1][0] < score:
                priority_lst[ind] = priority_lst[ind - 1]
                positions[frozenset(priority_lst[ind - 1][1])] = ind
                ind -= 1
            priority_lst[ind] = (score, lhs)
            positions[frozenset(lhs)] = ind
        else:
            priority_lst.append((1, lhs))
            positions[frozenset(lhs)] = len(priority_lst) - 1
    # All LHS sets tied on the top count; choose_index breaks the tie
    # (shortest length, id-like names, left-most column).
    options = [item[1] for item in priority_lst if item[0] == priority_lst[0][0]]
    max_lhs = choose_index(options, df)
    # Swap attributes for equivalent primary-key attributes so that splits
    # preserve the original key where possible.
    for i in range(len(max_lhs)):
        for key in dependencies.get_prim_key():
            if dependencies.equiv_attrs(max_lhs[i], key):
                max_lhs[i] = key
    return max_lhs
def split_on_dep(lhs_dep, dependencies):
    """
    Given the LHS attributes of a dependency, breaks up the dependency
    relations in dependencies into two groups so that the LHS given is
    the primary key of the new group. The old group keeps the same
    primary key.

    Arguments:
        lhs_dep (list[str]) : set of attributes to be the new group's
        primary key
        dependencies (Dependencies) : dependency relations to be split up

    Returns:
        new_groups ((Dependencies, Dependencies)) : (old group, new group)
    """
    new_deps = {}
    old_deps = dependencies.serialize()
    new_rhs = set()
    # new primary key
    for attr in lhs_dep:
        new_deps[attr] = old_deps[attr][:]
    # Move every attribute functionally determined by (a subset of) the new
    # key out of the old group and into the new one.
    for rhs in list(old_deps.keys()):
        for lhs in old_deps[rhs]:
            if set(lhs).issubset(lhs_dep):
                new_deps[rhs] = old_deps[rhs]
                old_deps.pop(rhs)
                new_rhs.add(rhs)
                break
    # Drop old-group LHSs that reference attributes now owned by the new group.
    for rhs in old_deps:
        for lhs in old_deps[rhs][:]:
            if len(new_rhs.intersection(lhs)) != 0:
                old_deps[rhs].remove(lhs)
    # Symmetrically, drop new-group LHSs that reference attributes remaining
    # in the old group (the shared key attributes are excluded).
    old_rhs = set(list(old_deps.keys()))
    for attr in lhs_dep:
        old_rhs.remove(attr)
    for rhs in new_deps:
        for lhs in new_deps[rhs][:]:
            if len(old_rhs.intersection(lhs)) != 0:
                new_deps[rhs].remove(lhs)
    return (Dependencies(old_deps, dependencies.get_prim_key()), Dependencies(new_deps, lhs_dep))
def drop_primary_dups(df, prim_key):
    """
    Drops all duplicates based off of the columns in prim_key. If there isn't a
    unique value for the other columns, for every unique instance of columns in
    prim_key, keeps the "mode" of the unique instances' occurrence.

    Arguments:
        df (pd.DataFrame) : dataframe to drop duplicates of
        prim_key (list[str]) : columns that form the primary key of the dataframe

    Returns:
        new_df (pd.DataFrame) : dataframe with duplicates dropped
    """
    # Fast path: the key already uniquely identifies every row.
    if df.drop_duplicates(prim_key).shape[0] == df.shape[0]:
        return df
    # Keep one representative row (the column-wise mode) per key group.
    representatives = [group.mode().iloc[0] for _, group in df.groupby(prim_key)]
    deduped = pd.DataFrame(representatives, columns=df.columns).reset_index(drop=True)
    # mode() can upcast dtypes; restore the originals.
    return deduped.astype(dict(df.dtypes))
def choose_index(keys, df):
    """
    Chooses key from a list of keys. Order of priority:
    1) shortest length
    2) has "id" in some form in name of an attribute
    3) has attribute furthest to the left in table

    Arguments:
        keys (list[set[str]]) : list of keys to choose from
        df (pd.DataFrame) : pandas dataframe keys are for

    Returns:
        index (list[str]) : chosen key
    """
    ranked = sorted(keys, key=len)
    shortest = len(ranked[0])
    options = [key for key in ranked if len(key) == shortest]
    # Prefer any key whose attribute name looks like an identifier column.
    id_markers = ("_id", " id", "id _", "id ")
    for key in options:
        for attr in key:
            lowered = attr.lower()
            if any(marker in lowered for marker in id_markers):
                return list(key)
    if df is None:
        return list(options[0])
    # Otherwise prefer the key containing the left-most dataframe column,
    # narrowing the candidate set column by column.
    for col in df.columns:
        narrowed = [option for option in options if col in option]
        if len(narrowed) == 1:
            return list(narrowed[0])
        if len(narrowed) > 1:
            options = narrowed
    return list(options[0])
def filter(keys, df):
"""
Filters out any keys that contain attributes that are not strings, ints, or
categories from a list of relations.
Arguments:
keys (list[(list[str], str)]) : relationships to filter out
df (pd.DataFrame) : dataframe attributes in keys are from
| |
#!/usr/bin/python
#
# Copyright (c) 2016, Seraphim Sense Ltd.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted
# provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions
# and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of
# conditions and the following disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to
# endorse or promote products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import threading
import serial.tools.list_ports
import argparse
import array
import time
import serial
import struct
import logging
import json
import os
# Set to 1 to enable debug prints of raw UART messages
DEBUG = 0
# Scratch file name for the GATT table (usage not visible in this chunk --
# presumably a cache of discovered attributes; confirm against the rest of
# the script).
GATT_FILE = 'gatt.tmp'
class GattType:
    """GATT declaration attribute types as 16-bit integer UUIDs."""
    SERVICE = 0x2800
    CHARACTERISTIC = 0x2803
class Str:
    """GATT declaration attribute types as hex strings (string form of GattType)."""
    SERVICE = '2800'
    CHARACTERISTIC = '2803'
def makeUuidFromArray(uint8array):
    """Render a little-endian byte array as an uppercase hex UUID string.

    Bytes are emitted in reverse order so the wire (LE) layout becomes the
    conventional big-endian textual form.
    """
    return ''.join('%02X' % byte for byte in reversed(uint8array))
def makeHexFromArray(uint8array):
    """Render an array of values as an uppercase hex string.

    Values < 256 become two hex digits; larger values become four hex
    digits with the two bytes swapped (little-endian word order).
    """
    parts = []
    for value in uint8array:
        if value < 256:
            parts.append('%02X' % value)
        else:
            word = '%04X' % value
            parts.append(word[2:] + word[0:2])
    return ''.join(parts)
def makeStringFromArray(uint8array):
    """Decode an array of character codes into a string, stopping at the
    first NUL (0) terminator.

    Uses chr() instead of the original unichr(), which only exists on
    Python 2 and made this helper crash under Python 3.
    """
    s = ''
    for code in uint8array:
        if code == 0:
            break
        s += chr(code)
    return s
class Uint16:
    """16-bit little-endian integer <-> 2-byte tuple codec."""
    def __init__(self, value=0):
        self.value = value
    def serialize(self):
        """Return the value as a (low, high) byte tuple."""
        # '<H' forces little-endian: deserialize() and the BGAPI wire format
        # are LE, while the original bare 'H' used *native* byte order and
        # would mis-serialize on a big-endian host.
        return struct.unpack('2B', struct.pack('<H', self.value))
    def deserialize(self, packed):
        """Set and return the value from two little-endian bytes."""
        self.value = packed[0] + (packed[1] << 8)
        return self.value
class Uint32:
    """32-bit little-endian integer <-> 4-byte tuple codec."""
    def __init__(self, value=0):
        self.value = value
    def serialize(self):
        """Return the value as a 4-byte little-endian tuple."""
        # Bug fix: the original packed with 'H' (2 bytes) and then unpacked
        # with '4B' (4 bytes), raising struct.error on every call. '<I'
        # packs the full 32-bit value in little-endian order, matching
        # deserialize() and the BGAPI wire format.
        return struct.unpack('4B', struct.pack('<I', self.value))
    def deserialize(self, packed):
        """Set and return the value from four little-endian bytes."""
        self.value = packed[0] + (packed[1] << 8) + (packed[2] << 16) + (packed[3] << 24)
        return self.value
class Uint8Array:
    """Length-prefixed byte-array codec: [length, b0, b1, ...]."""
    def __init__(self, theArray=None):
        # None default instead of []: a mutable default list would be shared
        # by every instance constructed without an argument.
        self.array = [] if theArray is None else theArray
    def serialize(self):
        """Return the array prefixed by its length."""
        return [len(self.array)] + list(self.array)
    def deserialize(self, packed):
        """Parse [length, bytes...]; sets and returns the byte list.

        Raises BleValueError (defined elsewhere in this module) when the
        declared length disagrees with the number of trailing bytes.
        """
        length = packed[0]
        self.array = []
        if length:
            self.array = packed[1:]
            if length != len(self.array):
                raise BleValueError()
        return self.array
class BleMessage:
    """Encapsulate low level BGAPI message containing header and payload."""
    def __init__(self, header, payload):
        assert(len(header) == 4)
        self.header = header
        self.payload = payload
    def __str__(self):
        head = ''.join('%02X' % byte + ' ' for byte in self.header)
        body = ''.join('%02X' % byte + ' ' for byte in self.payload)
        return head + ' : ' + body
    def equalsByHeader(self, other):
        """Compare messages using only the header bytes, excluding byte 1
        (payload length).
        """
        mine = self.header
        theirs = other.header
        return (mine[0] == theirs[0]
                and mine[2] == theirs[2]
                and mine[3] == theirs[3])
class BleCommand(BleMessage):
    """Host-to-device BGAPI command message."""
    def __init__(self, header, payload=None):
        # None default: the original payload=[] was a shared mutable default,
        # aliasing every payload-less command to the same list object.
        BleMessage.__init__(self, header, [] if payload is None else payload)
class BleEvent(BleMessage):
    """Device-to-host BGAPI event message."""
    def __init__(self, header, payload=None):
        # None default: the original payload=[] was a shared mutable default,
        # aliasing every payload-less event to the same list object.
        BleMessage.__init__(self, header, [] if payload is None else payload)
class BleResponse(BleMessage):
    """Device-to-host BGAPI response to a command."""
    def __init__(self, header, payload=None):
        # None default: the original payload=[] was a shared mutable default,
        # aliasing every payload-less response to the same list object.
        BleMessage.__init__(self, header, [] if payload is None else payload)
class HelloResponse(BleResponse):
    """Expected response to HelloCommand (header 00 00 00 01)."""
    def __init__(self, payload=None):
        # None default avoids the shared mutable-default-argument pitfall.
        BleResponse.__init__(self, (0x00, 0x00, 0x00, 0x01), [] if payload is None else payload)
class GetInfoCommand(BleCommand):
    """system_get_info command: request device/firmware information."""
    def __init__(self):
        header = (0x00, 0x00, 0x00, 0x08)
        BleCommand.__init__(self, header)
class HelloCommand(BleCommand):
    """system_hello command: a no-op liveness check of the BLE module."""
    def __init__(self):
        BleCommand.__init__(self, (0x00, 0x00, 0x00, 0x01))
class SystemResetCommand(BleCommand):
    """system_reset command; the single payload byte 0 selects a normal boot."""
    def __init__(self):
        header = (0x00, 0x01, 0x00, 0x00)
        BleCommand.__init__(self, header, [0x00])
class SystemBootEvent(BleEvent):
    """system_boot event emitted by the module after a reset."""
    def __init__(self, payload=None):
        # None default avoids the shared mutable-default-argument pitfall.
        BleEvent.__init__(self, (0x80, 0x0C, 0x00, 0x00), [] if payload is None else payload)
class SmBondingFailEvent(BleEvent):
    """Security-manager bonding_fail event."""
    def __init__(self, payload=None):
        # None default avoids the shared mutable-default-argument pitfall.
        BleEvent.__init__(self, (0x80, 0x03, 0x05, 0x01), [] if payload is None else payload)
class AttClientIndicated(BleEvent):
    """att_client indicated event (indication acknowledged)."""
    def __init__(self, payload=None):
        # None default avoids the shared mutable-default-argument pitfall.
        BleEvent.__init__(self, (0x80, 0x03, 0x04, 0x00), [] if payload is None else payload)
class AttClientProcedureCompleted(BleEvent):
    """att_client procedure_completed event.

    When a payload is present, exposes:
        connection -- connection handle (byte 0)
        result     -- 16-bit LE result/error code (bytes 1-2)
        chrHandle  -- 16-bit LE characteristic handle (bytes 3-4)
    """
    def __init__(self, payload=None):
        # None default avoids the shared mutable-default-argument pitfall.
        payload = [] if payload is None else payload
        BleEvent.__init__(self, (0x80, 0x00, 0x04, 0x01), payload)
        if payload:
            self.connection = payload[0]
            self.result = Uint16().deserialize(payload[1:3])
            self.chrHandle = Uint16().deserialize(payload[3:5])
class ConnectionDisconnectCommand(BleCommand):
    """connection_disconnect command for the given connection handle."""
    def __init__(self, connection):
        header = (0x00, 0x01, 0x03, 0x00)
        BleCommand.__init__(self, header, [connection])
class ConnectionDisconnectResponse(BleResponse):
    """Response to ConnectionDisconnectCommand."""
    def __init__(self, payload=None):
        # Consistency fix: call BleResponse.__init__ (the direct parent)
        # instead of BleMessage.__init__; None default avoids the shared
        # mutable-default-argument pitfall.
        BleResponse.__init__(self, (0x00, 0x03, 0x03, 0x00), [] if payload is None else payload)
class ConnectionDisconnectedEvent(BleEvent):
    """connection_disconnected event: a link was closed."""
    def __init__(self, payload=None):
        # None default avoids the shared mutable-default-argument pitfall.
        BleEvent.__init__(self, (0x80, 0x00, 0x03, 0x04), [] if payload is None else payload)
class ConnectDirectCommand(BleCommand):
    """gap_connect_direct command: open a connection to the given address."""
    def __init__(self, address):
        payload = list(address)
        payload += [0]                        # address type
        payload += Uint16(16).serialize()     # conn_interval_min, units of 1.25ms
        payload += Uint16(32).serialize()     # conn_interval_max, units of 1.25ms
        payload += Uint16(100).serialize()    # timeout, units of 10ms - was 10
        payload += Uint16(0).serialize()      # latency
        BleCommand.__init__(self, (0x00, 0x00, 0x06, 0x03), payload)
class ConnectDirectResponse(BleResponse):
    """Response to ConnectDirectCommand."""
    def __init__(self, payload=None):
        # Consistency fix: call BleResponse.__init__ (the direct parent)
        # instead of BleMessage.__init__; None default avoids the shared
        # mutable-default-argument pitfall.
        BleResponse.__init__(self, (0x00, 0x00, 0x06, 0x03), [] if payload is None else payload)
class ConnectionStatusEvent(BleEvent):
    """connection_status event: fires when a connection is established.

    When a (16-byte) payload is present, exposes:
        connection -- connection handle (byte 0)
        flags      -- status flags (byte 1)
        address    -- remote device address bytes (bytes 2-7)
        bonding    -- bonding handle (byte 15)
    """
    def __init__(self, payload=None):
        # None default avoids the shared mutable-default-argument pitfall.
        payload = [] if payload is None else payload
        BleEvent.__init__(self, (0x80, 0x00, 0x03, 0x00), payload)
        if payload:
            assert(len(payload) == 16)
            self.connection = payload[0]
            self.flags = payload[1]
            self.address = payload[2:8]
            self.bonding = payload[15]
class GetConnectionsCommand(BleCommand):
    """system_get_connections command: list active connections."""
    def __init__(self):
        header = (0x00, 0x00, 0x00, 0x06)
        BleCommand.__init__(self, header)
class GetConnectionsResponse(BleResponse):
    """Response to GetConnectionsCommand."""
    def __init__(self, payload=None):
        # Consistency fix: call BleResponse.__init__ (the direct parent)
        # instead of BleMessage.__init__; None default avoids the shared
        # mutable-default-argument pitfall.
        BleResponse.__init__(self, (0x00, 0x00, 0x00, 0x06), [] if payload is None else payload)
class GetConnectionsEvent(BleEvent):
    """Event (0x80, 0x00, 0x03, 0x00) describing one connection's parameters.

    Bug fix: the original read ``self.payload`` before any base ``__init__``
    had run, which raises AttributeError. The base initializer is now called
    first and fields are parsed from the ``payload`` argument directly.
    """

    def __init__(self, payload=None):
        payload = [] if payload is None else payload
        BleMessage.__init__(self, (0x80, 0x00, 0x03, 0x00), payload)
        if len(payload) >= 16:
            self.connection = payload[0]
            self.flags = payload[1]
            # multi-byte fields are hex-encoded then reversed (byte-order swap)
            self.bd_addr = makeHexFromArray(payload[2:8])[::-1]
            self.address_type = payload[8]
            self.conn_interval = makeHexFromArray(payload[9:11])[::-1]
            self.timeout = makeHexFromArray(payload[11:13])[::-1]
            self.latency = makeHexFromArray(payload[13:15])[::-1]
            self.bonding = payload[15]
class GetRssiCommand(BleCommand):
    """Command (0x00, 0x00, 0x03, 0x01) carrying a single connection handle."""

    def __init__(self, connection):
        BleCommand.__init__(self, (0x00, 0x00, 0x03, 0x01), [connection])


class GetRssiResponse(BleResponse):
    """Response (0x00, 0x00, 0x03, 0x01); decodes connection and rssi bytes."""

    def __init__(self, payload=None):
        payload = [] if payload is None else payload
        BleMessage.__init__(self, (0x00, 0x00, 0x03, 0x01), payload)
        if payload:
            # reads self.payload after base init, as the original did --
            # assumes the base class stores the payload unchanged; confirm.
            self.connection = self.payload[0]
            self.rssi = self.payload[1]
class AttClientFindInformationCommand(BleCommand):
    """Command (0x00, 0x00, 0x04, 0x03) over a handle range [start, end]."""

    def __init__(self, connection, start, end):
        payload = [connection]
        payload.extend(Uint16(start).serialize())
        payload.extend(Uint16(end).serialize())
        # Fix: header was a list here; every sibling uses a tuple, and a list
        # header is unhashable, so it could never key the makeBleMessage lookup.
        BleCommand.__init__(self, (0x00, 0x00, 0x04, 0x03), payload)


class AttClientFindInformationResponse(BleResponse):
    """Response message with header (0x00, 0x00, 0x04, 0x03)."""

    def __init__(self, payload=None):
        BleResponse.__init__(self, (0x00, 0x00, 0x04, 0x03), payload if payload is not None else [])


class AttClientFindInformationFoundEvent(BleResponse):
    """Message (0x80, 0x00, 0x04, 0x04); decodes connection, chrHandle and uuid.

    NOTE(review): named *Event* but subclasses BleResponse, as in the
    original -- left unchanged since the base affects runtime dispatch.
    """

    def __init__(self, payload=None):
        payload = [] if payload is None else payload
        BleResponse.__init__(self, (0x80, 0x00, 0x04, 0x04), payload)
        if len(payload) >= 4:
            self.connection = payload[0]
            self.chrHandle = Uint16().deserialize(payload[1:3])
            self.uuid = makeUuidFromArray(Uint8Array().deserialize(payload[3:]))
class AttClientReadByHandleCommand(BleCommand):
    """Command (0x00, 0x00, 0x04, 0x04) to read one attribute by handle."""

    def __init__(self, connection, handle):
        payload = [connection]
        payload.extend(Uint16(handle).serialize())
        BleCommand.__init__(self, (0x00, 0x00, 0x04, 0x04), payload)


class AttClientReadByHandleResponse(BleResponse):
    """Response message with header (0x00, 0x00, 0x04, 0x04)."""

    def __init__(self, payload=None):
        # payload=None instead of a shared mutable [] default.
        BleResponse.__init__(self, (0x00, 0x00, 0x04, 0x04), payload if payload is not None else [])


class FindByTypeValueCommand(BleCommand):
    """Command (0x00, 0x08, 0x04, 0x00): find-by-type-value over [start, end]."""

    def __init__(self, connection, start, end, uuid, value):
        payload = [connection]
        payload.extend(Uint16(start).serialize())
        payload.extend(Uint16(end).serialize())
        payload.extend(Uint16(uuid).serialize())
        payload.extend(Uint8Array(value).serialize())
        BleCommand.__init__(self, (0x00, 0x08, 0x04, 0x00), payload)


class FindByTypeValueResponse(BleResponse):
    """Response message with header (0x00, 0x00, 0x04, 0x00).

    NOTE(review): second header byte differs from the command's (0x00 vs
    0x08), kept as in the original -- verify against the protocol spec.
    """

    def __init__(self, payload=None):
        BleResponse.__init__(self, (0x00, 0x00, 0x04, 0x00), payload if payload is not None else [])
class ReadByGroupTypeCommand(BleCommand):
    """Command (0x00, 0x00, 0x04, 0x01): read-by-group-type over [start, end]."""

    def __init__(self, connection, start, end, uuid):
        payload = [connection]
        payload.extend(Uint16(start).serialize())
        payload.extend(Uint16(end).serialize())
        # uuid is serialized as a length-prefixed byte array (Uint8Array of
        # the 2-byte uuid), matching ReadByTypeCommand below.
        payload.extend(Uint8Array(Uint16(uuid).serialize()).serialize())
        BleCommand.__init__(self, (0x00, 0x00, 0x04, 0x01), payload)


class ReadByGroupTypeResponse(BleResponse):
    """Response message with header (0x00, 0x00, 0x04, 0x01)."""

    def __init__(self, payload=None):
        # payload=None instead of a shared mutable [] default.
        BleResponse.__init__(self, (0x00, 0x00, 0x04, 0x01), payload if payload is not None else [])


class ReadByTypeCommand(BleCommand):
    """Command (0x00, 0x00, 0x04, 0x02): read-by-type over [start, end]."""

    def __init__(self, connection, start, end, uuid):
        payload = [connection]
        payload.extend(Uint16(start).serialize())
        payload.extend(Uint16(end).serialize())
        payload.extend(Uint8Array(Uint16(uuid).serialize()).serialize())
        BleCommand.__init__(self, (0x00, 0x00, 0x04, 0x02), payload)


class ReadByTypeResponse(BleResponse):
    """Response message with header (0x00, 0x00, 0x04, 0x02)."""

    def __init__(self, payload=None):
        BleResponse.__init__(self, (0x00, 0x00, 0x04, 0x02), payload if payload is not None else [])
class AttClientGroupFoundEvent(BleEvent):
    """Event (0x80, 0x00, 0x04, 0x02); decodes connection, start, end, uuid."""

    def __init__(self, payload=None):
        # payload=None instead of a shared mutable [] default.
        payload = [] if payload is None else payload
        BleEvent.__init__(self, (0x80, 0x00, 0x04, 0x02), payload)
        if len(payload) > 5:
            self.connection = payload[0]
            self.start = Uint16().deserialize(payload[1:3])  # group start handle
            self.end = Uint16().deserialize(payload[3:5])    # group end handle
            self.uuid = makeUuidFromArray(Uint8Array().deserialize(payload[5:]))
class AttClientReadMultipleCommand(BleCommand):
    """Command (0x00, 0x02, 0x04, 0x0B): read several attributes by handle.

    Bug fix: the original extended ``self.payload``, which does not exist
    before ``BleCommand.__init__`` runs (AttributeError for any non-empty
    ``handles``); the handles are now serialized into the local list.
    """

    def __init__(self, connection, handles):
        payload = [connection]
        for handle in handles:
            payload.extend(Uint16(handle).serialize())
        BleCommand.__init__(self, (0x00, 0x02, 0x04, 0x0B), payload)


class AttClientReadMultipleResponse(BleResponse):
    """Response (0x00, 0x03, 0x04, 0x0B); decodes connection and handle bytes."""

    def __init__(self, payload=None):
        # payload=None instead of a shared mutable [] default.
        payload = [] if payload is None else payload
        if payload:
            self.connection = payload[0]
            self.attHandles = payload[1:]
        BleResponse.__init__(self, (0x00, 0x03, 0x04, 0x0B), payload)
class AttClientAttributeWriteCommand(BleCommand):
    """Command (0x00, 0x00, 0x04, 0x05) to write `data` to attribute `handle`."""

    def __init__(self, connection, handle, data):
        payload = [connection]
        payload.extend(Uint16(handle).serialize())
        payload.extend(Uint8Array(data).serialize())
        BleCommand.__init__(self, (0x00, 0x00, 0x04, 0x05), payload)


class AttClientAttributeWriteResponse(BleResponse):
    """Response message with header (0x00, 0x00, 0x04, 0x05)."""

    def __init__(self, payload=None):
        # payload=None instead of a shared mutable [] default.
        BleResponse.__init__(self, (0x00, 0x00, 0x04, 0x05), payload if payload is not None else [])


class AttClientAttributePrepareWriteCommand(BleCommand):
    """Command (0x00, 0x00, 0x04, 0x09): prepare-write `data` at `offset`."""

    def __init__(self, connection, handle, offset, data):
        payload = [connection]
        payload.extend(Uint16(handle).serialize())
        payload.extend(Uint16(offset).serialize())
        payload.extend(Uint8Array(data).serialize())
        BleCommand.__init__(self, (0x00, 0x00, 0x04, 0x09), payload)


class AttClientAttributePrepareWriteResponse(BleResponse):
    """Response message with header (0x00, 0x00, 0x04, 0x09)."""

    def __init__(self, payload=None):
        BleResponse.__init__(self, (0x00, 0x00, 0x04, 0x09), payload if payload is not None else [])
class AttClientExecuteWriteCommand(BleCommand):
    """Command (0x00, 0x02, 0x04, 0x0A) to execute queued prepared writes.

    The trailing 1 is a fixed flag byte -- presumably "commit"; confirm
    against the protocol documentation.
    """

    def __init__(self, connection):
        BleCommand.__init__(self, (0x00, 0x02, 0x04, 0x0A), [connection, 1])


class AttClientExecuteWriteCommandResponse(BleResponse):
    """Response (0x00, 0x00, 0x04, 0x0A); decodes connection and result."""

    def __init__(self, payload=None):
        # payload=None instead of a shared mutable [] default.
        payload = [] if payload is None else payload
        BleResponse.__init__(self, (0x00, 0x00, 0x04, 0x0A), payload)
        if payload:
            self.connection = payload[0]
            # NOTE(review): result is decoded from bytes 0-1, overlapping the
            # connection byte, as in the original -- possibly should be
            # payload[1:3]; verify against the protocol spec before changing.
            self.result = payload[0] + payload[1] * 256
class AttClientAttributeValueEvent(BleEvent):
    """Event (0x80, 0x00, 0x04, 0x05); decodes connection, attHandle, type, data."""

    def __init__(self, payload=None):
        # payload=None instead of a shared mutable [] default.
        payload = [] if payload is None else payload
        BleEvent.__init__(self, (0x80, 0x00, 0x04, 0x05), payload)
        if payload:
            self.connection = payload[0]
            self.attHandle = Uint16().deserialize(payload[1:3])
            self.type = payload[3]
            # byte 4 is skipped -- presumably a value-length byte; confirm.
            self.data = payload[5:]
class AttClientReadMultipleResponseEvent(BleEvent):
    """Event (0x80, 0x02, 0x04, 0x06); decodes attHandle and attValue.

    Bug fix: the original passed an empty list to the base class and then
    sliced the (empty) ``self.payload``; the real payload is now forwarded
    to the base initializer and parsed directly.
    """

    def __init__(self, payload=None):
        payload = [] if payload is None else payload
        BleEvent.__init__(self, (0x80, 0x02, 0x04, 0x06), payload)
        if payload:
            self.attValue = payload[5:]
            self.attHandle = Uint16().deserialize(payload[1:3])
class ProtocolErrorEvent(BleEvent):
    """Event (0x80, 0x02, 0x00, 0x06); decodes a 16-bit error reason code."""

    def __init__(self, payload=None):
        # payload=None instead of a shared mutable [] default.
        payload = [] if payload is None else payload
        BleEvent.__init__(self, (0x80, 0x02, 0x00, 0x06), payload)
        if payload:
            self.reason = Uint16().deserialize(payload[0:2])
def makeBleMessage(header, payload):
"""Factory method for identifying incoming BLE messages and creating the
correct message subclass instance.
"""
# All the supported messages are listed here. Keep ABC-sorted for neatness.
lookup = {
AttClientAttributePrepareWriteResponse().header : AttClientAttributePrepareWriteResponse,
AttClientAttributeValueEvent().header : AttClientAttributeValueEvent,
AttClientAttributeWriteResponse().header : AttClientAttributeWriteResponse,
AttClientExecuteWriteCommandResponse().header | |
# repo: montag451/core
import atexit
import logging
import os
import re
import tempfile
import threading
import time
from concurrent import futures
from typing import Type
import grpc
from grpc import ServicerContext
from core import utils
from core.api.grpc import (
common_pb2,
configservices_pb2,
core_pb2,
core_pb2_grpc,
grpcutils,
)
from core.api.grpc.configservices_pb2 import (
ConfigService,
GetConfigServiceDefaultsRequest,
GetConfigServiceDefaultsResponse,
GetConfigServicesRequest,
GetConfigServicesResponse,
GetNodeConfigServiceConfigsRequest,
GetNodeConfigServiceConfigsResponse,
GetNodeConfigServiceRequest,
GetNodeConfigServiceResponse,
GetNodeConfigServicesRequest,
GetNodeConfigServicesResponse,
SetNodeConfigServiceRequest,
SetNodeConfigServiceResponse,
)
from core.api.grpc.core_pb2 import (
ExecuteScriptResponse,
GetEmaneEventChannelRequest,
GetEmaneEventChannelResponse,
)
from core.api.grpc.events import EventStreamer
from core.api.grpc.grpcutils import (
get_config_options,
get_emane_model_id,
get_links,
get_net_stats,
)
from core.emane.nodes import EmaneNet
from core.emulator.coreemu import CoreEmu
from core.emulator.data import LinkData
from core.emulator.emudata import LinkOptions, NodeOptions
from core.emulator.enumerations import EventTypes, LinkTypes, MessageFlags
from core.emulator.session import Session
from core.errors import CoreCommandError, CoreError
from core.location.mobility import BasicRangeModel, Ns2ScriptedMobility
from core.nodes.base import CoreNodeBase, NodeBase
from core.nodes.docker import DockerNode
from core.nodes.lxd import LxcNode
from core.services.coreservices import ServiceManager
# Sleep interval for the serving loop in listen(); grpc serves on its own threads.
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
# Matches veth interface names, capturing the hex node portion as group "node".
_INTERFACE_REGEX = re.compile(r"veth(?P<node>[0-9a-fA-F]+)")
class CoreGrpcServer(core_pb2_grpc.CoreApiServicer):
    """
    gRPC servicer implementing the CORE emulator API.

    :param coreemu: CoreEmu instance whose sessions this server operates on
    """
    def __init__(self, coreemu: CoreEmu) -> None:
        super().__init__()
        self.coreemu = coreemu
        # cleared by the atexit handler; checked by streaming handlers
        self.running = True
        # grpc.Server instance, created lazily in listen()
        self.server = None
        atexit.register(self._exit_handler)
    def _exit_handler(self) -> None:
        """atexit hook: flag the server as no longer running."""
        logging.debug("catching exit, stop running")
        self.running = False
    def _is_running(self, context) -> bool:
        """Return True while the server runs and the given RPC is still active."""
        return self.running and context.is_active()
    def _cancel_stream(self, context) -> None:
        """Abort a streaming RPC with CANCELLED; context.abort raises."""
        context.abort(grpc.StatusCode.CANCELLED, "server stopping")
    def listen(self, address: str) -> None:
        """
        Create the grpc server, bind it to `address` (insecure port) and block
        the calling thread until KeyboardInterrupt.

        :param address: host:port string to listen on
        """
        logging.info("CORE gRPC API listening on: %s", address)
        self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
        core_pb2_grpc.add_CoreApiServicer_to_server(self, self.server)
        self.server.add_insecure_port(address)
        self.server.start()
        try:
            # grpc serves on its own threads; this loop only keeps the
            # process alive until interrupted
            while True:
                time.sleep(_ONE_DAY_IN_SECONDS)
        except KeyboardInterrupt:
            self.server.stop(None)
    def get_session(self, session_id: int, context: ServicerContext) -> Session:
        """
        Retrieve the session for the given id, aborting the RPC when missing.

        :param session_id: id of the session to look up
        :param context: grpc context
        :return: session object for the id
        :raises Exception: grpc NOT_FOUND abort when the session does not exist
        """
        session = self.coreemu.sessions.get(session_id)
        if not session:
            # context.abort raises, so the return below is unreachable on failure
            context.abort(grpc.StatusCode.NOT_FOUND, f"session {session_id} not found")
        return session
    def get_node(
        self, session: Session, node_id: int, context: ServicerContext
    ) -> NodeBase:
        """
        Retrieve a node from the session, aborting the RPC when missing.

        :param session: session that has the node
        :param node_id: id of the node to look up
        :param context: grpc context
        :return: node object for the id
        :raises Exception: grpc NOT_FOUND abort when the node does not exist
        """
        try:
            return session.get_node(node_id)
        except CoreError:
            # abort raises; no value is returned on this path
            context.abort(grpc.StatusCode.NOT_FOUND, f"node {node_id} not found")
    def validate_service(
        self, name: str, context: ServicerContext
    ) -> Type[ConfigService]:
        """
        Validate that `name` is a known config service.

        :param name: name of service to validate
        :param context: grpc context
        :return: service class registered under the name
        :raises Exception: grpc NOT_FOUND abort when the service is unknown
        """
        service = self.coreemu.service_manager.services.get(name)
        if not service:
            context.abort(grpc.StatusCode.NOT_FOUND, f"unknown service {name}")
        return service
    def StartSession(
        self, request: core_pb2.StartSessionRequest, context: ServicerContext
    ) -> core_pb2.StartSessionResponse:
        """
        Start a session: reset it, apply all requested configuration (location,
        hooks, nodes, emane/wlan/mobility/service configs, links) in order, then
        instantiate it. Any failure short-circuits with result=False and the
        stringified exceptions.

        :param request: start session request
        :param context: grpc context
        :return: start session response
        """
        logging.debug("start session: %s", request)
        session = self.get_session(request.session_id, context)
        # clear previous state and setup for creation
        session.clear()
        session.set_state(EventTypes.CONFIGURATION_STATE)
        if not os.path.exists(session.session_dir):
            os.mkdir(session.session_dir)
        # location
        if request.HasField("location"):
            grpcutils.session_location(session, request.location)
        # add all hooks
        for hook in request.hooks:
            session.add_hook(hook.state, hook.file, None, hook.data)
        # create nodes
        _, exceptions = grpcutils.create_nodes(session, request.nodes)
        if exceptions:
            exceptions = [str(x) for x in exceptions]
            return core_pb2.StartSessionResponse(result=False, exceptions=exceptions)
        # emane configs; note `config` is rebound by the loops below after the
        # session-wide emane config dict has been updated
        config = session.emane.get_configs()
        config.update(request.emane_config)
        for config in request.emane_model_configs:
            _id = get_emane_model_id(config.node_id, config.interface_id)
            session.emane.set_model_config(_id, config.model, config.config)
        # wlan configs
        for config in request.wlan_configs:
            session.mobility.set_model_config(
                config.node_id, BasicRangeModel.name, config.config
            )
        # mobility configs
        for config in request.mobility_configs:
            session.mobility.set_model_config(
                config.node_id, Ns2ScriptedMobility.name, config.config
            )
        # service configs
        for config in request.service_configs:
            grpcutils.service_configuration(session, config)
        # config service configs
        for config in request.config_service_configs:
            node = self.get_node(session, config.node_id, context)
            service = node.config_services[config.name]
            if config.config:
                service.set_config(config.config)
            for name, template in config.templates.items():
                service.set_template(name, template)
        # service file configs
        for config in request.service_file_configs:
            session.services.set_service_file(
                config.node_id, config.service, config.file, config.data
            )
        # create links
        _, exceptions = grpcutils.create_links(session, request.links)
        if exceptions:
            exceptions = [str(x) for x in exceptions]
            return core_pb2.StartSessionResponse(result=False, exceptions=exceptions)
        # asymmetric links
        _, exceptions = grpcutils.edit_links(session, request.asymmetric_links)
        if exceptions:
            exceptions = [str(x) for x in exceptions]
            return core_pb2.StartSessionResponse(result=False, exceptions=exceptions)
        # set to instantiation and start
        session.set_state(EventTypes.INSTANTIATION_STATE)
        # boot services; instantiate() returns exceptions raised during boot
        boot_exceptions = session.instantiate()
        if boot_exceptions:
            exceptions = []
            for boot_exception in boot_exceptions:
                for service_exception in boot_exception.args:
                    exceptions.append(str(service_exception))
            return core_pb2.StartSessionResponse(result=False, exceptions=exceptions)
        return core_pb2.StartSessionResponse(result=True)
    def StopSession(
        self, request: core_pb2.StopSessionRequest, context: ServicerContext
    ) -> core_pb2.StopSessionResponse:
        """
        Stop a running session: collect data, then clear it and move it through
        the DATACOLLECT and SHUTDOWN states (order matters; events are emitted
        for both transitions).

        :param request: stop session request
        :param context: grpc context
        :return: stop session response (always result=True)
        """
        logging.debug("stop session: %s", request)
        session = self.get_session(request.session_id, context)
        session.data_collect()
        session.set_state(EventTypes.DATACOLLECT_STATE, send_event=True)
        session.clear()
        session.set_state(EventTypes.SHUTDOWN_STATE, send_event=True)
        return core_pb2.StopSessionResponse(result=True)
    def CreateSession(
        self, request: core_pb2.CreateSessionRequest, context: ServicerContext
    ) -> core_pb2.CreateSessionResponse:
        """
        Create a session in the DEFINITION state.

        :param request: create-session request
        :param context: grpc context
        :return: create-session response with the new session's id and state
        """
        logging.debug("create session: %s", request)
        session = self.coreemu.create_session(request.session_id)
        session.set_state(EventTypes.DEFINITION_STATE)
        # hard-coded default geographic reference point and scale --
        # presumably an arbitrary default location; confirm before relying on it
        session.location.setrefgeo(47.57917, -122.13232, 2.0)
        session.location.refscale = 150000.0
        return core_pb2.CreateSessionResponse(
            session_id=session.id, state=session.state
        )
    def DeleteSession(
        self, request: core_pb2.DeleteSessionRequest, context: ServicerContext
    ) -> core_pb2.DeleteSessionResponse:
        """
        Delete the session with the requested id.

        :param request: delete-session request
        :param context: grpc context
        :return: delete-session response; result reflects whether a session was deleted
        """
        logging.debug("delete session: %s", request)
        result = self.coreemu.delete_session(request.session_id)
        return core_pb2.DeleteSessionResponse(result=result)
def GetSessions(
self, request: core_pb2.GetSessionsRequest, context: ServicerContext
) -> core_pb2.GetSessionsResponse:
"""
Delete the session
:param request: get-session request
:param context: context object
:return: a delete-session response
"""
logging.debug("get sessions: %s", request)
sessions = []
for session_id in self.coreemu.sessions:
session = self.coreemu.sessions[session_id]
session_summary = core_pb2.SessionSummary(
id=session_id,
state=session.state,
nodes=session.get_node_count(),
file=session.file_name,
)
sessions.append(session_summary)
return core_pb2.GetSessionsResponse(sessions=sessions)
    def GetSessionLocation(
        self, request: core_pb2.GetSessionLocationRequest, context: ServicerContext
    ) -> core_pb2.GetSessionLocationResponse:
        """
        Retrieve a session's location reference point (xyz and geo) and scale.

        :param request: get-session-location request
        :param context: grpc context
        :return: get-session-location response
        """
        logging.debug("get session location: %s", request)
        session = self.get_session(request.session_id, context)
        x, y, z = session.location.refxyz
        lat, lon, alt = session.location.refgeo
        scale = session.location.refscale
        location = core_pb2.SessionLocation(
            x=x, y=y, z=z, lat=lat, lon=lon, alt=alt, scale=scale
        )
        return core_pb2.GetSessionLocationResponse(location=location)
    def SetSessionLocation(
        self, request: core_pb2.SetSessionLocationRequest, context: ServicerContext
    ) -> core_pb2.SetSessionLocationResponse:
        """
        Set a session's location from the request.

        :param request: set-session-location request
        :param context: grpc context
        :return: set-session-location response (always result=True)
        """
        logging.debug("set session location: %s", request)
        session = self.get_session(request.session_id, context)
        grpcutils.session_location(session, request.location)
        return core_pb2.SetSessionLocationResponse(result=True)
def SetSessionState(
self, request: core_pb2.SetSessionStateRequest, context: ServicerContext
) -> core_pb2.SetSessionStateResponse:
"""
Set session state
:param request: set-session-state request
:param context:context object
:return: set-session-state response
"""
logging.debug("set session state: %s", request)
session = self.get_session(request.session_id, context)
try:
state = EventTypes(request.state)
session.set_state(state)
if state == EventTypes.INSTANTIATION_STATE:
if not os.path.exists(session.session_dir):
os.mkdir(session.session_dir)
session.instantiate()
elif state == EventTypes.SHUTDOWN_STATE:
session.shutdown()
elif state == EventTypes.DATACOLLECT_STATE:
session.data_collect()
elif state == EventTypes.DEFINITION_STATE:
session.clear()
result = True
except KeyError:
result = False
return core_pb2.SetSessionStateResponse(result=result)
    def GetSessionOptions(
        self, request: core_pb2.GetSessionOptionsRequest, context: ServicerContext
    ) -> core_pb2.GetSessionOptionsResponse:
        """
        Retrieve session options: defaults overlaid with any explicitly set values.

        :param request: get-session-options request
        :param context: grpc context
        :return: get-session-options response with all of the session's options
        """
        logging.debug("get session options: %s", request)
        session = self.get_session(request.session_id, context)
        current_config = session.options.get_configs()
        default_config = session.options.default_values()
        # explicit settings take precedence over defaults
        default_config.update(current_config)
        config = get_config_options(default_config, session.options)
        return core_pb2.GetSessionOptionsResponse(config=config)
    def SetSessionOptions(
        self, request: core_pb2.SetSessionOptionsRequest, context: ServicerContext
    ) -> core_pb2.SetSessionOptionsResponse:
        """
        Update a session's configuration options from the request.

        :param request: set-session-options request
        :param context: grpc context
        :return: set-session-options response (always result=True)
        """
        logging.debug("set session options: %s", request)
        session = self.get_session(request.session_id, context)
        # updates the dict returned by get_configs() in place -- assumes it is
        # a live reference so the change persists; confirm in session.options
        config = session.options.get_configs()
        config.update(request.config)
        return core_pb2.SetSessionOptionsResponse(result=True)
    def GetSessionMetadata(
        self, request: core_pb2.GetSessionMetadataRequest, context: ServicerContext
    ) -> core_pb2.GetSessionMetadataResponse:
        """
        Retrieve a session's metadata mapping.

        :param request: get session metadata request
        :param context: grpc context
        :return: get session metadata response
        """
        logging.debug("get session metadata: %s", request)
        session = self.get_session(request.session_id, context)
        return core_pb2.GetSessionMetadataResponse(config=session.metadata)
    def SetSessionMetadata(
        self, request: core_pb2.SetSessionMetadataRequest, context: ServicerContext
    ) -> core_pb2.SetSessionMetadataResponse:
        """
        Replace a session's metadata with the request's config mapping.

        :param request: set metadata request
        :param context: grpc context
        :return: set metadata response (always result=True)
        """
        logging.debug("set session metadata: %s", request)
        # full replacement, not a merge
        session = self.get_session(request.session_id, context)
        session.metadata = dict(request.config)
        return core_pb2.SetSessionMetadataResponse(result=True)
def CheckSession(
self, request: core_pb2.GetSessionRequest, context: ServicerContext
) -> core_pb2.CheckSessionResponse:
"""
Checks if a session exists.
:param request: check session request
:param context: context object
:return: check session response
"""
result = request.session_id in self.coreemu.sessions
return core_pb2.CheckSessionResponse(result=result)
def GetSession(
self, request: core_pb2.GetSessionRequest, context: ServicerContext
) -> core_pb2.GetSessionResponse:
"""
Retrieve requested session
:param request: get-session request
:param context: context object
:return: get-session response
"""
logging.debug("get session: %s", request)
session = self.get_session(request.session_id, context)
links = []
nodes = []
for _id in session.nodes:
node = session.nodes[_id]
if not isinstance(node.id, int):
continue
node_type = session.get_node_type(node.__class__)
model = getattr(node, "type", None)
position = core_pb2.Position(
x=node.position.x, y=node.position.y, z=node.position.z
)
services = getattr(node, "services", [])
if services is None:
services = []
services = [x.name for | |
# coding: utf-8
"""
Nexus Repository Manager REST API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 3.20.1-01
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from nexus_api_python_client.configuration import Configuration
class CreateLdapServerXo(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'name': 'str',
'protocol': 'str',
'use_trust_store': 'bool',
'host': 'str',
'port': 'int',
'search_base': 'str',
'auth_scheme': 'str',
'auth_realm': 'str',
'auth_username': 'str',
'connection_timeout_seconds': 'int',
'connection_retry_delay_seconds': 'int',
'max_incidents_count': 'int',
'user_base_dn': 'str',
'user_subtree': 'bool',
'user_object_class': 'str',
'user_ldap_filter': 'str',
'user_id_attribute': 'str',
'user_real_name_attribute': 'str',
'user_email_address_attribute': 'str',
'user_password_attribute': 'str',
'ldap_groups_as_roles': 'bool',
'group_type': 'str',
'group_base_dn': 'str',
'group_subtree': 'bool',
'group_object_class': 'str',
'group_id_attribute': 'str',
'group_member_attribute': 'str',
'group_member_format': 'str',
'user_member_of_attribute': 'str',
'auth_password': '<PASSWORD>'
}
attribute_map = {
'name': 'name',
'protocol': 'protocol',
'use_trust_store': 'useTrustStore',
'host': 'host',
'port': 'port',
'search_base': 'searchBase',
'auth_scheme': 'authScheme',
'auth_realm': 'authRealm',
'auth_username': 'authUsername',
'connection_timeout_seconds': 'connectionTimeoutSeconds',
'connection_retry_delay_seconds': 'connectionRetryDelaySeconds',
'max_incidents_count': 'maxIncidentsCount',
'user_base_dn': 'userBaseDn',
'user_subtree': 'userSubtree',
'user_object_class': 'userObjectClass',
'user_ldap_filter': 'userLdapFilter',
'user_id_attribute': 'userIdAttribute',
'user_real_name_attribute': 'userRealNameAttribute',
'user_email_address_attribute': 'userEmailAddressAttribute',
'user_password_attribute': 'userPasswordAttribute',
'ldap_groups_as_roles': 'ldapGroupsAsRoles',
'group_type': 'groupType',
'group_base_dn': 'groupBaseDn',
'group_subtree': 'groupSubtree',
'group_object_class': 'groupObjectClass',
'group_id_attribute': 'groupIdAttribute',
'group_member_attribute': 'groupMemberAttribute',
'group_member_format': 'groupMemberFormat',
'user_member_of_attribute': 'userMemberOfAttribute',
'auth_password': '<PASSWORD>'
}
def __init__(self, name=None, protocol=None, use_trust_store=None, host=None, port=None, search_base=None, auth_scheme=None, auth_realm=None, auth_username=None, connection_timeout_seconds=None, connection_retry_delay_seconds=None, max_incidents_count=None, user_base_dn=None, user_subtree=None, user_object_class=None, user_ldap_filter=None, user_id_attribute=None, user_real_name_attribute=None, user_email_address_attribute=None, user_password_attribute=None, ldap_groups_as_roles=None, group_type=None, group_base_dn=None, group_subtree=None, group_object_class=None, group_id_attribute=None, group_member_attribute=None, group_member_format=None, user_member_of_attribute=None, auth_password=None, local_vars_configuration=None): # noqa: E501
"""CreateLdapServerXo - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._name = None
self._protocol = None
self._use_trust_store = None
self._host = None
self._port = None
self._search_base = None
self._auth_scheme = None
self._auth_realm = None
self._auth_username = None
self._connection_timeout_seconds = None
self._connection_retry_delay_seconds = None
self._max_incidents_count = None
self._user_base_dn = None
self._user_subtree = None
self._user_object_class = None
self._user_ldap_filter = None
self._user_id_attribute = None
self._user_real_name_attribute = None
self._user_email_address_attribute = None
self._user_password_attribute = None
self._ldap_groups_as_roles = None
self._group_type = None
self._group_base_dn = None
self._group_subtree = None
self._group_object_class = None
self._group_id_attribute = None
self._group_member_attribute = None
self._group_member_format = None
self._user_member_of_attribute = None
self._auth_password = None
self.discriminator = None
self.name = name
self.protocol = protocol
if use_trust_store is not None:
self.use_trust_store = use_trust_store
self.host = host
self.port = port
self.search_base = search_base
self.auth_scheme = auth_scheme
if auth_realm is not None:
self.auth_realm = auth_realm
if auth_username is not None:
self.auth_username = auth_username
self.connection_timeout_seconds = connection_timeout_seconds
self.connection_retry_delay_seconds = connection_retry_delay_seconds
self.max_incidents_count = max_incidents_count
if user_base_dn is not None:
self.user_base_dn = user_base_dn
if user_subtree is not None:
self.user_subtree = user_subtree
if user_object_class is not None:
self.user_object_class = user_object_class
if user_ldap_filter is not None:
self.user_ldap_filter = user_ldap_filter
if user_id_attribute is not None:
self.user_id_attribute = user_id_attribute
if user_real_name_attribute is not None:
self.user_real_name_attribute = user_real_name_attribute
if user_email_address_attribute is not None:
self.user_email_address_attribute = user_email_address_attribute
if user_password_attribute is not None:
self.user_password_attribute = user_password_attribute
if ldap_groups_as_roles is not None:
self.ldap_groups_as_roles = ldap_groups_as_roles
self.group_type = group_type
if group_base_dn is not None:
self.group_base_dn = group_base_dn
if group_subtree is not None:
self.group_subtree = group_subtree
if group_object_class is not None:
self.group_object_class = group_object_class
if group_id_attribute is not None:
self.group_id_attribute = group_id_attribute
if group_member_attribute is not None:
self.group_member_attribute = group_member_attribute
if group_member_format is not None:
self.group_member_format = group_member_format
if user_member_of_attribute is not None:
self.user_member_of_attribute = user_member_of_attribute
self.auth_password = <PASSWORD>
@property
def name(self):
"""Gets the name of this CreateLdapServerXo. # noqa: E501
LDAP server name # noqa: E501
:return: The name of this CreateLdapServerXo. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this CreateLdapServerXo.
LDAP server name # noqa: E501
:param name: The name of this CreateLdapServerXo. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def protocol(self):
"""Gets the protocol of this CreateLdapServerXo. # noqa: E501
LDAP server connection Protocol to use # noqa: E501
:return: The protocol of this CreateLdapServerXo. # noqa: E501
:rtype: str
"""
return self._protocol
@protocol.setter
def protocol(self, protocol):
"""Sets the protocol of this CreateLdapServerXo.
LDAP server connection Protocol to use # noqa: E501
:param protocol: The protocol of this CreateLdapServerXo. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and protocol is None: # noqa: E501
raise ValueError("Invalid value for `protocol`, must not be `None`") # noqa: E501
allowed_values = ["ldap", "ldaps"] # noqa: E501
if self.local_vars_configuration.client_side_validation and protocol not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `protocol` ({0}), must be one of {1}" # noqa: E501
.format(protocol, allowed_values)
)
self._protocol = protocol
@property
def use_trust_store(self):
"""Gets the use_trust_store of this CreateLdapServerXo. # noqa: E501
Whether to use certificates stored in NXRM's truststore # noqa: E501
:return: The use_trust_store of this CreateLdapServerXo. # noqa: E501
:rtype: bool
"""
return self._use_trust_store
@use_trust_store.setter
def use_trust_store(self, use_trust_store):
"""Sets the use_trust_store of this CreateLdapServerXo.
Whether to use certificates stored in NXRM's truststore # noqa: E501
:param use_trust_store: The use_trust_store of this CreateLdapServerXo. # noqa: E501
:type: bool
"""
self._use_trust_store = use_trust_store
@property
def host(self):
"""Gets the host of this CreateLdapServerXo. # noqa: E501
LDAP server connection hostname # noqa: E501
:return: The host of this CreateLdapServerXo. # noqa: E501
:rtype: str
"""
return self._host
@host.setter
def host(self, host):
"""Sets the host of this CreateLdapServerXo.
LDAP server connection hostname # noqa: E501
:param host: The host of this CreateLdapServerXo. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and host is None: # noqa: E501
raise ValueError("Invalid value for `host`, must not be `None`") # noqa: E501
self._host = host
@property
def port(self):
"""Gets the port of this CreateLdapServerXo. # noqa: E501
LDAP server connection port to use # noqa: E501
:return: The port of this CreateLdapServerXo. # noqa: E501
:rtype: int
"""
return self._port
@port.setter
def port(self, port):
"""Sets the port of this CreateLdapServerXo.
LDAP server connection port to use # noqa: E501
:param port: The port of this CreateLdapServerXo. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and port is None: # noqa: E501
raise ValueError("Invalid value for `port`, must not be `None`") # noqa: E501
self._port = port
@property
def search_base(self):
"""Gets the search_base of this CreateLdapServerXo. # noqa: E501
LDAP location to be added to the connection URL # noqa: E501
:return: The search_base of this CreateLdapServerXo. # noqa: E501
:rtype: str
"""
return self._search_base
@search_base.setter
def search_base(self, search_base):
"""Sets the search_base of this CreateLdapServerXo.
LDAP location to be added to the connection URL # noqa: E501
:param search_base: The search_base of this CreateLdapServerXo. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and search_base is None: # noqa: E501
raise ValueError("Invalid value for `search_base`, must not be `None`") # noqa: E501
self._search_base = search_base
@property
def auth_scheme(self):
"""Gets the auth_scheme of this CreateLdapServerXo. # noqa: E501
Authentication scheme used for connecting to LDAP server # noqa: E501
:return: The auth_scheme of this CreateLdapServerXo. # noqa: E501
:rtype: str
"""
return self._auth_scheme
@auth_scheme.setter
def auth_scheme(self, auth_scheme):
"""Sets the auth_scheme of this CreateLdapServerXo.
Authentication scheme used for connecting to LDAP server # noqa: E501
:param auth_scheme: The auth_scheme of this CreateLdapServerXo. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and auth_scheme is None: # noqa: E501
raise ValueError("Invalid value for `auth_scheme`, must not be `None`") # noqa: E501
allowed_values = ["NONE", "SIMPLE", "DIGEST_MD5", "CRAM_MD5"] # noqa: E501
if self.local_vars_configuration.client_side_validation and auth_scheme not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `auth_scheme` ({0}), must be one of {1}" # noqa: E501
.format(auth_scheme, allowed_values)
)
self._auth_scheme = auth_scheme
@property
def auth_realm(self):
"""Gets the auth_realm of this CreateLdapServerXo. # noqa: E501
The SASL realm to bind to. Required if authScheme is CRAM_MD5 or DIGEST_MD5 # noqa: E501
:return: The auth_realm of this CreateLdapServerXo. # noqa: E501
:rtype: | |
# <gh_stars>1-10
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2019/10/21 12:08
Desc: 获取金十数据-数据中心-主要机构-宏观经济
"""
import json
import time
import pandas as pd
import requests
from tqdm import tqdm
from akshare.economic.cons import (
JS_CONS_GOLD_ETF_URL,
JS_CONS_SLIVER_ETF_URL,
JS_CONS_OPEC_URL,
)
def macro_cons_gold_volume():
    """
    Total holdings (tonnes) of SPDR Gold Trust, the world's largest gold ETF.

    Data range: 2004-11-18 to the present.  Merges the legacy jin10 JS feed
    with the newer datacenter API and keeps the latest value per date.

    :return: daily total holdings in tonnes, indexed by date
    :rtype: pandas.Series
    """
    t = time.time()
    # legacy feed: the response is JS wrapping a JSON payload, so slice out
    # the outermost {...} before parsing
    res = requests.get(
        JS_CONS_GOLD_ETF_URL.format(
            str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
        )
    )
    json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
    date_list = [item["date"] for item in json_data["list"]]
    value_list = [item["datas"]["黄金"] for item in json_data["list"]]
    value_df = pd.DataFrame(value_list)
    value_df.columns = json_data["kinds"]
    value_df.index = pd.to_datetime(date_list)
    temp_df = value_df["总库存(吨)"]
    # newer datacenter API supplies the most recent records
    url = "https://datacenter-api.jin10.com/reports/list_v2"
    params = {
        "max_date": "",
        "category": "etf",
        "attr_id": "1",
        "_": str(int(round(t * 1000))),
    }
    headers = {
        "accept": "*/*",
        "accept-encoding": "gzip, deflate, br",
        "accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
        "cache-control": "no-cache",
        "origin": "https://datacenter.jin10.com",
        "pragma": "no-cache",
        "referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
        "sec-fetch-dest": "empty",
        "sec-fetch-mode": "cors",
        "sec-fetch-site": "same-site",
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
        "x-app-id": "rU6QIu7JHe2gOUeR",
        "x-csrf-token": "",
        "x-version": "1.0.0",
    }
    r = requests.get(url, params=params, headers=headers)
    temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
    temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
    temp_se = temp_se.iloc[:, 1]
    # Series.append was deprecated in pandas 1.4 and removed in 2.0;
    # pd.concat is the supported equivalent
    temp_df = pd.concat([temp_df, temp_se])
    temp_df.dropna(inplace=True)
    temp_df.sort_index(inplace=True)
    # keep the newest record when both feeds report the same date
    temp_df = temp_df.reset_index()
    temp_df.drop_duplicates(subset="index", keep="last", inplace=True)
    temp_df.set_index("index", inplace=True)
    temp_df = temp_df.squeeze()
    temp_df.index.name = None
    temp_df.name = "gold_volume"
    temp_df = temp_df.astype(float)
    return temp_df
def macro_cons_gold_change():
    """
    Daily change in holdings (tonnes) of SPDR Gold Trust, the world's largest
    gold ETF.

    Data range: 2004-11-18 to the present.  Merges the legacy jin10 JS feed
    with the newer datacenter API and keeps the latest value per date.

    :return: daily holdings change in tonnes, indexed by date
    :rtype: pandas.Series
    """
    t = time.time()
    # legacy feed: the response is JS wrapping a JSON payload, so slice out
    # the outermost {...} before parsing
    res = requests.get(
        JS_CONS_GOLD_ETF_URL.format(
            str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
        )
    )
    json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
    date_list = [item["date"] for item in json_data["list"]]
    value_list = [item["datas"]["黄金"] for item in json_data["list"]]
    value_df = pd.DataFrame(value_list)
    value_df.columns = json_data["kinds"]
    value_df.index = pd.to_datetime(date_list)
    temp_df = value_df["增持/减持(吨)"]
    # newer datacenter API supplies the most recent records
    url = "https://datacenter-api.jin10.com/reports/list_v2"
    params = {
        "max_date": "",
        "category": "etf",
        "attr_id": "1",
        "_": str(int(round(t * 1000))),
    }
    headers = {
        "accept": "*/*",
        "accept-encoding": "gzip, deflate, br",
        "accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
        "cache-control": "no-cache",
        "origin": "https://datacenter.jin10.com",
        "pragma": "no-cache",
        "referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
        "sec-fetch-dest": "empty",
        "sec-fetch-mode": "cors",
        "sec-fetch-site": "same-site",
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
        "x-app-id": "rU6QIu7JHe2gOUeR",
        "x-csrf-token": "",
        "x-version": "1.0.0",
    }
    r = requests.get(url, params=params, headers=headers)
    # column 2 of the API payload holds the change values
    temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, [0, 2]]
    temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
    temp_se = temp_se.iloc[:, 1]
    # Series.append was deprecated in pandas 1.4 and removed in 2.0;
    # pd.concat is the supported equivalent
    temp_df = pd.concat([temp_df, temp_se])
    temp_df.dropna(inplace=True)
    temp_df.sort_index(inplace=True)
    # keep the newest record when both feeds report the same date
    temp_df = temp_df.reset_index()
    temp_df.drop_duplicates(subset="index", keep="last", inplace=True)
    temp_df.set_index("index", inplace=True)
    temp_df = temp_df.squeeze()
    temp_df.index.name = None
    temp_df.name = "gold_change"
    temp_df = temp_df.astype(float)
    return temp_df
def macro_cons_gold_amount():
    """
    Total value (USD) of holdings of SPDR Gold Trust, the world's largest
    gold ETF.

    Data range: 2004-11-18 to the present.  Merges the legacy jin10 JS feed
    with the newer datacenter API and keeps the latest value per date.

    :return: daily total holdings value in USD, indexed by date
    :rtype: pandas.Series
    """
    t = time.time()
    # legacy feed: the response is JS wrapping a JSON payload, so slice out
    # the outermost {...} before parsing
    res = requests.get(
        JS_CONS_GOLD_ETF_URL.format(
            str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
        )
    )
    json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
    date_list = [item["date"] for item in json_data["list"]]
    value_list = [item["datas"]["黄金"] for item in json_data["list"]]
    value_df = pd.DataFrame(value_list)
    value_df.columns = json_data["kinds"]
    value_df.index = pd.to_datetime(date_list)
    temp_df = value_df["总价值(美元)"]
    # newer datacenter API supplies the most recent records
    url = "https://datacenter-api.jin10.com/reports/list_v2"
    params = {
        "max_date": "",
        "category": "etf",
        "attr_id": "1",
        "_": str(int(round(t * 1000))),
    }
    headers = {
        "accept": "*/*",
        "accept-encoding": "gzip, deflate, br",
        "accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
        "cache-control": "no-cache",
        "origin": "https://datacenter.jin10.com",
        "pragma": "no-cache",
        "referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
        "sec-fetch-dest": "empty",
        "sec-fetch-mode": "cors",
        "sec-fetch-site": "same-site",
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
        "x-app-id": "rU6QIu7JHe2gOUeR",
        "x-csrf-token": "",
        "x-version": "1.0.0",
    }
    r = requests.get(url, params=params, headers=headers)
    # column 3 of the API payload holds the total-value figures
    temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, [0, 3]]
    temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
    temp_se = temp_se.iloc[:, 1]
    # Series.append was deprecated in pandas 1.4 and removed in 2.0;
    # pd.concat is the supported equivalent
    temp_df = pd.concat([temp_df, temp_se])
    temp_df.dropna(inplace=True)
    temp_df.sort_index(inplace=True)
    # keep the newest record when both feeds report the same date
    temp_df = temp_df.reset_index()
    temp_df.drop_duplicates(subset="index", keep="last", inplace=True)
    temp_df.set_index("index", inplace=True)
    temp_df = temp_df.squeeze()
    temp_df.index.name = None
    temp_df.name = "gold_amount"
    temp_df = temp_df.astype(float)
    return temp_df
def macro_cons_silver_volume():
    """
    Total holdings (tonnes) of iShares Silver Trust, the world's largest
    silver ETF.

    Data range: 2006-04-29 to the present.  Merges the legacy jin10 JS feed,
    the datacenter API and the CDN JSON report, keeping the latest value per
    date.

    :return: daily total holdings in tonnes, indexed by date
    """
    t = time.time()
    # legacy feed: the response is JS wrapping a JSON payload, so slice out
    # the outermost {...} before parsing
    res = requests.get(
        JS_CONS_SLIVER_ETF_URL.format(
            str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
        )
    )
    json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
    date_list = [item["date"] for item in json_data["list"]]
    value_list = [item["datas"]["白银"] for item in json_data["list"]]
    value_df = pd.DataFrame(value_list)
    value_df.columns = json_data["kinds"]
    value_df.index = pd.to_datetime(date_list)
    temp_df = value_df["总库存(吨)"]
    # datacenter API supplies more recent records
    url = "https://datacenter-api.jin10.com/reports/list_v2"
    params = {
        "max_date": "",
        "category": "etf",
        "attr_id": "2",
        "_": str(int(round(t * 1000))),
    }
    headers = {
        "accept": "*/*",
        "accept-encoding": "gzip, deflate, br",
        "accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
        "cache-control": "no-cache",
        "origin": "https://datacenter.jin10.com",
        "pragma": "no-cache",
        "referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
        "sec-fetch-dest": "empty",
        "sec-fetch-mode": "cors",
        "sec-fetch-site": "same-site",
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
        "x-app-id": "rU6QIu7JHe2gOUeR",
        "x-csrf-token": "",
        "x-version": "1.0.0",
    }
    r = requests.get(url, params=params, headers=headers)
    temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, [0, 1]]
    temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
    temp_se = temp_se.iloc[:, 1]
    # Series.append was deprecated in pandas 1.4 and removed in 2.0;
    # pd.concat is the supported equivalent
    temp_df = pd.concat([temp_df, temp_se])
    temp_df.dropna(inplace=True)
    temp_df.sort_index(inplace=True)
    temp_df = temp_df.reset_index()
    temp_df.drop_duplicates(subset="index", keep="last", inplace=True)
    temp_df.set_index("index", inplace=True)
    temp_df = temp_df.squeeze()
    temp_df.index.name = None
    temp_df.name = "silver_volume"
    # CDN JSON report carries the newest rows; column names come from "keys"
    url = "https://cdn.jin10.com/data_center/reports/etf_2.json"
    r = requests.get(url)
    data_json = r.json()
    append_temp_df = pd.DataFrame(data_json["values"]).T
    append_temp_df.columns = [item["name"] for item in data_json["keys"]]
    temp_append_df = append_temp_df["总库存"]
    temp_append_df.name = "silver_volume"
    # merge on the (stringified) date column, preferring the CDN rows
    temp_df = temp_df.reset_index()
    temp_df["index"] = temp_df["index"].astype(str)
    temp_df = pd.concat([temp_df, temp_append_df.reset_index()])
    temp_df.drop_duplicates(subset=["index"], keep="last", inplace=True)
    temp_df.index = pd.to_datetime(temp_df["index"])
    del temp_df["index"]
    # the CDN feed pads its tail with the literal string 'Show All'
    temp_df = temp_df[temp_df != 'Show All']
    temp_df.sort_index(inplace=True)
    temp_df = temp_df.astype(float)
    return temp_df
def macro_cons_silver_change():
    """
    Daily change in holdings (tonnes) of iShares Silver Trust, the world's
    largest silver ETF.

    Data range: 2006-04-29 to the present.  Merges the legacy jin10 JS feed,
    the datacenter API and the CDN JSON report, keeping the latest value per
    date.

    :return: daily holdings change in tonnes, indexed by date
    """
    t = time.time()
    # legacy feed: the response is JS wrapping a JSON payload, so slice out
    # the outermost {...} before parsing
    res = requests.get(
        JS_CONS_SLIVER_ETF_URL.format(
            str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
        )
    )
    json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
    date_list = [item["date"] for item in json_data["list"]]
    value_list = [item["datas"]["白银"] for item in json_data["list"]]
    value_df = pd.DataFrame(value_list)
    value_df.columns = json_data["kinds"]
    value_df.index = pd.to_datetime(date_list)
    temp_df = value_df["增持/减持(吨)"]
    temp_df.name = "silver_change"
    # datacenter API supplies more recent records
    url = "https://datacenter-api.jin10.com/reports/list_v2"
    params = {
        "max_date": "",
        "category": "etf",
        "attr_id": "2",
        "_": str(int(round(t * 1000))),
    }
    headers = {
        "accept": "*/*",
        "accept-encoding": "gzip, deflate, br",
        "accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
        "cache-control": "no-cache",
        "origin": "https://datacenter.jin10.com",
        "pragma": "no-cache",
        "referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
        "sec-fetch-dest": "empty",
        "sec-fetch-mode": "cors",
        "sec-fetch-site": "same-site",
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
        "x-app-id": "rU6QIu7JHe2gOUeR",
        "x-csrf-token": "",
        "x-version": "1.0.0",
    }
    r = requests.get(url, params=params, headers=headers)
    # column 2 of the API payload holds the change values
    temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, [0, 2]]
    temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
    temp_se = temp_se.iloc[:, 1]
    # Series.append was deprecated in pandas 1.4 and removed in 2.0;
    # pd.concat is the supported equivalent
    temp_df = pd.concat([temp_df, temp_se])
    temp_df.dropna(inplace=True)
    temp_df.sort_index(inplace=True)
    temp_df = temp_df.reset_index()
    temp_df.drop_duplicates(subset="index", keep="last", inplace=True)
    temp_df.set_index("index", inplace=True)
    temp_df = temp_df.squeeze()
    temp_df.index.name = None
    temp_df.name = "silver_change"
    # CDN JSON report carries the newest rows; column names come from "keys"
    url = "https://cdn.jin10.com/data_center/reports/etf_2.json"
    r = requests.get(url)
    data_json = r.json()
    append_temp_df = pd.DataFrame(data_json["values"]).T
    append_temp_df.columns = [item["name"] for item in data_json["keys"]]
    temp_append_df = append_temp_df["增持/减持"]
    temp_append_df.name = "silver_change"
    # merge on the (stringified) date column, preferring the CDN rows
    temp_df = temp_df.reset_index()
    temp_df["index"] = temp_df["index"].astype(str)
    temp_df = pd.concat([temp_df, temp_append_df.reset_index()])
    temp_df.drop_duplicates(subset=["index"], keep="last", inplace=True)
    temp_df.index = pd.to_datetime(temp_df["index"])
    del temp_df["index"]
    # the CDN feed pads its tail with the literal string 'Show All'
    temp_df = temp_df[temp_df != 'Show All']
    temp_df.sort_index(inplace=True)
    temp_df = temp_df.astype(float)
    return temp_df
def macro_cons_silver_amount():
"""
全球最大白银ETF--iShares Silver Trust持仓报告, 数据区间从20060429-至今
:return: pandas.Series
2006-04-29 263651152
2006-05-02 263651152
2006-05-03 445408550
2006-05-04 555123947
2006-05-05 574713264
...
2019-10-17 Show All
2019-10-18 Show All
2019-10-21 Show All
2019-10-22 Show All
2019-10-23 Show All
"""
t = time.time()
res = requests.get(
JS_CONS_SLIVER_ETF_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["白银"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["总价值(美元)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "etf",
"attr_id": "2",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, [0, 3]]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", keep="last", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "silver_amount"
url = "https://cdn.jin10.com/data_center/reports/etf_2.json"
r = requests.get(url)
data_json = r.json()
append_temp_df = pd.DataFrame(data_json["values"]).T
append_temp_df.columns = [item["name"] for item in data_json["keys"]]
temp_append_df = append_temp_df["总价值"]
temp_append_df.name = "silver_amount"
temp_df = temp_df.reset_index()
temp_df["index"] = temp_df["index"].astype(str)
temp_df = temp_df.append(temp_append_df.reset_index())
temp_df.drop_duplicates(subset=["index"], keep="last", inplace=True)
temp_df.index = pd.to_datetime(temp_df["index"])
| |
mut_cand[1].pos = arg_obj
except:
pass
if isinstance(arg_node, ast.Name):
assert self.active_scope is self.cur_frame_original_scope
arg_dsym = self.active_scope.lookup_data_symbol_by_name(arg_node.id)
if arg_dsym is None:
self.active_scope.upsert_data_symbol_for_name(
arg_node.id,
arg_obj,
set(),
self.prev_trace_stmt_in_cur_frame.stmt_node,
implicit=True,
)
mut_cand[-2].append(resolve_rval_symbols(arg_node))
mut_cand[-1].append(arg_obj)
def _save_mutation_candidate(
self, obj: Any, method_name: Optional[str], obj_name: Optional[str] = None
) -> None:
mutation_event = resolve_mutating_method(obj, method_name)
if mutation_event is None or isinstance(
mutation_event, MutatingMethodEventNotYetImplemented
):
mutation_event = StandardMutation()
self.mutation_candidate = ((obj, obj_name, method_name), mutation_event, [], [])
    @pyc.register_raw_handler(pyc.before_call)
    @pyc.skip_when_tracing_disabled
    def before_call(self, function_or_method, *_, **__):
        """Handler that runs just before a call executes.

        When the callable was reached via an attribute or subscript load,
        ``saved_complex_symbol_load_data`` holds the receiver object and the
        attribute/subscript used; use that to register a mutation candidate
        for ``after_call`` to inspect.  Finally push a fresh lexical
        call-stack frame and restore the frame's original scope.
        """
        if self.saved_complex_symbol_load_data is None:
            obj, attr_or_subscript, is_subscript, obj_name = None, None, None, None
        else:
            # TODO: this will cause errors if we add more fields
            (
                _,
                obj,
                attr_or_subscript,
                is_subscript,
                *_,
                obj_name,
            ) = self.saved_complex_symbol_load_data
        if obj is not None and is_subscript is not None:
            if is_subscript:
                # TODO: need to do this also for chained calls, e.g. f()()
                method_name = None
            else:
                assert isinstance(attr_or_subscript, str)
                method_name = attr_or_subscript
            # method_name should match ast_by_id[function_or_method].func.id
            self._save_mutation_candidate(obj, method_name, obj_name=obj_name)
        # the saved load data is single-use; clear it for the next load
        self.saved_complex_symbol_load_data = None
        with self.lexical_call_stack.push():
            pass
        self.active_scope = self.cur_frame_original_scope
@pyc.register_raw_handler((pyc.before_function_body, pyc.before_lambda_body))
def before_function_body(self, _obj: Any, function_id: NodeId, *_, **__):
ret = self.is_tracing_enabled and function_id not in self._seen_functions_ids
if ret:
self._seen_functions_ids.add(function_id)
return ret
    @pyc.register_raw_handler(pyc.after_call)
    def after_call(
        self,
        retval: Any,
        _node_id: NodeId,
        frame: FrameType,
        *_,
        call_node_id: NodeId,
        **__,
    ):
        """Handler that runs after a call returns.

        Possibly re-enables tracing (if it was disabled mid-call and we are
        back at the top level), pops the lexical call frame pushed by
        ``before_call``, and feeds the return value to the pending mutation
        candidate.
        """
        tracing_will_be_enabled_by_end = self.is_tracing_enabled
        if not self.is_tracing_enabled:
            tracing_will_be_enabled_by_end = self._should_attempt_to_reenable_tracing(
                frame
            )
        if tracing_will_be_enabled_by_end:
            # if tracing gets reenabled here instead of at the 'before_stmt' handler, then we're still
            # at the same module stmt as when tracing was disabled, and we still have a 'return' to trace
            self.call_depth = 1
            self.call_stack.clear()
            self.lexical_call_stack.clear()
        if not tracing_will_be_enabled_by_end:
            return
        # no need to reset active scope here;
        # that will happen in the 'after chain' handler
        if len(self.lexical_call_stack) > 0:
            # skip / give up if tracing was recently reenabled
            self.lexical_call_stack.pop()
        self.prev_node_id_in_cur_frame_lexical = None
        self._process_possible_mutation(retval)
        if not self.is_tracing_enabled:
            self._enable_tracing()
    # Note: we don't trace set literals
    @pyc.register_raw_handler(
        (
            pyc.before_dict_literal,
            pyc.before_list_literal,
            pyc.before_tuple_literal,
        )
    )
    @pyc.skip_when_tracing_disabled
    def before_literal(self, *_, **__):
        """Open a fresh anonymous Namespace for an upcoming dict/list/tuple
        literal, nested under any literal scope already being built (so that
        nested literals chain their scopes correctly)."""
        parent_scope = self.active_literal_scope or self.cur_frame_original_scope
        with self.lexical_literal_stack.push():
            self.active_literal_scope = Namespace(
                None, Namespace.ANONYMOUS, parent_scope
            )
    @pyc.register_raw_handler(
        (
            pyc.after_dict_literal,
            pyc.after_list_literal,
            pyc.after_tuple_literal,
        )
    )
    @pyc.skip_when_tracing_disabled
    def after_literal(
        self, literal: Union[dict, list, tuple], node_id: NodeId, *_, **__
    ):
        """Handler that runs once a dict/list/tuple literal has been built.

        Binds the active literal scope to the concrete container object,
        upserts one data symbol per element (resolving each element's
        dependencies, with special handling for ``*starred`` unpacking), and
        finally registers an anonymous symbol for the literal itself in the
        parent scope.  The matching ``lexical_literal_stack.pop()`` always
        runs, restoring the enclosing literal scope.
        """
        try:
            self.active_literal_scope.update_obj_ref(literal)
            logger.warning("create literal scope %s", self.active_literal_scope)
            starred_idx = -1
            starred_namespace = None
            outer_deps = set()
            for (i, inner_obj), (
                inner_key_node,
                inner_val_node,
            ) in match_container_obj_or_namespace_with_literal_nodes(
                literal, self.ast_node_by_id[node_id]  # type: ignore
            ):
                # TODO: memoize symbol resolution; otherwise this will be quadratic for deeply nested literals
                if isinstance(inner_val_node, ast.Starred):
                    inner_symbols = set()
                    starred_idx += 1
                    if starred_idx == 0:
                        # first starred element: look up the namespace of the unpacked object once
                        starred_syms = self.resolve_loaded_symbols(inner_val_node)
                        starred_namespace = (
                            nbs().namespaces.get(starred_syms[0].obj_id, None)
                            if starred_syms
                            else None
                        )
                    if starred_namespace is not None:
                        starred_dep = starred_namespace.lookup_data_symbol_by_name_this_indentation(
                            starred_idx, is_subscript=True
                        )
                        inner_symbols.add(starred_dep)
                else:
                    inner_symbols = resolve_rval_symbols(inner_val_node)
                    if inner_key_node is not None:
                        # dict-key expressions become deps of the literal itself
                        outer_deps.update(resolve_rval_symbols(inner_key_node))
                self.node_id_to_loaded_symbols.pop(id(inner_val_node), None)
                inner_symbols.discard(None)
                if isinstance(
                    i, (int, str)
                ):  # TODO: perform more general check for SupportedIndexType
                    self.active_literal_scope.upsert_data_symbol_for_name(
                        i,
                        inner_obj,
                        inner_symbols,
                        self.prev_trace_stmt_in_cur_frame.stmt_node,
                        is_subscript=True,
                        implicit=True,
                        # this is necessary in case some literal object got reused,
                        # since as of this comment (2021/08/14) we do not clear
                        # GC'd symbols from the symbol graph
                        propagate=False,
                    )
            self.node_id_to_loaded_literal_scope[node_id] = self.active_literal_scope
            parent_scope: Scope = self.active_literal_scope.parent_scope
            assert parent_scope is not None
            literal_sym = parent_scope.upsert_data_symbol_for_name(
                "<literal_sym_%d>" % id(literal),
                literal,
                outer_deps,
                self.prev_trace_stmt_in_cur_frame.stmt_node,
                is_anonymous=True,
                implicit=True,
                propagate=False,
            )
            self.node_id_to_loaded_symbols[node_id].append(literal_sym)
            return literal
        finally:
            self.lexical_literal_stack.pop()
@pyc.register_raw_handler(pyc.dict_key)
@pyc.skip_when_tracing_disabled
def dict_key(self, obj: Any, key_node_id: NodeId, *_, **__):
self.node_id_to_saved_dict_key[key_node_id] = obj
return obj
    @pyc.register_raw_handler(pyc.dict_value)
    @pyc.skip_when_tracing_disabled
    def dict_value(
        self,
        obj: Any,
        value_node_id: NodeId,
        *_,
        key_node_id: NodeId,
        dict_node_id: NodeId,
        **__,
    ):
        """If this dict value was itself a literal with a pending namespace
        scope, name that scope after the key saved by ``dict_key`` (when the
        key is a str or int); always pass the value through unchanged."""
        scope = self.node_id_to_loaded_literal_scope.pop(value_node_id, None)
        if scope is None:
            return obj
        # if we found a pending literal, assert that it's not dict unpacking
        assert key_node_id is not None
        key_obj = self.node_id_to_saved_dict_key.pop(key_node_id, None)
        if isinstance(key_obj, (str, int)):
            scope.scope_name = str(key_obj)
        return obj
@pyc.register_raw_handler((pyc.list_elt, pyc.tuple_elt))
@pyc.skip_when_tracing_disabled
def list_or_tuple_elt(
self,
obj: Any,
elt_node_id: NodeId,
*_,
index: Optional[int],
container_node_id: NodeId,
**__,
):
scope = self.node_id_to_loaded_literal_scope.pop(elt_node_id, None)
if scope is None:
return obj
if index is not None:
scope.scope_name = str(index)
return obj
    @pyc.register_raw_handler(pyc.after_lambda)
    @pyc.skip_when_tracing_disabled
    def after_lambda(self, obj: Any, lambda_node_id: int, frame: FrameType, *_, **__):
        """Register an anonymous function symbol for a just-created lambda,
        with its keyword defaults' symbols as dependencies."""
        sym_deps = []
        node = self.ast_node_by_id[lambda_node_id]
        for kw_default in node.args.defaults:  # type: ignore
            sym_deps.extend(self.resolve_loaded_symbols(kw_default))
        sym = self.active_scope.upsert_data_symbol_for_name(
            "<lambda_sym_%d>" % id(obj),
            obj,
            sym_deps,
            self.prev_trace_stmt_in_cur_frame.stmt_node,
            is_function_def=True,
            propagate=False,
        )
        # FIXME: this is super brittle. We're passing in a stmt node to update the mapping from
        #  stmt_node to function symbol, but simultaneously forcing the lambda symbol to hold
        #  a reference to the lambda in order to help with symbol resolution later
        sym.stmt_node = node
        self.node_id_to_loaded_symbols[lambda_node_id].append(sym)
    @pyc.register_raw_handler(pyc.after_stmt)
    def after_stmt(self, ret_expr: Any, stmt_id: int, frame: FrameType, *_, **__):
        """Finish processing a statement: save its (expression) result for
        ``after_module_stmt`` and dispatch the generic statement handler.
        No-op for statements already seen."""
        if stmt_id in self.seen_stmts:
            return ret_expr
        self._saved_stmt_ret_expr = ret_expr
        stmt = self.ast_node_by_id.get(stmt_id, None)
        if stmt is not None:
            self.handle_other_sys_events(
                None, 0, frame, pyc.after_stmt, stmt_node=cast(ast.stmt, stmt)
            )
        return ret_expr
@pyc.register_raw_handler(pyc.after_module_stmt)
def after_module_stmt(self, *_, **__):
if self.is_tracing_enabled:
assert self.cur_frame_original_scope.is_global
ret = self._saved_stmt_ret_expr
self._saved_stmt_ret_expr = None
self._module_stmt_counter += 1
return ret
    @pyc.register_raw_handler(pyc.before_stmt)
    def before_stmt(self, _ret: None, stmt_id: int, frame: FrameType, *_, **__) -> None:
        """Prepare to trace a statement: flush the previous compound stmt,
        create/cache its TraceStatement, and re-enable tracing when we are
        back at the top level after it was disabled mid-execution."""
        self.next_stmt_node_id = stmt_id
        if stmt_id in self.seen_stmts:
            return
        # logger.warning('reenable tracing: %s', site_id)
        if self.prev_trace_stmt_in_cur_frame is not None:
            prev_trace_stmt_in_cur_frame = self.prev_trace_stmt_in_cur_frame
            # both of the following stmts should be processed when body is entered
            if isinstance(
                prev_trace_stmt_in_cur_frame.stmt_node, (ast.For, ast.If, ast.With)
            ):
                self.after_stmt(None, prev_trace_stmt_in_cur_frame.stmt_id, frame)
        trace_stmt = self.traced_statements.get(stmt_id, None)
        if trace_stmt is None:
            trace_stmt = TraceStatement(
                frame, cast(ast.stmt, self.ast_node_by_id[stmt_id])
            )
            self.traced_statements[stmt_id] = trace_stmt
        self.prev_trace_stmt_in_cur_frame = trace_stmt
        if not self.is_tracing_enabled and self._should_attempt_to_reenable_tracing(
            frame
        ):
            # At this point, we can be sure we're at the top level
            # because tracing was enabled in a top-level handler.
            # We also need to clear the stack, as we won't catch
            # the return event (since tracing was already disabled
            # when we got to a `before_stmt` event).
            self.call_depth = 0
            self.call_stack.clear()
            self.lexical_call_stack.clear()
            self.after_stmt_reset_hook()
            self._enable_tracing()
    def _should_attempt_to_reenable_tracing(self, frame: FrameType) -> bool:
        """Decide whether tracing (currently disabled) may be turned back on.

        Counts how many frames up the stack belong to cell files; tracing is
        only re-enabled at the top level (exactly one cell frame) and only if
        the top-level statement being traced has not already finished.
        """
        if nbs().is_develop:
            assert not self.is_tracing_enabled
            assert self.call_depth > 0, (
                "expected managed call depth > 0, got %d" % self.call_depth
            )
        call_depth = 0
        while frame is not None:
            if nbs().is_cell_file(frame.f_code.co_filename):
                call_depth += 1
            frame = frame.f_back
        if nbs().is_develop:
            assert call_depth >= 1, "expected call depth >= 1, got %d" % call_depth
        # TODO: allow reenabling tracing beyond just at the top level
        if call_depth != 1:
            return False
        if len(self.call_stack) == 0:
            stmt_in_top_level_frame = self.prev_trace_stmt_in_cur_frame
        else:
            stmt_in_top_level_frame = self.call_stack.get_field(
                "prev_trace_stmt_in_cur_frame", depth=0
            )
        if stmt_in_top_level_frame.finished:
            return False
        if nbs().trace_messages_enabled:
            self.EVENT_LOGGER.warning("reenable tracing >>>")
        return True
def _get_or_make_trace_stmt(
self, stmt_node: ast.stmt, frame: FrameType
) -> TraceStatement:
trace_stmt = self.traced_statements.get(id(stmt_node), None)
if trace_stmt is None:
trace_stmt = TraceStatement(frame, stmt_node)
self.traced_statements[id(stmt_node)] = trace_stmt
return trace_stmt
def _maybe_log_event(
self, event: pyc.TraceEvent, stmt_node: ast.stmt, trace_stmt: TraceStatement
):
if nbs().trace_messages_enabled:
codeline = astunparse.unparse(stmt_node).strip("\n").split("\n")[0]
codeline = " " * getattr(stmt_node, "col_offset", 0) + codeline
self.EVENT_LOGGER.warning(
" %3d: %10s >>> %s", trace_stmt.lineno, event, codeline
)
    def _get_stmt_node_for_sys_event(
        self, event: pyc.TraceEvent, cell_num: int, lineno: int
    ) -> Optional[ast.stmt]:
        """Map a sys-level trace event at (cell_num, lineno) to its ast.stmt.

        For `return` events on older interpreters the frame position points at
        the calling site, so prefer the recorded next-statement node.  For
        `call` events whose lineno resolves to a non-function statement (e.g. a
        decorator line), walk up to the enclosing function definition.
        Raises KeyError (after logging in develop mode) when no statement is
        registered for the position.
        """
        if event == pyc.return_ and self.next_stmt_node_id is not None:
            # this branch necessary for python < 3.8 where the frame
            # position maps to the calling location instead of the return
            return cast(ast.stmt, self.ast_node_by_id[self.next_stmt_node_id])
        try:
            stmt_node = self.stmt_by_lineno_by_module_id[cell_num][lineno]
            if event == pyc.call and not isinstance(
                stmt_node, (ast.AsyncFunctionDef, ast.FunctionDef)
            ):
                # TODO: this is bad and I should feel bad. Need a better way to figure out which
                #  stmt is executing than by using line numbers.
                parent_node = self.parent_stmt_by_id.get(id(stmt_node), None)
                if nbs().is_develop:
                    logger.info(
                        "node %s parent %s",
                        ast.dump(stmt_node),
                        None if parent_node is None else ast.dump(parent_node),
                    )
                if (
                    parent_node is not None
                    and getattr(parent_node, "lineno", None) == lineno
                    and isinstance(parent_node, (ast.AsyncFunctionDef, ast.FunctionDef))
                ):
                    stmt_node = parent_node
            return stmt_node
        except KeyError as e:
            if nbs().is_develop:
                self.EVENT_LOGGER.warning(
                    "got key error for stmt node in cell %d, line %d",
                    cell_num,
                    lineno,
                )
                raise e
        return None
@pyc.register_raw_handler(pyc.call)
def handle_call(
self,
ret_obj: Any,
_node_id: None,
frame: FrameType,
event: pyc.TraceEvent,
*_,
**__,
):
cell_num, lineno = nbs().get_position(frame)
assert cell_num is not None
stmt_node = self._get_stmt_node_for_sys_event(event, cell_num, lineno)
trace_stmt = self._get_or_make_trace_stmt(stmt_node, frame)
self._maybe_log_event(event, stmt_node, trace_stmt)
try:
prev_node_id_in_cur_frame_lexical = self.lexical_call_stack.get_field(
"prev_node_id_in_cur_frame_lexical"
)
| |
for
hoverinfo .
hoverlabel
:class:`plotly.graph_objects.candlestick.Hoverlabel`
instance or dict with compatible properties
hovertext
Same as `text`.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
hovertext .
ids
Assigns id labels to each datum. These ids for object
constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
ids .
increasing
:class:`plotly.graph_objects.candlestick.Increasing`
instance or dict with compatible properties
legendgroup
Sets the legend group for this trace. Traces part of
the same legend group hide/show at the same time when
toggling legend items.
line
:class:`plotly.graph_objects.candlestick.Line` instance
or dict with compatible properties
low
Sets the low values.
lowsrc
Sets the source reference on Chart Studio Cloud for
low .
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`
`rangeselector`, `updatemenues` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
layout attributes, use `%{data[n[.meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
meta .
name
Sets the trace name. The trace name appear as the
legend item and on hover.
opacity
Sets the opacity of the trace.
open
Sets the open values.
opensrc
Sets the source reference on Chart Studio Cloud for
open .
selectedpoints
Array containing integer indices of selected points.
Has an effect only for traces that support selections.
Note that an empty array means an empty selection where
the `unselected` are turned on for all points, whereas,
any other non-array values means no selection all where
the `selected` and `unselected` styles have no effect.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
stream
:class:`plotly.graph_objects.candlestick.Stream`
instance or dict with compatible properties
text
Sets hover text elements associated with each sample
point. If a single string, the same string appears over
all the data points. If an array of string, the items
are mapped in order to this trace's sample points.
textsrc
Sets the source reference on Chart Studio Cloud for
text .
uid
Assign an id to this trace, Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
whiskerwidth
Sets the width of the whiskers relative to the box'
width. For example, with 1, the whiskers are as wide as
the box(es).
x
Sets the x coordinates. If absent, linear coordinate
will be generated.
xaxis
Sets a reference between this trace's x coordinates and
a 2D cartesian x axis. If "x" (the default value), the
x coordinates refer to `layout.xaxis`. If "x2", the x
coordinates refer to `layout.xaxis2`, and so on.
xcalendar
Sets the calendar system to use with `x` date data.
xperiod
Only relevant when the axis `type` is "date". Sets the
period positioning in milliseconds or "M<n>" on the x
axis. Special values in the form of "M<n>" could be
used to declare the number of months. In this case `n`
must be a positive integer.
xperiod0
Only relevant when the axis `type` is "date". Sets the
base for period positioning in milliseconds or date
string on the x0 axis. When `x0period` is round number
of weeks, the `x0period0` by default would be on a
Sunday i.e. 2000-01-02, otherwise it would be at
2000-01-01.
xperiodalignment
Only relevant when the axis `type` is "date". Sets the
alignment of data points on the x axis.
xsrc
Sets the source reference on Chart Studio Cloud for x
.
yaxis
Sets a reference between this trace's y coordinates and
a 2D cartesian y axis. If "y" (the default value), the
y coordinates refer to `layout.yaxis`. If "y2", the y
coordinates refer to `layout.yaxis2`, and so on.
row : 'all', int or None (default)
Subplot row index (starting from 1) for the trace to be
added. Only valid if figure was created using
`plotly.tools.make_subplots`.If 'all', addresses all
rows in the specified column(s).
col : 'all', int or None (default)
Subplot col index (starting from 1) for the trace to be
added. Only valid if figure was created using
`plotly.tools.make_subplots`.If 'all', addresses all
columns in the specified row(s).
secondary_y: boolean or None (default None)
If True, associate this trace with the secondary y-axis of the
subplot at the specified row and col. Only valid if all of the
following conditions are satisfied:
* The figure was created using `plotly.subplots.make_subplots`.
* The row and col arguments are not None
* The subplot at the specified row and col has type xy
(which is the default) and secondary_y True. These
properties are specified in the specs argument to
make_subplots. See the make_subplots docstring for more info.
Returns
-------
Figure
"""
from plotly.graph_objs import Candlestick
new_trace = Candlestick(
close=close,
closesrc=closesrc,
customdata=customdata,
customdatasrc=customdatasrc,
decreasing=decreasing,
high=high,
highsrc=highsrc,
hoverinfo=hoverinfo,
hoverinfosrc=hoverinfosrc,
hoverlabel=hoverlabel,
hovertext=hovertext,
hovertextsrc=hovertextsrc,
ids=ids,
idssrc=idssrc,
increasing=increasing,
legendgroup=legendgroup,
line=line,
low=low,
lowsrc=lowsrc,
meta=meta,
metasrc=metasrc,
name=name,
opacity=opacity,
open=open,
opensrc=opensrc,
selectedpoints=selectedpoints,
showlegend=showlegend,
stream=stream,
text=text,
textsrc=textsrc,
uid=uid,
uirevision=uirevision,
visible=visible,
whiskerwidth=whiskerwidth,
x=x,
xaxis=xaxis,
xcalendar=xcalendar,
xperiod=xperiod,
xperiod0=xperiod0,
xperiodalignment=xperiodalignment,
xsrc=xsrc,
yaxis=yaxis,
**kwargs
)
return self.add_trace(new_trace, row=row, col=col, secondary_y=secondary_y)
def add_carpet(
self,
a=None,
a0=None,
aaxis=None,
asrc=None,
b=None,
b0=None,
baxis=None,
bsrc=None,
carpet=None,
cheaterslope=None,
color=None,
customdata=None,
customdatasrc=None,
da=None,
db=None,
font=None,
ids=None,
idssrc=None,
meta=None,
metasrc=None,
name=None,
opacity=None,
stream=None,
uid=None,
uirevision=None,
visible=None,
x=None,
xaxis=None,
xsrc=None,
y=None,
yaxis=None,
ysrc=None,
row=None,
col=None,
secondary_y=None,
**kwargs
):
"""
Add a new Carpet trace
The data describing carpet axis layout is set in `y` and
(optionally) also `x`. If only `y` is present, `x` the plot is
interpreted as a cheater plot and is filled in using the `y`
values. `x` and `y` may either be 2D arrays matching with each
dimension matching that of `a` and `b`, or they may be 1D
arrays with total length equal to that of `a` and `b`.
Parameters
----------
a
An array containing values of the first parameter value
a0
Alternate to `a`. Builds a linear space of a
coordinates. Use with `da` where `a0` is the starting
coordinate and `da` the step.
aaxis
:class:`plotly.graph_objects.carpet.Aaxis` instance or
dict with compatible properties
asrc
Sets the source reference on Chart Studio Cloud for a
.
b
A two dimensional array of y coordinates at each carpet
point.
b0
Alternate to `b`. Builds a linear space of a
coordinates. Use with `db` where `b0` is the starting
coordinate and `db` the step.
baxis
:class:`plotly.graph_objects.carpet.Baxis` instance or
dict with compatible properties
bsrc
Sets the source reference on Chart Studio Cloud for b
.
carpet
An identifier for this carpet, so that `scattercarpet`
and `contourcarpet` traces can specify a carpet plot on
which they | |
kmer in kmer_probe_map.keys():
for probe, pos in kmer_probe_map[kmer]:
native_dict[kmer].append((probe.seq_str, pos))
native_dict = dict(native_dict)
return SharedKmerProbeMap(keys, probe_seqs_ind, probe_pos, probe_seqs,
k, probe_seqs_to_probe, native_dict)
def set_max_num_processes_for_probe_finding_pools(max_num_processes=8):
    """Set the maximum number of processes to use in a probe finding pool.
    Args:
        max_num_processes: an int (>= 1) specifying the maximum number of
            processes to use in a multiprocessing.Pool when a num_processes
            argument is not provided to probe.open_probe_finding_pool; uses
            min(the number of CPUs in the system, max_num_processes) processes
            when num_processes is not provided to
            probe.open_probe_finding_pool
    """
    # Record the cap in a module-level global; open_probe_finding_pool()
    # consults it whenever the caller does not pass num_processes explicitly.
    global _pfp_max_num_processes
    _pfp_max_num_processes = max_num_processes
# Establish the default cap (8) at import time so that
# _pfp_max_num_processes is guaranteed to exist before any pool is opened.
set_max_num_processes_for_probe_finding_pools()
def open_probe_finding_pool(kmer_probe_map,
                            cover_range_for_probe_in_subsequence_fn,
                            num_processes=None,
                            use_native_dict=False):
    """Open a pool for calling find_probe_covers_in_sequence().
    The variables to share with the processes (e.g., kmer_probe_map.keys)
    cannot be pickled and are not intended to be. But all variables that are
    global in a module (prior to making the pool) are accessible to processes
    in the pool that are executing a top-level function in the module
    (like _find_probe_covers_in_subsequence()). Thus, this function -- along
    with opening a multiprocessing pool -- also makes global the variables
    that should be shared with the worker processes.
    All the global variables that are part of this probe finding pool are
    prefixed with '_pfp'.
    Args:
        kmer_probe_map: instance of SharedKmerProbeMap
        cover_range_for_probe_in_subsequence_fn: function that
            determines whether a probe "covers" a part of a subsequence
            of sequence; if it returns None, there is no coverage;
            otherwise it returns the range of the subsequence covered
            by the probe
        num_processes: number of processes/workers to have in the pool;
            if None, uses min(the number of CPUs in the system,
            _pfp_max_num_processes)
        use_native_dict: use the native Python dict in kmer_probe_map
            rather than the primitive types that are more suited to
            sharing across processes; depending on the input, this can
            result in considerably more memory use (see SharedKmerProbeMap
            for an explanation of why) but may provide an improvement
            in runtime
    Raises:
        RuntimeError if the pool is already open; only one pool may be
        open at a time
    """
    global _pfp_is_open
    global _pfp_max_num_processes
    global _pfp_pool
    global _pfp_work_was_submitted
    global _pfp_cover_range_for_probe_in_subsequence_fn
    global _pfp_kmer_probe_map_keys
    global _pfp_kmer_probe_map_probe_seqs_ind
    global _pfp_kmer_probe_map_probe_pos
    global _pfp_kmer_probe_map_probe_seqs
    global _pfp_kmer_probe_map_probe_seqs_to_probe
    global _pfp_kmer_probe_map_k
    global _pfp_kmer_probe_map_native
    global _pfp_kmer_probe_map_use_native
    try:
        if _pfp_is_open:
            raise RuntimeError("Probe finding pool is already open")
    except NameError:
        # _pfp_is_open has never been assigned, i.e., no pool has ever been
        # opened from this module; safe to proceed
        pass
    if num_processes is None:
        num_processes = min(multiprocessing.cpu_count(),
                            _pfp_max_num_processes)
    logger.debug("Opening a probe finding pool with %d processes",
                 num_processes)
    _pfp_is_open = True
    _pfp_cover_range_for_probe_in_subsequence_fn = \
        cover_range_for_probe_in_subsequence_fn
    # Rather than saving kmer_probe_map directly, save pointers to individual
    # variables and have the function a process executes reconstruct an
    # instance of SharedKmerProbeMap from these variables
    # This way, we can be careful to only share in memory with other processes
    # variables that do not have to be copied -- i.e., those processes explicitly
    # access certain variables and we can ensure that variables that would
    # need to be copied (like kmer_probe_map.probe_seqs_to_probe) are not
    # accidentally accessed by a process
    _pfp_kmer_probe_map_keys = kmer_probe_map.keys
    _pfp_kmer_probe_map_probe_seqs_ind = kmer_probe_map.probe_seqs_ind
    _pfp_kmer_probe_map_probe_pos = kmer_probe_map.probe_pos
    _pfp_kmer_probe_map_probe_seqs = kmer_probe_map.probe_seqs
    _pfp_kmer_probe_map_probe_seqs_to_probe = \
        kmer_probe_map.probe_seqs_to_probe
    _pfp_kmer_probe_map_k = kmer_probe_map.k
    _pfp_kmer_probe_map_native = kmer_probe_map.native_dict
    _pfp_kmer_probe_map_use_native = use_native_dict
    # Note that the pool must be created at the very end of this function
    # because the only global variables shared with processes in this
    # pool are those that are created prior to creating the pool
    # Sometimes opening a pool (via multiprocessing.Pool) hangs indefinitely,
    # particularly when many pools are opened/closed repeatedly by a master
    # process; this likely stems from issues in multiprocessing.Pool. So set
    # a timeout on opening the pool, and try again if it times out. It
    # appears, from testing, that opening a pool may timeout a few times in
    # a row, but eventually succeeds.
    time_limit = 60
    while True:
        try:
            with timeout.time_limit(time_limit):
                _pfp_pool = multiprocessing.Pool(num_processes)
            break
        except timeout.TimeoutException:
            # Try again, with exponential backoff on the time limit so that
            # a slow-but-healthy machine eventually succeeds
            logger.debug("Pool initialization timed out; trying again")
            time_limit *= 2
            continue
    # No work has been submitted to this pool yet; close_probe_finding_pool()
    # uses this flag to decide whether join() can safely be called
    _pfp_work_was_submitted = False
    logger.debug("Successfully opened a probe finding pool")
def close_probe_finding_pool():
    """Close the pool for calling find_probe_covers_in_sequence().
    This closes the multiprocessing pool and also deletes pointers to the
    variables that were made global in this module in order to be shared
    with worker processes.
    Raises:
        RuntimeError if the pool is not open
    """
    global _pfp_is_open
    global _pfp_pool
    global _pfp_work_was_submitted
    global _pfp_cover_range_for_probe_in_subsequence_fn
    global _pfp_kmer_probe_map_keys
    global _pfp_kmer_probe_map_probe_seqs_ind
    global _pfp_kmer_probe_map_probe_pos
    global _pfp_kmer_probe_map_probe_seqs
    global _pfp_kmer_probe_map_probe_seqs_to_probe
    global _pfp_kmer_probe_map_k
    global _pfp_kmer_probe_map_native
    global _pfp_kmer_probe_map_use_native
    # _pfp_is_open may never have been defined (no pool was ever opened), so
    # probe it with a local flag rather than referencing it unguarded
    pfp_is_open = False
    try:
        if _pfp_is_open:
            pfp_is_open = True
    except NameError:
        pass
    if not pfp_is_open:
        raise RuntimeError("Probe finding pool is not open")
    logger.debug("Closing the probe finding pool of processes")
    del _pfp_cover_range_for_probe_in_subsequence_fn
    del _pfp_kmer_probe_map_keys
    del _pfp_kmer_probe_map_probe_seqs_ind
    del _pfp_kmer_probe_map_probe_pos
    del _pfp_kmer_probe_map_probe_seqs
    del _pfp_kmer_probe_map_probe_seqs_to_probe
    del _pfp_kmer_probe_map_k
    del _pfp_kmer_probe_map_native
    del _pfp_kmer_probe_map_use_native
    _pfp_pool.close()
    # In Python versions earlier than 2.7.3 there is a bug (see
    # http://bugs.python.org/issue12157) that occurs if a pool p is
    # created and p.join() is called, but p.map() is never called (i.e.,
    # no work is submitted to the processes in the pool); the bug
    # causes p.join() to sometimes hang indefinitely.
    # That could happen here if a probe finding pool is opened/closed
    # but find_probe_covers_in_sequence() is never called; the variable
    # _pfp_work_was_submitted ensures that join() is only called on
    # the pool if work was indeed submitted.
    if _pfp_work_was_submitted:
        # Due to issues that likely stem from bugs in the multiprocessing
        # module, calls to _pfp_pool.terminate() and _pfp_pool.join()
        # sometimes hang indefinitely (even when work was indeed submitted
        # to the processes). So make a best effort in calling these functions
        # -- i.e., use a timeout around calls to these functions
        try:
            with timeout.time_limit(60):
                _pfp_pool.terminate()
        except timeout.TimeoutException:
            # Ignore the timeout
            # If _pfp_pool.terminate() or _pfp_pool.join() fails this will
            # not affect correctness and will not necessarily prevent
            # additional pools from being created, so let the program continue
            # to execute because it will generally be able to keep making
            # progress
            logger.debug(("Terminating the probe finding pool timed out; "
                          "ignoring"))
            pass
        except Exception:
            # Narrowed from a bare `except:` so that KeyboardInterrupt and
            # SystemExit are no longer silently swallowed during teardown.
            # _pfp_pool.terminate() occassionally raises another exception
            # (NoneType) if it tries to terminate a process that has already
            # been terminated; ignoring that exception should not affect
            # correctness or prevent additional pools from being created, so
            # is better to ignore it than to let the exception crash the
            # program
            pass
        try:
            with timeout.time_limit(60):
                _pfp_pool.join()
        except timeout.TimeoutException:
            # Ignore the timeout
            # If _pfp_pool.terminate() or _pfp_pool.join() fails this will
            # not affect correctness and will not necessarily prevent
            # additional pools from being created, so let the program continue
            # to execute because it will generally be able to keep making
            # progress
            logger.debug(("Joining the probe finding pool timed out; "
                          "ignoring"))
            pass
        except Exception:
            # Narrowed from a bare `except:` for the same reason as above:
            # ignore pool-internal errors, but let process-exit signals
            # (KeyboardInterrupt/SystemExit) propagate
            pass
    del _pfp_pool
    _pfp_is_open = False
    del _pfp_work_was_submitted
    gc.collect()
    logger.debug("Successfully closed the probe finding pool")
def _find_probe_covers_in_subsequence(bounds,
sequence,
merge_overlapping=True):
"""Helper function for find_probe_covers_in_sequence().
Scans through a subsequence of sequence, as specified by bounds, and
looks for probes that cover a range of the subsequence.
Args:
bounds: tuple of the form (start, end); scan through each k-mer
in sequence beginning with the k-mer whose first base is
at start and ending with the k-mer whose first base is at
end-1
sequence: sequence (as a string) in which to find ranges that
probes cover
merge_overlapping: when True, merges overlapping ranges into
a single range and returns the ranges in sorted order; when
False, intervals returned may be overlapping (e.g., if a
probe covers two regions that overlap)
Returns:
dict mapping probe sequences (as strings) to the set of ranges
(each range is a tuple of the form (start, end)) that each probe
"covers" in the scanned subsequence
"""
if bounds is None:
return {}
global _pfp_cover_range_for_probe_in_subsequence_fn
global _pfp_kmer_probe_map_keys
global _pfp_kmer_probe_map_probe_seqs_ind
global _pfp_kmer_probe_map_probe_pos
global _pfp_kmer_probe_map_probe_seqs
global _pfp_kmer_probe_map_k
global _pfp_kmer_probe_map_use_native
if _pfp_kmer_probe_map_use_native:
global _pfp_kmer_probe_map_native
shared_kmer_probe_map = _pfp_kmer_probe_map_native
else:
shared_kmer_probe_map = SharedKmerProbeMap(
_pfp_kmer_probe_map_keys,
_pfp_kmer_probe_map_probe_seqs_ind,
_pfp_kmer_probe_map_probe_pos,
_pfp_kmer_probe_map_probe_seqs,
_pfp_kmer_probe_map_k,
None,
None)
k = _pfp_kmer_probe_map_k
# Each time a probe is found to cover | |
the transaction node dns endpoint basic auth password.
:type password: str
:param firewall_rules: Gets or sets the firewall rules.
:type firewall_rules: list[~azure.mgmt.blockchain.models.FirewallRule]
:param consortium_management_account_password: Sets the managed consortium management account
password.
:type consortium_management_account_password: str
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
'password': {'key': 'properties.password', 'type': 'str'},
'firewall_rules': {'key': 'properties.firewallRules', 'type': '[FirewallRule]'},
'consortium_management_account_password': {'key': 'properties.consortiumManagementAccountPassword', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(BlockchainMemberUpdate, self).__init__(**kwargs)
self.tags = kwargs.get('tags', None)
self.password = kwargs.get('password', None)
self.firewall_rules = kwargs.get('firewall_rules', None)
self.consortium_management_account_password = kwargs.get('consortium_management_account_password', None)
class Consortium(msrest.serialization.Model):
    """Consortium payload.

    :param name: Gets or sets the blockchain member name.
    :type name: str
    :param protocol: Gets or sets the protocol for the consortium. Possible values include:
     "NotSpecified", "Parity", "Quorum", "Corda".
    :type protocol: str or ~azure.mgmt.blockchain.models.BlockchainProtocol
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'protocol': {'key': 'protocol', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(Consortium, self).__init__(**kwargs)
        self.name = kwargs.get('name')
        self.protocol = kwargs.get('protocol')
class ConsortiumCollection(msrest.serialization.Model):
    """Collection of the consortium payload.

    :param value: Gets or sets the collection of consortiums.
    :type value: list[~azure.mgmt.blockchain.models.Consortium]
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[Consortium]'},
    }

    def __init__(self, **kwargs):
        super(ConsortiumCollection, self).__init__(**kwargs)
        self.value = kwargs.get('value')
class ConsortiumMember(msrest.serialization.Model):
    """Consortium approval.

    :param name: Gets the consortium member name.
    :type name: str
    :param display_name: Gets the consortium member display name.
    :type display_name: str
    :param subscription_id: Gets the consortium member subscription id.
    :type subscription_id: str
    :param role: Gets the consortium member role.
    :type role: str
    :param status: Gets the consortium member status.
    :type status: str
    :param join_date: Gets the consortium member join date.
    :type join_date: ~datetime.datetime
    :param date_modified: Gets the consortium member modified date.
    :type date_modified: ~datetime.datetime
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'subscription_id': {'key': 'subscriptionId', 'type': 'str'},
        'role': {'key': 'role', 'type': 'str'},
        'status': {'key': 'status', 'type': 'str'},
        'join_date': {'key': 'joinDate', 'type': 'iso-8601'},
        'date_modified': {'key': 'dateModified', 'type': 'iso-8601'},
    }

    def __init__(self, **kwargs):
        super(ConsortiumMember, self).__init__(**kwargs)
        self.name = kwargs.get('name')
        self.display_name = kwargs.get('display_name')
        self.subscription_id = kwargs.get('subscription_id')
        self.role = kwargs.get('role')
        self.status = kwargs.get('status')
        self.join_date = kwargs.get('join_date')
        self.date_modified = kwargs.get('date_modified')
class ConsortiumMemberCollection(msrest.serialization.Model):
    """Collection of consortium payload.

    :param value: Gets or sets the collection of consortiums.
    :type value: list[~azure.mgmt.blockchain.models.ConsortiumMember]
    :param next_link: Gets or sets the URL, that the client should use to fetch the next page (per
     server side paging).
     It's null for now, added for future use.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[ConsortiumMember]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ConsortiumMemberCollection, self).__init__(**kwargs)
        self.value = kwargs.get('value')
        self.next_link = kwargs.get('next_link')
class FirewallRule(msrest.serialization.Model):
    """Ip range for firewall rules.

    :param rule_name: Gets or sets the name of the firewall rules.
    :type rule_name: str
    :param start_ip_address: Gets or sets the start IP address of the firewall rule range.
    :type start_ip_address: str
    :param end_ip_address: Gets or sets the end IP address of the firewall rule range.
    :type end_ip_address: str
    """

    _attribute_map = {
        'rule_name': {'key': 'ruleName', 'type': 'str'},
        'start_ip_address': {'key': 'startIpAddress', 'type': 'str'},
        'end_ip_address': {'key': 'endIpAddress', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(FirewallRule, self).__init__(**kwargs)
        self.rule_name = kwargs.get('rule_name')
        self.start_ip_address = kwargs.get('start_ip_address')
        self.end_ip_address = kwargs.get('end_ip_address')
class NameAvailability(msrest.serialization.Model):
    """Name availability payload which is exposed in the response of the resource provider.

    :param name_available: Gets or sets the value indicating whether the name is available.
    :type name_available: bool
    :param message: Gets or sets the message.
    :type message: str
    :param reason: Gets or sets the name availability reason. Possible values include:
     "NotSpecified", "AlreadyExists", "Invalid".
    :type reason: str or ~azure.mgmt.blockchain.models.NameAvailabilityReason
    """

    _attribute_map = {
        'name_available': {'key': 'nameAvailable', 'type': 'bool'},
        'message': {'key': 'message', 'type': 'str'},
        'reason': {'key': 'reason', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(NameAvailability, self).__init__(**kwargs)
        self.name_available = kwargs.get('name_available')
        self.message = kwargs.get('message')
        self.reason = kwargs.get('reason')
class NameAvailabilityRequest(msrest.serialization.Model):
    """Name availability request payload which is exposed in the request of the resource provider.

    :param name: Gets or sets the name to check.
    :type name: str
    :param type: Gets or sets the type of the resource to check.
    :type type: str
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(NameAvailabilityRequest, self).__init__(**kwargs)
        self.name = kwargs.get('name')
        self.type = kwargs.get('type')
class OperationResult(msrest.serialization.Model):
    """Operation result payload which is exposed in the response of the resource provider.

    :param name: Gets or sets the operation name.
    :type name: str
    :param start_time: Gets or sets the operation start time.
    :type start_time: ~datetime.datetime
    :param end_time: Gets or sets the operation end time.
    :type end_time: ~datetime.datetime
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'start_time': {'key': 'startTime', 'type': 'iso-8601'},
        'end_time': {'key': 'endTime', 'type': 'iso-8601'},
    }

    def __init__(self, **kwargs):
        super(OperationResult, self).__init__(**kwargs)
        self.name = kwargs.get('name')
        self.start_time = kwargs.get('start_time')
        self.end_time = kwargs.get('end_time')
class ResourceProviderOperation(msrest.serialization.Model):
    """Operation payload which is exposed in the response of the resource provider.

    :param origin: Gets or sets the origin.
    :type origin: str
    :param name: Gets or sets the operation name.
    :type name: str
    :param is_data_action: Gets or sets a value indicating whether the operation is a data action
     or not.
    :type is_data_action: bool
    :param display: Gets or sets operation display.
    :type display: ~azure.mgmt.blockchain.models.ResourceProviderOperationDisplay
    """

    _attribute_map = {
        'origin': {'key': 'origin', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'is_data_action': {'key': 'isDataAction', 'type': 'bool'},
        'display': {'key': 'display', 'type': 'ResourceProviderOperationDisplay'},
    }

    def __init__(self, **kwargs):
        super(ResourceProviderOperation, self).__init__(**kwargs)
        self.origin = kwargs.get('origin')
        self.name = kwargs.get('name')
        self.is_data_action = kwargs.get('is_data_action')
        self.display = kwargs.get('display')
class ResourceProviderOperationCollection(msrest.serialization.Model):
    """Collection of operation payload which is exposed in the response of the resource provider.

    :param value: Gets or sets the collection of operations.
    :type value: list[~azure.mgmt.blockchain.models.ResourceProviderOperation]
    :param next_link: Gets or sets the URL, that the client should use to fetch the next page (per
     server side paging).
     It's null for now, added for future use.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[ResourceProviderOperation]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ResourceProviderOperationCollection, self).__init__(**kwargs)
        self.value = kwargs.get('value')
        self.next_link = kwargs.get('next_link')
class ResourceProviderOperationDisplay(msrest.serialization.Model):
    """Operation display payload which is exposed in the response of the resource provider.

    :param provider: Gets or sets the name of the provider for display purposes.
    :type provider: str
    :param resource: Gets or sets the name of the resource type for display purposes.
    :type resource: str
    :param operation: Gets or sets the name of the operation for display purposes.
    :type operation: str
    :param description: Gets or sets the description of the provider for display purposes.
    :type description: str
    """

    _attribute_map = {
        'provider': {'key': 'provider', 'type': 'str'},
        'resource': {'key': 'resource', 'type': 'str'},
        'operation': {'key': 'operation', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ResourceProviderOperationDisplay, self).__init__(**kwargs)
        self.provider = kwargs.get('provider')
        self.resource = kwargs.get('resource')
        self.operation = kwargs.get('operation')
        self.description = kwargs.get('description')
class ResourceTypeSku(msrest.serialization.Model):
    """Resource type Sku.

    :param resource_type: Gets or sets the resource type.
    :type resource_type: str
    :param skus: Gets or sets the Skus.
    :type skus: list[~azure.mgmt.blockchain.models.SkuSetting]
    """

    _attribute_map = {
        'resource_type': {'key': 'resourceType', 'type': 'str'},
        'skus': {'key': 'skus', 'type': '[SkuSetting]'},
    }

    def __init__(self, **kwargs):
        super(ResourceTypeSku, self).__init__(**kwargs)
        self.resource_type = kwargs.get('resource_type')
        self.skus = kwargs.get('skus')
class ResourceTypeSkuCollection(msrest.serialization.Model):
    """Collection of the resource type Sku.

    :param value: Gets or sets the collection of resource type Sku.
    :type value: list[~azure.mgmt.blockchain.models.ResourceTypeSku]
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[ResourceTypeSku]'},
    }

    def __init__(self, **kwargs):
        super(ResourceTypeSkuCollection, self).__init__(**kwargs)
        self.value = kwargs.get('value')
class Sku(msrest.serialization.Model):
    """Blockchain member Sku in payload.

    :param name: Gets or sets Sku name.
    :type name: str
    :param tier: Gets or sets Sku tier.
    :type tier: str
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'tier': {'key': 'tier', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(Sku, self).__init__(**kwargs)
        self.name = kwargs.get('name')
        self.tier = kwargs.get('tier')
class SkuSetting(msrest.serialization.Model):
"""Sku Setting.
:param name: Gets or sets the Sku name.
:type name: str
:param tier: Gets or sets the Sku tier.
:type tier: str
:param locations: Gets | |
in info/has_prefix not included in archive'.format(
filename
),
)
def check_prefix_file_mode(self):
"""Check that the has_prefix mode is either binary or text."""
if self.prefix_file_contents is not None:
_, mode, _ = self.prefix_file_contents
if mode not in ["binary", "text"]:
return Error(
self.path,
"C1130",
u'Found invalid mode "{}" in info/has_prefix'.format(mode),
)
def check_prefix_file_binary_mode(self):
"""Check that the has_prefix file binary mode is correct."""
if self.prefix_file_contents is not None:
placeholder, mode, _ = self.prefix_file_contents
if mode == "binary":
if self.name == "python":
return Error(
self.path,
"C1131",
"Binary placeholder found in info/has_prefix not allowed when building Python",
)
elif self.win_pkg:
return Error(
self.path,
"C1132",
"Binary placeholder found in info/has_prefix not allowed in Windows package",
)
elif len(placeholder) != 255:
return Error(
self.path,
"C1133",
u'Binary placeholder "{}" found in info/has_prefix does not have a length of 255 bytes'.format(
placeholder
),
)
def check_for_post_links(self):
"""Check the tar archive for pre and post link files."""
for filepath in self.paths:
if filepath.endswith(
(
"-post-link.sh",
"-pre-link.sh",
"-pre-unlink.sh",
"-post-link.bat",
"-pre-link.bat",
"-pre-unlink.bat",
)
):
return Error(
self.path,
"C1134",
u'Found pre/post link file "{}" in archive'.format(filepath),
)
def check_for_egg(self):
"""Check the tar archive for egg files."""
for filepath in self.paths:
if filepath.endswith(".egg"):
return Error(
self.path,
"C1135",
u'Found egg file "{}" in archive'.format(filepath),
)
def check_for_easy_install_script(self):
"""Check the tar archive for easy_install scripts."""
for filepath in self.paths:
if filepath.startswith(
(
os.path.join("bin", "easy_install"),
os.path.join("Scripts", "easy_install"),
)
):
return Error(
self.path,
"C1136",
u'Found easy_install script "{}" in archive'.format(filepath),
)
def check_for_pth_file(self):
"""Check the tar archive for .pth files."""
for filepath in self.paths:
if filepath.endswith(".pth"):
return Error(
self.path,
"C1137",
u'Found namespace file "{}" in archive'.format(
os.path.normpath(filepath)
),
)
def check_for_pyo_file(self):
"""Check the tar archive for .pyo files"""
for filepath in self.paths:
if filepath.endswith(".pyo") and self.name != "python":
return Error(
self.path,
"C1138",
u'Found pyo file "{}" in archive'.format(filepath),
)
def check_for_pyc_in_site_packages(self):
"""Check that .pyc files are only found within the site-packages or disutils directories."""
for filepath in self.paths:
if (
filepath.endswith(".pyc")
and "site-packages" not in filepath
and "distutils" not in filepath
):
return Error(
self.path,
"C1139",
u'Found pyc file "{}" in invalid directory'.format(filepath),
)
def check_for_2to3_pickle(self):
"""Check the tar archive for .pickle files."""
for filepath in self.paths:
if "lib2to3" in filepath and filepath.endswith(".pickle"):
return Error(
self.path,
"C1140",
u'Found lib2to3 .pickle file "{}"'.format(filepath),
)
def check_pyc_files(self):
"""Check that a .pyc file exists for every .py file in a Python 2 package."""
if "py3" not in self.build:
for filepath in self.paths:
if "site-packages" in filepath:
if filepath.endswith(".py") and (filepath + "c") not in self.paths:
return Error(
self.path,
"C1141",
u'Found python file "{}" without a corresponding pyc file'.format(
filepath
),
)
def check_menu_json_name(self):
"""Check that the Menu/package.json filename is identical to the package name."""
menu_json_files = [
filepath
for filepath in self.paths
if filepath.startswith("Menu" + os.path.sep) and filepath.endswith(".json")
]
if len(menu_json_files) == 1:
filename = menu_json_files[0]
if filename != os.path.normpath("{}.json".format(self.name)):
return Error(
self.path,
"C1142",
u'Found invalid Menu json file "{}"'.format(filename),
)
elif len(menu_json_files) > 1:
return Error(self.path, "C1143", "Found more than one Menu json file")
def check_windows_arch(self):
"""Check that Windows package .exes and .dlls contain the correct headers."""
if self.win_pkg:
arch = self.info["arch"]
if arch not in ("x86", "x86_64"):
return Error(
self.path,
"C1144",
u'Found unrecognized Windows architecture "{}"'.format(arch),
)
for member in self.archive_members:
if member.endswith((".exe", ".dll")):
with open(os.path.join(self.tmpdir, member), "rb") as file_object:
file_header = file_object.read(4096)
file_object_type = get_object_type(file_header)
if (arch == "x86" and file_object_type != "DLL I386") or (
arch == "x86_64" and file_object_type != "DLL AMD64"
):
return Error(
self.path,
"C1145",
u'Found file "{}" with object type "{}" but with arch "{}"'.format(
member, file_object_type, arch
),
)
    def check_windows_debug_link(self):
        """Ensure Windows binaries do not link to Visual Studio debug libraries."""
        if not self.win_pkg:
            return
        # Skip the (slow) lief import entirely when there is nothing to parse
        if not any(member.endswith((".exe", ".dll")) for member in self.archive_members):
            return
        # only import lief when necessary
        import lief.PE
        # vcruntime<N>d.dll is the debug variant of the VC runtime; matching is
        # case-insensitive because Windows filenames are
        pat = re.compile(r"vcruntime\d+d\.dll", re.IGNORECASE)
        for member in self.archive_members:
            if not member.endswith((".exe", ".dll")):
                continue
            # NOTE(review): assumes lief.PE.parse returns an object whose
            # `libraries` lists imported DLL names -- confirm against the
            # installed lief version's API
            b = lief.PE.parse(os.path.join(self.tmpdir, member))
            for dll in b.libraries:
                if pat.match(dll):
                    return Error(self.path, "C1149",
                                 'Found binary "{}" linking to VS debug library: "{}"'.format(member, dll))
def check_package_hashes_and_size(self):
"""Check the sha256 checksum and filesize of each file in the package."""
for member in self.archive_members:
file_path = os.path.join(self.tmpdir, member)
if member in self.paths_json_path:
if os.path.isfile(file_path):
path = self.paths_json_path[member]
size = os.stat(file_path).st_size
if size != path["size_in_bytes"]:
return Error(
self.path,
"C1147",
'Found file "{}" with filesize different than listed in paths.json'.format(
member
),
)
with open(file_path, "rb") as file_object:
sha256_digest = sha256_checksum(file_object)
if sha256_digest != path["sha256"]:
return Error(
self.path,
"C1146",
'Found file "{}" with sha256 hash different than listed in paths.json'.format(
member
),
)
def check_noarch_files(self):
"""Check that noarch packages do not contain architecture specific files."""
if self.info["subdir"] == "noarch":
for filepath in self.paths:
if filepath.endswith((".so", ".dylib", ".dll", "lib")):
return Error(
self.path,
"C1148",
u'Found architecture specific file "{}" in package.'.format(
filepath
),
)
class CondaRecipeCheck(object):
"""Create checks in order to validate conda recipes."""
    def __init__(self, meta, recipe_dir):
        """Initialize conda recipe information for use with recipe checks."""
        super(CondaRecipeCheck, self).__init__()
        # Parsed meta.yaml contents and the directory containing the recipe;
        # the check_* methods read these when validating the recipe
        self.meta = meta
        self.recipe_dir = recipe_dir
        # Precompiled patterns shared by the checks: allowed package-name
        # characters, allowed version characters, recognized URL schemes,
        # and hex-digest formats keyed by hash algorithm
        self.name_pat = re.compile(r"[a-z0-9_][a-z0-9_\-\.]*$")
        self.version_pat = re.compile(r"[\w\.]+$")
        self.url_pat = re.compile(r"(ftp|http(s)?)://")
        self.hash_pat = {
            "md5": re.compile(r"[a-f0-9]{32}$"),
            "sha1": re.compile(r"[a-f0-9]{40}$"),
            "sha256": re.compile(r"[a-f0-9]{64}$"),
        }
def check_package_name(self):
    """Check the package name in meta.yaml for proper formatting."""
    package_name = self.meta.get("package", {}).get("name", "")
    if package_name == "":
        return Error(self.recipe_dir, "C2101", "Missing package name in meta.yaml")
    # the name must match the allowed pattern and not end with a separator
    is_well_formed = (
        self.name_pat.match(package_name) is not None
        and not package_name.endswith((".", "-", "_"))
    )
    if not is_well_formed:
        return Error(
            self.recipe_dir,
            "C2102",
            u'Found invalid package name "{}" in meta.yaml'.format(package_name),
        )
    seq = get_bad_seq(package_name)
    if seq:
        return Error(
            self.recipe_dir,
            "C2103",
            u'Found invalid sequence "{}" in package name'.format(seq),
        )
def check_package_version(self):
    """Check the package version in meta.yaml for proper formatting."""
    package_version = self.meta.get("package", {}).get("version", "")
    if package_version == "":
        return Error(
            self.recipe_dir, "C2104", "Missing package version in meta.yaml"
        )
    # NOTE(review): non-string versions (e.g. a YAML float such as 1.0)
    # bypass validation entirely -- confirm this is intended.
    if isinstance(package_version, str):
        malformed = (
            self.version_pat.match(package_version) is None
            or package_version.startswith(("_", "."))
            or package_version.endswith(("_", "."))
        )
        if malformed:
            return Error(
                self.recipe_dir,
                "C2105",
                u'Found invalid package version "{}" in meta.yaml'.format(
                    package_version
                ),
            )
        seq = get_bad_seq(package_version)
        if seq:
            return Error(
                self.recipe_dir,
                "C2106",
                u'Found invalid sequence "{}" in package version'.format(seq),
            )
def check_build_number(self):
    """Check the build number in meta.yaml for proper formatting.

    Returns C2107 for a non-integer build number, C2108 for a negative one,
    and None when the number is absent or valid.
    """
    build_number = self.meta.get("build", {}).get("number")
    if build_number is None:
        return None
    try:
        build_number = int(build_number)
    # Bug fix: int() raises TypeError (not ValueError) for values such as
    # lists or dicts coming out of meta.yaml; those previously escaped
    # uncaught instead of producing C2107.
    except (ValueError, TypeError):
        # NOTE(review): the message mentions info/index.json although this
        # is a recipe (meta.yaml) check -- confirm the wording upstream.
        return Error(
            self.recipe_dir,
            "C2107",
            "Build number in info/index.json must be an integer",
        )
    if build_number < 0:
        return Error(
            self.recipe_dir,
            "C2108",
            "Build number in info/index.json cannot be a negative integer",
        )
def check_fields(self):
    """Check that the fields listed in meta.yaml are valid."""
    for section in self.meta:
        # "extra" is a free-form user section and is never validated
        if section not in FIELDS and section != "extra":
            return Error(
                self.recipe_dir,
                "C2109",
                u'Found invalid section "{}"'.format(section),
            )
        if section != "extra":
            subfield = self.meta.get(section)
            if hasattr(subfield, "keys"):
                # mapping-style section: validate its keys against FIELDS
                for key in subfield:
                    if key not in FIELDS[section]:
                        return Error(
                            self.recipe_dir,
                            "C2110",
                            u'Found invalid field "{}" in section "{}"'.format(
                                key, section
                            ),
                        )
            else:
                # list of dicts. Used in source and outputs.
                # NOTE(review): a plain-string section value would be iterated
                # character by character here -- presumably upstream parsing
                # guarantees a list; confirm.
                for entry in subfield:
                    for key in entry:
                        if key not in FIELDS[section]:
                            return Error(
                                self.recipe_dir,
                                "C2110",
                                u'Found invalid field "{}" in section "{}"'.format(
                                    key, section
                                ),
                            )
def check_requirements(self):
    """Check that the requirements listed in meta.yaml are valid.

    Validates each build/run requirement's name (C2111/C2112), flags empty
    entries (C2113) and malformed version specs (C2114), and rejects
    duplicate entries (C2115/C2116).
    """
    build_requirements = self.meta.get("requirements", {}).get("build", [])
    run_requirements = self.meta.get("requirements", {}).get("run", [])
    for requirement in build_requirements + run_requirements:
        requirement_parts = requirement.split()
        # Bug fix: this emptiness check used to run AFTER
        # requirement_parts[0] was indexed, so an empty/whitespace-only
        # requirement raised IndexError and C2113 was unreachable.
        if len(requirement_parts) == 0:
            return Error(
                self.recipe_dir, "C2113", "Found empty dependencies in meta.yaml"
            )
        requirement_name = requirement_parts[0]
        if not self.name_pat.match(requirement_name):
            if requirement in build_requirements:
                return Error(
                    self.recipe_dir,
                    "C2111",
                    u'Found invalid build requirement "{}"'.format(requirement),
                )
            elif requirement in run_requirements:
                return Error(
                    self.recipe_dir,
                    "C2112",
                    u'Found invalid run requirement "{}"'.format(requirement),
                )
        # a second token, when present, must be a valid version spec
        if len(requirement_parts) >= 2 and not fullmatch(
            ver_spec_pat, requirement_parts[1]
        ):
            return Error(
                self.recipe_dir,
                "C2114",
                u'Found invalid dependency "{}" in meta.yaml'.format(requirement),
            )
    if len(build_requirements) != len(set(build_requirements)):
        return Error(
            self.recipe_dir,
            "C2115",
            u"Found duplicate build requirements: {}".format(build_requirements),
        )
    if len(run_requirements) != len(set(run_requirements)):
        return Error(
            self.recipe_dir,
            "C2116",
            u"Found duplicate run requirements: {}".format(run_requirements),
        )
def check_about(self):
"""Check the about field in meta.yaml for proper formatting."""
summary = self.meta.get("about", {}).get("summary")
if summary is not None and len(summary) > 80:
return Error(
self.recipe_dir,
"C2117",
"Found summary with | |
# Tests generated by: guppy.gsl.Tester
# Date: Tue May 19 18:54:16 2020
class Tester:
tests = {}
def get_ex_1(self):
    """Example fixture: an empty heapyc NodeGraph."""
    from guppy.heapy.heapyc import NodeGraph
    return NodeGraph()
def get_ex_2(self):
    """Example fixture: the 'none' classifier of a trivial HeapView."""
    from guppy.heapy.heapyc import HeapView
    hv = HeapView((), ())
    return hv.cli_none()
def get_ex_3(self):
    """Example fixture: an empty immutable node set."""
    from guppy.sets import immnodeset
    return immnodeset()
def get_ex_4(self):
    """Example fixture: the MutNodeSet type itself (not an instance)."""
    from guppy.sets import MutNodeSet
    return MutNodeSet
def get_ex_5(self):
    """Example fixture: a freshly created plain user-defined class."""
    # the class is deliberately defined per call so each invocation
    # returns a distinct type object
    class C:
        pass
    return C
def test_HeapView(self, arg):
    """Generated smoke test: exercise the HeapView API surface on *arg*.

    Each statement only checks that the call/attribute access does not
    raise; return values are bound but never asserted on.
    """
    t0 = arg.limitframe
    t1 = arg.is_hiding_calling_interpreter
    t2 = arg.relate((), ())
    t3 = arg.indisize_sum([1])
    t4 = arg.numedges((), ())
    t5 = arg.reachable(self.get_ex_3(), self.get_ex_3())
    t6 = arg.reachable_x(self.get_ex_3(), self.get_ex_3())
    t7 = arg.relimg([1])
    # shpathstep exercised with its optional trailing arguments as well
    t8 = arg.shpathstep(self.get_ex_1(), self.get_ex_3(), self.get_ex_3())
    t9 = arg.shpathstep(self.get_ex_1(), self.get_ex_3(), self.get_ex_3(), self.get_ex_1())
    t10 = arg.shpathstep(self.get_ex_1(), self.get_ex_3(), self.get_ex_3(), self.get_ex_1(), True)
    t11 = arg.heap()
    t12 = arg.cli_indisize({})
    t13 = arg.cli_none()
    t14 = arg.cli_rcs(self.get_ex_1(), self.get_ex_2(), {})
    t15 = arg.cli_type()
    t16 = arg._hiding_tag_
    t17 = arg.delete_extra_type
    t18 = arg.register__hiding_tag__type(self.get_ex_4())
    t19 = arg.register_hidden_exact_type(int)
    t20 = arg.register_hidden_exact_type(self.get_ex_5())
    t21 = arg.update_dictowners(self.get_ex_1())
    t22 = arg.update_referrers(self.get_ex_1(), self.get_ex_3())
    t23 = arg.update_referrers_completely(self.get_ex_1())
# register the generated test under its target name (class-body scope:
# `tests` is the class attribute defined at the top of Tester)
tests['.tgt.kindnames.HeapView'] = test_HeapView
def cond_0(_self, x, y):
    """Membership predicate: is y an element of x?"""
    # Condition: .tgt.sets.CommonSet.cond:contains
    is_member = y in x
    return is_member
def cond_1(_self, x):
    """Emptiness predicate: is x falsy (i.e. an empty set)?"""
    # Condition: .tgt.sets.CommonSet.cond:empty
    is_empty = not x
    return is_empty
def cond_2(_self, x, y):
    """Set-equality predicate: do x and y hold the same nodes?"""
    # Condition: .tgt.sets.CommonSet.cond:equalset
    from guppy.sets import immnodeset
    lhs = immnodeset(x)
    rhs = immnodeset(y)
    return lhs == rhs
def cond_3(_self, x):
    """Truthiness predicate: coerce x to a bool."""
    # Condition: .tgt.sets.CommonSet.cond:istrue
    return True if x else False
def cond_4(_self, x, y):
    """Subset predicate: is every node of x also in y?"""
    # Condition: .tgt.sets.CommonSet.cond:subset
    from guppy.sets import immnodeset
    smaller = immnodeset(x)
    larger = immnodeset(y)
    return smaller <= larger
def test_MutNodeSet(self, arg):
t0 = arg.add(())
assert self.cond_0(arg, ()), "Failed postcondition: 'CommonSet.contains(S, e)'"
assert not self.cond_1(arg), "Failed postcondition: 'not CommonSet.empty(S)'"
arg.discard(())
t1 = arg.append(())
assert self.cond_0(arg, ()), "Failed postcondition: 'CommonSet.contains(S, e)'"
assert not self.cond_1(arg), "Failed postcondition: 'not CommonSet.empty(S)'"
t2 = arg.clear()
assert self.cond_1(arg), "Failed postcondition: 'CommonSet.empty(S)'"
t3 = arg.discard(())
assert not self.cond_0(arg, ()), "Failed postcondition: 'not CommonSet.contains(S, e)'"
arg.add(())
t4 = arg.pop()
assert not self.cond_0(arg, t4), "Failed postcondition: 'not CommonSet.contains(S, <returned value>)'"
arg.add(())
t5 = arg.remove(())
assert not self.cond_0(arg, ()), "Failed postcondition: 'not CommonSet.contains(S, e)'"
t6 = len(arg)
t7 = iter(arg)
assert self.cond_2(t7, arg), "Failed postcondition: 'CommonSet.equalset(<returned value>, x)'"
pre_0 = self.cond_0(arg, ())
t8 = arg.tas(())
assert self.cond_0(arg, ()), "Failed postcondition: 'CommonSet.contains(S, e)'"
assert not self.cond_1(arg), "Failed postcondition: 'not CommonSet.empty(S)'"
assert pre_0 == self.cond_3(t8), 'Failed postcondition equality: CommonSet.istrue(<returned value>)'
pre_0 = self.cond_0(arg, ())
t9 = arg.tac(())
assert not self.cond_0(arg, ()), "Failed postcondition: 'not CommonSet.contains(S, e)'"
assert pre_0 == self.cond_3(t9), 'Failed postcondition equality: CommonSet.istrue(<returned value>)'
t10 = arg == self.get_ex_3()
t11 = arg != self.get_ex_3()
t12 = arg <= self.get_ex_3()
t13 = arg < self.get_ex_3()
t14 = arg >= self.get_ex_3()
t15 = arg > self.get_ex_3()
t16 = () in arg
t17 = arg
t17 &= [1]
assert self.cond_4(t17, arg), "Failed postcondition: 'CommonSet.subset(<returned value>, x)'"
assert self.cond_4(t17, [1]), "Failed postcondition: 'CommonSet.subset(<returned value>, y)'"
t18 = len(t17)
t19 = t17 != self.get_ex_3()
t20 = t17 > self.get_ex_3()
t21 = iter(t17)
assert self.cond_2(t21, t17), "Failed postcondition: 'CommonSet.equalset(<returned value>, x)'"
t22 = () in t17
t23 = t17 == self.get_ex_3()
t24 = t17 <= self.get_ex_3()
t25 = t17 < self.get_ex_3()
t26 = t17 >= self.get_ex_3()
t27 = t17
t27 &= [1]
assert self.cond_4(t27, t17), "Failed postcondition: 'CommonSet.subset(<returned value>, x)'"
assert self.cond_4(t27, [1]), "Failed postcondition: 'CommonSet.subset(<returned value>, y)'"
t28 = t17
t28 |= [1]
assert self.cond_4(t17, t28), "Failed postcondition: 'CommonSet.subset(x, <returned value>)'"
assert self.cond_4([1], t28), "Failed postcondition: 'CommonSet.subset(y, <returned value>)'"
t29 = t17
t29 ^= [1]
t30 = t17
t30 -= [1]
t31 = t17 & [1]
assert self.cond_4(t31, t17), "Failed postcondition: 'CommonSet.subset(<returned value>, x)'"
assert self.cond_4(t31, [1]), "Failed postcondition: 'CommonSet.subset(<returned value>, y)'"
t32 = t17 | [1]
assert self.cond_4(t17, t32), "Failed postcondition: 'CommonSet.subset(x, <returned value>)'"
assert self.cond_4([1], t32), "Failed postcondition: 'CommonSet.subset(y, <returned value>)'"
t33 = t17 ^ [1]
t34 = t17 - [1]
t35 = arg
t35 |= [1]
assert self.cond_4(arg, t35), "Failed postcondition: 'CommonSet.subset(x, <returned value>)'"
assert self.cond_4([1], t35), "Failed postcondition: 'CommonSet.subset(y, <returned value>)'"
t36 = iter(t35)
assert self.cond_2(t36, t35), "Failed postcondition: 'CommonSet.equalset(<returned value>, x)'"
t37 = t35 <= self.get_ex_3()
t38 = len(t35)
t39 = () in t35
t40 = t35 == self.get_ex_3()
t41 = t35 != self.get_ex_3()
t42 = t35 < self.get_ex_3()
t43 = t35 >= self.get_ex_3()
t44 = t35 > self.get_ex_3()
t45 = t35
t45 |= [1]
assert self.cond_4(t35, t45), "Failed postcondition: 'CommonSet.subset(x, <returned value>)'"
assert self.cond_4([1], t45), "Failed postcondition: 'CommonSet.subset(y, <returned value>)'"
t46 = t35
t46 &= [1]
assert self.cond_4(t46, t35), "Failed postcondition: 'CommonSet.subset(<returned value>, x)'"
assert self.cond_4(t46, [1]), "Failed postcondition: 'CommonSet.subset(<returned value>, y)'"
t47 = t35
t47 ^= [1]
t48 = t35
t48 -= [1]
t49 = t35 | [1]
assert self.cond_4(t35, t49), "Failed postcondition: 'CommonSet.subset(x, <returned value>)'"
assert self.cond_4([1], t49), "Failed postcondition: 'CommonSet.subset(y, <returned value>)'"
t50 = t35 & [1]
assert self.cond_4(t50, t35), "Failed postcondition: 'CommonSet.subset(<returned value>, x)'"
assert self.cond_4(t50, [1]), "Failed postcondition: 'CommonSet.subset(<returned value>, y)'"
t51 = t35 ^ [1]
t52 = t35 - [1]
t53 = arg
t53 ^= [1]
t54 = () in t53
t55 = t53 < self.get_ex_3()
t56 = len(t53)
t57 = iter(t53)
assert self.cond_2(t57, t53), "Failed postcondition: 'CommonSet.equalset(<returned value>, x)'"
t58 = t53 == self.get_ex_3()
t59 = t53 != self.get_ex_3()
t60 = t53 <= self.get_ex_3()
t61 = t53 >= self.get_ex_3()
t62 = t53 > self.get_ex_3()
t63 = t53
t63 ^= [1]
t64 = t53
t64 &= [1]
assert self.cond_4(t64, t53), "Failed postcondition: 'CommonSet.subset(<returned value>, x)'"
assert self.cond_4(t64, [1]), "Failed postcondition: 'CommonSet.subset(<returned value>, y)'"
t65 = t53
t65 |= [1]
assert self.cond_4(t53, t65), "Failed postcondition: 'CommonSet.subset(x, <returned value>)'"
assert self.cond_4([1], t65), "Failed postcondition: 'CommonSet.subset(y, <returned value>)'"
t66 = t53
t66 -= [1]
t67 = t53 ^ [1]
t68 = t53 & [1]
assert self.cond_4(t68, t53), "Failed postcondition: 'CommonSet.subset(<returned value>, x)'"
assert self.cond_4(t68, [1]), "Failed postcondition: 'CommonSet.subset(<returned value>, y)'"
t69 = t53 | [1]
assert self.cond_4(t53, t69), "Failed postcondition: 'CommonSet.subset(x, <returned value>)'"
assert self.cond_4([1], t69), "Failed postcondition: 'CommonSet.subset(y, <returned value>)'"
t70 = t53 - [1]
t71 = arg
t71 -= [1]
t72 = t71 == self.get_ex_3()
t73 = t71 >= self.get_ex_3()
t74 = len(t71)
t75 = iter(t71)
assert self.cond_2(t75, t71), "Failed postcondition: 'CommonSet.equalset(<returned value>, x)'"
t76 = () in t71
t77 = t71 != self.get_ex_3()
t78 = t71 <= self.get_ex_3()
t79 = t71 < self.get_ex_3()
t80 = t71 > self.get_ex_3()
t81 = t71
t81 -= [1]
t82 = t71
t82 &= [1]
assert self.cond_4(t82, t71), "Failed postcondition: 'CommonSet.subset(<returned value>, x)'"
assert self.cond_4(t82, [1]), "Failed postcondition: 'CommonSet.subset(<returned value>, y)'"
t83 = t71
t83 |= [1]
assert self.cond_4(t71, t83), "Failed postcondition: 'CommonSet.subset(x, <returned value>)'"
assert self.cond_4([1], t83), "Failed postcondition: 'CommonSet.subset(y, <returned value>)'"
t84 = t71
t84 ^= [1]
t85 = t71 - [1]
t86 = t71 & [1]
assert self.cond_4(t86, t71), "Failed postcondition: 'CommonSet.subset(<returned value>, x)'"
assert self.cond_4(t86, [1]), "Failed postcondition: 'CommonSet.subset(<returned value>, y)'"
t87 = t71 | [1]
assert self.cond_4(t71, t87), "Failed postcondition: 'CommonSet.subset(x, <returned value>)'"
assert self.cond_4([1], t87), "Failed postcondition: 'CommonSet.subset(y, <returned value>)'"
t88 = t71 ^ [1]
t89 = arg & [1]
assert self.cond_4(t89, arg), "Failed postcondition: 'CommonSet.subset(<returned value>, x)'"
assert self.cond_4(t89, [1]), "Failed postcondition: 'CommonSet.subset(<returned value>, y)'"
t90 = len(t89)
t91 = t89 == self.get_ex_3()
t92 = t89 >= self.get_ex_3()
t93 = t89
t93 ^= [1]
t94 = hash(t89)
t95 = iter(t89)
assert self.cond_2(t95, t89), "Failed postcondition: 'CommonSet.equalset(<returned value>, x)'"
t96 = () in t89
t97 = t89 != self.get_ex_3()
t98 = t89 <= self.get_ex_3()
t99 = t89 < self.get_ex_3()
t100 = t89 > self.get_ex_3()
t101 = t89
t101 &= [1]
assert self.cond_4(t101, t89), "Failed postcondition: 'CommonSet.subset(<returned value>, x)'"
assert self.cond_4(t101, [1]), "Failed postcondition: 'CommonSet.subset(<returned value>, y)'"
t102 = t89
t102 |= [1]
assert self.cond_4(t89, t102), "Failed postcondition: 'CommonSet.subset(x, <returned value>)'"
assert self.cond_4([1], t102), "Failed postcondition: 'CommonSet.subset(y, <returned value>)'"
t103 = t89
t103 -= [1]
t104 = t89 & [1]
assert self.cond_4(t104, t89), "Failed postcondition: 'CommonSet.subset(<returned value>, x)'"
assert self.cond_4(t104, [1]), "Failed postcondition: 'CommonSet.subset(<returned value>, y)'"
t105 = t89 | [1]
assert self.cond_4(t89, t105), "Failed postcondition: 'CommonSet.subset(x, <returned value>)'"
assert self.cond_4([1], t105), "Failed postcondition: 'CommonSet.subset(y, <returned value>)'"
t106 = t89 ^ [1]
t107 = t89 - [1]
t108 = arg | [1]
assert self.cond_4(arg, t108), "Failed postcondition: 'CommonSet.subset(x, <returned value>)'"
assert self.cond_4([1], t108), | |
import flask
import imageio
import io
import json
import os
import pandas as pd
import sqlalchemy as sa
import tifffile
import urllib
from flask_restful import Resource
from opencell.imaging import utils
from opencell.api import payloads, cytoscape_payload
from opencell.api.cache import cache
from opencell.database import models, metadata_operations, uniprot_utils
from opencell.database import utils as db_utils
from opencell.imaging.processors import FOVProcessor
# copied from https://stackoverflow.com/questions/24816799/how-to-use-flask-cache-with-flask-restful
def cache_key():
    """Build a deterministic cache key from the request path and query args."""
    request = flask.request
    # sort both the argument names and each name's values so that
    # equivalent requests always map to the same key
    pairs = []
    for name in sorted(request.args):
        for value in sorted(request.args.getlist(name)):
            pairs.append((name, value))
    return request.path + '?' + urllib.parse.urlencode(pairs)
class ClearCache(Resource):
    '''
    Endpoint that empties the flask-caching cache
    '''
    def get(self):
        # clearing requires an application context
        with flask.current_app.app_context():
            cache.clear()
            return flask.jsonify({'result': 'cache cleared'})
class GeneNameSearch(Resource):
    '''
    A list of cell_line_ids and ensg_ids that exactly correspond to a gene name
    '''
    @cache.cached(key_prefix=cache_key)
    def get(self, gene_name):
        payload = {}
        gene_name = gene_name.upper()
        # optionally restrict results to publication-ready cell lines
        publication_ready_only = flask.request.args.get('publication_ready') == 'true'
        if flask.current_app.config['HIDE_PRIVATE_DATA']:
            publication_ready_only = True
        # search for opencell targets
        query = (
            flask.current_app.Session.query(models.CellLine)
            .join(models.CellLine.crispr_design)
            .filter(sa.func.upper(models.CrisprDesign.target_name) == gene_name)
        )
        if publication_ready_only:
            cell_line_ids = metadata_operations.get_lines_by_annotation(
                engine=flask.current_app.Session.get_bind(), annotation='publication_ready'
            )
            query = query.filter(models.CellLine.id.in_(cell_line_ids))
        # hack for the positive controls
        if gene_name in ['CLTA', 'BCAP31']:
            query = query.filter(models.CrisprDesign.plate_design_id == 'P0001')
        targets = query.all()
        # use a zero-padded 11-digit number to match ENSG ID format
        if targets:
            payload['oc_ids'] = ['OPCT%011d' % target.id for target in targets]
        # search the gene names column in the uniprot metadata table
        # (use `ilike` for case-insensitivity)
        hgnc_entries = pd.read_sql(
            '''
            select ensg_id from hgnc_metadata where %(query)s ilike symbol
            ''',
            flask.current_app.Session.get_bind(),
            params=dict(query=gene_name)
        )
        if len(hgnc_entries):
            payload['ensg_ids'] = list(set(hgnc_entries.ensg_id))
        return flask.jsonify(payload)
class FullTextSearch(Resource):
    '''
    Full-text search of all opencell targets and interactors
    This is conducted in two steps:
    First, a full-text search of the uniprot protein_names field is attempted;
    this will only yield results if the query is some common word or phrase
    (e.g., 'actin', 'membrane', 'nuclear lamina', etc).
    If this search finds no results, we assume the query is a portion of a gene name,
    and we search the uniprot gene_names field for all gene names
    that start with, or exactly match, the query.
    NOTE: the queries in this method rely on a materialized view
    called 'searchable_hgnc_metadata' defined in `define_views.sql`
    '''
    @staticmethod
    def get_approved_gene_name_from_query(session, query):
        '''
        Resolve `query` to an HGNC-approved gene name, if possible.
        Returns a tuple
        (query_is_valid_gene_name, query_is_legacy_gene_name, approved_gene_name)
        where approved_gene_name is None when the query matches neither
        an approved nor a legacy (previous/alias) gene name.
        '''
        query_is_valid_gene_name = False
        query_is_legacy_gene_name = False
        approved_gene_name = None
        # first determine if the query is an exact HGNC-approved gene name
        exact_matches = (
            session.query(models.HGNCMetadata)
            .filter(models.HGNCMetadata.symbol == query.upper())
            .one_or_none()
        )
        query_is_valid_gene_name = exact_matches is not None
        if query_is_valid_gene_name:
            approved_gene_name = query.upper()
        # check if the query is an exact legacy gene name
        else:
            # unnest both the previous symbols and the alias symbols
            # ('|'-delimited columns) and look for an exact match
            result = pd.read_sql(
                '''
                select * from (
                    select symbol, ensg_id, unnest(
                        string_to_array(prev_symbol, '|') || string_to_array(alias_symbol, '|')
                    ) as alias_or_prev
                    from hgnc_metadata
                ) tmp
                where alias_or_prev ilike %(query)s
                ''',
                session.get_bind(),
                params=dict(query=query.upper())
            )
            if len(result):
                query_is_legacy_gene_name = True
                approved_gene_name = result.iloc[0].symbol
        return query_is_valid_gene_name, query_is_legacy_gene_name, approved_gene_name
    @staticmethod
    def search_protein_names(engine, query):
        '''
        Full-text search of uniprot protein names for all opencell targets and interactors
        (this relies on a materialized view called searchable_hgnc_metadata)
        '''
        results = pd.read_sql(
            '''
            select * from (
                select *, ts_rank_cd(content, query) as relevance
                from searchable_hgnc_metadata, plainto_tsquery(%(query)s) as query
                where content @@ query
            ) as hits
            order by relevance desc;
            ''',
            engine,
            params=dict(query=query)
        )
        return results
    @staticmethod
    def search_gene_names(engine, query):
        '''
        Search for opencell targets and interactors any of whose HGNC gene names
        (current, previous, or alias) starts with the query
        '''
        # NOTE(review): string_to_array(symbol, '') wraps the single approved
        # symbol in a one-element array, unlike the '|'-delimited prev/alias
        # columns -- presumably intentional; confirm.
        results = pd.read_sql(
            '''
            select * from searchable_hgnc_metadata
            where ensg_id in (
                select ensg_id from (
                    select ensg_id,
                    unnest(
                        string_to_array(symbol, '')
                        || string_to_array(prev_symbol, '|')
                        || string_to_array(alias_symbol, '|')
                    ) as gene_name
                    from hgnc_metadata
                ) as tmp
                where gene_name like %(query)s
            );
            ''',
            engine,
            params=dict(query=('%s%%' % query.upper()))
        )
        # there's no way of ranking these results, so we create a relevance column
        # with a relevance greater than the maximum relevance
        # returned by ts_rank_cd in `search_protein_names`
        results['relevance'] = 1.0
        return results
    @cache.cached(key_prefix=cache_key)
    def get(self, query):
        engine = flask.current_app.Session.get_bind()
        # eliminate trailing spaces
        query = query.strip()
        # attempt to look up the approved gene name from the query,
        # in the event that the query is an exact alias or previous gene name
        (
            query_is_valid_gene_name, query_is_legacy_gene_name, approved_gene_name
        ) = self.get_approved_gene_name_from_query(flask.current_app.Session, query)
        # search for partial gene name matches
        partial_gene_name_matches = self.search_gene_names(engine, query)
        # if there are no partial matches but the query is an exact legacy gene name,
        # try again using the approved gene name
        if query_is_legacy_gene_name and not partial_gene_name_matches.shape[0]:
            partial_gene_name_matches = self.search_gene_names(engine, approved_gene_name)
        # always search the protein names with the original query
        protein_name_matches = self.search_protein_names(engine, query)
        # combine the results from both searches
        all_results = pd.concat((partial_gene_name_matches, protein_name_matches), axis=0)
        # eliminate duplicates
        all_results = all_results.groupby('ensg_id').first().reset_index()
        # hackish logic that determines whether the result is a target, interactor, or expressed
        all_results['status'] = 'unknown'
        for ind, row in all_results.iterrows():
            if not pd.isna(row.published_cell_line_id):
                status = 'Target'
            elif not pd.isna(row.significant_protein_group_id):
                status = 'Interactor'
            elif not pd.isna(row.measured_expression):
                status = 'Expressed'
            else:
                # the space prefix here is a deliberate hack to force undetected proteins
                # to appear last when the search results are sorted by status in the frontend
                status = ' Not detected'
            all_results.at[ind, 'status'] = status
        # force the targets to the top of the search results, then sort by relevance
        all_results.sort_values(['status', 'relevance'], inplace=True, ascending=False)
        # prettify the protein names
        all_results['protein_name'] = all_results.protein_name.apply(
            lambda s: uniprot_utils.prettify_hgnc_protein_name(s)
        )
        # drop unneeded column
        all_results.drop(labels=['content', 'significant_protein_group_id'], axis=1, inplace=True)
        # if the query was a valid (approved or legacy) gene name,
        # set the relevance of its exact match, if there was one, to 10
        exact_match_found = False
        if approved_gene_name is not None:
            mask = all_results.gene_name.apply(lambda names: approved_gene_name in names)
            all_results.loc[mask, 'relevance'] = 10
            exact_match_found = bool(mask.sum() > 0)
        return flask.jsonify({
            'is_valid_gene_name': query_is_valid_gene_name,
            'is_legacy_gene_name': query_is_legacy_gene_name,
            'approved_gene_name': approved_gene_name,
            'exact_match_found': exact_match_found,
            'hits': json.loads(all_results.to_json(orient='records')),
        })
class UniProtKBAnnotation(Resource):
    '''
    The prettified functional annotation from UniProtKB
    '''
    @cache.cached(key_prefix=cache_key)
    def get(self, uniprot_id):
        session = flask.current_app.Session
        metadata = (
            session.query(models.UniprotKBMetadata)
            .filter(models.UniprotKBMetadata.primary_uniprot_id == uniprot_id)
            .one_or_none()
        )
        # unknown uniprot_id -> 404
        if metadata is None:
            return flask.abort(404, 'No UniProtKB entry for uniprot_id %s' % uniprot_id)
        annotation = uniprot_utils.prettify_uniprot_annotation(metadata.function_comment)
        return flask.jsonify({
            'uniprot_id': uniprot_id,
            'functional_annotation': annotation,
        })
class AbundanceDataset(Resource):
    '''
    The full abundance dataset
    '''
    @cache.cached(key_prefix=cache_key)
    def get(self):
        # sample ~30% of rows server-side (random() < 0.3) to bound payload size
        sql = '''
            select measured_transcript_expression as rna, measured_protein_concentration as pro
            from abundance_measurement
            where measured_protein_concentration is not null
            and measured_protein_concentration != 'NaN'::NUMERIC
            and random() < 0.3
            '''
        engine = flask.current_app.Session.get_bind()
        df = pd.read_sql(sql, engine)
        return flask.jsonify(json.loads(df.to_json(orient='records')))
class TargetNames(Resource):
    '''
    A list of the target names and HGNC protein names for all crispr designs
    '''
    @cache.cached(key_prefix=cache_key)
    def get(self):
        """Return [{'target_name': ..., 'protein_name': ...}, ...] as JSON."""
        publication_ready_only = flask.request.args.get('publication_ready') == 'true'
        if flask.current_app.config['HIDE_PRIVATE_DATA']:
            publication_ready_only = True
        cell_line_ids = None
        if publication_ready_only:
            cell_line_ids = metadata_operations.get_lines_by_annotation(
                engine=flask.current_app.Session.get_bind(), annotation='publication_ready'
            )
        query = (
            flask.current_app.Session.query(
                models.CrisprDesign.target_name,
                models.HGNCMetadata.name.label('protein_name'),
            )
            .join(models.CrisprDesign.hgnc_metadata)
        )
        if cell_line_ids is not None:
            query = (
                query.join(models.CrisprDesign.cell_lines)
                .filter(models.CellLine.id.in_(cell_line_ids))
            )
        names = pd.DataFrame(data=[row._asdict() for row in query.all()])
        # Bug fix: when the query returns no rows, the DataFrame has no
        # columns at all, so groupby('target_name') would raise KeyError;
        # return an empty list explicitly instead.
        if names.empty:
            return flask.jsonify([])
        # eliminate duplicates
        names = names.groupby('target_name').first().reset_index()
        return flask.jsonify(json.loads(names.to_json(orient='records')))
class Plate(Resource):
    '''
    The design_id and crispr-design target names for a single plate design
    '''
    def get(self, plate_id):
        """Return {'plate_id': ..., 'targets': [...]} for the given plate."""
        plate = (
            flask.current_app.Session.query(models.PlateDesign)
            .filter(models.PlateDesign.design_id == plate_id)
            .one_or_none()
        )
        # Bug fix: one_or_none() returns None for an unknown plate_id, which
        # previously raised AttributeError below; return a 404 instead
        # (consistent with the other endpoints, e.g. UniProtKBAnnotation).
        if plate is None:
            return flask.abort(404, 'No plate found with plate_id %s' % plate_id)
        targets = [d.target_name for d in plate.crispr_designs]
        return {
            'plate_id': plate.design_id,
            'targets': targets,
        }
class CellLines(Resource):
'''
A list of cell line metadata for all cell lines,
possibly filtered by plate_id and the publication_ready annotation
'''
@cache.cached(key_prefix=cache_key)
def get(self):
Session = flask.current_app.Session
args = flask.request.args
plate_id = args.get('plate_id')
publication_ready_only = args.get('publication_ready') == 'true'
if flask.current_app.config['HIDE_PRIVATE_DATA']:
publication_ready_only = True
included_fields = args.get('fields')
included_fields = included_fields.split(',') if included_fields else []
cell_line_ids = args.get('ids')
cell_line_ids = [int(_id) for _id in cell_line_ids.split(',')] if cell_line_ids else []
if publication_ready_only:
pr_cell_line_ids = metadata_operations.get_lines_by_annotation(
engine=flask.current_app.Session.get_bind(), annotation='publication_ready'
)
if len(cell_line_ids):
cell_line_ids = list(set(cell_line_ids).intersection(pr_cell_line_ids))
else:
cell_line_ids = pr_cell_line_ids
# cell line query with the eager-loading required by generate_cell_line_payload
query = (
Session.query(models.CellLine)
.join(models.CellLine.crispr_design)
.options(
(
sa.orm.joinedload(models.CellLine.crispr_design, innerjoin=True)
.joinedload(models.CrisprDesign.hgnc_metadata, innerjoin=True)
.joinedload(models.HGNCMetadata.abundance_measurements)
), (
sa.orm.joinedload(models.CellLine.crispr_design, innerjoin=True)
.joinedload(models.CrisprDesign.uniprotkb_metadata, innerjoin=True)
),
sa.orm.joinedload(models.CellLine.facs_dataset),
sa.orm.joinedload(models.CellLine.sequencing_dataset),
sa.orm.joinedload(models.CellLine.annotation),
sa.orm.joinedload(models.CellLine.pulldowns)
)
)
if plate_id:
query = query.filter(models.CrisprDesign.plate_design_id == plate_id)
if cell_line_ids:
query = query.filter(models.CellLine.id.in_(cell_line_ids))
if 'best-fov' in included_fields:
query = query.options(
(
sa.orm.joinedload(models.CellLine.fovs, innerjoin=True)
.joinedload(models.MicroscopyFOV.rois, innerjoin=True)
.joinedload(models.MicroscopyFOVROI.thumbnails, innerjoin=True)
), (
sa.orm.joinedload(models.CellLine.fovs, innerjoin=True)
.joinedload(models.MicroscopyFOV.annotation, innerjoin=True)
)
)
lines = query.all()
# a separate query for counting FOVs and annotated FOVs per cell line
fov_counts_query = (
Session.query(
models.CellLine.id,
sa.func.count(models.MicroscopyFOV.id).label('num_fovs'),
sa.func.count(models.MicroscopyFOVAnnotation.id).label('num_annotated_fovs'),
)
.outerjoin(models.CellLine.fovs)
.outerjoin(models.MicroscopyFOV.annotation)
.filter(models.CellLine.id.in_([line.id for line in lines]))
.group_by(models.CellLine.id)
)
fov_counts = pd.DataFrame([row._asdict() for row in fov_counts_query.all()])
# hackish | |
import asyncio
import logging
import json
import inspect
import aiohttp.client_exceptions
from botocore.utils import ContainerMetadataFetcher, InstanceMetadataFetcher, \
IMDSFetcher, get_environ_proxies, BadIMDSRequestError, S3RegionRedirector, \
ClientError, InstanceMetadataRegionFetcher, IMDSRegionProvider, \
resolve_imds_endpoint_mode, ReadTimeoutError, HTTPClientError, \
DEFAULT_METADATA_SERVICE_TIMEOUT, METADATA_BASE_URL, os
from botocore.exceptions import (
InvalidIMDSEndpointError, MetadataRetrievalError,
)
import botocore.awsrequest
import aiobotocore.httpsession
from aiobotocore._helpers import asynccontextmanager
logger = logging.getLogger(__name__)
RETRYABLE_HTTP_ERRORS = (aiohttp.client_exceptions.ClientError, asyncio.TimeoutError)
class _RefCountedSession(aiobotocore.httpsession.AIOHTTPSession):
    """AIOHTTPSession wrapper that reference-counts concurrent use.

    The underlying session is entered (__aenter__) when the first acquirer
    arrives and exited (__aexit__) when the last one leaves, so overlapping
    acquire() calls share one live session.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.__ref_count = 0  # number of callers currently inside acquire()
        self.__lock = None  # created lazily so __init__ needs no running loop
    @asynccontextmanager
    async def acquire(self):
        if not self.__lock:
            # lazy creation: there is no await between the check and the
            # assignment, so this cannot race within a single event loop
            self.__lock = asyncio.Lock()
        # ensure we have a session
        async with self.__lock:
            self.__ref_count += 1
            try:
                if self.__ref_count == 1:
                    # first acquirer actually opens the session
                    await self.__aenter__()
            except BaseException:
                # opening failed: undo the count so a later caller retries
                self.__ref_count -= 1
                raise
        try:
            yield self
        finally:
            async with self.__lock:
                if self.__ref_count == 1:
                    # last acquirer closes the session before decrementing
                    await self.__aexit__(None, None, None)
                self.__ref_count -= 1
class AioIMDSFetcher(IMDSFetcher):
    """Async port of botocore's IMDSFetcher backed by an aiohttp session."""
    def __init__(self, timeout=DEFAULT_METADATA_SERVICE_TIMEOUT, # noqa: E501, lgtm [py/missing-call-to-init]
                 num_attempts=1, base_url=METADATA_BASE_URL,
                 env=None, user_agent=None, config=None, session=None):
        # deliberately does not call super().__init__ (see lgtm suppression):
        # the base initializer is re-implemented here with an async session
        self._timeout = timeout
        self._num_attempts = num_attempts
        self._base_url = self._select_base_url(base_url, config)
        if env is None:
            env = os.environ.copy()
        self._disabled = env.get('AWS_EC2_METADATA_DISABLED', 'false').lower()
        self._disabled = self._disabled == 'true'
        self._user_agent = user_agent
        # ref-counted session so concurrent fetches share one aiohttp session
        self._session = session or _RefCountedSession(
            timeout=self._timeout,
            proxies=get_environ_proxies(self._base_url),
        )
    async def _fetch_metadata_token(self):
        """Fetch an IMDSv2 session token; None means fall back to IMDSv1."""
        self._assert_enabled()
        url = self._construct_url(self._TOKEN_PATH)
        headers = {
            'x-aws-ec2-metadata-token-ttl-seconds': self._TOKEN_TTL,
        }
        self._add_user_agent(headers)
        request = botocore.awsrequest.AWSRequest(
            method='PUT', url=url, headers=headers)
        async with self._session.acquire() as session:
            for i in range(self._num_attempts):
                try:
                    response = await session.send(request.prepare())
                    if response.status_code == 200:
                        # NOTE: `text` is awaitable on this session's response
                        return await response.text
                    elif response.status_code in (404, 403, 405):
                        # token endpoint unsupported or forbidden: use IMDSv1
                        return None
                    elif response.status_code in (400,):
                        raise BadIMDSRequestError(request)
                except ReadTimeoutError:
                    return None
                except RETRYABLE_HTTP_ERRORS as e:
                    logger.debug(
                        "Caught retryable HTTP exception while making metadata "
                        "service request to %s: %s", url, e, exc_info=True)
                except HTTPClientError as e:
                    # errno 8 (threaded resolver) or aiohttp's async-resolver
                    # message both indicate an unresolvable IMDS hostname
                    error = e.kwargs.get('error')
                    if error and getattr(error, 'errno', None) == 8 or \
                            str(getattr(error, 'os_error', None)) == \
                            'Domain name not found':  # threaded vs async resolver
                        raise InvalidIMDSEndpointError(endpoint=url, error=e)
                    else:
                        raise
        return None
    async def _get_request(self, url_path, retry_func, token=None):
        """GET a metadata path with retries; retry_func may be sync or async."""
        self._assert_enabled()
        if retry_func is None:
            retry_func = self._default_retry
        url = self._construct_url(url_path)
        headers = {}
        if token is not None:
            headers['x-aws-ec2-metadata-token'] = token
        self._add_user_agent(headers)
        async with self._session.acquire() as session:
            for i in range(self._num_attempts):
                try:
                    request = botocore.awsrequest.AWSRequest(
                        method='GET', url=url, headers=headers)
                    response = await session.send(request.prepare())
                    # retry_func may be a plain function or a coroutine
                    should_retry = retry_func(response)
                    if inspect.isawaitable(should_retry):
                        should_retry = await should_retry
                    if not should_retry:
                        return response
                except RETRYABLE_HTTP_ERRORS as e:
                    logger.debug(
                        "Caught retryable HTTP exception while making metadata "
                        "service request to %s: %s", url, e, exc_info=True)
        raise self._RETRIES_EXCEEDED_ERROR_CLS()
    async def _default_retry(self, response):
        # retry on any non-200 or empty-bodied response
        return (
            await self._is_non_ok_response(response) or
            await self._is_empty(response)
        )
    async def _is_non_ok_response(self, response):
        if response.status_code != 200:
            await self._log_imds_response(response, 'non-200', log_body=True)
            return True
        return False
    async def _is_empty(self, response):
        if not await response.content:
            await self._log_imds_response(response, 'no body', log_body=True)
            return True
        return False
    async def _log_imds_response(self, response, reason_to_log, log_body=False):
        """Debug-log an IMDS response, optionally including its body."""
        statement = (
            "Metadata service returned %s response "
            "with status code of %s for url: %s"
        )
        logger_args = [
            reason_to_log, response.status_code, response.url
        ]
        if log_body:
            statement += ", content body: %s"
            logger_args.append(await response.content)
        logger.debug(statement, *logger_args)
class AioInstanceMetadataFetcher(AioIMDSFetcher, InstanceMetadataFetcher):
    """Async fetcher for IAM role credentials from the EC2 instance
    metadata service (IMDS)."""

    async def retrieve_iam_role_credentials(self):
        """Fetch the instance's IAM role credentials from IMDS.

        :returns: a dict with ``role_name``/``access_key``/``secret_key``/
            ``token``/``expiry_time`` keys on success, or ``{}`` when the
            credentials are incomplete, retries are exhausted, or the IMDS
            request was malformed.
        """
        try:
            token = await self._fetch_metadata_token()
            role_name = await self._get_iam_role(token)
            credentials = await self._get_credentials(role_name, token)
            if self._contains_all_credential_fields(credentials):
                return {
                    'role_name': role_name,
                    'access_key': credentials['AccessKeyId'],
                    'secret_key': credentials['SecretAccessKey'],
                    'token': credentials['Token'],
                    'expiry_time': credentials['Expiration'],
                }
            else:
                if 'Code' in credentials and 'Message' in credentials:
                    # BUGFIX: the two string fragments previously joined
                    # without a space ("retrievingcredentials").
                    logger.debug('Error response received when retrieving '
                                 'credentials: %s.', credentials)
                return {}
        except self._RETRIES_EXCEEDED_ERROR_CLS:
            logger.debug("Max number of attempts exceeded (%s) when "
                         "attempting to retrieve data from metadata service.",
                         self._num_attempts)
        except BadIMDSRequestError as e:
            logger.debug("Bad IMDS request: %s", e.request)
        return {}

    async def _get_iam_role(self, token=None):
        """Return the IAM role name advertised by IMDS."""
        return await (await self._get_request(
            url_path=self._URL_PATH,
            retry_func=self._needs_retry_for_role_name,
            token=token,
        )).text

    async def _get_credentials(self, role_name, token=None):
        """Return the parsed JSON credential document for *role_name*."""
        r = await self._get_request(
            url_path=self._URL_PATH + role_name,
            retry_func=self._needs_retry_for_credentials,
            token=token
        )
        return json.loads(await r.text)

    async def _is_invalid_json(self, response):
        """Return True (after logging) when the body is not valid JSON."""
        try:
            json.loads(await response.text)
            return False
        except ValueError:
            await self._log_imds_response(response, 'invalid json')
            return True

    async def _needs_retry_for_role_name(self, response):
        """Retry the role-name request on non-200 or empty responses."""
        return (
            await self._is_non_ok_response(response) or
            await self._is_empty(response)
        )

    async def _needs_retry_for_credentials(self, response):
        """Retry the credentials request on non-200, empty, or non-JSON
        responses."""
        return (
            await self._is_non_ok_response(response) or
            await self._is_empty(response) or
            await self._is_invalid_json(response)
        )
class AioIMDSRegionProvider(IMDSRegionProvider):
    """Async region provider backed by the EC2 instance metadata service."""

    async def provide(self):
        """Provide the region value from IMDS."""
        instance_region = await self._get_instance_metadata_region()
        return instance_region
    async def _get_instance_metadata_region(self):
        # _get_fetcher is inherited; it lazily creates the fetcher via
        # _create_fetcher below.
        fetcher = self._get_fetcher()
        region = await fetcher.retrieve_region()
        return region
    def _create_fetcher(self):
        """Build an AioInstanceMetadataRegionFetcher from session config."""
        metadata_timeout = self._session.get_config_variable(
            'metadata_service_timeout')
        metadata_num_attempts = self._session.get_config_variable(
            'metadata_service_num_attempts')
        imds_config = {
            'ec2_metadata_service_endpoint': self._session.get_config_variable(
                'ec2_metadata_service_endpoint'),
            'ec2_metadata_service_endpoint_mode': resolve_imds_endpoint_mode(
                self._session
            )
        }
        fetcher = AioInstanceMetadataRegionFetcher(
            timeout=metadata_timeout,
            num_attempts=metadata_num_attempts,
            env=self._environ,
            user_agent=self._session.user_agent(),
            config=imds_config,
        )
        return fetcher
class AioInstanceMetadataRegionFetcher(AioIMDSFetcher, InstanceMetadataRegionFetcher):
    """Async fetcher that derives the region from the instance's
    availability zone reported by IMDS."""

    async def retrieve_region(self):
        """Return the instance's region, or None when retries are
        exhausted."""
        try:
            region = await self._get_region()
            return region
        except self._RETRIES_EXCEEDED_ERROR_CLS:
            logger.debug("Max number of attempts exceeded (%s) when "
                         "attempting to retrieve data from metadata service.",
                         self._num_attempts)
        return None
    async def _get_region(self):
        token = await self._fetch_metadata_token()
        response = await self._get_request(
            url_path=self._URL_PATH,
            retry_func=self._default_retry,
            token=token
        )
        availability_zone = await response.text
        # The AZ string is the region plus a single trailing zone letter
        # (e.g. "us-east-1a" -> "us-east-1").
        region = availability_zone[:-1]
        return region
class AioS3RegionRedirector(S3RegionRedirector):
    """Async variant of S3RegionRedirector: detects wrong-region S3
    responses and rewrites the request to the bucket's real region."""

    async def redirect_from_error(self, request_dict, response, operation, **kwargs):
        """Inspect an error *response* and, when it indicates a wrong-region
        request, update signing context and request URL for a retry.

        :returns: 0 to request an immediate retry, or None when no redirect
            applies.
        """
        if response is None:
            # This could be none if there was a ConnectionError or other
            # transport error.
            return
        if self._is_s3_accesspoint(request_dict.get('context', {})):
            logger.debug(
                'S3 request was previously to an accesspoint, not redirecting.'
            )
            return
        if request_dict.get('context', {}).get('s3_redirected'):
            logger.debug(
                'S3 request was previously redirected, not redirecting.')
            return
        # response is a (http_response, parsed_dict) pair.
        error = response[1].get('Error', {})
        error_code = error.get('Code')
        response_metadata = response[1].get('ResponseMetadata', {})
        # We have to account for 400 responses because
        # if we sign a Head* request with the wrong region,
        # we'll get a 400 Bad Request but we won't get a
        # body saying it's an "AuthorizationHeaderMalformed".
        is_special_head_object = (
            error_code in ['301', '400'] and
            operation.name == 'HeadObject'
        )
        is_special_head_bucket = (
            error_code in ['301', '400'] and
            operation.name == 'HeadBucket' and
            'x-amz-bucket-region' in response_metadata.get('HTTPHeaders', {})
        )
        is_wrong_signing_region = (
            error_code == 'AuthorizationHeaderMalformed' and
            'Region' in error
        )
        is_redirect_status = response[0] is not None and \
            response[0].status_code in [301, 302, 307]
        is_permanent_redirect = error_code == 'PermanentRedirect'
        if not any([is_special_head_object, is_wrong_signing_region,
                    is_permanent_redirect, is_special_head_bucket,
                    is_redirect_status]):
            return
        bucket = request_dict['context']['signing']['bucket']
        client_region = request_dict['context'].get('client_region')
        new_region = await self.get_bucket_region(bucket, response)
        if new_region is None:
            logger.debug(
                "S3 client configured for region %s but the bucket %s is not "
                "in that region and the proper region could not be "
                "automatically determined." % (client_region, bucket))
            return
        logger.debug(
            "S3 client configured for region %s but the bucket %s is in region"
            " %s; Please configure the proper region to avoid multiple "
            "unnecessary redirects and signing attempts." % (
                client_region, bucket, new_region))
        endpoint = self._endpoint_resolver.resolve('s3', new_region)
        endpoint = endpoint['endpoint_url']
        signing_context = {
            'region': new_region,
            'bucket': bucket,
            'endpoint': endpoint
        }
        request_dict['context']['signing'] = signing_context
        # Remember the bucket's region so future requests skip the redirect.
        self._cache[bucket] = signing_context
        self.set_request_url(request_dict, request_dict['context'])
        request_dict['context']['s3_redirected'] = True
        # Return 0 so it doesn't wait to retry
        return 0
    async def get_bucket_region(self, bucket, response):
        """Determine *bucket*'s region from headers, error body, or a HEAD
        request, in that order; returns None when undeterminable."""
        # First try to source the region from the headers.
        service_response = response[1]
        response_headers = service_response['ResponseMetadata']['HTTPHeaders']
        if 'x-amz-bucket-region' in response_headers:
            return response_headers['x-amz-bucket-region']
        # Next, check the error body
        region = service_response.get('Error', {}).get('Region', None)
        if region is not None:
            return region
        # Finally, HEAD the bucket. No other choice sadly.
        try:
            response = await self._client.head_bucket(Bucket=bucket)
            headers = response['ResponseMetadata']['HTTPHeaders']
        except ClientError as e:
            # Even a failed HEAD carries the region header.
            headers = e.response['ResponseMetadata']['HTTPHeaders']
        region = headers.get('x-amz-bucket-region', None)
        return region
class AioContainerMetadataFetcher(ContainerMetadataFetcher):
    def __init__(self, session=None, sleep=asyncio.sleep): # noqa: E501, lgtm [py/missing-call-to-init]
        """Create the fetcher.

        :param session: HTTP session to use; a _RefCountedSession with the
            class timeout is created when omitted.
        :param sleep: awaitable sleep function, injectable for tests.
        """
        if session is None:
            session = _RefCountedSession(
                timeout=self.TIMEOUT_SECONDS
            )
        self._session = session
        self._sleep = sleep
    async def retrieve_full_uri(self, full_url, headers=None):
        """Validate *full_url* against the allowed hosts, then fetch and
        return the parsed JSON credentials from it."""
        self._validate_allowed_url(full_url)
        return await self._retrieve_credentials(full_url, headers)
    async def retrieve_uri(self, relative_uri):
        """Retrieve JSON metadata from ECS metadata.
        :type relative_uri: str
        :param relative_uri: A relative URI, e.g "/foo/bar?id=123"
        :return: The parsed JSON response.
        """
        # full_url (inherited) joins the relative URI onto the ECS
        # metadata base endpoint.
        full_url = self.full_url(relative_uri)
        return await self._retrieve_credentials(full_url)
    async def _retrieve_credentials(self, full_url, extra_headers=None):
        """GET *full_url*, retrying up to RETRY_ATTEMPTS times with
        SLEEP_TIME between attempts; re-raises the last
        MetadataRetrievalError when retries are exhausted."""
        headers = {'Accept': 'application/json'}
        if extra_headers is not None:
            headers.update(extra_headers)
        attempts = 0
        while True:
            try:
                return await self._get_response(
                    full_url, headers, self.TIMEOUT_SECONDS)
            except MetadataRetrievalError as e:
                logger.debug("Received error when attempting to retrieve "
                             "container metadata: %s", e, exc_info=True)
                # Sleep happens even before the final re-raise, matching
                # the original pacing behavior.
                await self._sleep(self.SLEEP_TIME)
                attempts += 1
                if attempts >= self.RETRY_ATTEMPTS:
                    raise
async def _get_response(self, full_url, headers, timeout):
try:
async with self._session.acquire() as session:
AWSRequest = botocore.awsrequest.AWSRequest
request = AWSRequest(method='GET', url=full_url, headers=headers)
response = await session.send(request.prepare())
response_text = (await response.content).decode('utf-8')
if response.status_code != 200:
raise MetadataRetrievalError(
error_msg=(
"Received non 200 response (%s) from ECS metadata: %s"
) % (response.status_code, response_text)
)
try:
return json.loads(response_text)
except ValueError:
error_msg = (
"Unable to parse JSON returned from ECS metadata services"
)
logger.debug('%s:%s', error_msg, response_text)
raise | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import sys
import json
import time
import inspect
try:
# noinspection PyUnresolvedReferences
from fabric.api import local, abort, env
# noinspection PyUnresolvedReferences
from fabric.colors import red, green, yellow, cyan
# noinspection PyUnresolvedReferences
from fabric.main import main as _fabmain
except ImportError:
sys.stderr.write('ERROR: No module named fabric\n'
'Please install the python fabric package (http://www.fabfile.org/)\n')
sys.exit(99)
###############################################################################
# globals
###############################################################################
DEBUG = False
SELF = os.path.realpath(__file__)
# Installation root of the erigones stack; overridable via environment.
ERIGONES_HOME = os.environ.get('ERIGONES_HOME', '/opt/erigones')
# Path to the "es" CLI binary every test shells out to.
ES = os.path.join(ERIGONES_HOME, 'bin', 'es')
# API status codes treated as success (affects expected CLI return code).
STATUS_CODES_OK = (200, 201)
# Running counters updated by _test() and reported by _summary().
TESTS_RUN = 0
TESTS_FAIL = 0
TESTS_WARN = 0
USER_TASK_PREFIX = ''  # Used by tests
ADMIN_TASK_PREFIX = ''
# Splits a task ID prefix into alternating numeric/alphabetic parts.
RE_TASK_PREFIX = re.compile(r'([a-zA-Z]+)')
DEFAULT_TASK_PREFIX = [None, 'e', '1', 'd', '1']
# Keep fabric from aborting on non-zero exit codes; tests assert on them.
env.warn_only = True
if not os.path.exists(ES):
    sys.stderr.write('ERROR: %s does not exist\n' % ES)
    sys.exit(100)
###############################################################################
# helpers
###############################################################################
def _es(*argv):
    """Run the es CLI with the given arguments and capture its output."""
    command = '{0} {1}'.format(ES, ' '.join(argv))
    return local(command, capture=True)
def _exp_compare(exp, text, equal=False):
if isinstance(exp, dict):
for i in exp.keys():
if not _exp_compare(exp[i], text[i], True):
return False
elif isinstance(exp, (list, tuple)):
for i in range(len(exp)):
if not _exp_compare(exp[i], text[i], True):
return False
elif equal:
if exp != text:
return False
else:
if exp not in text:
return False
return True
def _test(cmd, exp, scode=200, rc=0, custom_test=None, dc='main'):
    """Run one es CLI command and validate its JSON output.

    :param cmd: es command line (without the binary path).
    :param exp: expected structure, matched via _exp_compare().
    :param scode: expected API status code in the JSON output.
    :param rc: expected process return code.
    :param custom_test: optional callable given the parsed 'text' payload;
        must return truthy for the test to pass.
    :param dc: datacenter appended as ``-dc <dc>``; falsy to skip.
    :return: True when the test passed, False otherwise.
    """
    # Name of the calling test function, used in pass/fail messages.
    caller = inspect.stack()[1][3]
    global TESTS_RUN
    TESTS_RUN += 1
    def log_fail(res, s=''):
        # Record and print a failure for the calling test.
        global TESTS_FAIL
        TESTS_FAIL += 1
        print(red('Test %s failed: %s' % (caller, s)))
        print(res)
    # noinspection PyUnusedLocal
    def log_warn(res, s=''):
        # Record and print a warning (currently unused by the flow below).
        global TESTS_WARN
        TESTS_WARN += 1
        print(yellow('Test %s warning: %s' % (caller, s)))
        print(res)
    def log_ok(s=''):
        print(green('Test %s succeeded %s' % (caller, s)))
    if dc:
        cmd += ' -dc %s' % dc
    ret = False
    out = _es(cmd)
    # Validation cascade: return code -> JSON parse -> status code ->
    # expected structure -> optional custom test. First failure wins.
    if out.return_code != rc:
        log_fail(out, 'return_code='+str(out.return_code))
    else:
        # noinspection PyBroadException
        try:
            jout = json.loads(out)
        except:
            log_fail(out, 'json not parsed')
        else:
            try:
                if jout['status'] != scode:
                    raise ValueError('status code mismatch')
            except Exception as e:
                log_fail(out, str(e))
            else:
                text = jout['text']
                try:
                    if not _exp_compare(exp, text):
                        raise Exception('test structure not found')
                except Exception as e:
                    # KeyError/IndexError from _exp_compare also land here
                    # and count as a structural mismatch.
                    log_fail(out, str(e))
                else:
                    if custom_test:
                        try:
                            if custom_test(text):
                                log_ok()
                                ret = True
                            else:
                                log_fail(out, 'custom test failed')
                        except Exception as e:
                            log_fail(out, 'custom test got exception: ' + str(e))
                    else:
                        log_ok()
                        ret = True
    return ret
def _summary():
    """Print run/fail/warn/success totals and exit with the failure count.

    BUGFIX: the original wrote ``print('''...''') % (...)`` — a Python 2
    leftover that applies ``%`` to print()'s None return value and raises
    TypeError on Python 3. The formatting now happens inside the call.
    """
    print('''
*** Test summary ***
Total: %s
Failed: %s
Warning: %s
Successful: %s
''' % (TESTS_RUN, red(TESTS_FAIL), yellow(TESTS_WARN), green(TESTS_RUN-(TESTS_FAIL+TESTS_WARN))))
    raise SystemExit(TESTS_FAIL)
def _remove_token_store():
# noinspection PyBroadException
try:
os.remove('/tmp/esdc.session')
except:
pass
def _sleep(seconds):
    """Pause for *seconds* seconds, printing a progress dot each second,
    to avoid API throttling between test batches."""
    print(cyan('\n***\n* Taking a %s seconds break to avoid API throttling.\n***' % (seconds,)))
    elapsed = 0
    while elapsed < seconds:
        sys.stdout.write('.')
        sys.stdout.flush()
        time.sleep(1)
        elapsed += 1
    print('\n')
def _task_prefix_from_task_id(task_id):
    """Get (user ID, task type, owner ID) tuple from task ID"""
    # The last 24 characters of a task ID are the unique suffix; the
    # remainder is the prefix we split into its components.
    parts = RE_TASK_PREFIX.split(task_id[:-24])
    # Pad with defaults for any components the prefix did not contain.
    missing = DEFAULT_TASK_PREFIX[len(parts):]
    return tuple(parts + missing)
###############################################################################
# automatic test creation
###############################################################################
def test(name=''):
    """create test from stdin - pipe \"es -d\" into this

    Reads the JSON output of an ``es -d`` run from stdin and prints a
    ready-to-paste test function; *name* overrides the auto-generated name.

    BUGFIX: the final ``print('''...''') % (...)`` applied ``%`` to
    print()'s None return value (Python 2 leftover, TypeError on Python 3);
    the formatting now happens inside the call.
    """
    if sys.stdin.isatty():
        abort(red('no stdin (pipe the output of es -d command)'))
    stdin = [line.strip() for line in sys.stdin.readlines()]
    if not stdin:
        abort(red('no stdin'))
    # noinspection PyBroadException
    try:
        jin = json.loads('\n'.join(stdin))
    except:
        abort(red('stdin json not parsed'))
    # noinspection PyBroadException
    try:
        # noinspection PyUnboundLocalVariable
        cmd, text, code = jin['command'], jin['text'], jin['status']
        # noinspection PyBroadException
        try:
            # task_id is unique per run, so it must not be asserted on.
            text.pop('task_id')
        except:
            pass
    except:
        abort(red('es output not parsed (missing -d option?)'))
    if not name:
        # Derive a test name like "<module>_<sub>_<method>_<status>" from
        # the command's method and resource path.
        # noinspection PyBroadException
        try:
            # noinspection PyUnboundLocalVariable
            _cmd = cmd.split()
            _met = _cmd[0]
            _res = _cmd[1][1:].split('/')
            _mod = _res[0]
            _sub = '_'
            # noinspection PyBroadException
            try:
                _sub += '_'.join(_res[2:])
            except:
                pass
            # noinspection PyUnboundLocalVariable
            name = '%s%s_%s_%s' % (_mod, _sub, _met, code)
        except:
            abort(red('could not generate test name'))
    # noinspection PyUnboundLocalVariable
    print('''
def _%s():
    cmd = '%s'
    exp = %s
    _test(cmd, exp, %s, %d)
''' % (name, cmd, text, code, 0 if int(code) in STATUS_CODES_OK else 1))
    sys.exit(0)
###############################################################################
# ping
###############################################################################
def _ping():
    """Smoke test: GET /ping must return 'pong' with status 200."""
    return _test('get /ping', 'pong', 200)
###############################################################################
# accounts tests
###############################################################################
# Account API tests: login/logout flows and user create/delete, including
# expected-failure variants.
def _accounts_login_user_good(username='test', password='<PASSWORD>'):
    cmd = 'login -username %s -password %s' % (username, password)
    cod = 200
    exp = {"detail": "Welcome to Danube Cloud API."}
    _test(cmd, exp, cod)
def _accounts_login_admin_good(username='admin', password='<PASSWORD>'):
    cmd = 'login -username %s -password %s' % (username, password)
    cod = 200
    exp = {"detail": "Welcome to Danube Cloud API."}
    _test(cmd, exp, cod)
def _accounts_user_create_test_201():
    cmd = 'create /accounts/user/test -password <PASSWORD> -first_name Tester -last_name Tester ' \
          '-email <EMAIL> -api_access true'
    exp = {u'status': u'SUCCESS', u'result': {u'username': u'test', u'first_name': u'Tester', u'last_name': u'Tester',
                                              u'api_access': True, u'is_active': True, u'is_super_admin': False,
                                              u'callback_key': u'***', u'groups': [], u'api_key': u'***',
                                              u'email': u'<EMAIL>'}}
    _test(cmd, exp, 201)
def _accounts_user_delete_test_200():
    cmd = 'delete /accounts/user/test'
    exp = {u'status': u'SUCCESS', u'result': None}
    _test(cmd, exp, 200)
# Missing/invalid credential combinations must fail with 400 and rc=4.
def _accounts_login_bad1():
    _test('login', {"detail": {"username": ["This field is required."],
                               "password": ["<PASSWORD>."]}}, 400, 4)
def _accounts_login_bad2():
    _test('login -password <PASSWORD>', {"detail": {"username": ["This field is required."]}}, 400, 4)
def _accounts_login_bad3():
    _test('login -username test', {"detail": {"password": ["<PASSWORD>."]}}, 400, 4)
def _accounts_login_bad4():
    _test('login -username test -password test', {"detail": "Unable to log in with provided credentials."}, 400, 4)
def _accounts_logout_good():
    _test('logout', {"detail": "Bye."}, 200)
def _accounts_logout_bad():
    # With the token store removed, logout must be rejected as
    # unauthenticated.
    _remove_token_store()
    _test('logout', {"detail": "Authentication credentials were not provided."}, 403, 1)
def _accounts_delete_test_vm_relation_400():
    # Deleting a user that still owns a VM must be refused.
    cmd = 'delete /accounts/user/test'
    exp = {u'status': u'FAILURE',
           u'result': {u'detail': u'Cannot delete user, because he has relations to some objects.',
                       u'relations': {u'VM': [u'test99.example.com']}}}
    _test(cmd, exp, 400, 1)
###############################################################################
# task tests
###############################################################################
# Helpers that capture the task-ID prefix of the current user/admin from a
# live API response, for use in later task tests.
def _set_user_task_prefix(text):
    # noinspection PyBroadException
    try:
        global USER_TASK_PREFIX
        USER_TASK_PREFIX = ''.join(_task_prefix_from_task_id(text['task_id']))
    except:
        return False
    else:
        return True
def _set_admin_task_prefix(text):
    # noinspection PyBroadException
    try:
        global ADMIN_TASK_PREFIX
        ADMIN_TASK_PREFIX = ''.join(_task_prefix_from_task_id(text['task_id']))
    except:
        return False
    else:
        return True
def _task_get_prefix(set_fun=_set_user_task_prefix):
    # Issue any cheap API call and let set_fun harvest the task prefix from
    # the response via the custom_test hook.
    cmd = 'get /vm'
    exp = {'status': 'SUCCESS', 'result': []}
    _test(cmd, exp, 200, custom_test=set_fun)
def _user_task_prefix():
    assert USER_TASK_PREFIX, 'Run _task_get_prefix() first'
    return USER_TASK_PREFIX
def _admin_task_prefix():
    assert ADMIN_TASK_PREFIX, 'Run _task_get_prefix() first'
    return ADMIN_TASK_PREFIX
# Task API tests. Fake task IDs are built from the captured user/admin
# prefixes; the *_403 variants assert permission and authentication checks.
def _task__get_200():
    cmd = 'get /task'
    exp = []
    _test(cmd, exp, 200)
def _task_details_get_404_1():
    cmd = 'get /task/%s-0000-1111-aaaa-12345678' % _user_task_prefix()
    exp = {'detail': 'Task does not exist'}
    _test(cmd, exp, 404, 1)
def _task_details_get_403_1():
    cmd = 'get /task/%s-6f75849b-c9ca-42b1-968e' % _admin_task_prefix()
    exp = {u'detail': u'Permission denied'}
    _test(cmd, exp, 403, 1)
def _task_done_get_201():
    cmd = 'get /task/%s-0000-1111-aaaa-12345678/done' % _user_task_prefix()
    exp = {'done': False}
    _test(cmd, exp, 201)
def _task_done_get_403():
    cmd = 'get /task/%s-6f75849b-c9ca-42b1-968e/done' % _admin_task_prefix()
    exp = {u'detail': u'Permission denied'}
    _test(cmd, exp, 403, 1)
def _task_status_get_201():
    cmd = 'get /task/%s-0000-1111-aaaa-12345678/status' % _user_task_prefix()
    exp = {'status': 'PENDING', 'result': None}
    _test(cmd, exp, 201)
def _task_status_get_403():
    cmd = 'get /task/%s-6f75849b-c9ca-42b1-968e/status' % _admin_task_prefix()
    exp = {u'detail': u'Permission denied'}
    _test(cmd, exp, 403, 1)
def _task_cancel_set_406():
    cmd = 'set /task/%s-6f75849b-c9ca-42b1-968e/cancel' % _user_task_prefix()
    exp = {u'detail': u'Task cannot be canceled'}
    _test(cmd, exp, 406, 1)
def _task_cancel_set_403():
    cmd = 'set /task/%s-6f75849b-c9ca-42b1-968e/cancel' % _admin_task_prefix()
    exp = {u'detail': u'Permission denied'}
    _test(cmd, exp, 403, 1)
# The following tests expect a logged-out session.
def _task__get_403():
    cmd = 'get /task'
    exp = {'detail': 'Authentication credentials were not provided.'}
    _test(cmd, exp, 403, 1)
def _task_done_get_logout_403():
    cmd = 'get /task/6-0000-1111-aaaa-12345678/done'
    exp = {'detail': 'Authentication credentials were not provided.'}
    _test(cmd, exp, 403, 1)
def _task_status_get_logout_403():
    cmd = 'get /task/6-0000-1111-aaaa-12345678/status'
    exp = {'detail': 'Authentication credentials were not provided.'}
    _test(cmd, exp, 403, 1)
def _task_log_get_200():
    cmd = 'get /task/log'
    exp = []
    _test(cmd, exp, 200)
def _task_log_last_get_200():
    cmd = 'get /task/log'
    exp = []
    _test(cmd, exp, 200)
def _task_log_get_logout_403():
    cmd = 'get /task/log'
    exp = {'detail': 'Authentication credentials were not provided.'}
    _test(cmd, exp, 403, 1)
def _task_log_0_get_logout_403():
    cmd = 'get /task/log -page 1'
    exp = {'detail': 'Authentication credentials were not provided.'}
    _test(cmd, exp, 403, 1)
###############################################################################
# vm tests
###############################################################################
# VM API tests: listing, missing-VM errors, define endpoints, and
# permission/validation failures.
def _vm__get_200():
    cmd = 'get /vm'
    exp = {'status': 'SUCCESS'}
    # The result list contents vary; only assert it is a list.
    cst = lambda t: isinstance(t['result'], list)
    _test(cmd, exp, 200, custom_test=cst)
def _vm__get_403():
    cmd = 'get /vm'
    exp = {'detail': 'Authentication credentials were not provided.'}
    _test(cmd, exp, 403, 1)
def _vm__get_404():
    cmd = 'get /vm/test99.example.com'
    exp = {'detail': 'VM not found'}
    _test(cmd, exp, 404, 1)
def _vm__delete_404():
    cmd = 'delete /vm/test99.example.com'
    exp = {'detail': 'VM not found'}
    _test(cmd, exp, 404, 1)
def _vm__create_404():
    cmd = 'create /vm/test99.example.com'
    exp = {'detail': 'VM not found'}
    _test(cmd, exp, 404, 1)
def _vm_define_get_200():
    cmd = 'get /vm/define'
    exp = {'status': 'SUCCESS', 'result': []}
    _test(cmd, exp, 200)
def _vm_status_get_200():
    cmd = 'get /vm/status'
    exp = {'status': 'SUCCESS', 'result': []}
    _test(cmd, exp, 200)
def _vm_define_create_403():
    cmd = 'create /vm/test99.example.com/define'
    exp = {'detail': 'Permission denied'}
    _test(cmd, exp, 403, 1)
def _vm_define_disk_1_create_403():
    cmd = 'create /vm/test99.example.com/define/disk/1'
    exp = {'detail': 'Permission denied'}
    _test(cmd, exp, 403, 1)
def _vm_define_nic_1_create_403():
    cmd = 'create /vm/test99.example.com/define/nic/1'
    exp = {'detail': 'Permission denied'}
    _test(cmd, exp, 403, 1)
# no input
def _vm_define_create_400_1():
    cmd = 'create /vm/test99.example.com/define'
    exp = {'status': 'FAILURE', 'result': {'vcpus': ['This field is required.'], 'ram': ['This field is required.']}}
    _test(cmd, exp, 400, 1)
# low input
def _vm_define_create_400_2():
cmd = 'create /vm/test99.example.com/define -ram 1 -vcpus 0 -ostype 0'
exp = {'status': | |
"""Collections of library function names.
"""
class Library:
    """Base class for a collection of library function names.
    """
    @staticmethod
    def get(libname, _cache={}):
        """Return a cached singleton Library instance for *libname*.

        Raises ValueError for an unrecognized library name. The mutable
        default argument is the intentional per-process cache.
        """
        try:
            return _cache[libname]
        except KeyError:
            pass
        if libname == 'stdlib':
            instance = Stdlib()
        elif libname == 'stdio':
            instance = Stdio()
        elif libname == 'm':
            instance = Mlib()
        elif libname == 'libdevice':
            instance = Libdevice()
        elif libname == 'nvvm':
            instance = NVVMIntrinsics()
        elif libname == 'llvm':
            instance = LLVMIntrinsics()
        elif libname == 'heavydb':
            instance = HeavyDB()
        else:
            raise ValueError(f'Unknown library {libname}')
        _cache[libname] = instance
        return instance

    def __contains__(self, fname):
        return self.check(fname)

    def check(self, fname):
        """
        Return True if library contains a function with given name.
        """
        names = self._function_names
        if fname in names:
            return True
        # Entries ending in '.*' act as prefix wildcards.
        return any(pattern.endswith('.*') and fname.startswith(pattern[:-2])
                   for pattern in names)
class HeavyDB(Library):
    """Runtime helper functions provided by the HeavyDB server."""
    name = 'heavydb'
    _function_names = list('''
    allocate_varlen_buffer set_output_row_size
    TableFunctionManager_error_message TableFunctionManager_set_output_row_size
    table_function_error
    '''.strip().split())
class Stdlib(Library):
    """
    Reference: http://www.cplusplus.com/reference/cstdlib/
    """
    name = 'stdlib'
    # C standard library (cstdlib) function names.
    _function_names = list(''' atof atoi atol atoll strtod strtof strtol strtold strtoll strtoul
    strtoull rand srand calloc free malloc realloc abort atexit
    at_quick_exit exit getenv quick_exit system bsearch qsort abs div
    labs ldiv llabs lldiv mblen mbtowc wctomb mbstowcs wcstombs '''.strip().split())
class Stdio(Library):
    """
    Reference: http://www.cplusplus.com/reference/cstdio/
    """
    name = 'stdio'
    # C standard I/O (cstdio) function names.
    _function_names = list(''' remove rename tmpfile tmpnam fclose fflush fopen freopen setbuf
    setvbuf fprintf fscanf printf scanf snprintf sprintf sscanf
    vfprintf vfscanf vprintf vscanf vsnprintf vsprintf vsscanf fgetc
    fgets fputc fputs getc getchar gets putc putchar puts ungetc fread
    fwrite fgetpos fseek fsetpos ftell rewind clearerr feof ferror
    perror '''.strip().split())
class Mlib(Library):
    """
    References:
      https://www.gnu.org/software/libc/manual/html_node/Mathematics.html
      https://en.cppreference.com/w/cpp/header/cmath
    """
    name = 'm'
    # C math library (libm) function names, including float/long-double and
    # complex variants.
    _function_names = list('''sin sinf sinl cos cosf cosl tan tanf tanl sincos sincosf sincosl
    csin csinf csinl ccos ccosf ccosl ctan ctanf ctanl asin asinf
    asinl acos acosf acosl atan atanf atanl atan2 atan2f atan2l casin
    casinf casinl cacos cacosf cacosl catan catanf catanl exp expf
    expl exp2 exp2f exp2l exp10 exp10f exp10l log logf logl log2 log2f
    log2l log10 log10f log10l logb logbf logbl ilogb ilogbf ilogbl pow
    powf powl sqrt sqrtf sqrtl cbrt cbrtf cbrtl hypot hypotf hypotl
    expm1 expm1f expm1l log1p log1pf log1pl clog clogf clogl clog10
    clog10f clog10l csqrt csqrtf csqrtl cpow cpowf cpowl sinh sinhf
    sinhl cosh coshf coshl tanh tanhf tanhl csinh csinhf csinhl ccosh
    ccoshf ccoshl ctanh ctanhf ctanhl asinh asinhf asinhl acosh acoshf
    acoshl atanh atanhf atanhl casinh casinhf casinhl cacosh cacoshf
    cacoshl catanh catanhf catanhl erf erff erfl erfc erfcf erfcl
    lgamma lgammaf lgammal tgamma tgammaf tgammal lgamma_r lgammaf_r
    lgammal_r gamma gammaf gammal j0 j0f j0l j1 j1f j1l jn jnf jnl y0
    y0f y0l y1 y1f y1l yn ynf ynl rand srand rand_r random srandom
    initstate setstate random_r srandom_r initstate_r setstate_r
    drand48 erand48 lrand48 nrand48 mrand48 jrand48 srand48 seed48
    lcong48 drand48_r erand48_r lrand48_r nrand48_r mrand48_r
    jrand48_r srand48_r seed48_r lcong48_r abs labs llabs fabs fabsf
    fabsl cabs cabsf cabsl frexp frexpf frexpl ldexp ldexpf ldexpl
    scalb scalbf scalbl scalbn scalbnf scalbnl significand
    significandf significandl ceil ceilf ceill floor floorf floorl
    trunc truncf truncl rint rintf rintl nearbyint nearbyintf
    nearbyintl round roundf roundl roundeven roundevenf roundevenl
    lrint lrintf lrintl lround lroundf lroundl llround llroundf
    llroundl fromfp fromfpf fromfpl ufromfp ufromfpf ufromfpl fromfpx
    fromfpxf fromfpxl ufromfpx ufromfpxf ufromfpxl modf modff modfl
    fmod fmodf fmodl remainder remainderf remainderl drem dremf dreml
    copysign copysignf copysignl signbit signbitf signbitl nextafter
    nextafterf nextafterl nexttoward nexttowardf nexttowardl nextup
    nextupf nextupl nextdown nextdownf nextdownl nan nanf nanl
    canonicalize canonicalizef canonicalizel getpayload getpayloadf
    getpayloadl setpayload setpayloadf setpayloadl setpayloadsig
    setpayloadsigf setpayloadsigl isgreater isgreaterequal isless
    islessequal islessgreater isunordered iseqsig totalorder
    totalorderf totalorderl totalordermag totalorderf totalorderl fmin
    fminf fminl fmax fmaxf fmaxl fminmag fminmagf fminmagl fmaxmag
    fmaxmagf fmaxmagl fdim fdimf fdiml fma fmaf fmal fadd faddf faddl
    fsub fsubf fsubl fmul fmulf fmull fdiv fdivf fdivl llrint llrintf
    llrintl'''.strip().split())
def drop_suffix(f):
    """Strip trailing LLVM type suffixes (``.f64``, ``.i32``, ...) from *f*,
    repeatedly, until no known suffix remains."""
    known = ('p0i8', 'f64', 'f32', 'i1', 'i8', 'i16', 'i32', 'i64', 'i128')
    while True:
        tail = f.rsplit('.', 1)[-1]
        if tail not in known:
            return f
        f = f[:-(len(tail) + 1)]
def get_llvm_name(f, prefix='llvm.'):
    """Return normalized name of a llvm intrinsic name.
    """
    # Names without the prefix are returned untouched; prefixed names are
    # also stripped of any trailing type suffix.
    if not f.startswith(prefix):
        return f
    return drop_suffix(f[len(prefix):])
class LLVMIntrinsics(Library):
    """LLVM intrinsic function names with prefix `llvm.` removed.
    Reference: https://llvm.org/docs/LangRef.html#intrinsic-functions
    """
    name = 'llvm'
    def check(self, fname):
        # Only `llvm.`-prefixed names can be intrinsics; normalize by
        # stripping the prefix and type suffix before the base lookup.
        if fname.startswith('llvm.'):
            return Library.check(self, get_llvm_name(fname))
        return False
    _function_names = list(''' va_start va_end va_copy gcroot gcread gcwrite returnaddress
    addressofreturnaddress sponentry frameaddress stacksave
    stackrestore get.dynamic.area.offset prefetch pcmarker
    readcyclecounter clear_cache instrprof.increment
    instrprof.increment.step instrprof.value.profile thread.pointer
    call.preallocated.setup call.preallocated.arg
    call.preallocated.teardown abs smax smin umax umin memcpy
    memcpy.inline memmove sqrt powi sin cos pow exp exp2 log log10
    log2 fma fabs minnum maxnum minimum maximum copysign floor ceil
    trunc rint nearbyint round roundeven lround llround lrint llrint
    ctpop ctlz cttz fshl fshr sadd.with.overflow uadd.with.overflow
    ssub.with.overflow usub.with.overflow smul.with.overflow
    umul.with.overflow sadd.sat uadd.sat ssub.sat usub.sat sshl.sat
    ushl.sat smul.fix umul.fix smul.fix.sat umul.fix.sat sdiv.fix
    udiv.fix sdiv.fix.sat udiv.fix.sat canonicalize fmuladd
    set.loop.iterations test.set.loop.iterations loop.decrement.reg
    loop.decrement vector.reduce.add vector.reduce.fadd
    vector.reduce.mul vector.reduce.fmul vector.reduce.and
    vector.reduce.or vector.reduce.xor vector.reduce.smax
    vector.reduce.smin vector.reduce.umax vector.reduce.umin
    vector.reduce.fmax vector.reduce.fmin matrix.transpose
    matrix.multiply matrix.column.major.load matrix.column.major.store
    convert.to.fp16 convert.from.fp16 init.trampoline
    adjust.trampoline lifetime.start lifetime.end invariant.start
    invariant.end launder.invariant.group strip.invariant.group
    experimental.constrained.fadd experimental.constrained.fsub
    experimental.constrained.fmul experimental.constrained.fdiv
    experimental.constrained.frem experimental.constrained.fma
    experimental.constrained.fptoui experimental.constrained.fptosi
    experimental.constrained.uitofp experimental.constrained.sitofp
    experimental.constrained.fptrunc experimental.constrained.fpext
    experimental.constrained.fmuladd experimental.constrained.sqrt
    experimental.constrained.pow experimental.constrained.powi
    experimental.constrained.sin experimental.constrained.cos
    experimental.constrained.exp experimental.constrained.exp2
    experimental.constrained.log experimental.constrained.log10
    experimental.constrained.log2 experimental.constrained.rint
    experimental.constrained.lrint experimental.constrained.llrint
    experimental.constrained.nearbyint experimental.constrained.maxnum
    experimental.constrained.minnum experimental.constrained.maximum
    experimental.constrained.minimum experimental.constrained.ceil
    experimental.constrained.floor experimental.constrained.round
    experimental.constrained.roundeven experimental.constrained.lround
    experimental.constrained.llround experimental.constrained.trunc
    experimental.gc.statepoint experimental.gc.result experimental.gc.relocate
    experimental.gc.get.pointer.base experimental.gc.get.pointer.offset
    experimental.vector.reduce.add.* experimental.vector.reduce.fadd.*
    experimental.vector.reduce.mul.* experimental.vector.reduce.fmul.*
    experimental.vector.reduce.and.* experimental.vector.reduce.or.*
    experimental.vector.reduce.xor.* experimental.vector.reduce.smax.*
    experimental.vector.reduce.smin.* experimental.vector.reduce.umax.*
    experimental.vector.reduce.umin.* experimental.vector.reduce.fmax.*
    experimental.vector.reduce.fmin.*
    flt.rounds var.annotation ptr.annotation annotation
    codeview.annotation trap debugtrap stackprotector stackguard
    objectsize expect expect.with.probability assume ssa_copy
    type.test type.checked.load donothing experimental.deoptimize
    experimental.guard experimental.widenable.condition load.relative
    sideeffect is.constant ptrmask vscale
    memcpy.element.unordered.atomic memmove.element.unordered.atomic
    memset.element.unordered.atomic objc.autorelease
    objc.autoreleasePoolPop objc.autoreleasePoolPush
    objc.autoreleaseReturnValue objc.copyWeak objc.destroyWeak
    objc.initWeak objc.loadWeak objc.loadWeakRetained objc.moveWeak
    objc.release objc.retain objc.retainAutorelease
    objc.retainAutoreleaseReturnValue
    objc.retainAutoreleasedReturnValue objc.retainBlock
    objc.storeStrong objc.storeWeak preserve.array.access.index
    preserve.union.access.index preserve.struct.access.index
    masked.store.* memset'''.strip().split())
class NVVMIntrinsics(Library):
    """NVVM intrinsic function names with prefix `llvm.` removed.
    Reference: https://docs.nvidia.com/cuda/nvvm-ir-spec/index.html#intrinsic-functions
    """
    name = 'nvvm'
    def check(self, fname):
        # Only `llvm.`-prefixed names can be NVVM intrinsics; strip the
        # prefix and type suffix before looking them up.
        if fname.startswith('llvm.'):
            return Library.check(self, get_llvm_name(fname))
        return False
    _function_names = list(''' memcpy memmove memset sqrt fma bswap ctpop ctlz cttz fmuladd
    convert.to.fp16.f32 convert.from.fp16.f32 convert.to.fp16
    convert.from.fp16 lifetime.start lifetime.end invariant.start
    invariant.end var.annotation ptr.annotation annotation expect
    donothing '''.strip().split())
class Libdevice(Library):
"""NVIDIA libdevice function names with prefix `__nv_` removed.
Reference: https://docs.nvidia.com/cuda/libdevice-users-guide/function-desc.html#function-desc
"""
name = 'libdevice'
def check(self, fname):
if fname.startswith('__nv_'):
return Library.check(self, get_llvm_name(fname, prefix='__nv_'))
return False
_function_names = list(''' abs acos acosf acosh acoshf asin asinf asinh asinhf atan atan2
atan2f atanf atanh atanhf brev brevll byte_perm cbrt cbrtf ceil
ceilf clz clzll copysign copysignf cos cosf cosh coshf cospi
cospif dadd_rd dadd_rn dadd_ru dadd_rz ddiv_rd ddiv_rn ddiv_ru
ddiv_rz dmul_rd dmul_rn dmul_ru dmul_rz double2float_rd
double2float_rn double2float_ru double2float_rz double2hiint
double2int_rd double2int_rn double2int_ru double2int_rz
double2ll_rd double2ll_rn double2ll_ru double2ll_rz double2loint
double2uint_rd double2uint_rn double2uint_ru double2uint_rz
double2ull_rd double2ull_rn double2ull_ru double2ull_rz
double_as_longlong drcp_rd drcp_rn drcp_ru drcp_rz dsqrt_rd
dsqrt_rn dsqrt_ru dsqrt_rz erf erfc erfcf erfcinv erfcinvf erfcx
erfcxf erff erfinv erfinvf exp exp10 exp10f exp2 exp2f expf expm1
expm1f fabs fabsf fadd_rd fadd_rn fadd_ru fadd_rz fast_cosf
fast_exp10f fast_expf fast_fdividef fast_log10f fast_log2f
fast_logf fast_powf fast_sincosf fast_sinf fast_tanf fdim fdimf
fdiv_rd fdiv_rn fdiv_ru fdiv_rz ffs ffsll finitef float2half_rn
float2int_rd float2int_rn float2int_ru float2int_rz float2ll_rd
float2ll_rn float2ll_ru float2ll_rz float2uint_rd float2uint_rn
float2uint_ru float2uint_rz float2ull_rd float2ull_rn float2ull_ru
float2ull_rz float_as_int floor floorf fma fma_rd fma_rn fma_ru
fma_rz fmaf fmaf_rd fmaf_rn fmaf_ru fmaf_rz fmax fmaxf fmin fminf
fmod fmodf fmul_rd fmul_rn fmul_ru fmul_rz frcp_rd frcp_rn frcp_ru
frcp_rz frexp frexpf frsqrt_rn fsqrt_rd fsqrt_rn fsqrt_ru fsqrt_rz
fsub_rd fsub_rn fsub_ru fsub_rz hadd half2float hiloint2double
hypot hypotf ilogb ilogbf int2double_rn int2float_rd int2float_rn
int2float_ru int2float_rz int_as_float isfinited isinfd isinff
isnand isnanf j0 j0f j1 j1f jn jnf ldexp ldexpf lgamma lgammaf
ll2double_rd ll2double_rn ll2double_ru ll2double_rz ll2float_rd
ll2float_rn ll2float_ru ll2float_rz llabs llmax llmin llrint
llrintf llround llroundf log log10 log10f log1p log1pf log2 log2f
logb logbf logf longlong_as_double max min modf modff mul24
mul64hi mulhi nan nanf nearbyint nearbyintf nextafter nextafterf
normcdf normcdff normcdfinv normcdfinvf popc popcll pow powf powi
powif rcbrt rcbrtf remainder remainderf remquo remquof rhadd rint
rintf round roundf rsqrt rsqrtf sad saturatef scalbn scalbnf
signbitd signbitf sin sincos sincosf sincospi sincospif sinf sinh
sinhf sinpi sinpif sqrt sqrtf tan tanf tanh tanhf tgamma tgammaf
trunc truncf uhadd uint2double_rn uint2float_rd uint2float_rn
uint2float_ru uint2float_rz ull2double_rd | |
<gh_stars>100-1000
# coding: utf-8
'''This module contains miscellaneous commands for additional functionality.
Suppose these things are useful, but not essential.
'''
from __future__ import print_function
import sublime, sublime_plugin
from sublime import Region
from sublime_plugin import TextCommand, EventListener
import glob
import math
import os
import subprocess
import sys
import threading
from os.path import dirname, isfile, isdir, exists, join, normpath, getsize, getctime, getatime, getmtime
from datetime import datetime
ST3 = int(sublime.version()) >= 3000
if ST3:
from .common import DiredBaseCommand, set_proper_scheme, hijack_window, emit_event, NT, OSX, PARENT_SYM, sort_nicely
MARK_OPTIONS = sublime.DRAW_NO_OUTLINE
SYNTAX_EXTENSION = '.sublime-syntax'
else: # ST2 imports
import locale
from common import DiredBaseCommand, set_proper_scheme, hijack_window, emit_event, NT, OSX, PARENT_SYM, sort_nicely
MARK_OPTIONS = 0
SYNTAX_EXTENSION = '.hidden-tmLanguage'
sublime_plugin.ViewEventListener = object
def convert_size(size):
if not size:
return '0 B'
size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
i = int(math.floor(math.log(size, 1024)))
p = math.pow(1024, i)
s = round(size / p, 2)
return '%s %s' % (s, size_name[i])
def get_dates(path):
    """Return (created, accessed, modified) timestamps of *path* formatted as
    'DD Mon YYYY, HH:MM:SS'; on failure the OSError instance fills the slot."""
    def _formatted(stamp_of):
        try:
            return datetime.fromtimestamp(stamp_of(path)).strftime('%d %b %Y, %H:%M:%S')
        except OSError as e:
            # Deliberate: the caller displays the error text in place of the date.
            return e
    return _formatted(getctime), _formatted(getatime), _formatted(getmtime)
class DiredFindInFilesCommand(TextCommand, DiredBaseCommand):
    """Open Sublime's "Find in Files" panel scoped to the marked entries
    (falling back to selected entries on the virtual 'ThisPC' root)."""

    def run(self, edit):
        self.index = self.get_all()
        root = self.path
        if root == 'ThisPC\\':
            # Virtual Windows root: no usable base path; search the chosen drives.
            root = ''
            chosen = self.get_marked() or self.get_selected()
        else:
            chosen = self.get_marked()
        where = ', '.join(join(root, name) for name in chosen) or root or ''
        sublime.active_window().run_command(
            "show_panel",
            {"panel": "find_in_files", "where": where, "replace": "", "reverse": "false"})
# HELP ##############################################################
class DiredHelpCommand(TextCommand):
    """Open a scratch view configured for the FileBrowser shortcuts reference."""

    def run(self, edit):
        view = self.view.window().new_file()
        view.settings().add_on_change('color_scheme', lambda: set_proper_scheme(view))
        view.set_name("Browse: shortcuts")
        view.set_scratch(True)
        # Strip the view of editing chrome so it reads like a help page.
        prefs = [
            ('rulers', []),
            ('syntax', 'Packages/FileBrowser/dired-help' + SYNTAX_EXTENSION),
            ('margin', 16),
            ('line_numbers', False),
            ('gutter', False),
            ('fold_buttons', False),
            ('draw_indent_guides', False),
            ('word_wrap', False),
            ('spell_check', False),
            ('drag_text', False),
        ]
        for key, value in prefs:
            view.settings().set(key, value)
        view.run_command('dired_show_help')
        sublime.active_window().focus_view(view)
class DiredShowHelpCommand(TextCommand):
    """Replace the current view's content with the FileBrowser shortcuts
    reference and make the view read-only."""

    def run(self, edit):
        COMMANDS_HELP = sublime.load_resource('Packages/FileBrowser/shortcuts.md') if ST3 else ''
        if not COMMANDS_HELP:
            # ST2 (or failed resource load): read shortcuts.md from disk.
            dest = dirname(__file__)
            shortcuts = join(dest if dest != '.' else join(sublime.packages_path(), 'FileBrowser'), "shortcuts.md")
            # Fixed: close the file handle instead of leaking it
            # (the original `open(...).read()` relied on GC to close).
            with open(shortcuts, "r") as fp:
                COMMANDS_HELP = fp.read()
        self.view.erase(edit, Region(0, self.view.size()))
        self.view.insert(edit, 0, COMMANDS_HELP)
        self.view.sel().clear()
        self.view.set_read_only(True)
# OTHER #############################################################
class DiredToggleProjectFolder(TextCommand, DiredBaseCommand):
    """Toggle the current dired path in the project's folder list (ST3 only):
    remove it if present, otherwise prepend it."""

    def run(self, edit):
        if not ST3:
            return sublime.status_message('This feature is available only in Sublime Text 3')
        path = self.path.rstrip(os.sep)
        data = self.view.window().project_data() or {}
        # Fixed: 'folders' is a list of {'path': ...} dicts; the previous
        # default of {} only worked because iterating an empty dict yields
        # nothing.
        data['folders'] = data.get('folders', [])
        # Drop the folder if it is already there; if nothing was dropped,
        # it was absent, so add it to the front (i.e. toggle).
        folders = [f for f in data['folders'] if f['path'] != path]
        if len(folders) == len(data['folders']):
            folders.insert(0, {'path': path})
        data['folders'] = folders
        self.view.window().set_project_data(data)
        self.view.window().run_command('dired_refresh')
class DiredOnlyOneProjectFolder(TextCommand, DiredBaseCommand):
    """Make the current dired path the single project folder (ST3 only),
    after asking the user for confirmation."""

    def run(self, edit):
        if not ST3:
            return sublime.status_message('This feature is available only in Sublime Text 3')
        path = self.path.rstrip(os.sep)
        msg = u"Set '{0}' as only one project folder (will remove all other folders from project)?".format(path)
        if not sublime.ok_cancel_dialog(msg):
            return
        window = self.view.window()
        data = window.project_data() or {'folders': {}}
        data['folders'] = [{'path': path}]
        window.set_project_data(data)
        window.run_command('dired_refresh')
class DiredQuickLookCommand(TextCommand, DiredBaseCommand):
    """Preview the chosen files with Quick Look on macOS, or open them in
    the default application on other systems."""

    def run(self, edit, preview=True, files=None):
        self.index = self.get_all()
        chosen = files or self.get_marked() or self.get_selected(parent=False)
        if not chosen:
            return sublime.status_message('Nothing chosen')
        paths = [join(self.path, name) for name in chosen]
        if OSX and preview:
            # Quick Look can take all files in a single invocation.
            subprocess.call(["qlmanage", "-p"] + paths)
            return
        if OSX:
            launch = lambda f: subprocess.call(['open', f], cwd=dirname(f))
        elif NT:
            # the "" before filename is a trick for batch files and such
            launch = lambda f: subprocess.call('start "" "%s"' % f, shell=True, cwd=dirname(f))
        else:
            launch = lambda f: subprocess.call(['xdg-open', f], cwd=dirname(f))
        for fqn in paths:
            launch(fqn)
class DiredOpenExternalCommand(TextCommand, DiredBaseCommand):
    """open dir/file in external file explorer"""

    def run(self, edit, fname=None):
        """Reveal *fname* (or the current selection) in the OS file manager.

        :param fname: optional absolute path; when omitted, the first
            selected entry is used.
        """
        path = self.path
        if not fname:
            self.index = self.get_all()
            files = self.get_selected(parent=False)
            fname = join(path, files[0] if files else '')
        else:
            files = True
        p, f = os.path.split(fname.rstrip(os.sep))
        if not exists(fname):
            # Fixed: report the path that was actually checked (fname),
            # not the dired root directory.
            return sublime.status_message(u'Directory doesn’t exist “%s”' % fname)
        if NT and path == 'ThisPC\\':
            if not ST3:
                fname = fname.encode(locale.getpreferredencoding(False))
            # On the virtual Windows root, ask Explorer to select the item.
            return subprocess.Popen('explorer /select,"%s"' % fname)
        if files:
            self.view.window().run_command("open_dir", {"dir": p, "file": f})
        else:
            self.view.window().run_command("open_dir", {"dir": path})
class DiredOpenInNewWindowCommand(TextCommand, DiredBaseCommand):
    """Open the marked/selected items (or a given project folder) in a new
    Sublime Text window, then start dired in that window."""

    def run(self, edit, project_folder=False):
        if project_folder:
            files = project_folder
        else:
            self.index = self.get_all()
            files = self.get_marked(full=True) or self.get_selected(parent=False, full=True)
        if not files:
            return sublime.status_message('Nothing chosen')
        if ST3:
            self.launch_ST3(files)
        else:
            self.launch_ST2(files)

        def run_on_new_window():
            # Runs after the new window exists; optionally opens dired in it.
            settings = sublime.load_settings('dired.sublime-settings')
            open_on_jump = settings.get('dired_open_on_jump', 'left')
            if open_on_jump:
                options = {"immediate": True, "project": True}
                if open_on_jump in ['left', 'right']:
                    options["other_group"] = open_on_jump
                sublime.active_window().run_command("dired", options)

        sublime.set_timeout(run_on_new_window, 200)
        if not ST3 and not NT:
            sublime.set_timeout(lambda: sublime.active_window().run_command("toggle_side_bar"), 200)

    def launch_ST3(self, files):
        # Resolve the `subl` CLI helper; on macOS it lives inside the .app bundle.
        executable_path = sublime.executable_path()
        if OSX:
            app_path = executable_path[:executable_path.rfind(".app/")+5]
            executable_path = app_path+"Contents/SharedSupport/bin/subl"
        items = [executable_path, "-n"] + files
        subprocess.Popen(items, cwd=None if NT else self.path)

    def launch_ST2(self, files):
        # ST2 has no sublime.executable_path(); try several launcher candidates.
        items = ["-n"] + files
        cwd = None if NT else self.path
        shell = False
        if NT:
            # 9200 means win8
            shell = True if sys.getwindowsversion()[2] < 9200 else False
            items = [i.encode(locale.getpreferredencoding(False)) if sys.getwindowsversion()[2] == 9200 else i for i in items]

        def app_path():
            if OSX:
                app_path = subprocess.Popen(["osascript", "-e" "tell application \"System Events\" to POSIX path of (file of process \"Sublime Text 2\" as alias)"], stdout=subprocess.PIPE).communicate()[0].rstrip()
                subl_path = "{0}/Contents/SharedSupport/bin/subl".format(app_path)
            else:
                subl_path = 'sublime_text'
            yield subl_path

        fail = False
        for candidate in ['subl', 'sublime', app_path()]:
            # Fixed: `list(candidate)` exploded the string commands into
            # single characters (['s', 'u', 'b', 'l']), so 'subl'/'sublime'
            # could never launch; only the generator from app_path() worked.
            cmd = [candidate] if isinstance(candidate, str) else list(candidate)
            try:
                subprocess.Popen(cmd + items, cwd=cwd, shell=shell)
            except Exception:
                fail = True
            else:
                fail = False
        if fail:
            sublime.status_message('Cannot open a new window')
class DiredToggleAutoRefresh(TextCommand):
    """Toggle the per-view 'dired_autorefresh' setting for a dired view."""

    def is_enabled(self):
        # Only meaningful inside a dired buffer.
        return self.view.score_selector(0, "text.dired") > 0

    def is_visible(self):
        return self.is_enabled()

    def description(self):
        enabled = self.view.settings().get('dired_autorefresh', True)
        prefix = u'Disable ' if enabled else u'Enable '
        return prefix + u'auto-refresh for this view'

    def run(self, edit):
        settings = self.view.settings()
        settings.set('dired_autorefresh', not settings.get('dired_autorefresh', True))
        self.view.run_command('dired_refresh')
class DiredPreviewDirectoryCommand(TextCommand, DiredBaseCommand):
    '''Show properties and content of directory in popup; ST3 only'''

    def run(self, edit, fqn=None, point=0):
        """Resolve the directory under the cursor (or *fqn*), show a
        'Loading...' popup, and start a background worker to fill it in."""
        if not fqn:
            self.index = self.get_all()
            filenames = self.get_selected(full=True)
            if not filenames:
                return sublime.status_message(u'Nothing to preview')
            fqn = filenames[0]
        if not (isdir(fqn) or fqn == PARENT_SYM):
            return sublime.status_message(u'Something wrong')
        self.view.settings().set('dired_stop_preview_thread', False)
        self.preview_thread = threading.Thread(target=self.worker, args=(fqn if fqn != PARENT_SYM else self.get_path(),))
        self.preview_thread.start()
        width, height = self.view.viewport_extent()
        self.view.show_popup('Loading...', 0, point or self.view.sel()[0].begin(), width, height / 2, self.open_from_preview)

    def worker(self, path):
        """Walk *path* on the background thread, accumulating file/dir
        counts, total size and dates, refreshing the popup as it goes."""
        self.preview_path = '📁 <a href="dir\v{0}">{0}</a>'.format(path)
        self.subdirs = self.files = self.size = 0
        self.errors = []
        self.open_dirs = []
        self.open_files = []
        self._created, self._accessed, self._modified = get_dates(path)

        def add_err(err): self.errors.append(str(err))

        for index, (root, dirs, files) in enumerate(os.walk(path, onerror=add_err)):
            self.subdirs += len(dirs)
            self.files += len(files)
            if not index:
                # Only the top-level listing is rendered in the popup.
                sort_nicely(dirs)
                sort_nicely(files)
                self.open_dirs = ['📁 <a href="dir\v%s%s">%s</a>' % (join(root, d), os.sep, d) for d in dirs]
                self.open_files = []
            for f in files:
                fpath = join(root, f)
                if not index:
                    self.open_files.append('≡ <a href="file\v%s">%s</a>' % (fpath, f))
                try:
                    self.size += getsize(fpath)
                except OSError as e:
                    add_err(e)
            # Bail out if the user closed the popup or asked us to stop.
            if not self.view.is_popup_visible() or self.view.settings().get('dired_stop_preview_thread'):
                return
            # Fixed: set_timeout_async must receive a *callable*; previously
            # update_preview() was called right here on the worker thread and
            # its None return value was what got scheduled.
            sublime.set_timeout_async(self.update_preview, 1)
        sublime.set_timeout_async(lambda: self.update_preview(loading=False), 1)

    def update_preview(self, loading=True):
        """Render the accumulated statistics into the popup HTML."""
        le = len(self.errors)
        if le > 5:
            # Too many errors to inline; show a count (clickable once done).
            if loading:
                errors = '<br>%d errors<br><br>' % le
            else:
                errors = '<br><a href="errors\v">%s errors</a> (click to view)<br><br>' % le
        else:
            errors = '<br>Errors:<br> %s<br><br>' % '<br> '.join(self.errors) if self.errors else '<br>'
        items = self.open_dirs + self.open_files
        self.view.update_popup(
            '<br>{0}{1}<br><br>'
            'Files: {2}; directories: {3}<br>'
            'Size: {4} ({5} bytes)<br><br>'
            'Created: {6}<br>'
            'Accessed: {7}<br>'
            'Modified: {8}<br>{9}{10}'.format(
                'Loading... ' if loading else '', self.preview_path,
                self.files, self.subdirs,
                convert_size(self.size), self.size,
                self._created, self._accessed, self._modified,
                errors,
                ' %s<br><br>' % '<br> '.join(items) if items else '')
        )

    def open_from_preview(self, payload):
        """Dispatch a click inside the popup; *payload* is '<kind>\v<path>'."""
        msg, path = payload.split('\v')

        def show_errors(_):
            self.view.update_popup(
                '<br><a href="back\v">←<br><br>'
                '</a>Errors:<br> %s<br>' % '<br> '.join(self.errors))

        def go_back(_):
            self.update_preview(loading=False)

        def open_dir(path):
            self.view.settings().set('dired_path', path)
            self.view.run_command('dired_refresh')

        def open_file(path):
            (self.view.window() or sublime.active_window()).open_file(path)

        case = {
            'dir': open_dir,
            'file': open_file,
            'errors': show_errors,
            'back': go_back
        }
        case[msg](path)
class DiredFilePropertiesCommand(TextCommand, DiredBaseCommand):
'''Show properties of file in popup; ST3 only'''
def run(self, edit, fqn=None, point=0):
if not fqn:
self.index = self.get_all()
filenames = self.get_selected(full=True)
if not filenames:
return sublime.status_message(u'Nothing to preview')
width, height = self.view.viewport_extent()
self.view.show_popup('Loading...', 0, point or self.view.sel()[0].begin(), width, height / 2, self.open_from_preview)
self.get_info(fqn)
def get_info(self, path):
self.preview_path = path
self.parent = dirname(path)
self.size = 0
self.errors = []
self._created, self._accessed, self._modified = get_dates(path)
try:
self.size += getsize(path)
except OSError as e:
self.errors.append(str(e))
if not self.view.is_popup_visible():
return
sublime.set_timeout_async(self.update_preview, 1)
def update_preview(self):
self.view.update_popup(
'<br>≡ <a href="file\v{0}">{0}</a><br><br>'
'Size: {1} ({2} bytes)<br><br>'
'Created: {3}<br>'
'Accessed: {4}<br>'
'Modified: {5}<br>'
'{6}'
'{7}'
'<a href="app\v{0}">Open in default app</a><br>'
'<a href="external\v{0}">Open parent in Finder/Explorer</a><br><br>'.format(
self.preview_path,
convert_size(self.size), self.size,
self._created, self._accessed, self._modified,
'<br>Errors:<br> %s<br><br>' % '<br> '.join(self.errors) if self.errors else | |
+ 2333.44021035551 * self.t)
Y1 += 0.00000000097 * math.cos(0.57476621767 + 26556.11194941211 * self.t)
Y1 += 0.00000000118 * math.cos(0.18981536711 + 7994.77225950771 * self.t)
Y1 += 0.00000000109 * math.cos(0.03572167179 + 103396.25664217169 * self.t)
Y1 += 0.00000000109 * math.cos(4.54205508167 + 79219.55298381469 * self.t)
Y1 += 0.00000000095 * math.cos(4.81791418885 + 58459.1259506233 * self.t)
Y1 += 0.00000000095 * math.cos(1.17289793021 + 78256.83969520529 * self.t)
Y1 += 0.00000000097 * math.cos(5.79863208794 + 105411.23831396949 * self.t)
Y1 += 0.00000000092 * math.cos(3.83355549192 + 10213.0417287275 * self.t)
Y1 += 0.00000000094 * math.cos(2.84811066413 + 1582.2031657665 * self.t)
Y1 += 0.00000000103 * math.cos(3.37710316075 + 91785.70468379749 * self.t)
Y1 += 0.00000000101 * math.cos(5.05588975902 + 20426.32727493849 * self.t)
Y1 += 0.00000000089 * math.cos(1.67320799366 + 74821.37829724069 * self.t)
Y1 += 0.00000000089 * math.cos(4.81402637705 + 125887.80602829569 * self.t)
Y1 += 0.00000000103 * math.cos(5.07765700675 + 51066.18391357149 * self.t)
Y1 += 0.00000000087 * math.cos(5.93439795460 + 27043.2590656993 * self.t)
Y1 += 0.00000000083 * math.cos(1.05061824879 + 73711.99974514729 * self.t)
Y1 += 0.00000000083 * math.cos(0.20717662715 + 77624.05595589208 * self.t)
Y1 += 0.00000000097 * math.cos(3.79624621564 + 71980.87739221469 * self.t)
Y1 += 0.00000000081 * math.cos(5.50420619898 + 155468.28073673949 * self.t)
Y1 += 0.00000000080 * math.cos(5.57436544726 + 22645.08437912529 * self.t)
Y1 += 0.00000000081 * math.cos(1.24551992048 + 76045.1961380193 * self.t)
Y1 += 0.00000000081 * math.cos(3.47212148199 + 26138.1435809619 * self.t)
Y1 += 0.00000000080 * math.cos(5.32926891572 + 26038.1503371535 * self.t)
Y1 += 0.00000000104 * math.cos(0.71084047438 + 78271.06678920689 * self.t)
Y1 += 0.00000000103 * math.cos(3.66294929279 + 78283.62300310588 * self.t)
Y1 += 0.00000000083 * math.cos(0.93556744308 + 80482.71034639288 * self.t)
Y1 += 0.00000000089 * math.cos(0.32257506891 + 78260.07190684808 * self.t)
Y1 += 0.00000000073 * math.cos(5.79066657626 + 26617.35028918529 * self.t)
Y1 += 0.00000000077 * math.cos(5.23137947798 + 18208.05780571871 * self.t)
Y1 += 0.00000000082 * math.cos(2.34137394607 + 77211.68485901768 * self.t)
Y1 += 0.00000000077 * math.cos(1.31837686051 + 41962.7645544209 * self.t)
Y1 += 0.00000000082 * math.cos(2.81188130706 + 77197.45776501608 * self.t)
Y1 += 0.00000000067 * math.cos(3.45344753309 + 85034.66384345168 * self.t)
Y1 += 0.00000000070 * math.cos(5.80811525915 + 537.0483295789 * self.t)
Y1 += 0.00000000067 * math.cos(1.90240647027 + 5661.0882316687 * self.t)
Y1 += 0.00000000067 * math.cos(2.25742587365 + 120226.47397914348 * self.t)
Y1 += 0.00000000065 * math.cos(3.38772930583 + 162188.99471608088 * self.t)
Y1 += 0.00000000075 * math.cos(3.36246887110 + 149.8070146181 * self.t)
Y1 += 0.00000000071 * math.cos(0.43774462141 + 108903.80988083909 * self.t)
Y1 += 0.00000000063 * math.cos(1.71956484187 + 51756.5654567567 * self.t)
Y1 += 0.00000000062 * math.cos(0.86384103129 + 102762.78348849648 * self.t)
Y1 += 0.00000000067 * math.cos(2.92995035330 + 64608.09275102969 * self.t)
Y1 += 0.00000000064 * math.cos(3.59312937724 + 13521.9952590749 * self.t)
Y1 += 0.00000000062 * math.cos(1.70048284317 + 28306.41642827749 * self.t)
Y1 += 0.00000000058 * math.cos(0.05666653789 + 11322.9079157879 * self.t)
Y1 += 0.00000000058 * math.cos(1.85046002910 + 24498.58642880689 * self.t)
Y1 += 0.00000000077 * math.cos(2.22443127486 + 25565.5695409639 * self.t)
Y1 += 0.00000000076 * math.cos(3.03069740917 + 78477.25233764409 * self.t)
Y1 += 0.00000000063 * math.cos(5.48328560721 + 93029.19228547589 * self.t)
Y1 += 0.00000000078 * math.cos(4.97356487718 + 71493.2426409605 * self.t)
Y1 += 0.00000000077 * math.cos(2.34741649134 + 51742.33836275509 * self.t)
Y1 += 0.00000000057 * math.cos(2.02149140578 + 2648.6986429565 * self.t)
Y1 += 0.00000000058 * math.cos(0.20352146431 + 339142.98465794802 * self.t)
Y1 += 0.00000000056 * math.cos(3.96222140968 + 76145.18938182769 * self.t)
Y1 += 0.00000000059 * math.cos(1.23567785929 + 21535.70582703189 * self.t)
Y1 += 0.00000000056 * math.cos(0.13198299548 + 117873.60782537168 * self.t)
Y1 += 0.00000000053 * math.cos(6.21797647818 + 25557.96835899609 * self.t)
Y1 += 0.00000000060 * math.cos(5.92257554155 + 71025.27765060609 * self.t)
Y1 += 0.00000000052 * math.cos(5.28001264458 + 10022.0810975829 * self.t)
Y1 += 0.00000000055 * math.cos(4.20817850068 + 51322.85371887989 * self.t)
Y1 += 0.00000000058 * math.cos(2.97195320983 + 43071.65547154729 * self.t)
Y1 += 0.00000000053 * math.cos(3.50115924170 + 79853.02613748988 * self.t)
Y1 += 0.00000000053 * math.cos(4.92881565492 + 78050.65414676809 * self.t)
Y1 += 0.00000000053 * math.cos(0.59227243554 + 98068.78053378889 * self.t)
Y1 += 0.00000000057 * math.cos(5.95602153522 + 633.0275567967 * self.t)
Y1 += 0.00000000049 * math.cos(5.08646367150 + 90830.10494218889 * self.t)
Y1 += 0.00000000067 * math.cos(0.27295012704 + 26610.72437715149 * self.t)
Y1 += 0.00000000050 * math.cos(4.43506318049 + 131549.13807744789 * self.t)
Y1 += 0.00000000049 * math.cos(3.57210369085 + 33968.23611239669 * self.t)
Y1 += 0.00000000050 * math.cos(3.73457689628 + 81706.5281871715 * self.t)
Y1 += 0.00000000051 * math.cos(5.44465818352 + 129910.06856025988 * self.t)
Y1 += 0.00000000057 * math.cos(1.99278813028 + 26624.9514711531 * self.t)
Y1 += 0.00000000045 * math.cos(4.93934477894 + 51116.18053547569 * self.t)
Y1 += 0.00000000045 * math.cos(1.53008615762 + 104332.1866228805 * self.t)
Y1 += 0.00000000051 * math.cos(5.56661489639 + 647.25465079831 * self.t)
Y1 += 0.00000000043 * math.cos(3.21201517479 + 78690.55143308209 * self.t)
Y1 += 0.00000000054 * math.cos(0.68244488765 + 522.8212355773 * self.t)
Y1 += 0.00000000042 * math.cos(6.05067077478 + 114565.14192999128 * self.t)
Y1 += 0.00000000041 * math.cos(0.42167567232 + 38519.70197448849 * self.t)
Y1 += 0.00000000041 * math.cos(5.52226173141 + 25028.27739390149 * self.t)
Y1 += 0.00000000052 * math.cos(5.71899801337 + 39744.0074502341 * self.t)
Y1 += 0.00000000043 * math.cos(5.20652224208 + 103925.25819290428 * self.t)
Y1 += 0.00000000045 * math.cos(3.01197275808 + 76.50988875911 * self.t)
Y1 += 0.00000000042 * math.cos(1.64710245004 + 46848.5739922491 * self.t)
Y1 += 0.00000000047 * math.cos(2.33748031419 + 44181.52165860769 * self.t)
Y1 += 0.00000000038 * math.cos(1.51512405751 + 22760.01130277751 * self.t)
Y1 += 0.00000000037 * math.cos(1.55847676346 + 151975.70916986988 * self.t)
Y1 += 0.00000000050 * math.cos(0.66442193902 + 25551.34244696229 * self.t)
Y1 += 0.00000000050 * math.cos(5.43960666857 + 50579.86365834729 * self.t)
Y1 += 0.00000000045 * math.cos(4.18105807315 + 7879.84533585549 * self.t)
Y1 += 0.00000000043 * math.cos(3.23954616248 + 26521.8586969345 * self.t)
Y1 += 0.00000000037 * math.cos(4.29411552008 + 30639.61282114949 * self.t)
Y1 += 0.00000000037 * math.cos(5.45971389197 + 48732.98752069949 * self.t)
Y1 += 0.00000000035 * math.cos(1.61116129207 + 84547.0290921975 * self.t)
Y1 += 0.00000000040 * math.cos(5.41660085121 + 52815.9473869459 * self.t)
Y1 += 0.00000000041 * math.cos(5.72036408436 + 1059.1381127057 * self.t)
Y1 += 0.00000000041 * math.cos(1.46421286291 + 150866.33061777649 * self.t)
Y1 += 0.00000000040 * math.cos(3.44404714469 + 26294.33250749489 * self.t)
Y1 += 0.00000000041 * math.cos(5.29927149302 + 25881.9614106205 * self.t)
Y1 += 0.00000000034 * math.cos(5.65060802359 + 26191.23973327629 * self.t)
Y1 += 0.00000000034 * math.cos(3.16023657143 + 25985.0541848391 * self.t)
Y1 += 0.00000000034 * math.cos(5.54425040097 + 89586.61734051049 * self.t)
Y1 += 0.00000000041 * math.cos(2.60243509566 + 52329.8289111167 * self.t)
Y1 += 0.00000000035 * math.cos(0.95515596888 + 157637.04121902208 * self.t)
Y1 += 0.00000000042 * math.cos(4.45181501350 + 26507.63160293289 * self.t)
Y1 += 0.00000000039 * math.cos(1.71451949376 + 234790.88445668427 * self.t)
Y1 += 0.00000000037 * math.cos(3.45787318433 + 57836.89451481709 * self.t)
Y1 += 0.00000000036 * math.cos(5.05609057964 + 130969.45049044909 * self.t)
Y1 += 0.00000000032 * math.cos(0.80695926075 + 181506.18725640948 * self.t)
Y1 += 0.00000000033 * math.cos(5.95814421441 + 50594.09075234889 * self.t)
Y1 += 0.00000000044 * math.cos(4.54253002200 + 6681.46867088311 * self.t)
Y1 += 0.00000000036 * math.cos(4.89960028584 + 104355.73771913828 * self.t)
Y1 += 0.00000000032 * math.cos(3.07469859082 + 129484.15978374588 * self.t)
Y1 += 0.00000000030 * math.cos(3.63644868220 + 52644.0150909863 * self.t)
Y1 += 0.00000000031 * math.cos(0.98929514792 + 29416.28261533789 * self.t)
Y1 += 0.00000000031 * math.cos(6.01882424612 + 105307.45612538888 * self.t)
Y1 += 0.00000000030 * math.cos(2.14561278666 + 95247.94938966268 * self.t)
Y1 += 0.00000000031 * math.cos(0.94264719675 + 52290.48938931711 * self.t)
Y1 += 0.00000000029 * math.cos(2.26390987562 + 181556.18387831368 * self.t)
Y1 += 0.00000000039 * math.cos(0.22423030716 + 25448.2496727437 * self.t)
Y1 += 0.00000000032 * math.cos(3.17151820552 + 51639.24558853649 * self.t)
Y1 += 0.00000000029 * math.cos(2.64834173157 + 53242.5455778219 * self.t)
Y1 += 0.00000000029 * math.cos(3.82338961652 + 25668.6623151825 * self.t)
Y1 += 0.00000000028 * math.cos(4.61151969244 + 77204.57131201689 * self.t)
Y1 += 0.00000000036 * math.cos(3.92497606168 + 25132.0595824821 * self.t)
Y1 += 0.00000000027 * math.cos(0.76041081814 + 54509.2464935039 * self.t)
Y1 += 0.00000000026 * math.cos(1.38925912724 + 52061.61081194667 * self.t)
Y1 += 0.00000000025 * math.cos(5.15879998687 + 29428.7593857575 * self.t)
Y1 += 0.00000000031 * math.cos(2.93989277053 + 18093.13088206649 * self.t)
Y1 += 0.00000000030 * math.cos(1.42238495741 + 26941.3433408097 * self.t)
Y1 += 0.00000000025 * math.cos(4.73209653345 + 956.53297345411 * self.t)
Y1 += 0.00000000027 * math.cos(2.35402357443 + 18093.6185170335 * self.t)
Y1 += 0.00000000028 * math.cos(3.75976953619 + 104358.96993078108 * self.t)
Y1 += 0.00000000026 * math.cos(0.11444202162 + 26237.7101561923 * self.t)
Y1 += 0.00000000024 * math.cos(3.77280410253 + 22747.5345323579 * self.t)
Y1 += 0.00000000024 * math.cos(2.17839409058 + 46514.23041651269 * self.t)
Y1 += 0.00000000029 * math.cos(2.57773209519 + 131499.14145554368 * self.t)
Y1 += 0.00000000024 * math.cos(3.54215594406 + 42154.2128205325 * self.t)
Y1 += 0.00000000024 * math.cos(1.15126185636 + 54294.81396101029 * self.t)
Y1 += 0.00000000023 * math.cos(5.30608917177 + 146314.37712071768 * self.t)
Y1 += 0.00000000024 * math.cos(4.83980442527 + 26107.32908499049 * self.t)
Y1 += 0.00000000026 * math.cos(2.88358798527 + 25021.6514818677 | |
<reponame>gpescia/MyNetKet
# Copyright 2021 The NetKet Authors - All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import numpy as np
import jax
from jax import numpy as jnp
from jax.experimental import loops
from netket import config
from netket.utils.types import PyTree, PRNGKeyT
from netket.utils import struct
# for deprecations
from netket.utils import wraps_legacy
from netket.legacy.machine import AbstractMachine
from netket.legacy.sampler import (
MetropolisLocalPt as LegacyMetropolisLocalPt,
MetropolisExchangePt as LegacyMetropolisExchangePt,
MetropolisHamiltonianPt as LegacyMetropolisHamiltonianPt,
)
from .metropolis import MetropolisSamplerState, MetropolisSampler
@struct.dataclass
class MetropolisPtSamplerState(MetropolisSamplerState):
    """Sampler state for :class:`MetropolisPtSampler`, extending the plain
    Metropolis state with per-replica temperature bookkeeping."""

    # Inverse temperatures of every replica, shape (n_chains, n_replicas).
    beta: jnp.ndarray = None
    # Acceptance counters, one per (chain, replica).
    n_accepted_per_beta: jnp.ndarray = None
    # Index of the replica currently holding beta = 1 in each chain.
    beta_0_index: jnp.ndarray = None
    # Diagnostics tracking how replicas move through temperature space.
    beta_position: jnp.ndarray = None
    beta_diffusion: jnp.ndarray = None
    # Number of replica-exchange sweeps performed so far.
    exchange_steps: int = 0

    def __repr__(self):
        if self.n_steps > 0:
            acc_string = "# accepted = {}/{} ({}%), ".format(
                self.n_accepted, self.n_steps, self.acceptance * 100
            )
        else:
            acc_string = ""
        # Fixed: the original built a dead first string using the wrong class
        # name ("MetropolisNumpySamplerState") and the returned one was
        # missing its closing parenthesis.
        return "MetropolisPtSamplerState(" + acc_string + "rng state={})".format(
            self.rng
        )
_init_doc = r"""
``MetropolisSampler`` is a generic Metropolis-Hastings sampler using
a transition rule to perform moves in the Markov Chain.
The transition kernel is used to generate
a proposed state :math:`s^\prime`, starting from the current state :math:`s`.
The move is accepted with probability
.. math::
A(s\rightarrow s^\prime) = \mathrm{min}\left (1,\frac{P(s^\prime)}{P(s)} F(e^{L(s,s^\prime)})\right),
where the probability being sampled from is :math:`P(s)=|M(s)|^p. Here ::math::`M(s)` is a
user-provided function (the machine), :math:`p` is also user-provided with default value :math:`p=2`,
and :math:`L(s,s^\prime)` is a suitable correcting factor computed by the transition kernel.
Args:
hilbert: The hilbert space to sample
rule: A `MetropolisRule` to generate random transitions from a given state as
well as uniform random states.
n_chains: The number of Markov Chain to be run in parallel on a single process.
n_sweeps: The number of exchanges that compose a single sweep.
If None, sweep_size is equal to the number of degrees of freedom being sampled
(the size of the input vector s to the machine).
n_chains: The number of batches of the states to sample (default = 8)
machine_pow: The power to which the machine should be exponentiated to generate the pdf (default = 2).
dtype: The dtype of the statees sampled (default = np.float32).
"""
@struct.dataclass(init_doc=_init_doc)
class MetropolisPtSampler(MetropolisSampler):
"""
Metropolis-Hastings with Parallel Tempering sampler.
This sampler samples an Hilbert space, producing samples off a specific dtype.
The samples are generated according to a transition rule that must be
specified.
"""
n_replicas: int = struct.field(pytree_node=False, default=32)
"""The number of replicas"""
def __post_init__(self):
if not config.FLAGS["NETKET_EXPERIMENTAL"]:
raise RuntimeError(
"""
Parallel Tempering samplers are under development and
are known not to work.
If you want to debug it, set the environment variable
NETKET_EXPERIMENTAL=1
"""
)
super().__post_init__()
if (
not isinstance(self.n_replicas, int)
and self.n_replicas > 0
and np.mod(self.n_replicas, 2) == 0
):
raise ValueError("n_replicas must be an even integer > 0.")
@property
def n_batches(self):
return self.n_chains * self.n_replicas
    def _init_state(
        sampler, machine, params: PyTree, key: PRNGKeyT
    ) -> MetropolisPtSamplerState:
        """Build the initial sampler state with all counters zeroed."""
        key_state, key_rule = jax.random.split(key, 2)
        # NOTE(review): chains start from the all-zero configuration here;
        # random configurations are only drawn in _reset — confirm intended.
        σ = jnp.zeros(
            (sampler.n_batches, sampler.hilbert.size),
            dtype=sampler.dtype,
        )
        rule_state = sampler.rule.init_state(sampler, machine, params, key_rule)
        # Inverse temperatures spaced linearly from 1 down to 1/n_replicas,
        # then replicated so every physical chain has its own ladder.
        beta = 1.0 - jnp.arange(sampler.n_replicas) / sampler.n_replicas
        beta = jnp.tile(beta, (sampler.n_chains, 1))
        return MetropolisPtSamplerState(
            σ=σ,
            rng=key_state,
            rule_state=rule_state,
            n_steps_proc=0,
            n_accepted_proc=0,
            beta=beta,
            # index of the replica currently at beta = 1 for each chain
            beta_0_index=jnp.zeros((sampler.n_chains,), dtype=int),
            n_accepted_per_beta=jnp.zeros(
                (sampler.n_chains, sampler.n_replicas), dtype=int
            ),
            # diagnostics for replica movement through temperature space
            beta_position=jnp.zeros((sampler.n_chains,)),
            beta_diffusion=jnp.zeros((sampler.n_chains,)),
            exchange_steps=0,
        )
    def _reset(sampler, machine, parameters: PyTree, state: MetropolisPtSamplerState):
        """Draw fresh random configurations and zero counters/diagnostics."""
        new_rng, rng = jax.random.split(state.rng)
        σ = sampler.rule.random_state(sampler, machine, parameters, state, rng)
        rule_state = sampler.rule.reset(sampler, machine, parameters, state)
        # NOTE(review): beta is recomputed here but never stored back into the
        # state — the `beta=` field in the replace() call below is commented
        # out, so these two lines are dead code; confirm whether the
        # temperature ladder should be re-initialized on reset.
        beta = 1.0 - jnp.arange(sampler.n_replicas) / sampler.n_replicas
        beta = jnp.tile(beta, (sampler.n_chains, 1))
        return state.replace(
            σ=σ,
            rng=new_rng,
            rule_state=rule_state,
            n_steps_proc=0,
            n_accepted_proc=0,
            n_accepted_per_beta=jnp.zeros((sampler.n_chains, sampler.n_replicas)),
            beta_position=jnp.zeros((sampler.n_chains,)),
            beta_diffusion=jnp.zeros((sampler.n_chains)),
            exchange_steps=0,
            # beta=beta,
            # beta_0_index=jnp.zeros((sampler.n_chains,), dtype=jnp.int32),
        )
def _sample_next(
sampler, machine, parameters: PyTree, state: MetropolisPtSamplerState
):
new_rng, rng = jax.random.split(state.rng)
# def cbr(data):
# new_rng, rng = data
# print("sample_next newrng:\n", new_rng, "\nand rng:\n", rng)
# return new_rng
# new_rng = hcb.call(
# cbr,
# (new_rng, rng),
# result_shape=jax.ShapeDtypeStruct(new_rng.shape, new_rng.dtype),
# )
with loops.Scope() as s:
s.key = rng
s.σ = state.σ
s.log_prob = sampler.machine_pow * machine.apply(parameters, state.σ).real
s.beta = state.beta
# for logging
s.beta_0_index = state.beta_0_index
s.n_accepted_per_beta = state.n_accepted_per_beta
s.beta_position = state.beta_position
s.beta_diffusion = state.beta_diffusion
for i in s.range(sampler.n_sweeps):
# 1 to propagate for next iteration, 1 for uniform rng and n_chains for transition kernel
s.key, key1, key2, key3, key4 = jax.random.split(s.key, 5)
# def cbi(data):
# i, beta = data
# print("sweep #", i, " for beta=\n", beta)
# return beta
# beta = hcb.call(
# cbi,
# (i, s.beta),
# result_shape=jax.ShapeDtypeStruct(s.beta.shape, s.beta.dtype),
# )
beta = s.beta
σp, log_prob_correction = sampler.rule.transition(
sampler, machine, parameters, state, key1, s.σ
)
proposal_log_prob = (
sampler.machine_pow * machine.apply(parameters, σp).real
)
uniform = jax.random.uniform(key2, shape=(sampler.n_batches,))
if log_prob_correction is not None:
do_accept = uniform < jnp.exp(
beta.reshape((-1,))
* (proposal_log_prob - s.log_prob + log_prob_correction)
)
else:
do_accept = uniform < jnp.exp(
beta.reshape((-1,)) * (proposal_log_prob - s.log_prob)
)
# do_accept must match ndim of proposal and state (which is 2)
s.σ = jnp.where(do_accept.reshape(-1, 1), σp, s.σ)
n_accepted_per_beta = s.n_accepted_per_beta + do_accept.reshape(
(sampler.n_chains, sampler.n_replicas)
)
s.log_prob = jax.numpy.where(
do_accept.reshape(-1), proposal_log_prob, s.log_prob
)
# exchange betas
# randomly decide if every set of replicas should be swapped in even or odd order
swap_order = jax.random.randint(
key3,
minval=0,
maxval=2,
shape=(sampler.n_chains,),
) # 0 or 1
# iswap_order = jnp.mod(swap_order + 1, 2) # 1 or 0
# indices of even swapped elements (per-row)
idxs = jnp.arange(0, sampler.n_replicas, 2).reshape(
(1, -1)
) + swap_order.reshape((-1, 1))
# indices off odd swapped elements (per-row)
inn = (idxs + 1) % sampler.n_replicas
# for every rows of the input, swap elements at idxs with elements at inn
@partial(jax.vmap, in_axes=(0, 0, 0), out_axes=0)
def swap_rows(beta_row, idxs, inn):
proposed_beta = jax.ops.index_update(
beta_row,
idxs,
beta_row[inn],
unique_indices=True,
indices_are_sorted=True,
)
proposed_beta = jax.ops.index_update(
proposed_beta,
inn,
beta_row[idxs],
unique_indices=True,
indices_are_sorted=False,
)
return proposed_beta
proposed_beta = swap_rows(beta, idxs, inn)
@partial(jax.vmap, in_axes=(0, 0, 0), out_axes=0)
def compute_proposed_prob(prob, idxs, inn):
prob_rescaled = prob[idxs] + prob[inn]
return prob_rescaled
# compute the probability of the swaps
log_prob = (proposed_beta - state.beta) * s.log_prob.reshape(
(sampler.n_chains, sampler.n_replicas)
)
prob_rescaled = jnp.exp(compute_proposed_prob(log_prob, idxs, inn))
prob_rescaled = jnp.exp(compute_proposed_prob(log_prob, idxs, inn))
uniform = jax.random.uniform(
key4, shape=(sampler.n_chains, sampler.n_replicas // 2)
)
do_swap = uniform < prob_rescaled
do_swap = jnp.dstack((do_swap, do_swap)).reshape(
(-1, sampler.n_replicas)
) # concat along last dimension
# roll if swap_ordeer is odd
@partial(jax.vmap, in_axes=(0, 0), out_axes=0)
def fix_swap(do_swap, swap_order):
return jax.lax.cond(
swap_order == 0, lambda x: x, lambda x: jnp.roll(x, 1), do_swap
)
do_swap = fix_swap(do_swap, swap_order)
# jax.experimental.host_callback.id_print(state.beta)
# jax.experimental.host_callback.id_print(proposed_beta)
# new_beta = jax.numpy.where(do_swap, proposed_beta, beta)
def cb(data):
_bt, _pbt, new_beta, so, do_swap, log_prob, prob = data
print("--------.---------.---------.--------")
print(" cur beta:\n", _bt)
print("proposed beta:\n", _pbt)
print(" new beta:\n", new_beta)
print("swaporder :", so)
print("do_swap :\n", do_swap)
print("log_prob;\n", log_prob)
print("prob_rescaled;\n", prob)
return new_beta
# new_beta = hcb.call(
# cb,
# (
# beta,
# proposed_beta,
# new_beta,
# swap_order,
# do_swap,
# log_prob,
# prob_rescaled,
# ),
# result_shape=jax.ShapeDtypeStruct(new_beta.shape, new_beta.dtype),
# )
# s.beta = new_beta
swap_order = swap_order.reshape(-1)
beta_0_moved = jax.vmap(
lambda do_swap, i: do_swap[i], in_axes=(0, 0), out_axes=0
)(do_swap, state.beta_0_index)
proposed_beta_0_index = jnp.mod(
state.beta_0_index
+ (-jnp.mod(swap_order, 2) * 2 + 1)
* (-jnp.mod(state.beta_0_index, 2) * 2 + 1),
sampler.n_replicas,
)
s.beta_0_index = jnp.where(
beta_0_moved, proposed_beta_0_index, s.beta_0_index
)
# swap acceptances
swapped_n_accepted_per_beta = swap_rows(n_accepted_per_beta, idxs, inn)
s.n_accepted_per_beta = jax.numpy.where(
do_swap,
swapped_n_accepted_per_beta,
n_accepted_per_beta,
)
# Update statistics to compute diffusion coefficient of replicas
# Total exchange steps performed
delta = s.beta_0_index - s.beta_position
s.beta_position = s.beta_position + delta / (state.exchange_steps + i)
delta2 = s.beta_0_index - s.beta_position
s.beta_diffusion = s.beta_diffusion + delta * delta2
new_state = state.replace(
rng=new_rng,
σ=s.σ,
# n_accepted=s.accepted,
n_steps_proc=state.n_steps_proc + sampler.n_sweeps * sampler.n_chains,
beta=s.beta,
beta_0_index=s.beta_0_index,
beta_position=s.beta_position,
beta_diffusion=s.beta_diffusion,
exchange_steps=state.exchange_steps + sampler.n_sweeps,
n_accepted_per_beta=s.n_accepted_per_beta,
)
offsets = jnp.arange(
0, sampler.n_chains * sampler.n_replicas, sampler.n_replicas
)
return new_state, new_state.σ[new_state.beta_0_index + offsets, | |
import numpy as np
import torch
import data
import utils
class BatchGenerator(object):
    """Abstract interface for batch generators.

    Concrete subclasses yield training batches from ``__call__``, yield
    evaluation batches from ``for_eval``, and report the number of batches
    per epoch via ``total_batches``. The base implementations do nothing.
    """

    def __call__(self):
        """Yield training batches."""

    def for_eval(self):
        """Yield evaluation batches."""

    def total_batches(self):
        """Return the number of batches per epoch."""
class DocumentBatchGenerator(BatchGenerator):
    """A batch generator that only generates word batches.

    Each seq in a batch is a sequence of tokens in the input document, not
    necessarily a sentence (i.e. training is done via truncated back-prop).
    SentenceBatchGenerator, on the other hand, handles each sentence
    independently.

    Each __call__ yields a tuple of ((sources, targets), is_tag, tag_alpha).
    is_tag indicates whether this batch is a batch of tags rather than words;
    for this class it is fixed to the value given to the initializer.
    tag_alpha, which weights the loss computation for this batch, is fixed
    to 1.0. These values vary per batch in `SentenceAndTagBatchGenerator`,
    where each batch may hold word sequences or tag sequences.
    """
    def __init__(self, tensors, batch_size, bptt, shuffle=False, is_tag=False):
        self.tensors = tensors
        self.data = utils.batchify(tensors, batch_size, batch_first=True)
        self.batch_size = batch_size
        self.bptt = bptt
        self.shuffle = shuffle
        # BUG FIX: this was hard-coded to False, silently ignoring the
        # `is_tag` constructor argument.
        self.is_tag = is_tag

    def __call__(self):
        if self.shuffle:
            # I know shuffling the sentences in the document mode is
            # non-sensical. This is just experimental to see the importance
            # of sentence order for document-LM.
            np.random.shuffle(self.tensors)
            self.data = utils.batchify(self.tensors, self.batch_size, batch_first=True)
        i = 0
        while i < self.data.size(1) - 1 - 1:
            # Occasionally halve the window (AWD-LSTM-style variable BPTT).
            bptt = self.bptt if np.random.random() < 0.95 else self.bptt / 2.
            # Prevent excessively small or negative sequence lengths.
            # BUG FIX: jitter around the locally chosen `bptt` (which may
            # have been halved above), not the fixed self.bptt.
            seq_len = max(5, int(np.random.normal(bptt, 5)))
            # There's a very small chance that it could select a very long
            # sequence length resulting in OOM
            # seq_len = min(seq_len, args.bptt + 10)
            sources, targets = utils.get_batch(
                self.data, i, bptt, seq_len=seq_len, batch_first=True)
            yield (sources, targets), self.is_tag, 1.0
            i += seq_len

    def for_eval(self):
        for i in range(0, self.data.size(1) - 1, self.bptt):
            sources, targets = utils.get_batch(
                self.data, i, self.bptt, evaluation=True, batch_first=True)
            yield sources, targets

    def total_batches(self):
        return self.data.size(1) // self.bptt
class SentenceBatchGenerator(BatchGenerator):
    """Generates padded sentence batches.

    With both `shuffle` and `length_bucket` set, sentences are sorted by
    length, bucketed into batches of similar length, and the batch order is
    shuffled; otherwise the sentence list is (optionally) shuffled in place
    and sliced sequentially. Yields ((sources, lengths, targets), False, 1.0).
    """
    def __init__(self, tensors, batch_size, pad_id, shuffle=False, length_bucket=False):
        self.tensors = tensors
        self.batch_size = batch_size
        self.pad_id = pad_id
        self.shuffle = shuffle
        self.length_bucket = length_bucket

    def __call__(self):
        if self.shuffle and self.length_bucket:
            sents_sorted = sorted(self.tensors, key=lambda x: len(x))
            # BUG FIX: bucket the length-sorted sentences. Previously the
            # unsorted self.tensors was bucketed, leaving `sents_sorted`
            # unused and the buckets without length grouping.
            batches = [b for b in data.batches_in_buckets(sents_sorted, self.batch_size)]
            np.random.shuffle(batches)
            for batch in batches:
                sources, lengths, targets = utils.get_sorted_sentences_batch(
                    batch, 0, self.batch_size, self.pad_id)
                yield (sources, lengths, targets), False, 1.0
        else:
            if self.shuffle:
                np.random.shuffle(self.tensors)
            for i in range(0, len(self.tensors), self.batch_size):
                # Create a batch matrix with padding.
                sources, lengths, targets = utils.get_sorted_sentences_batch(
                    self.tensors, i, self.batch_size, self.pad_id)
                yield (sources, lengths, targets), False, 1.0

    def for_eval(self):
        for i in range(0, len(self.tensors), self.batch_size):
            sources, lengths, targets = utils.get_sorted_sentences_batch(
                self.tensors, i, self.batch_size, self.pad_id)
            yield sources, lengths, targets

    def total_batches(self):
        return len(self.tensors) // self.batch_size
def mk_pairs_batch(batch_pairs, pad_id):
    """Build one agreement batch from (gold, wrong, is_obj_rel) triples.

    Pairs are sorted by gold-sentence length (descending) for packing; gold
    and wrong sequences are padded with `pad_id`. A distracted sentence is
    assumed to have the same length as its gold counterpart, so a single
    `lengths` tensor is shared between them (checked by assertion).

    Returns (gold_sources, lengths, gold_targets, wrong_sources, wrong_targets).
    """
    batch_pairs = sorted(batch_pairs, key=lambda pair: len(pair[0]), reverse=True)
    assert len(batch_pairs[0]) == 3
    golds = [pair[0] for pair in batch_pairs]
    wrongs = [pair[1] for pair in batch_pairs]
    # NOTE: pair[2] (the object-relative flag) is intentionally not extracted
    # here; it is only used upstream when choosing which pairs enter a batch.
    gold_sources, lengths, gold_targets = utils.get_sorted_sentences_batch(
        golds, 0, len(golds), pad_id, sorted=True)
    wrong_sources, wrong_lengths, wrong_targets = utils.get_sorted_sentences_batch(
        wrongs, 0, len(wrongs), pad_id, sorted=True)
    assert lengths.eq(wrong_lengths).all()
    return (gold_sources, lengths, gold_targets, wrong_sources, wrong_targets)
def mk_batches_list(seq, batch_size, shuffle, length_bucket, len_fun=len):
    """Partition `seq` into a list of batches of size `batch_size`.

    If `shuffle` and `length_bucket` are both set, the sequence is first
    sorted by `len_fun`, bucketed into similar-length batches via
    `data.batches_in_buckets`, and the batch order is shuffled. Otherwise
    the sequence is (optionally) shuffled **in place** and sliced
    sequentially; the final batch may be shorter than `batch_size`.
    """
    if shuffle and length_bucket:
        seq_sorted = sorted(seq, key=len_fun)
        batches = list(data.batches_in_buckets(seq_sorted, batch_size))
        np.random.shuffle(batches)
    else:
        if shuffle:
            np.random.shuffle(seq)  # NOTE: mutates the caller's list
        batches = [seq[j:j + batch_size] for j in range(0, len(seq), batch_size)]
    return batches
class AgreementPairBatchGenerator(BatchGenerator):
    """Yields batches of (gold, wrong) sentence pairs for agreement training.

    Each batch is a quintuple
    (gold_sources, lengths, gold_targets, wrong_sources, wrong_targets):
    the gold entries score the grammatical sequence, the wrong entries the
    distracted (ungrammatical) one. A single `lengths` tensor is shared,
    since a distracted sentence has the same length as its original (it
    differs by a minimal change).
    """

    def __init__(self, tensor_pairs, batch_size, pad_id, shuffle=False):
        self.tensor_pairs = tensor_pairs
        self.batch_size = batch_size
        self.pad_id = pad_id
        self.shuffle = shuffle

    def __call__(self):
        """Yield pair batches, optionally shuffling the pair list first."""
        if self.shuffle:
            np.random.shuffle(self.tensor_pairs)
        return self._gen()

    def _gen(self):
        # Slice the pair list into consecutive batches; each yielded item is
        # (batch, is_tag=False, tag_alpha=1.0).
        n_pairs = len(self.tensor_pairs)
        for start in range(0, n_pairs, self.batch_size):
            chunk = self.tensor_pairs[start:start + self.batch_size]
            yield mk_pairs_batch(chunk, self.pad_id), False, 1.0

    def for_eval(self):
        return self._gen()

    def total_batches(self):
        return len(self.tensor_pairs) // self.batch_size
class AgreementPairBatchSampler(object):
    """Endlessly serves batches of agreement pairs.

    `tensor_pairs` is a list (one entry per sentence) of lists of pair
    examples. With `one_pair_per_sent`, one example is re-sampled per
    sentence each time the batch list is rebuilt; otherwise all examples are
    flattened and served in order. When the current batch list is exhausted,
    `next()` rebuilds it and starts over.
    """

    def __init__(self, tensor_pairs, batch_size, one_pair_per_sent=False):
        self.tensor_pairs = tensor_pairs
        self.batch_size = batch_size
        self.one_pair_per_sent = one_pair_per_sent
        if not one_pair_per_sent:
            from itertools import chain
            self.flatten = list(chain.from_iterable(self.tensor_pairs))
        # BUG FIX: init_batches() reads self.one_pair_per_sent and (possibly)
        # self.flatten, so it must run after those attributes are assigned.
        # Previously it was called first, raising AttributeError on every
        # construction.
        self.batches = self.init_batches()
        self.idx = 0

    def init_batches(self):
        """Build a fresh list of batches from the configured example pool."""
        if self.one_pair_per_sent:
            def sample(sent_pairs):
                return sent_pairs[np.random.randint(len(sent_pairs))]
            examples = [sample(sent_pairs) for sent_pairs in self.tensor_pairs]
        else:
            examples = self.flatten
        t = examples
        b = self.batch_size
        return [t[j:j + b] for j in range(0, len(t), b)]

    def next(self):
        """Return the next batch, rebuilding (and resampling) when exhausted."""
        if self.idx < len(self.batches):
            self.idx += 1
            return self.batches[self.idx - 1]
        else:
            assert self.idx == len(self.batches)
            self.batches = self.init_batches()
            self.idx = 0
            return self.next()
class AgreementBatchGeneratorBase(BatchGenerator):
    """Base for generators mixing main LM batches with agreement-pair batches.

    `tensor_pairs` is a list (one entry per sentence) of lists of
    (gold_tensor, wrong_tensor, is_object_relative) triples. Subclasses
    provide the main batches via `_gen_main_batches`; this class owns every
    method that manipulates tensor_pairs (some may be specialized, or
    irrelevant, in a child class).
    """

    def __init__(self,
                 tensor_pairs,
                 batch_size,
                 pad_id,
                 shuffle,
                 upsample_agreement,
                 agreement_loss_alpha,
                 half_agreement_batch,
                 one_pair_per_sent,
                 agreement_sample_ratio,
                 prefer_obj_rel):
        # Sanity-check the nested structure: list -> list -> (tensor, tensor, bool).
        assert isinstance(tensor_pairs, list)
        # BUG FIX: the inner checks used to index tensor_pairs[0] even when
        # tensor_pairs was empty, raising IndexError; they are now nested
        # under the outer emptiness guard.
        if len(tensor_pairs) > 0:
            assert isinstance(tensor_pairs[0], list)
            if len(tensor_pairs[0]) > 0:
                assert isinstance(tensor_pairs[0][0], tuple)
                assert len(tensor_pairs[0][0]) == 3
                assert isinstance(tensor_pairs[0][0][0], torch.Tensor)
                assert isinstance(tensor_pairs[0][0][1], torch.Tensor)
                assert isinstance(tensor_pairs[0][0][2], bool)
        self.tensor_pairs = tensor_pairs
        self.batch_size = batch_size
        # Pair batches may be half-sized so that gold+wrong together match
        # the footprint of one normal batch.
        self.ag_batch_size = self.batch_size // 2 if half_agreement_batch \
            else self.batch_size
        self.pad_id = pad_id
        self.shuffle = shuffle
        self.upsample_agreement = upsample_agreement
        self.agreement_loss_alpha = agreement_loss_alpha
        self.half_agreement_batch = half_agreement_batch
        self.one_pair_per_sent = one_pair_per_sent
        self.agreement_sample_ratio = agreement_sample_ratio
        self.prefer_obj_rel = prefer_obj_rel
        self.pair_batch_sampler = None
        if self.upsample_agreement:
            self.pair_batch_sampler = AgreementPairBatchSampler(
                self.tensor_pairs, self.ag_batch_size)
        if not self.upsample_agreement and not self.one_pair_per_sent:
            from itertools import chain
            self.flatten_pairs = list(chain.from_iterable(self.tensor_pairs))

    def __call__(self):
        """A batch for this generator is either a sentence (or document)
        batch, or a pair batch (for optimizing sensitivity to agreement
        errors). Generally there are fewer training instances for agreement;
        `upsample_agreement` reduces this bias by upsampling pair batches and
        alternating the two batch types. Otherwise every pair is visited once.
        """
        if self.upsample_agreement:
            return self.alternate()
        else:
            return self.visit_once()

    def alternate(self):
        # Interleave: one (re-sampled) pair batch after every main batch.
        for main_batch in self._gen_main_batches():
            yield main_batch
            yield self._sample_pairs_batch()

    def visit_once(self):
        def maybe_iter_to_list(gen):
            if not isinstance(gen, list):
                gen = [a for a in gen]
            return gen

        main_batches = self._gen_main_batches()
        # main_batches = maybe_iter_to_list(main_batches)
        pair_examples = self._gen_cand_pairs()
        pair_examples = maybe_iter_to_list(pair_examples)
        if self.prefer_obj_rel:  # always contains obj rel cases.
            obj_rel_examples = [p for p in pair_examples if p[2]]
            non_obj_rel_examples = [p for p in pair_examples if not p[2]]
            preferred_pair_batches = mk_batches_list(
                obj_rel_examples, self.ag_batch_size, self.shuffle, False)
            pair_batches = mk_batches_list(
                non_obj_rel_examples, self.ag_batch_size, self.shuffle, False)
        else:
            preferred_pair_batches = []
            pair_batches = mk_batches_list(
                pair_examples, self.ag_batch_size, self.shuffle, False)
        # Tag each batch with whether it is a pair batch.
        main_batches = [(b, False) for b in main_batches]
        preferred_pair_batches = [(b, True) for b in preferred_pair_batches]
        pair_batches = [(b, True) for b in pair_batches]
        if self.agreement_sample_ratio > 0:
            # Repeat whole passes over pair_batches (shuffling the final,
            # partial pass) until there are `ratio` times as many pair
            # batches as main batches; preferred (object-relative) batches
            # are always included.
            def get_sample_recur(n_remain, current_batches):
                if n_remain <= 0:
                    return current_batches
                if n_remain < len(pair_batches):
                    np.random.shuffle(pair_batches)
                    return current_batches + pair_batches[:n_remain]
                else:
                    combined = current_batches + pair_batches
                    return get_sample_recur(n_remain - len(pair_batches), combined)
            r = self.agreement_sample_ratio
            n = int(len(main_batches) * r)
            print('size of preferred: {}'.format(len(preferred_pair_batches)))
            print('size of others: {}'.format(len(pair_batches)))
            pair_batches = get_sample_recur(n - len(preferred_pair_batches),
                                            preferred_pair_batches)
            print('total batches for agreement: {}'.format(len(pair_batches)))
        else:
            # preferred_pair_batches is not meaningful when
            # agreement_sample_ratio is inactive.
            if len(preferred_pair_batches) > 0:
                # recover the original, non segmented pair_batches
                pair_batches += preferred_pair_batches
        batches = main_batches + pair_batches
        if self.shuffle:
            np.random.shuffle(batches)
        for batch, is_pair in batches:
            if is_pair:
                batch = mk_pairs_batch(batch, self.pad_id)
                yield batch, False, self.agreement_loss_alpha
            else:
                yield batch

    def _gen_main_batches(self):
        """Yield the non-pair (main) batches; implemented by subclasses."""
        pass

    def _gen_cand_pairs(self):
        """Return the candidate pair triples to batch over."""
        if self.one_pair_per_sent:
            def sample(sent_pairs):
                return sent_pairs[np.random.randint(len(sent_pairs))]
            return [sample(sent_pairs) for sent_pairs in self.tensor_pairs]
        else:
            return self.flatten_pairs

    def _sample_pairs_batch(self):
        """Draw one (possibly repeated) pair batch from the upsampler."""
        assert self.upsample_agreement
        batch = self.pair_batch_sampler.next()
        batch = mk_pairs_batch(batch, self.pad_id)
        return batch, False, self.agreement_loss_alpha
class SentenceAndAgreementBatchGenerator(AgreementBatchGeneratorBase):
def __init__(self,
tensors,
tensor_pairs,
batch_size,
pad_id,
shuffle=False,
length_bucket=False,
upsample_agreement=False,
agreement_loss_alpha=1.0,
half_agreement_batch=False,
one_pair_per_sent=False,
agreement_sample_ratio=0.0,
prefer_obj_rel=False):
super(SentenceAndAgreementBatchGenerator, self).__init__(
tensor_pairs,
batch_size,
pad_id,
shuffle,
upsample_agreement,
agreement_loss_alpha,
half_agreement_batch,
one_pair_per_sent,
agreement_sample_ratio,
prefer_obj_rel)
assert isinstance(tensors, list)
if tensors: assert isinstance(tensors[0], torch.Tensor)
self.tensors = tensors
self.length_bucket = length_bucket
def _gen_main_batches(self):
def sent_batch_to_ready(batch):
sources, lengths, targets = utils.get_sorted_sentences_batch(
batch, 0, self.batch_size, self.pad_id)
return (sources, | |
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import numpy as np
import casadi
from casadi import SX, DM
from math import cos, sin
import plotly.graph_objects as go
# Dash application object; `server` exposes the underlying WSGI app for
# deployment (e.g. gunicorn).
app = dash.Dash(__name__)
server = app.server

# Static page layout: header, intro text, trajectory graph, parameter
# sliders, and a bottom row of parameter/output/initial-position cards.
# Component ids (m0Slider, c1Slider, ... , indicator-graphic) are wired to
# the update callback defined below.
app.layout = html.Div(
    children=[
        html.Div(
            children=[
                html.Img(src=app.get_asset_url("dash-logo.png"), id="logo"),
                html.H4("Dash Lunar Lander"),
                html.A(
                    id="github-link",
                    children=["View source code on Github"],
                    href="https://github.com/plotly/dash-sample-apps/blob/master/apps/dash-lunar-lander/app.py",
                ),
            ],
            className="app-header",
        ),
        html.Div(
            [
                html.H3("Lunar Lander Trade Study App"),
                html.P(
                    "This app allows for exploring the trade space of a lunar lander. You can adjust different "
                    "parameters on the lander, like max angular velocity and inital mass, using the sliders and a new "
                    "optimal trajectory will be recomputed in near real time using Casadi."
                ),
                html.A(
                    "The lunar lander model was based off of one in this paper,",
                    href="https://arxiv.org/pdf/1610.08668.pdf",
                    target="_blank",
                ),
                html.P(
                    "This model does not allow for the lander to gimbal its engine, instead it must turn the entire "
                    "spacecraft and then thrust to cancel out any horizontal velocity, giving the mass optimal control "
                    "case a distinctive hooked shape. Switching the optimizer to time-optimal control results in a "
                    "smoother and more expected shape."
                ),
            ]
        ),
        # Display the Trajectory
        dcc.Loading(
            [dcc.Graph(id="indicator-graphic")],
            style={"height": "450px", "verticalAlign": "middle"},
            type="dot",
        ),
        # Adjust the Spacecraft Parameters
        # Each slider's `marks` entry doubles as its on-page label.
        dcc.Slider(
            id="m0Slider",
            min=5000,
            max=15000,
            value=10000,
            step=10,
            marks={5000: {"label": "Inital Mass", "style": {"transform": "none"}}},
        ),
        dcc.Slider(
            id="c1Slider",
            min=35000,
            max=44000 * 1.1,
            value=44000,
            step=10,
            marks={35000: {"label": "Max Thrust", "style": {"transform": "none"}}},
        ),
        dcc.Slider(
            id="isp",
            min=300,
            max=330,
            value=311,
            step=1,
            marks={300: {"label": "ISP", "style": {"transform": "none"}}},
        ),
        dcc.Slider(
            id="c3Slider",
            min=0.01,
            max=0.1,
            value=0.0698,
            step=0.0001,
            marks={
                0.01: {"label": "Max Angular Velocity", "style": {"transform": "none"}}
            },
        ),
        dcc.Slider(
            id="gSlider",
            min=0.5,
            max=2,
            value=1.6229,
            step=0.0001,
            marks={0.5: {"label": "Gravity", "style": {"transform": "none"}}},
        ),
        html.Div(
            [
                # Display Current Spacecraft Parameters
                html.Div(
                    [
                        html.H5("Parameters"),
                        html.P("Inital Mass (kg):", id="m0Out"),
                        html.P("Max Thrust (N):", id="maThrustOut"),
                        html.P("ISP (s):", id="ispOut"),
                        html.P("C3 (rad/s):", id="c3Out"),
                        html.P("Gravity (N):", id="gravOut"),
                    ],
                ),
                html.Div(
                    [
                        # Choose Between Different Cost Functions
                        html.H5("Outputs"),
                        dcc.RadioItems(
                            options=[
                                {"label": "Mass Optimal Control", "value": 1},
                                {"label": "Time Optimal Control", "value": 2},
                            ],
                            id="costFun",
                            value=1,
                        ),
                        # Display Final Cost Functions
                        html.P("Final Mass (kg):", id="mfOut"),
                        html.P("TOF (s):", id="tof"),
                    ],
                ),
                html.Div(
                    [
                        html.H5("Adjust Initial Position"),
                        html.Div(
                            [
                                # DPad for Adjusting the Spacecrafts Initial Position
                                html.Div(
                                    [html.Button("Left", id="Left",),],
                                    className="direction-button",
                                ),
                                html.Div(
                                    [
                                        html.Button("Up", id="Up",),
                                        html.Button("Down", id="Down",),
                                    ],
                                    style={
                                        "display": "flex",
                                        "flex-direction": "column",
                                    },
                                    className="direction-button",
                                ),
                                html.Div(
                                    [html.Button("Right", id="Right",),],
                                    className="direction-button",
                                ),
                            ],
                            id="button-grid",
                        ),
                    ],
                    id="adjust-init-pos",
                ),
            ],
            id="bottom-cards",
        ),
    ],
    className="entire-app",
)
@app.callback(
    [
        Output("indicator-graphic", "figure"),
        Output("m0Out", "children"),
        Output("mfOut", "children"),
        Output("maThrustOut", "children"),
        Output("ispOut", "children"),
        Output("c3Out", "children"),
        Output("gravOut", "children"),
        Output("tof", "children"),
    ],
    [
        Input(component_id="m0Slider", component_property="value"),
        Input(component_id="c1Slider", component_property="value"),
        Input(component_id="isp", component_property="value"),
        Input(component_id="c3Slider", component_property="value"),
        Input(component_id="gSlider", component_property="value"),
        Input(component_id="Left", component_property="n_clicks"),
        Input(component_id="Right", component_property="n_clicks"),
        Input(component_id="Up", component_property="n_clicks"),
        Input(component_id="Down", component_property="n_clicks"),
        Input(component_id="costFun", component_property="value"),
    ],
)
def update_output_div(
    m0, c1, isp, c3, g, n_clicksL, n_clicksR, n_clicksU, n_clicksD, optimal
):
    """Re-solve the lander optimal-control problem for the current slider
    values and return the trajectory figure plus formatted read-outs.
    """
    # Buttons that were never pressed report None instead of a click count.
    n_clicksL = 0 if n_clicksL is None else n_clicksL
    n_clicksR = 0 if n_clicksR is None else n_clicksR
    n_clicksU = 0 if n_clicksU is None else n_clicksU
    n_clicksD = 0 if n_clicksD is None else n_clicksD

    # Each click nudges the initial position: 10 m horizontally, 100 m
    # vertically.
    x0 = -30 + 10 * (n_clicksR - n_clicksL)
    y0 = 1000 + 100 * (n_clicksU - n_clicksD)

    # Initial state [x, y, vx, vy, m, theta]; solve the optimal control
    # problem with the chosen cost function.
    IC = np.array([x0, y0, 10, 20, m0, 0])
    g0 = 9.81  # standard gravity, sets engine exhaust velocity (isp * g0)
    resultsRWSC = wrapperRWSC(IC, [c1, isp, g0, c3, g], optimal)

    # Unpack the solution; 30 collocation nodes separated by dt seconds.
    states = np.array(resultsRWSC["states"])
    tof = 30 * resultsRWSC["dt"]
    x = states[:, 0]
    y = states[:, 1]

    # Assemble the figure: trajectory line plus start/target markers.
    path_trace = go.Scatter(y=y, x=x, mode="lines", name="Trajectory")
    start_trace = go.Scatter(
        y=np.array(y[0]), x=np.array(x[0]), mode="markers", name="Inital Location"
    )
    target_trace = go.Scatter(
        y=np.array(y[-1]), x=np.array(x[-1]), mode="markers", name="Target"
    )
    if optimal == 1:
        titleName = "Lunar Lander - Mass Optimal Trajectory"
    else:
        titleName = "Lunar Lander - Time Optimal Trajectory"
    layout = go.Layout(
        title=titleName,
        yaxis={"title": "Height (meters)", "scaleanchor": "x", "scaleratio": 1},
        xaxis={"title": "Distance uprange from target (meters)"},
    )
    return (
        {"data": [start_trace, target_trace, path_trace], "layout": layout},
        "Inital Mass: " + str(m0) + " (kg)",
        "Final Mass: " + str(np.round(states[-1, 4], 2)) + " (kg)",
        "Max Thrust: " + str(c1) + " (N)",
        "ISP: " + str(isp) + " (S)",
        "Max c3: " + str(c3) + "(rad/s)",
        "Gravity: " + str(g) + "(N)",
        "TOF: " + str(tof) + "(s)",
    )
def RWSC(states, controls, args):
    """Equations of motion for the reaction-wheel spacecraft.

    See https://arxiv.org/pdf/1610.08668.pdf, Equation 13. Operates row-wise
    on `states` (nodes x 6: x, y, xDot, yDot, m, scTheta) and `controls`
    (nodes x 2: beta, u); works for both numpy arrays and casadi matrices.
    `args` is [c1 (max thrust), isp, g0, c3 (max turn rate), g (gravity)].
    Returns the time derivative of the state matrix.
    """
    # Unpack the state and control columns used by the dynamics.
    vx = states[:, 2]
    vy = states[:, 3]
    mass = states[:, 4]
    theta = states[:, 5]
    steer = controls[:, 0]
    throttle = controls[:, 1]
    thrust_max = args[0]
    isp = args[1]
    g0 = args[2]
    exhaust_velocity = isp * g0  # c2
    turn_rate = args[3]
    gravity = args[4]
    # Translational accelerations from thrust (resolved along the body
    # angle) and gravity; attitude rate and mass flow from the controls.
    accel_x = thrust_max * throttle * np.sin(theta) / mass
    accel_y = (thrust_max * throttle * np.cos(theta) / mass) - gravity
    # 0 * states yields a zero container of the same type as the input
    # (numpy array or casadi matrix).
    derivs = 0 * states
    derivs[:, 0] = vx
    derivs[:, 1] = vy
    derivs[:, 2] = accel_x
    derivs[:, 3] = accel_y
    derivs[:, 4] = -thrust_max * throttle / exhaust_velocity
    derivs[:, 5] = turn_rate * steer
    return derivs
def wrapperRWSC(IC, args, optimal):
# Converting the Optimal Control Problem into a Non-Linear Programming Problem
numStates = 6
numInputs = 2
nodes = 30 # Keep this Number Small to Reduce Runtime
dt = SX.sym("dt")
states = SX.sym("state", nodes, numStates)
controls = SX.sym("controls", nodes, numInputs)
variables_list = [dt, states, controls]
variables_name = ["dt", "states", "controls"]
variables_flat = casadi.vertcat(*[casadi.reshape(e, -1, 1) for e in variables_list])
pack_variables_fn = casadi.Function(
"pack_variables_fn", variables_list, [variables_flat], variables_name, ["flat"]
)
unpack_variables_fn = casadi.Function(
"unpack_variables_fn",
[variables_flat],
variables_list,
["flat"],
variables_name,
)
# Bounds
bds = [
[np.sqrt(np.finfo(float).eps), np.inf],
[-100, 300],
[0, np.inf],
[-np.inf, np.inf],
[-np.inf, np.inf],
[np.sqrt(np.finfo(float).eps), np.inf],
[-1, 1],
[np.sqrt(np.finfo(float).eps), 1],
]
lower_bounds = unpack_variables_fn(flat=-float("inf"))
lower_bounds["dt"][:, :] = bds[0][0]
lower_bounds["states"][:, 0] = bds[1][0]
lower_bounds["states"][:, 1] = bds[2][0]
lower_bounds["states"][:, 4] = bds[5][0]
lower_bounds["controls"][:, 0] = bds[6][0]
lower_bounds["controls"][:, 1] = bds[7][0]
upper_bounds = unpack_variables_fn(flat=float("inf"))
upper_bounds["dt"][:, :] = bds[0][1]
upper_bounds["states"][:, 0] = bds[1][1]
upper_bounds["controls"][:, 0] = bds[6][1]
upper_bounds["controls"][:, 1] = bds[7][1]
# Set Initial Conditions
# Casadi does not accept equality constraints, so boundary constraints are
# set as box constraints with 0 area.
lower_bounds["states"][0, 0] = IC[0]
lower_bounds["states"][0, 1] = IC[1]
lower_bounds["states"][0, 2] = IC[2]
lower_bounds["states"][0, 3] = IC[3]
lower_bounds["states"][0, 4] = IC[4]
lower_bounds["states"][0, 5] = IC[5]
upper_bounds["states"][0, 0] = IC[0]
upper_bounds["states"][0, 1] = IC[1]
upper_bounds["states"][0, 2] = IC[2]
upper_bounds["states"][0, 3] = IC[3]
upper_bounds["states"][0, 4] = IC[4]
upper_bounds["states"][0, 5] = IC[5]
# Set Final Conditions
# Currently set for a soft touchdown at the origin
lower_bounds["states"][-1, 0] = 0
lower_bounds["states"][-1, 1] = 0
lower_bounds["states"][-1, 2] = 0
lower_bounds["states"][-1, 3] = 0
lower_bounds["states"][-1, 5] = 0
upper_bounds["states"][-1, 0] = 0
upper_bounds["states"][-1, 1] = 0
upper_bounds["states"][-1, 2] = 0
upper_bounds["states"][-1, 3] = 0
upper_bounds["states"][-1, 5] = 0
# Initial Guess Generation
# Generate the initial guess as a line between initial and final conditions
xIG = np.array(
[
np.linspace(IC[0], 0, nodes),
np.linspace(IC[1], 0, nodes),
np.linspace(IC[2], 0, nodes),
np.linspace(IC[3], 0, nodes),
np.linspace(IC[4], IC[4] * 0.5, nodes),
np.linspace(IC[5], 0, nodes),
]
).T
uIG = np.array([np.linspace(0, 1, nodes), np.linspace(1, 1, nodes)]).T
ig_list = [60 / nodes, xIG, uIG]
ig_flat = casadi.vertcat(*[casadi.reshape(e, -1, 1) for e in ig_list])
# Generating Defect Vector
xLow = states[0 : (nodes - 1), :]
xHigh = states[1:nodes, :]
contLow = controls[0 : (nodes - 1), :]
contHi = controls[1:nodes, :]
contMid = (contLow + contHi) / 2
# Use a RK4 Method for Generating the Defects
k1 = RWSC(xLow, contLow, args)
k2 = RWSC(xLow + (0.5 * dt * k1), contMid, args)
k3 = RWSC(xLow + (0.5 * dt * k2), contMid, args)
k4 = RWSC(xLow + k3, contHi, args)
xNew = xLow + ((dt / 6) * (k1 | |
Oo0Ooo * oO0o . I11i / i1IIi
lisp . lisp_ms_rtr_list = [ ]
if ( I1IiIiiIiIII . has_key ( "address" ) ) :
for Ii1I1i in I1IiIiiIiIII [ "address" ] :
II = lisp . lisp_address ( lisp . LISP_AFI_NONE , "" , 0 , 0 )
II . store_address ( Ii1I1i )
lisp . lisp_ms_rtr_list . append ( II )
if 50 - 50: I1Ii111 / i1IIi % OoooooooOO
if 83 - 83: I1ii11iIi11i * I1ii11iIi11i + OOooOOo
return ( iI11 )
if 57 - 57: O0 - O0 . I1ii11iIi11i / o0oOOo0O0Ooo / Ii1I
if 20 - 20: OOooOOo * II111iiii - OoOoOO00 - oO0o * I1Ii111
if 6 - 6: ooOoO0o + OOooOOo / Oo0Ooo + IiII % II111iiii / OoO0O00
if 45 - 45: OoooooooOO
if 9 - 9: I11i . OoO0O00 * i1IIi . OoooooooOO
if 32 - 32: OoOoOO00 . I1ii11iIi11i % I1IiiI - II111iiii
if 11 - 11: O0 + I1IiiI
if 80 - 80: oO0o % oO0o % O0 - i11iIiiIii . iII111i / O0
#
# lisp_process_command_lines
#
# Machine-obfuscated lispers.net code (Python 2 era: note dict.has_key).
# The generated names and the no-op "if N - N:" statements come from the
# obfuscator; the code tokens below are preserved as-is, with comments added.
#
# Reads one configuration clause starting at `line` (its opening line) from
# iterator `old`, echoes/annotates it into `new`, and dispatches it to the
# LISP process(es) registered for that command in `lisp_commands`.
#
def lisp_process_command_lines ( lisp_socket , old , new , line ) :
    # Command keyword = text before the first "{", minus the trailing space.
    oOOI11I = line . split ( "{" )
    oOOI11I = oOOI11I [ 0 ]
    oOOI11I = oOOI11I [ 0 : - 1 ]
    if 13 - 13: I1IiiI + O0 - I1ii11iIi11i % Oo0Ooo / Ii1I . i1IIi
    lisp . lprint ( "Process the '{}' command" . format ( oOOI11I ) )
    if 60 - 60: Oo0Ooo . IiII % I1IiiI - I1Ii111
    if 79 - 79: OoooooooOO / I1ii11iIi11i . O0
    if 79 - 79: oO0o - II111iiii
    if 43 - 43: i1IIi + O0 % OoO0O00 / Ii1I * I1IiiI
    # Unknown command: annotate the line in the output file and stop.
    if ( lisp_commands . has_key ( oOOI11I ) == False ) :
        line = "#>>> " + line . replace ( "\n" , " <<< invalid command\n" )
        new . write ( line )
        return
    if 89 - 89: I1IiiI . Oo0Ooo + I1ii11iIi11i . O0 % o0oOOo0O0Ooo
    if 84 - 84: OoooooooOO + I1Ii111 / I1IiiI % OOooOOo % I1ii11iIi11i * I1IiiI
    if 58 - 58: OoO0O00 - OoOoOO00 . i11iIiiIii % i11iIiiIii / i1IIi / oO0o
    if 24 - 24: I1IiiI * i1IIi % ooOoO0o / O0 + i11iIiiIii
    if 12 - 12: I1ii11iIi11i / Ii1I
    # List of process names that should receive this command; the flag
    # tracks whether the clause has been echoed to `new` yet (echo once).
    ii11Ii11 = lisp_commands [ oOOI11I ]
    I1i1ii = False
    for Oo0 in ii11Ii11 :
        if ( Oo0 == "" ) :
            line = lisp_write_error ( line , "invalid command" )
            new . write ( line )
            return
        if 81 - 81: ooOoO0o * IiII * O0 * iIii1I11I1II1
        if 93 - 93: Oo0Ooo / I1ii11iIi11i + i1IIi * oO0o . OoooooooOO
        # On the first pass, accumulate the full clause text from `old`,
        # tracking nested begin/end markers until the depth returns to 0.
        if ( I1i1ii == False ) :
            Oo000 = line
            III11I1 = 1
            for line in old :
                if ( lisp_begin_clause ( line ) ) : III11I1 += 1
                Oo000 += line
                if ( lisp_end_clause ( line ) ) :
                    III11I1 -= 1
                    if ( III11I1 == 0 ) : break
        if 97 - 97: O0 / OOooOOo + o0oOOo0O0Ooo . oO0o % OoOoOO00 - OoOoOO00
        if 33 - 33: I11i % II111iiii + OoO0O00
        if 93 - 93: i1IIi . IiII / I1IiiI + IiII
        if 58 - 58: I1ii11iIi11i + O0 . Oo0Ooo + OoOoOO00 - OoO0O00 - OoOoOO00
        if 41 - 41: Oo0Ooo / i1IIi / Oo0Ooo - iII111i . o0oOOo0O0Ooo
        if 65 - 65: O0 * i11iIiiIii . OoooooooOO / I1IiiI / iII111i
        if 69 - 69: ooOoO0o % ooOoO0o
        # Target process not running: echo the clause unchanged (once) and
        # move on to the next process.
        if ( lisp . lisp_is_running ( Oo0 ) == False ) :
            if ( I1i1ii == False ) :
                # NOTE(review): iterates the clause string character by
                # character — equivalent to writing the whole string.
                for line in Oo000 : new . write ( line )
                I1i1ii = True
            if 76 - 76: i11iIiiIii * iII111i / OoO0O00 % I1ii11iIi11i + OOooOOo
            lisp . lprint ( "Process '{}' is not running, do not send command" . format ( Oo0 ) )
            if 48 - 48: iIii1I11I1II1 % i1IIi + OoOoOO00 % o0oOOo0O0Ooo
            continue
        if 79 - 79: OoOoOO00 % I1IiiI % Ii1I / i1IIi % OoO0O00
        if 56 - 56: iIii1I11I1II1 - i11iIiiIii * iII111i
        if 84 - 84: OOooOOo + Ii1I + o0oOOo0O0Ooo
        if 33 - 33: Ii1I
        if 93 - 93: ooOoO0o
        if 34 - 34: oO0o - ooOoO0o * Oo0Ooo / o0oOOo0O0Ooo
        # lisp-core handles a few commands locally; everything else goes
        # over the IPC socket to the owning process and we wait for a reply.
        if ( Oo0 == "lisp-core" ) :
            if ( Oo000 . find ( "enable" ) != - 1 ) :
                iI11 = lisp_enable_command ( Oo000 )
            if 19 - 19: I1ii11iIi11i
            if ( Oo000 . find ( "debug" ) != - 1 ) :
                iI11 = lisp_debug_command ( lisp_socket , Oo000 , None )
            if 46 - 46: iIii1I11I1II1 . i11iIiiIii - OoOoOO00 % O0 / II111iiii * i1IIi
            if ( Oo000 . find ( "user-account" ) != - 1 ) :
                iI11 = lisp_user_account_command ( Oo000 )
            if 66 - 66: O0
            if ( Oo000 . find ( "rtr-list" ) != - 1 ) :
                iI11 = lisp_rtr_list_command ( Oo000 )
            if 52 - 52: OoO0O00 * OoooooooOO
        else :
            Ii11iiI = lisp . lisp_command_ipc ( Oo000 , "lisp-core" )
            lisp . lisp_ipc ( Ii11iiI , lisp_socket , Oo0 )
            lisp . lprint ( "Waiting for response to config command '{}'" . format ( oOOI11I ) )
            if 71 - 71: I1Ii111 - o0oOOo0O0Ooo - OOooOOo
            if 28 - 28: iIii1I11I1II1
            # Blocking receive; the reply carries the (possibly rewritten)
            # clause text to echo.
            IIIIiIi11iiIi , i11iI11I1I , Ii1iiIi1I11i , iI11 = lisp . lisp_receive ( lisp_socket ,
                True )
            if 7 - 7: o0oOOo0O0Ooo % IiII * OoOoOO00
            if ( i11iI11I1I == "" ) :
                # Timeout: fall back to echoing the original clause.
                lisp . lprint ( "Command timed out to {}" . format ( Oo0 ) )
                iI11 = Oo000
            elif ( i11iI11I1I != Oo0 ) :
                lisp . lprint ( "Fatal IPC error to {}, source {}" . format ( Oo0 ,
                    i11iI11I1I ) )
        if 58 - 58: IiII / I11i + II111iiii % iII111i - OoooooooOO
        if 25 - 25: OoOoOO00 % OoooooooOO * Oo0Ooo - i1IIi * II111iiii * oO0o
        if 30 - 30: I11i % OoOoOO00 / I1ii11iIi11i * O0 * Ii1I . I1IiiI
        if 46 - 46: OoOoOO00 - O0
        if 70 - 70: I11i + Oo0Ooo * iIii1I11I1II1 . I1IiiI * I11i
        if 49 - 49: o0oOOo0O0Ooo
        if 25 - 25: iII111i . OoooooooOO * iIii1I11I1II1 . o0oOOo0O0Ooo / O0 + Ii1I
        # Echo the (possibly rewritten) clause the first time through.
        if ( I1i1ii == False ) :
            for line in iI11 : new . write ( line )
            I1i1ii = True
        if 68 - 68: Oo0Ooo
        if 22 - 22: OOooOOo
    return
if 22 - 22: iII111i * I11i - Oo0Ooo * O0 / i11iIiiIii
if 78 - 78: Oo0Ooo * O0 / ooOoO0o + OoooooooOO + OOooOOo
if 23 - 23: iII111i % OoooooooOO / iIii1I11I1II1 + I1ii11iIi11i / i1IIi / o0oOOo0O0Ooo
if 94 - 94: i1IIi
if 36 - 36: I1IiiI + Oo0Ooo
if 46 - 46: iII111i
if 65 - 65: i1IIi . I1ii11iIi11i / ooOoO0o
def lisp_process_config_file ( lisp_socket , file_name , startup ) :
lisp . lprint ( "Processing configuration file {}" . format ( file_name ) )
if 11 - 11: IiII * ooOoO0o / ooOoO0o - OOooOOo
if 68 - 68: I1IiiI % IiII - IiII / I1IiiI + I1ii11iIi11i - Oo0Ooo
if 65 - 65: ooOoO0o - i1IIi
if 62 - 62: I11i / oO0o % Oo0Ooo . OoooooooOO / i11iIiiIii / I1Ii111
if ( os . path . exists ( file_name ) == False ) :
lisp . lprint ( "LISP configuration file '{}' does not exist" . format ( file_name ) )
if 60 - 60: I1IiiI % oO0o / o0oOOo0O0Ooo % oO0o * i11iIiiIii / iII111i
return
if 34 - 34: I1Ii111 - OOooOOo
if 25 - 25: oO0o % I1IiiI + i11iIiiIii + O0 * OoooooooOO
ooO0 = file_name + ".diff"
o0Iiii = file_name + ".bak"
I1i1I = file_name + | |
np.pi * np.array( [np.square(bins[0])] + [np.square(bins[i+1])-np.square(bins[i]) for i in np.arange(len(bins)-1)] )
cont_per_bin = mean_cont * bin_area
fractional_cont = cont_per_bin / n
fc = interp1d(bins, fractional_cont, kind='slinear')
print '%.3f%% at x=%.2f' % (fc(1.5), 1.5)
#for i in np.logspace(-1, 2, 20):
# xx = brentq(lambda x: fc(x)-i/100., bins[0], bins[-1])
# print '%.1f%% at x=%.2f' % (i,xx)
'''f, ax = plt.subplots(1)
density = n/bin_area
err = np.sqrt(n)/bin_area
ax.errorbar(bins, density, yerr=err, fmt='o', ms=4, label='Source density')
logx = np.log10(bins[1:-3])
logy = np.log10(density[1:-3])
logyerr = err[1:-3] / density[1:-3]
fitfunc = lambda p, x: p[0]*x + p[1]
errfunc = lambda p, x, y, err: (y - fitfunc(p, x)) / err
out = leastsq(errfunc, [-1,1], args=(logx, logy, logyerr), full_output=1)
index, amp = out[0]
index_err = np.sqrt(out[1][1][1])
amp_err = np.sqrt(out[1][0][0])*amp
ax.plot(bins, pow(10, index*np.log10(bins)+amp), c='k', label='$\sim(r/r_{500})^{%.2f\pm%.2f}$'%(index,index_err))
ax.set_xscale('log')
ax.set_yscale('log')
ax.legend(bbox_to_anchor=(1, 1))
ax.set_xlabel('Separation ($r_{500}$)')
ax.set_ylabel('Count / area of annulus')
#ax.set_title('Count density vs. separation')
ax.set_aspect('equal', adjustable='box')
plt.tight_layout()'''
f, (ax1, ax2) = plt.subplots(2, sharex=True)
ax1.plot(np.log10(bins), np.log10(n), label='Observed')
ax1.plot(np.log10(bins), np.log10(cont_per_bin), ls='--', label='Contamination')
ax1.legend(loc='lower right')
ax1.set_ylim(-2.24, 3.24)
ax1.set_yticks(np.arange(-2,4))
ax1.set_ylabel('$\log_{10}$ (Count)')
#ax1.set_title('Contamination')
ax2.plot(np.log10(bins), np.log10(fractional_cont), c='C2', label='Contamination\nfraction')
ax2.legend(loc='lower right')
ax2.axhline(0, ls=':', c='k')
ax2.set_xlabel(get_label('WHL.r/r500', True))
ax2.set_ylabel('$\log_{10}$ (Fraction)')
plt.tight_layout()
def r500_hist(coll, params, bin_count=20):
    """Histogram of cluster-centric separations (r/r500) on a log axis.

    Bins are chosen evenly in log space from a first-pass histogram, then the
    linear separations are re-histogrammed with those edges.

    Returns (separations, per-bin counts, bin centers).
    """
    fig, ax = plt.subplots(1)
    # One separation per source matching the query
    sep = np.array([src['WHL']['r/r500'] for src in coll.find(params)])
    min_sep = min(np.log10(sep))
    sep = np.clip(sep, .01, None) # Combine everything less than 0.01 into one bin
    # First pass: evenly spaced bins in log10 space
    n, bins, patches = ax.hist(np.log10(sep), bins=bin_count)
    bins0 = bins[0]
    # Stretch the lowest edge down to the true (unclipped) minimum
    bins[0] = min_sep
    # Second pass: histogram the linear values with the log-derived edges
    n, bins, patches = ax.hist(sep, bins=pow(10, bins))
    ax.set_xscale('log')
    # Restore the original lowest edge before computing bin centers
    bins[0] = pow(10, bins0)
    return sep, n, (bins[:-1] + bins[1:]) / 2.
def orientation(bent_cutoff=None, folded=True, r_min=0.01, r_max=10):
    """Plot orientation-angle distributions for straight vs. bent sources.

    Sources from `bending_15` matching `total_cuts` are split at `bent_cutoff`
    in bending excess; each population's orientation histogram is tested
    against a uniform distribution, and the anisotropy parameter beta is
    printed for the bent population.

    NOTE(review): the function returns None midway through; everything after
    the `return None` statement below is unreachable dead code.
    """
    if bent_cutoff is None:
        bent_cutoff = get_bent_cut()
    print 'Bending excess cutoff: %.2f' % bent_cutoff
    # Collect separation, bending excess, and orientation for each source
    sep = []
    bend = []
    ori = []
    for i in bending_15.find(total_cuts):
        sep.append(i['WHL']['r/r500'])
        bend.append(i['using_peaks']['bending_excess'])
        if folded:
            ori.append(i['WHL']['orientation_folded'])
        else:
            ori.append(i['WHL']['orientation_peaks'])
    sep = np.array(sep)
    bend = np.array(bend)
    ori = np.array(ori)
    # Inner/outer split at 1.5 r500; counts are only printed, not reused
    inner = np.logical_and(0.01<sep, sep<1.5)
    outer = np.logical_and(1.5<sep, sep<10)
    seps = np.vstack([inner, outer])
    print sum(inner), 'inner sources,', sum(outer), 'outer sources'
    straight = bend<bent_cutoff
    bent = bend>=bent_cutoff
    bends = np.vstack([bent, straight])
    print sum(straight), 'straight sources,', sum(bent), 'bent sources'
    # Folded angles span 0-90 deg; unfolded span 0-180 deg
    if folded:
        max_ori = 90.
    else:
        max_ori = 180.
    # Histogram of the straight population, AD-tested against uniformity
    f, ax = plt.subplots(1)
    #ax.set_title('Orientation distribution ($%.2f<r/r_{500}<%.2f$; $\Delta\\theta<%.0f$ deg)' % (r_min, r_max, excess_cutoff))
    data = ori[np.logical_and(straight, np.logical_and(sep>r_min, sep<r_max))]
    ad = ad_test(data, stats.uniform(0,max_ori))
    print ad.pvalue, z_score(ad.pvalue), len(data)
    ax.hist(data, bins=6, fill=False, hatch='//', label='$n=%i;~p=%.3f$'%(len(data), ad.pvalue) )
    ax.set_ylim(0, 358)
    ax.legend(loc='upper center')
    ax.set_ylabel('Count')
    ax.set_xlabel('Orientation angle (deg)')
    plt.tight_layout()
    # Histogram of the bent population
    f, ax = plt.subplots(1)
    #ax.set_title('Orientation distribution ($%.2f<r/r_{500}<%.2f$; $\Delta\\theta>%.0f$ deg)' % (r_min, r_max, excess_cutoff))
    data = ori[np.logical_and(bent, np.logical_and(sep>r_min, sep<r_max))]
    if folded:
        ad = ad_test(data, stats.uniform(0,max_ori))
        print ad.pvalue, z_score(ad.pvalue), len(data)
    else:
        # Unfolded: test whether the <90 and >90 halves are mirror-symmetric
        towards = data[data<90]
        away = data[data>90]
        sig = stats.anderson_ksamp([towards, max_ori-away]).significance_level
        print 'Symmetric: p=%.4f' % sig
    n, bins, _ = ax.hist(data, bins=6, fill=False, hatch='//', label='$n=%i;~p=%.3f$'%(len(data), ad.pvalue if folded else sig) )
    ax.set_ylim(0, 75)
    ax.legend(loc='upper center')
    ax.set_ylabel('Count')
    ax.set_xlabel('Orientation angle (deg)')
    plt.tight_layout()
    # Anisotropy parameter beta from unfolded (bisector - position angle)
    # angles of the bent population within [r_min, r_max]
    unfolded, sep = [], []
    params = total_cuts.copy()
    params['using_peaks.bending_excess'] = {'$gte': bent_cutoff}
    params['WHL.r/r500'] = {'$gte': r_min, '$lte': r_max}
    for source in bending_15.find(params):
        unfolded.append(source['using_peaks']['bisector'] - source['WHL']['position_angle'])
        sep.append(source['WHL']['r/r500'])
    sep = np.array(sep)
    unfolded = np.array(unfolded)
    tangential = np.sin(unfolded*np.pi/180)
    radial = np.cos(unfolded*np.pi/180)
    beta = 1 - np.var(tangential) / np.var(radial)
    print 'beta = %.2f +- %.2f' % (beta, beta*np.sqrt(2./(len(unfolded)-1)))
    return None
    # NOTE(review): unreachable from here on (kept as-is; delete or restore)
    y, yerr = [], []
    for i in np.arange(int(max_ori/2.)):
        a = sum(data<i)
        b = sum(data>max_ori-i)
        y.append(a-b)
        yerr.append(np.sqrt(a+b))
    f, ax = plt.subplots(1)
    ax.errorbar(np.arange(int(max_ori/2.)), y, yerr=yerr)
    ax.axhline(0)
    ax.set_xlabel('Orientation cut')
    ax.set_ylabel('Count')
    ax.set_title('Excess of inward-moving sources')
    mask = np.logical_and(bent, np.logical_and(sep>0.01, sep<15))
    n = sum(mask)
    bin_count = 6
    bins = int(1.*n/bin_count) * np.arange(bin_count+1)
    bins[-1] = -1
    sep_sort = np.sort(sep[mask])
    ori_sort = ori[mask][np.argsort(sep[mask])]
    x, diff, err = [], [], []
    for i,j in zip(bins[:-1],bins[1:]):
        inward = sum(ori_sort[i:j]<30)
        outward = sum(ori_sort[i:j]>60)
        x.append(np.median(sep_sort[i:j]))
        diff.append(inward-outward)
        err.append(np.sqrt(inward+outward))
    f, ax = plt.subplots(1)
    ax.errorbar(x, diff, err)
    ax.axhline(0, c='k', ls='dotted')
    ax.set_xscale('log')
    ax.set_xlabel('Separation ($r_{500}$)')
    ax.set_ylabel('Count')
    ax.set_title('Excess of radially-moving sources')
def orientation_test():
excess_cutoff = get_bent_cut()
r_min, r_max = 0.01, 10
sep, bend, folded, unfolded = [], [], [], []
for i in bending_15.find(total_cuts):
sep.append(i['WHL']['r/r500'])
bend.append(i['using_peaks']['bending_excess'])
folded.append(i['WHL']['orientation_folded'])
unfolded.append(i['WHL']['orientation_peaks'])
sep = np.array(sep)
bend = np.array(bend)
folded = np.array(folded)[np.logical_and(bend>excess_cutoff, np.logical_and(sep>r_min, sep<r_max))]
unfolded = np.array(unfolded)[np.logical_and(bend>excess_cutoff, np.logical_and(sep>r_min, sep<r_max))]
n_f, bins_f, _ = plt.hist(folded, bins=6, alpha=.8, normed=True, fill=False, edgecolor='r', label='Folded')
plt.figure()
n_u, bins_u, _ = plt.hist(unfolded, bins=12, alpha=.8, normed=True, fill=False, hatch='//', label='Unfolded')
# model 1: count goes to 0 at 180 deg
class quad(stats.rv_continuous):
def _argcheck(self, a, b, c):
return np.isfinite(a) and np.isfinite(b) and np.isfinite(c)
def _pdf(self, x, a, b, c):
x0 = 180.
norm = a*x0**3/3. + b*x0**2/2. + c*x0
if type(x) is float:
return max(a*x**2 + b*x + c, 0) / norm
elif type(x) is np.ndarray:
return np.max([a*x**2 + b*x + c, np.zeros(len(x))], axis=0) / norm
else:
raise TypeError('Got %s instead' % str(type(x)))
a, b, c = n_f[0], n_f[-1]/2., 0
popt = np.polyfit([0,90,180], [a,b,c], 2)
case1 = quad(a=0, b=180, shapes='a, b, c')(*popt)
ad1 = ad_test(unfolded, case1)
print 'Model 1: p=%.2g (%.2f sigma)' % (ad1.pvalue, z_score(ad1.pvalue))
# model 2: count plateaus at 90 deg
class piecewise_lin(stats.rv_continuous):
def _pdf(self, x, a, b, c):
norm = 45.*a + 135.*c
m = (c-a) / 90.
if type(x) is float:
return (c + m*min([x-90, 0])) / norm
elif type(x) is np.ndarray:
return (c + m*np.min([x-90, np.zeros(len(x))], axis=0)) / norm
else:
raise TypeError('Got %s instead' % str(type(x)))
a, b, c = n_f[0]-n_f[-1]/2., n_f[-1]/2., n_f[-1]/2.
case2 = piecewise_lin(a=0, b=180, shapes='a, b, c')(a, b, c)
ad2 = ad_test(unfolded, case2)
print 'Model 1: p=%.2g (%.2f sigma)' % (ad2.pvalue, z_score(ad2.pvalue))
x = np.arange(181)
plt.plot(x, case1.pdf(x), c='C0', label='Model 1')
plt.plot(x, case2.pdf(x), c='C1', ls=':', label='Model 2')
plt.legend()
plt.ylabel('Normalized count')
plt.xlabel('Orientation angle (deg)')
plt.tight_layout()
def size_dependence():
    """AD-test whether source angular size depends on separation or mass.

    Compares the RGZ size distributions of inner (0.01-1.5 r500) vs. outer
    (1.5-10 r500) sources, then of low-mass (M500<=10) vs. high-mass
    (M500>=15) cluster members.
    """
    params0, params1 = total_cuts.copy(), total_cuts.copy()
    params0['WHL.r/r500'] = {'$gte':0.01, '$lt':1.5}
    params1['WHL.r/r500'] = {'$gte':1.5, '$lt':10.}
    # Get trends
    # NOTE(review): the trend/downsampling results below (sep0, sep1, mask,
    # and the first size0/size1) are never used -- size0/size1 are
    # immediately rebuilt from the raw query.  Likely leftover dead code.
    window_size, run_x_50, run_y_25, size0, run_y_75 = get_trends(params0, 'WHL.r/r500', 'RGZ.size_arcmin', bending_15, False)
    sep0 = (run_x_50-run_x_50[0])/(run_x_50[-1]-run_x_50[0])
    window_size, run_x_50, run_y_25, size1, run_y_75 = get_trends(params1, 'WHL.r/r500', 'RGZ.size_arcmin', bending_15, False)
    sep1 = (run_x_50-run_x_50[0])/(run_x_50[-1]-run_x_50[0])
    # Downsample size1 to same length as size0
    mask = (np.linspace(0,1,len(size0)) * (len(size1)-1)).astype(int)
    # Get values
    size0, size1 = [], []
    for i in bending_15.find(params0):
        size0.append(i['RGZ']['size_arcmin'])
    for i in bending_15.find(params1):
        size1.append(i['RGZ']['size_arcmin'])
    # AD test the values
    print 'Different separations:', stats.anderson_ksamp([size0, size1])
    # Repeat for masses
    params0, params1 = total_cuts.copy(), total_cuts.copy()
    params0['WHL.M500'] = {'$lte':10}
    params1['WHL.M500'] = {'$gte':15}
    size0, size1 = [], []
    for i in bending_15.find(params0):
        size0.append(i['RGZ']['size_arcmin'])
    for i in bending_15.find(params1):
        size1.append(i['RGZ']['size_arcmin'])
    # AD test the values
    print 'Different masses:', stats.anderson_ksamp([size0, size1])
def trend_tests():
# Get trends for mass bins
params0, params1 = total_cuts.copy(), total_cuts.copy()
params0['WHL.r/r500'] = {'$lte':7.8}
params1['WHL.r/r500'] = {'$gte':7.8}
window_size, run_x_50, run_y_25, size0, run_y_75 = get_trends(params0, 'WHL.r/r500', 'RGZ.size_arcmin', bending_15, False)
sep0 = (run_x_50-run_x_50[0])/(run_x_50[-1]-run_x_50[0])
window_size, run_x_50, run_y_25, size1, run_y_75 = get_trends(params1, 'WHL.r/r500', 'RGZ.size_arcmin', bending_15, False)
sep1 = (run_x_50-run_x_50[0])/(run_x_50[-1]-run_x_50[0])
print stats.mannwhitneyu(size0, size1, alternative='two-sided')
def get_errs(get_errs=False):
    """Compute positional + morphology error terms and store them in mongo.

    For every source in `bending_15`, combines positional uncertainty (from
    stored values, or re-queried from SDSS/AllWISE when `get_errs` is True)
    with a morphology term into a fractional bending-angle error, then writes
    the results back with a $set update.

    NOTE(review): the parameter `get_errs` shadows the function's own name;
    renaming would change the call interface, so it is only flagged here.
    """
    # Systematic FIRST astrometry floor, in arcsec
    first_err = np.sqrt(0.3**2+0.02**2) # https://arxiv.org/pdf/1501.01555.pdf
    for source in bending_15.find().batch_size(100):
        # Positional errors
        if get_errs:
            # Re-fetch per-source positional errors from the origin survey
            if 'SDSS' in source:
                sql = 'select raerr, decerr from photoprimary where objid=%i' % source['SDSS']['objID']
                df = SDSS_select(sql)
                ra_err = df['raerr'][0]
                dec_err = df['decerr'][0]
            else:
                ir_pos = coord.SkyCoord(source['AllWISE']['ra'], source['AllWISE']['dec'], unit=(u.deg,u.deg), frame='icrs')
                table = Irsa.query_region(ir_pos, catalog='allwise_p3as_psd', radius=1.*u.arcsec)
                ra_err = table['sigra'][0]
                dec_err = table['sigdec'][0]
        else:
            # Use the values already cached on the document
            ra_err = source['best']['ra_err']
            dec_err = source['best']['dec_err']
        pos_err = np.sqrt(ra_err**2 + dec_err**2 + first_err**2)
        # Morphology errors
        area = 0
        for comp in source['RGZ']['components']:
            area += comp['solid_angle']
        # Total errors
        size = 60.* source['RGZ']['size_arcmin']  # arcmin -> arcsec
        frac_pos_err = pos_err / size / 4. # fractional pos error
        morph_err = area / size**2 * 180. / np.pi # total morph error in deg
        frac_morph_err = morph_err / source['using_peaks']['bending_angle']
        total_err = np.sqrt(frac_pos_err**2 + frac_morph_err**2)
        bend_err = total_err * source['using_peaks']['bending_angle']
        # Persist all derived error fields on the source document
        bending_15.update({'_id':source['_id']}, {'$set': {'best.ra_err':ra_err, 'best.dec_err':dec_err, 'best.frac_positional_err':frac_pos_err, 'RGZ.solid_angle':area, 'RGZ.frac_morphology_err':frac_morph_err, 'using_peaks.bending_frac_err':total_err, 'using_peaks.bending_err':bend_err}})
    # Sanity check: linear fit of solid angle vs. size (plotting left disabled)
    window, size, area_25, area_50, area_75 = get_trends(total_cuts, 'RGZ.size_arcmin', 'RGZ.solid_angle', bending_15, False)
    m, b = np.polyfit(size, area_50, 1)
    y = m*size+b
    #plt.plot(size, area_50, label='Running median')
    #plt.plot(size, y, label='%.0fx%+.0f\nR^2=%.3f' % (m, b, r2_score(area_50,y)))
    #plt.legend()
def rmsd(params=None, x_param='RGZ.size_arcmin', y_param='bending_angle', plot=True, coll=bending_15):
    """Running comparison of the peak- vs. contour-based `y_param`.

    Sorts sources by `x_param`, computes sliding-window medians of both
    measurements and their RMS difference, and optionally plots them.

    params : mongo query dict; None (default) means a fresh copy of
             `total_cuts`.  (Previously the default was `total_cuts.copy()`,
             which is evaluated once at definition time, so every default
             call silently shared a single dict -- the classic mutable
             default pitfall.  Behavior for explicit callers is unchanged.)
    x_param : dotted field name used as the x axis and sort key.
    y_param : field compared between 'using_peaks' and 'using_contour'.
    plot : draw the two comparison figures when True.
    coll : mongo collection to query.

    Returns (window_size, run_x, run_y0, run_y1, run_rmsd).
    """
    if params is None:
        params = total_cuts.copy()
    x_param_list = x_param.split('.')
    y0_param_list = ['using_peaks', y_param]
    y1_param_list = ['using_contour', y_param]
    # Gather the x values and both y measurements, sorted by x
    x, y0, y1 = [], [], []
    for i in coll.find(params).sort(x_param, 1):
        x.append(i[x_param_list[0]][x_param_list[1]])
        y0.append(i[y0_param_list[0]][y0_param_list[1]])
        y1.append(i[y1_param_list[0]][y1_param_list[1]])
    x = np.array(x)
    y0 = np.array(y0)
    y1 = np.array(y1)
    window_size = min(len(x)/10, 100)
    if 'WHL.population' in params and params['WHL.population'] == 'BCG':
        # BCGs all sit at effectively zero separation; use a degenerate
        # two-point "trend" instead of a sliding window
        run_x = [0.01, 0.011]
        run_y0 = 2*[np.percentile(y0, 50)]
        run_y1 = 2*[np.percentile(y1, 50)]
        run_rmsd = 2*[np.sqrt(sum((y0-y1)**2)/len(x))]
    else:
        # Sliding-window medians and RMS difference
        run_x = np.array([np.percentile(x[ix:ix+window_size], 50) for ix in np.arange(len(x)-window_size+1)])
        run_y0 = np.array([np.percentile(y0[ix:ix+window_size], 50) for ix in np.arange(len(x)-window_size+1)])
        run_y1 = np.array([np.percentile(y1[ix:ix+window_size], 50) for ix in np.arange(len(x)-window_size+1)])
        run_rmsd = np.array([np.sqrt(sum((y0[ix:ix+window_size]-y1[ix:ix+window_size])**2)/window_size) for ix in np.arange(len(x)-window_size+1)])
    if plot:
        # Figure 1: both medians plus their RMS difference
        fig, (ax1, ax2) = plt.subplots(2, sharex=True)
        ax1.plot(run_x, run_y0, label='Peak method')
        ax1.plot(run_x, run_y1, label='Contour method')
        ax1.legend(loc='best')
        ax1.set_ylabel(get_label(y_param))
        #ax1.set_title('%s comparison' % y_param)
        ax2.plot(run_x, run_rmsd)
        ax2.set_xlabel(get_label(x_param))
        ax2.set_ylabel('RMS difference')
        fig.tight_layout()
        # Figure 2: peak-method median with RMS difference overlaid
        fig, ax = plt.subplots(1)
        ax.plot(run_x, run_y0, label='Bending angle (peak method)')
        #a, b = np.polyfit(run_x[run_x<1.05], run_rmsd[run_x<1.05], 1)
        #ax.plot(run_x, a*run_x+b, label='rms difference linear fit')
        ax.plot(run_x, run_rmsd, label='RMS difference', ls='--')
        ax.legend(loc='best')
        ax.set_xlabel(get_label(x_param))
        ax.set_ylabel('Angle (deg)')
        #ax.set_title('Bending error comparison')
        fig.tight_layout()
    return window_size, run_x, run_y0, run_y1, run_rmsd
def rmsd_debug():
    """Diagnostic scatter/RMSD plot comparing peak vs. contour bending angles.

    Replaces the corrected-bending cut in `total_cuts` with the same cut on
    the raw bending-angle field, scatters both measurements against source
    size with the running RMS difference overlaid, then repeats after
    clipping extreme outliers.
    """
    params = total_cuts.copy()
    # Re-key the bending cut from the corrected field onto the raw field
    params['using_peaks.bending_angle'] = params['using_peaks.bending_corrected']
    del params['using_peaks.bending_corrected']
    x_param = 'RGZ.size_arcmin'
    y_param = 'bending_angle'
    x_param_list = x_param.split('.')
    y0_param_list = ['using_peaks', y_param]
    y1_param_list = ['using_contour', y_param]
    x, y0, y1, zid = [], [], [], []
    for i in bending_15.find(params).sort(x_param, 1):
        x.append(i[x_param_list[0]][x_param_list[1]])
        y0.append(i[y0_param_list[0]][y0_param_list[1]])
        y1.append(i[y1_param_list[0]][y1_param_list[1]])
        zid.append(i['RGZ']['zooniverse_id'])
    x = np.array(x)
    y0 = np.array(y0)
    y1 = np.array(y1)
    window_size = min(len(x)/10, 100)
    print 'Original sample:', len(x)
    # Sliding-window medians and RMS difference over the full sample
    run_x = np.array([np.percentile(x[ix:ix+window_size], 50) for ix in np.arange(len(x)-window_size+1)])
    run_y0 = np.array([np.percentile(y0[ix:ix+window_size], 50) for ix in np.arange(len(x)-window_size+1)])
    run_y1 = np.array([np.percentile(y1[ix:ix+window_size], 50) for ix in np.arange(len(x)-window_size+1)])
    run_rmsd = np.array([np.sqrt(sum((y0[ix:ix+window_size]-y1[ix:ix+window_size])**2)/window_size) for ix in np.arange(len(x)-window_size+1)])
    fig, ax = plt.subplots(1)
    plt.scatter(x, y0, s=1, label='using peaks')
    plt.scatter(x, y1, s=1, label='using contour')
    plt.plot(run_x, run_rmsd, c='k', label='rmsd')
    plt.xlabel(get_label(x_param))
    plt.ylabel(get_label('%s.%s' % tuple(y0_param_list)))
    '''outlier = np.logical_and(np.logical_and(x>1.17, x<1.19), np.logical_and(y1>105, y1<109))
    x = x[np.logical_not(outlier)]
    y1 = y1[np.logical_not(outlier)]
    print np.where(outlier, zid, False)[outlier][0]
    run_x = np.array([np.percentile(x[ix:ix+window_size], 50) for ix in np.arange(len(x)-window_size+1)])
    run_y0 = np.array([np.percentile(y0[ix:ix+window_size], 50) for ix in np.arange(len(x)-window_size+1)])
    run_y1 = np.array([np.percentile(y1[ix:ix+window_size], 50) for ix in np.arange(len(x)-window_size+1)])
    run_rmsd = np.array([np.sqrt(sum((y0[ix:ix+window_size]-y1[ix:ix+window_size])**2)/window_size) for ix in np.arange(len(x)-window_size+1)])
    plt.plot(run_x, run_rmsd, c='g', label='outlier\nremoved')
    ell = matplotlib.patches.Ellipse(xy=(1.18,107), width=0.06, height=13, fill=False, color='r', label='outlier')
    ax.add_artist(ell)'''
    # Clip sources whose log |peak - contour| difference exceeds 3*std
    # NOTE(review): threshold is 3*std of the log-differences with no mean
    # subtraction -- confirm this is the intended cut.
    logdy = np.log10(np.abs(y0-y1))
    mask = logdy < 3*np.std(logdy)
    outliers = np.logical_not(mask)
    plt.scatter(x[outliers], y0[outliers], c='g', label='outliers', s=1)
    plt.scatter(x[outliers], y1[outliers], c='g', s=1)
    x = x[mask]
    y0 = y0[mask]
    y1 = y1[mask]
    print 'Outliers:', sum(outliers)
    # Re-run the sliding-window statistics on the clipped sample
    run_x = np.array([np.percentile(x[ix:ix+window_size], 50) for ix in np.arange(len(x)-window_size+1)])
    run_y0 = np.array([np.percentile(y0[ix:ix+window_size], 50) for ix in np.arange(len(x)-window_size+1)])
    run_y1 = np.array([np.percentile(y1[ix:ix+window_size], 50) for ix in np.arange(len(x)-window_size+1)])
    run_rmsd = np.array([np.sqrt(sum((y0[ix:ix+window_size]-y1[ix:ix+window_size])**2)/window_size) for ix in np.arange(len(x)-window_size+1)])
    plt.plot(run_x, run_rmsd, c='g', label='$<3\sigma$')
    plt.legend()
def is_outlier(points, thresh=3.5):
"""
Returns a boolean array with True if points are outliers and False
otherwise.
Parameters:
-----------
points : An numobservations by numdimensions array of observations
thresh : The modified z-score to use as a threshold. Observations with
a modified | |
# from __future__ import unicode_literals
import boto3
from botocore.exceptions import ClientError
from datetime import datetime, timedelta
from freezegun import freeze_time
import pytest
from uuid import uuid4
import pytz
import sure # noqa
from moto import mock_cloudwatch
@mock_cloudwatch
def test_put_list_dashboard():
    # A stored dashboard must be visible via list_dashboards.
    cw = boto3.client("cloudwatch", region_name="eu-central-1")
    body = '{"widgets": [{"type": "text", "x": 0, "y": 7, "width": 3, "height": 3, "properties": {"markdown": "Hello world"}}]}'
    cw.put_dashboard(DashboardName="test1", DashboardBody=body)
    entries = cw.list_dashboards()["DashboardEntries"]
    len(entries).should.equal(1)
@mock_cloudwatch
def test_put_list_prefix_nomatch_dashboard():
    # A prefix that matches no dashboard name yields an empty listing.
    cw = boto3.client("cloudwatch", region_name="eu-central-1")
    body = '{"widgets": [{"type": "text", "x": 0, "y": 7, "width": 3, "height": 3, "properties": {"markdown": "Hello world"}}]}'
    cw.put_dashboard(DashboardName="test1", DashboardBody=body)
    entries = cw.list_dashboards(DashboardNamePrefix="nomatch")["DashboardEntries"]
    len(entries).should.equal(0)
@mock_cloudwatch
def test_delete_dashboard():
    # Deleting two of three dashboards leaves only the third behind.
    cw = boto3.client("cloudwatch", region_name="eu-central-1")
    body = '{"widgets": [{"type": "text", "x": 0, "y": 7, "width": 3, "height": 3, "properties": {"markdown": "Hello world"}}]}'
    for name in ("test1", "test2", "test3"):
        cw.put_dashboard(DashboardName=name, DashboardBody=body)
    cw.delete_dashboards(DashboardNames=["test2", "test1"])
    entries = cw.list_dashboards(DashboardNamePrefix="test3")["DashboardEntries"]
    len(entries).should.equal(1)
@mock_cloudwatch
def test_delete_dashboard_fail():
    """delete_dashboards with any unknown name fails and deletes nothing."""
    client = boto3.client("cloudwatch", region_name="eu-central-1")
    widget = '{"widgets": [{"type": "text", "x": 0, "y": 7, "width": 3, "height": 3, "properties": {"markdown": "Hello world"}}]}'
    client.put_dashboard(DashboardName="test1", DashboardBody=widget)
    client.put_dashboard(DashboardName="test2", DashboardBody=widget)
    client.put_dashboard(DashboardName="test3", DashboardBody=widget)
    # Doesnt delete anything if all dashboards to be deleted do not exist.
    # pytest.raises matches the idiom used elsewhere in this file and also
    # fails the test automatically when no error is raised (replacing the
    # old try/except/else with its misspelled RuntimeError message).
    with pytest.raises(ClientError) as e:
        client.delete_dashboards(DashboardNames=["test2", "test1", "test_no_match"])
    e.value.response["Error"]["Code"].should.equal("ResourceNotFound")
    # All three dashboards must survive the failed bulk delete
    resp = client.list_dashboards()
    len(resp["DashboardEntries"]).should.equal(3)
@mock_cloudwatch
def test_get_dashboard():
    # get_dashboard returns the ARN, body, and name of a stored dashboard.
    cw = boto3.client("cloudwatch", region_name="eu-central-1")
    body = '{"widgets": [{"type": "text", "x": 0, "y": 7, "width": 3, "height": 3, "properties": {"markdown": "Hello world"}}]}'
    cw.put_dashboard(DashboardName="test1", DashboardBody=body)
    dashboard = cw.get_dashboard(DashboardName="test1")
    dashboard.should.contain("DashboardArn")
    dashboard.should.contain("DashboardBody")
    dashboard["DashboardName"].should.equal("test1")
@mock_cloudwatch
def test_get_dashboard_fail():
    """get_dashboard on a missing dashboard raises ResourceNotFound."""
    client = boto3.client("cloudwatch", region_name="eu-central-1")
    # pytest.raises matches the idiom used elsewhere in this file and fails
    # the test automatically when no error is raised (replacing the old
    # try/except/else with its misspelled RuntimeError message).
    with pytest.raises(ClientError) as e:
        client.get_dashboard(DashboardName="test1")
    e.value.response["Error"]["Code"].should.equal("ResourceNotFound")
@mock_cloudwatch
def test_delete_invalid_alarm():
    # delete_alarms is all-or-nothing: an unknown name fails the whole call.
    cw = boto3.client("cloudwatch", "eu-west-1")
    alarm_kwargs = dict(
        AlarmName="testalarm1",
        MetricName="cpu",
        Namespace="blah",
        Period=10,
        EvaluationPeriods=5,
        Statistic="Average",
        Threshold=2,
        ComparisonOperator="GreaterThanThreshold",
        ActionsEnabled=True,
    )
    cw.put_metric_alarm(**alarm_kwargs)
    # trying to delete an alarm which is not created along with valid alarm.
    with pytest.raises(ClientError) as e:
        cw.delete_alarms(AlarmNames=["InvalidAlarmName", "testalarm1"])
    e.value.response["Error"]["Code"].should.equal("ResourceNotFound")
    # making sure other alarms are not deleted in case of an error.
    described = cw.describe_alarms(AlarmNames=["testalarm1"])
    len(described["MetricAlarms"]).should.equal(1)
    # test to check if the error raises if only one invalid alarm is tried to delete.
    with pytest.raises(ClientError) as e:
        cw.delete_alarms(AlarmNames=["InvalidAlarmName"])
    e.value.response["Error"]["Code"].should.equal("ResourceNotFound")
@mock_cloudwatch
def test_describe_alarms_for_metric():
    # An alarm on a metric is returned by describe_alarms_for_metric.
    client = boto3.client("cloudwatch", region_name="eu-central-1")
    alarm_kwargs = dict(
        AlarmName="testalarm1",
        MetricName="cpu",
        Namespace="blah",
        Period=10,
        EvaluationPeriods=5,
        Statistic="Average",
        Threshold=2,
        ComparisonOperator="GreaterThanThreshold",
        ActionsEnabled=True,
    )
    client.put_metric_alarm(**alarm_kwargs)
    found = client.describe_alarms_for_metric(MetricName="cpu", Namespace="blah")
    found.get("MetricAlarms").should.have.length_of(1)
@mock_cloudwatch
def test_alarm_state():
    """set_alarm_state flips one alarm to ALARM; the other stays OK.

    Also checks that ActionsEnabled defaults to False when omitted and is
    preserved when explicitly set to True.
    """
    client = boto3.client("cloudwatch", region_name="eu-central-1")
    # Alarm 1: actions explicitly enabled
    client.put_metric_alarm(
        AlarmName="testalarm1",
        MetricName="cpu",
        Namespace="blah",
        Period=10,
        EvaluationPeriods=5,
        Statistic="Average",
        Threshold=2,
        ComparisonOperator="GreaterThanThreshold",
        ActionsEnabled=True,
    )
    # Alarm 2: ActionsEnabled omitted (expected to report False below)
    client.put_metric_alarm(
        AlarmName="testalarm2",
        MetricName="cpu",
        Namespace="blah",
        Period=10,
        EvaluationPeriods=5,
        Statistic="Average",
        Threshold=2,
        ComparisonOperator="GreaterThanThreshold",
    )
    # This is tested implicitly as if it doesnt work the rest will die
    client.set_alarm_state(
        AlarmName="testalarm1",
        StateValue="ALARM",
        StateReason="testreason",
        StateReasonData='{"some": "json_data"}',
    )
    # Filtering by ALARM returns only the alarm we flipped
    resp = client.describe_alarms(StateValue="ALARM")
    len(resp["MetricAlarms"]).should.equal(1)
    resp["MetricAlarms"][0]["AlarmName"].should.equal("testalarm1")
    resp["MetricAlarms"][0]["StateValue"].should.equal("ALARM")
    resp["MetricAlarms"][0]["ActionsEnabled"].should.equal(True)
    # Filtering by OK returns only the untouched alarm
    resp = client.describe_alarms(StateValue="OK")
    len(resp["MetricAlarms"]).should.equal(1)
    resp["MetricAlarms"][0]["AlarmName"].should.equal("testalarm2")
    resp["MetricAlarms"][0]["StateValue"].should.equal("OK")
    resp["MetricAlarms"][0]["ActionsEnabled"].should.equal(False)
    # Just for sanity
    resp = client.describe_alarms()
    len(resp["MetricAlarms"]).should.equal(2)
@mock_cloudwatch
def test_put_metric_data_no_dimensions():
    # A dimensionless datum still registers as a listable metric.
    cw = boto3.client("cloudwatch", region_name="us-east-1")
    cw.put_metric_data(
        Namespace="tester", MetricData=[{"MetricName": "metric", "Value": 1.5}]
    )
    published = cw.list_metrics()["Metrics"]
    published.should.have.length_of(1)
    published[0]["Namespace"].should.equal("tester")
    published[0]["MetricName"].should.equal("metric")
@mock_cloudwatch
def test_put_metric_data_with_statistics():
    # A datum carrying StatisticValues (and no Value) must be accepted.
    cw = boto3.client("cloudwatch", region_name="us-east-1")
    now = datetime.now(tz=pytz.utc)
    datum = {
        "MetricName": "statmetric",
        "Timestamp": now,
        # no Value to test https://github.com/spulec/moto/issues/1615
        "StatisticValues": {
            "SampleCount": 123.0,
            "Sum": 123.0,
            "Minimum": 123.0,
            "Maximum": 123.0,
        },
        "Unit": "Milliseconds",
        "StorageResolution": 123,
    }
    cw.put_metric_data(Namespace="tester", MetricData=[datum])
    published = cw.list_metrics()["Metrics"]
    published.should.have.length_of(1)
    published[0]["Namespace"].should.equal("tester")
    published[0]["MetricName"].should.equal("statmetric")
    # TODO: test statistics - https://github.com/spulec/moto/issues/1615
@mock_cloudwatch
def test_get_metric_statistics():
    # A single datum should yield SampleCount=1 and Sum equal to its value.
    cw = boto3.client("cloudwatch", region_name="us-east-1")
    now = datetime.now(tz=pytz.utc)
    cw.put_metric_data(
        Namespace="tester",
        MetricData=[{"MetricName": "metric", "Value": 1.5, "Timestamp": now}],
    )
    stats = cw.get_metric_statistics(
        Namespace="tester",
        MetricName="metric",
        StartTime=now - timedelta(seconds=60),
        EndTime=now + timedelta(seconds=60),
        Period=60,
        Statistics=["SampleCount", "Sum"],
    )
    stats["Datapoints"].should.have.length_of(1)
    point = stats["Datapoints"][0]
    point["SampleCount"].should.equal(1.0)
    point["Sum"].should.equal(1.5)
@mock_cloudwatch
@freeze_time("2020-02-10 18:44:05")
def test_custom_timestamp():
    """Smoke test: put_metric_data accepts string and datetime Timestamps.

    NOTE(review): this test makes no assertions -- it only verifies that
    the calls complete without raising.  The final query asks for
    MetricName="metric", which matches neither "metric1" nor "metric2"
    published above; confirm whether that is intentional.
    """
    utc_now = datetime.now(tz=pytz.utc)
    time = "2020-02-10T18:44:09Z"
    cw = boto3.client("cloudwatch", "eu-west-1")
    # Timestamp supplied as an ISO-8601 string
    cw.put_metric_data(
        Namespace="tester",
        MetricData=[dict(MetricName="metric1", Value=1.5, Timestamp=time)],
    )
    # Timestamp supplied as a naive datetime
    cw.put_metric_data(
        Namespace="tester",
        MetricData=[
            dict(MetricName="metric2", Value=1.5, Timestamp=datetime(2020, 2, 10))
        ],
    )
    stats = cw.get_metric_statistics(
        Namespace="tester",
        MetricName="metric",
        StartTime=utc_now - timedelta(seconds=60),
        EndTime=utc_now + timedelta(seconds=60),
        Period=60,
        Statistics=["SampleCount", "Sum"],
    )
@mock_cloudwatch
def test_list_metrics():
    """list_metrics returns one entry per data point and honours the
    Namespace and MetricName filters."""
    cloudwatch = boto3.client("cloudwatch", "eu-west-1")
    # Verify namespace has to exist
    res = cloudwatch.list_metrics(Namespace="unknown/")["Metrics"]
    res.should.be.empty
    # Create some metrics to filter on
    create_metrics(cloudwatch, namespace="list_test_1/", metrics=4, data_points=2)
    create_metrics(cloudwatch, namespace="list_test_2/", metrics=4, data_points=2)
    # Verify we can retrieve everything
    res = cloudwatch.list_metrics()["Metrics"]
    len(res).should.equal(16)  # 2 namespaces * 4 metrics * 2 data points
    # Verify we can filter by namespace/metric name
    res = cloudwatch.list_metrics(Namespace="list_test_1/")["Metrics"]
    len(res).should.equal(8)  # 1 namespace * 4 metrics * 2 data points
    res = cloudwatch.list_metrics(Namespace="list_test_1/", MetricName="metric1")[
        "Metrics"
    ]
    len(res).should.equal(2)  # 1 namespace * 1 metrics * 2 data points
    # Verify format
    res.should.equal(
        [
            {
                u"Namespace": "list_test_1/",
                u"Dimensions": [],
                u"MetricName": "metric1",
            },
            {
                u"Namespace": "list_test_1/",
                u"Dimensions": [],
                u"MetricName": "metric1",
            },
        ]
    )
    # Verify unknown namespace still has no results
    res = cloudwatch.list_metrics(Namespace="unknown/")["Metrics"]
    res.should.be.empty
@mock_cloudwatch
def test_list_metrics_paginated():
    """list_metrics pages at 500 entries, and NextToken values are
    single-use and validated."""
    cloudwatch = boto3.client("cloudwatch", "eu-west-1")
    # Verify that only a single page of metrics is returned
    cloudwatch.list_metrics()["Metrics"].should.be.empty
    # Verify we can't pass a random NextToken
    with pytest.raises(ClientError) as e:
        cloudwatch.list_metrics(NextToken=str(uuid4()))
    e.value.response["Error"]["Message"].should.equal(
        "Request parameter NextToken is invalid"
    )
    # Add a boatload of metrics
    create_metrics(cloudwatch, namespace="test", metrics=100, data_points=1)
    # Verify that a single page is returned until we've reached 500
    first_page = cloudwatch.list_metrics()
    first_page["Metrics"].shouldnt.be.empty
    len(first_page["Metrics"]).should.equal(100)
    # 100 + 200*2 = 500 entries exactly: still one page, no token
    create_metrics(cloudwatch, namespace="test", metrics=200, data_points=2)
    first_page = cloudwatch.list_metrics()
    len(first_page["Metrics"]).should.equal(500)
    first_page.shouldnt.contain("NextToken")
    # Verify that adding more data points results in pagination
    create_metrics(cloudwatch, namespace="test", metrics=60, data_points=10)
    first_page = cloudwatch.list_metrics()
    len(first_page["Metrics"]).should.equal(500)
    first_page["NextToken"].shouldnt.be.empty
    # Retrieve second page - and verify there's more where that came from
    second_page = cloudwatch.list_metrics(NextToken=first_page["NextToken"])
    len(second_page["Metrics"]).should.equal(500)
    second_page.should.contain("NextToken")
    # Last page should only have the last 100 results, and no NextToken (indicating that pagination is finished)
    third_page = cloudwatch.list_metrics(NextToken=second_page["NextToken"])
    len(third_page["Metrics"]).should.equal(100)
    third_page.shouldnt.contain("NextToken")
    # Verify that we can't reuse an existing token
    with pytest.raises(ClientError) as e:
        cloudwatch.list_metrics(NextToken=first_page["NextToken"])
    e.value.response["Error"]["Message"].should.equal(
        "Request parameter NextToken is invalid"
    )
def create_metrics(cloudwatch, namespace, metrics=5, data_points=5):
    """Publish `data_points` values for each of `metrics` metrics.

    Metric names are "metric0" .. "metric<metrics-1>"; values are the data
    point index, always with Unit "Seconds".
    """
    for metric_index in range(metrics):
        metric_name = "metric" + str(metric_index)
        for value in range(data_points):
            cloudwatch.put_metric_data(
                Namespace=namespace,
                MetricData=[{"MetricName": metric_name, "Value": value, "Unit": "Seconds"}],
            )
@mock_cloudwatch
def test_get_metric_data_within_timeframe():
    """get_metric_data returns Average/Sum/Minimum/Maximum aggregates for
    data points inside the [StartTime, EndTime] window."""
    utc_now = datetime.now(tz=pytz.utc)
    cloudwatch = boto3.client("cloudwatch", "eu-west-1")
    namespace1 = "my_namespace/"
    # put metric data
    values = [0, 2, 4, 3.5, 7, 100]
    cloudwatch.put_metric_data(
        Namespace=namespace1,
        MetricData=[
            {"MetricName": "metric1", "Value": val, "Unit": "Seconds"} for val in values
        ],
    )
    # get_metric_data: one query per statistic, tagged via its Id
    stats = ["Average", "Sum", "Minimum", "Maximum"]
    response = cloudwatch.get_metric_data(
        MetricDataQueries=[
            {
                "Id": "result_" + stat,
                "MetricStat": {
                    "Metric": {"Namespace": namespace1, "MetricName": "metric1"},
                    "Period": 60,
                    "Stat": stat,
                },
            }
            for stat in stats
        ],
        StartTime=utc_now - timedelta(seconds=60),
        EndTime=utc_now + timedelta(seconds=60),
    )
    #
    # Assert Average/Min/Max/Sum is returned as expected
    avg = [
        res for res in response["MetricDataResults"] if res["Id"] == "result_Average"
    ][0]
    avg["Label"].should.equal("metric1 Average")
    avg["StatusCode"].should.equal("Complete")
    # mean(values) = 19.42, truncated to int for a float-safe comparison
    [int(val) for val in avg["Values"]].should.equal([19])
    sum_ = [res for res in response["MetricDataResults"] if res["Id"] == "result_Sum"][
        0
    ]
    sum_["Label"].should.equal("metric1 Sum")
    sum_["StatusCode"].should.equal("Complete")
    [val for val in sum_["Values"]].should.equal([sum(values)])
    min_ = [
        res for res in response["MetricDataResults"] if res["Id"] == "result_Minimum"
    ][0]
    min_["Label"].should.equal("metric1 Minimum")
    min_["StatusCode"].should.equal("Complete")
    [int(val) for val in min_["Values"]].should.equal([0])
    max_ = [
        res for res in response["MetricDataResults"] if res["Id"] == "result_Maximum"
    ][0]
    max_["Label"].should.equal("metric1 Maximum")
    max_["StatusCode"].should.equal("Complete")
    [int(val) for val in max_["Values"]].should.equal([100])
@mock_cloudwatch
def test_get_metric_data_partially_within_timeframe():
    """Data points outside [StartTime, EndTime] are excluded from the Sum.

    Publishes values now (10), yesterday (20), and last week (50), then
    queries a window covering only the first two.
    """
    utc_now = datetime.now(tz=pytz.utc)
    yesterday = utc_now - timedelta(days=1)
    last_week = utc_now - timedelta(days=7)
    cloudwatch = boto3.client("cloudwatch", "eu-west-1")
    namespace1 = "my_namespace/"
    # put metric data
    values = [0, 2, 4, 3.5, 7, 100]  # NOTE(review): unused -- leftover copy-paste?
    cloudwatch.put_metric_data(
        Namespace=namespace1,
        MetricData=[
            {
                "MetricName": "metric1",
                "Value": 10,
                "Unit": "Seconds",
                "Timestamp": utc_now,
            }
        ],
    )
    cloudwatch.put_metric_data(
        Namespace=namespace1,
        MetricData=[
            {
                "MetricName": "metric1",
                "Value": 20,
                "Unit": "Seconds",
                "Timestamp": yesterday,
            }
        ],
    )
    cloudwatch.put_metric_data(
        Namespace=namespace1,
        MetricData=[
            {
                "MetricName": "metric1",
                "Value": 50,
                "Unit": "Seconds",
                "Timestamp": last_week,
            }
        ],
    )
    # get_metric_data over a window starting just before yesterday's datum
    response = cloudwatch.get_metric_data(
        MetricDataQueries=[
            {
                "Id": "result",
                "MetricStat": {
                    "Metric": {"Namespace": namespace1, "MetricName": "metric1"},
                    "Period": 60,
                    "Stat": "Sum",
                },
            }
        ],
        StartTime=yesterday - timedelta(seconds=60),
        EndTime=utc_now + timedelta(seconds=60),
    )
    #
    # Assert Last week's data is not returned
    len(response["MetricDataResults"]).should.equal(1)
    sum_ = response["MetricDataResults"][0]
    sum_["Label"].should.equal("metric1 Sum")
    sum_["StatusCode"].should.equal("Complete")
    sum_["Values"].should.equal([30.0])  # 10 (now) + 20 (yesterday), not 50
@mock_cloudwatch
def test_get_metric_data_outside_timeframe():
    """A data point a week old must not show up when querying around now."""
    utc_now = datetime.now(tz=pytz.utc)
    last_week = utc_now - timedelta(days=7)
    cloudwatch = boto3.client("cloudwatch", "eu-west-1")
    namespace1 = "my_namespace/"
    # put a single, week-old metric value
    cloudwatch.put_metric_data(
        Namespace=namespace1,
        MetricData=[
            {
                "MetricName": "metric1",
                "Value": 50,
                "Unit": "Seconds",
                "Timestamp": last_week,
            }
        ],
    )
    # query a +/- 60s window around now
    response = cloudwatch.get_metric_data(
        MetricDataQueries=[
            {
                "Id": "result",
                "MetricStat": {
                    "Metric": {"Namespace": namespace1, "MetricName": "metric1"},
                    "Period": 60,
                    "Stat": "Sum",
                },
            }
        ],
        StartTime=utc_now - timedelta(seconds=60),
        EndTime=utc_now + timedelta(seconds=60),
    )
    # the query itself succeeds, but yields no values
    results = response["MetricDataResults"]
    len(results).should.equal(1)
    result = results[0]
    result["Id"].should.equal("result")
    result["StatusCode"].should.equal("Complete")
    result["Values"].should.equal([])
@mock_cloudwatch
def test_get_metric_data_for_multiple_metrics():
utc_now = datetime.now(tz=pytz.utc)
cloudwatch = boto3.client("cloudwatch", "eu-west-1")
namespace = "my_namespace/"
# put metric data
cloudwatch.put_metric_data(
Namespace=namespace,
MetricData=[
{
"MetricName": "metric1",
"Value": 50,
"Unit": "Seconds",
"Timestamp": utc_now,
}
],
)
cloudwatch.put_metric_data(
Namespace=namespace,
MetricData=[
{
"MetricName": "metric2",
"Value": 25,
"Unit": "Seconds",
"Timestamp": utc_now,
}
],
)
# get_metric_data
response = cloudwatch.get_metric_data(
MetricDataQueries=[
{
"Id": "result1",
"MetricStat": {
"Metric": {"Namespace": namespace, "MetricName": "metric1"},
"Period": 60,
"Stat": | |
import os
import sys
import re
import yaml
import numpy as np
import pandas as pd
import math
from tqdm import tqdm
from scipy import interpolate
sys.path.append("..")
class MaterialLoader:
def __init__(self,
             wl_s=0.2,
             wl_e=2.0,
             db_path='./db'):
    """Create a loader for the refractive-index material database.

    Arguments:
        wl_s {float} -- lower bound of the default wavelength range
            (values like 0.2-2.0 suggest micrometres -- TODO confirm)
        wl_e {float} -- upper bound of the default wavelength range
        db_path {str} -- root directory of the database; must contain
            library.yml.  Parameterized (default './db' keeps the old
            hard-coded behaviour) so callers/tests can point elsewhere.
    """
    super().__init__()
    self.db_path = db_path
    # Parse the database index immediately so failures surface early.
    self.db_info = self.load_db_info()
    self.default_wavelength_range = (wl_s, wl_e)
    self.db_shelf = dict()
    # Bookkeeping of which materials loaded successfully / failed.
    self.failed_material = []
    self.success_material = []
def load_db_info(self):
    """Load the database index from <db_path>/library.yml.

    Returns:
        [db info] -- parsed content of library.yml
    """
    info_path = 'library.yml'
    # Use a context manager so the handle is closed deterministically
    # (the old code leaked it), and pass an explicit Loader: yaml.load()
    # without one is deprecated since PyYAML 5.1 and unsafe on untrusted
    # input.
    with open(os.path.join(self.db_path, info_path), encoding='UTF-8') as file_yml:
        db_info = yaml.load(file_yml, Loader=yaml.SafeLoader)
    return db_info
def load_material(self, shelfs):
    """[summary]
    using in self.material_list()
    load material data path from db_info

    Arguments:
        shelfs {[str]} -- [shelf name list; each must be a key of
            self.db_shelf]

    Returns:
        [material_names, material_data] -- book names, and a dict keyed
        by "<BOOK>_<PAGE>" with parsed page info plus 'path'/'divider'.

    Bug fix: ``divider`` is now initialized before the scan; previously a
    shelf whose first content entry carried 'data' before any 'DIVIDER'
    raised UnboundLocalError.
    """
    material_names = []
    material_data = {}
    divider = None  # most recent DIVIDER seen; None until one appears
    for shelf in shelfs:
        for material in self.db_shelf[shelf]:
            if 'BOOK' in material.keys():
                material_names.append(material['BOOK'])
                for data in material['content']:
                    if 'data' in data.keys():
                        # Compute the key/entry once instead of three
                        # repeated dict lookups.
                        key = '%s_%s' % (material['BOOK'], data['PAGE'])
                        entry = self.material_info_split(divider, data['name'])
                        entry['path'] = data['data']
                        entry['divider'] = divider
                        material_data[key] = entry
                    elif 'DIVIDER' in data.keys():
                        divider = data['DIVIDER']
    return material_names, material_data
def material_info_split(self, divider, info):
    """Parse one page description string into a metadata dict.

    The part before the first ':' carries author/year; the remainder (when
    present) is scanned for n/k availability, wavelength range, temperature
    and dispersion model.  ``divider`` is accepted for signature
    compatibility with the caller but not used here.
    """
    material_info = {}
    parts = info.split(':')
    # Author/year parsing is identical in both branches, so do it once.
    material_info['year'], material_info['author'] = self.rex_author_info(parts[0])
    if len(parts) > 1:
        detail = parts[1]
        material_info['n'], material_info['k'] = self.rex_nk_info(detail)
        (material_info['wavelength_start'],
         material_info['wavelength_end']) = self.rex_wavelength_info(detail)
        material_info['degree'] = self.rex_degree_info(detail)
        material_info['model'] = self.rex_model_info(detail)
    else:
        # No detail section: assume n-only data with unknown range/model.
        material_info['n'], material_info['k'] = True, False
        material_info['wavelength_start'] = None
        material_info['wavelength_end'] = None
        material_info['degree'] = None
        material_info['model'] = None
    return material_info
def rex_author_info(self, info):
    """Split "<author> <year>..." into (year, author).

    Returns:
        (year, author) -- year is the first 4-digit run as a string, or
        None when no year is present (then author is the whole string).
    """
    try:
        year = re.findall('[0-9]{4}', info)[0]
        author = info.split(year)[0]
    except IndexError:
        # findall() came back empty: no 4-digit year in the string.
        # (Narrowed from a bare except, which also hid real errors.)
        year = None
        author = info
    return year, author
def rex_nk_info(self, info):
    """Detect whether the page provides n only, or both n and k.

    Returns:
        (n, k) -- booleans; k is True when the description contains "n,k".

    Bug fix: ``re.findall`` always returns a list (never None), so the old
    ``if nk is not None`` test was always true and k could never be False.
    Test the list's truthiness instead.  The unreachable duplicate
    ``return`` was removed.
    """
    try:
        k = bool(re.findall('n,k', info))
        n = True
    except TypeError:
        # info is not string-like; report no data at all.
        n = False
        k = False
    return n, k
def rex_wavelength_info(self, info):
    """Extract the wavelength range from a page description.

    Returns:
        (wavelength_start, wavelength_end) -- numeric strings; both None
        when no number appears.  A single number yields start == end.
    """
    try:
        wavelength_range = re.findall(r'-?\d+\.\d*\d*?', info)
        # `is 2` relied on CPython small-int interning; use equality.
        if len(wavelength_range) == 2:
            wavelength_start, wavelength_end = wavelength_range[0], wavelength_range[1]
        else:
            # Single number: treat as a point measurement.
            wavelength_start = wavelength_range[0]
            wavelength_end = wavelength_range[0]
    except IndexError:
        # findall() found nothing (narrowed from a bare except).
        wavelength_start = None
        wavelength_end = None
    return wavelength_start, wavelength_end
def rex_degree_info(self, info):
    """Return the temperature annotation (e.g. "25°C") from a page
    description, or None when absent."""
    matches = re.findall('\-?\d+?°C', info)
    return matches[0] if matches else None
def rex_model_info(self, info):
    """Return which dispersion-model tag appears in the description.

    Checked in the same order as before; the first hit wins.  Returns
    None when none of the known tags is present.
    """
    for model_name in ('Brendel-Bormann model',
                       'Lorentz-Drude model',
                       'DFT calculations'):
        if re.findall(model_name, info):
            return model_name
    return None
def load_total_material(self):
    """Load every material listed in the index into one dict.

    Materials that fail to load are recorded in self.failed_material and
    skipped; successes are appended to self.success_material.

    NOTE(review): assumes self.material_list is the (names, data) pair
    produced by load_material -- confirm where it is assigned.

    Returns:
        {material_name: {'wl': ..., 'n': ..., 'k': ...}}
    """
    total_material = {}
    for material_name, material_info in self.material_list[1].items():
        try:
            material_path = material_info['path']
            wl, n, k = self.load_material_parameter(material_path)
            total_material[material_name] = {
                'wl': wl,
                'n': n,
                'k': k
            }
            self.success_material.append(material_name)
        except ValueError:
            # Raised by interp1d / float() when the data is unusable.
            self.failed_material.append(material_name)
            # message typo fixed: "filled" -> "failed"
            print('Load %s failed' % material_name)
            print('Material wavelength bound is out of range!')
        except MemoryError:
            self.failed_material.append(material_name)
            print('Load %s failed!' % material_name)
            print('Material wavelength out of memory!')
    return total_material
def load_total_material_generator(self):
    """Yield (material_name, [wl, n, k]) for every material in the index.

    Streaming counterpart of load_total_material(): failures are recorded
    in self.failed_material and skipped instead of aborting iteration.

    NOTE(review): assumes self.material_list is the (names, data) pair
    produced by load_material -- confirm where it is assigned.
    """
    for material_name, material_info in tqdm(self.material_list[1].items()):
        try:
            material_path = material_info['path']
            wl, n, k = self.load_material_parameter(material_path)
            self.success_material.append(material_name)
            yield material_name, [wl, n, k]
        except ValueError:
            self.failed_material.append(material_name)
            # message typo fixed: "filled" -> "failed"
            print('Load %s failed' % material_name)
            print('Material wavelength bound is out of range!')
        except MemoryError:
            self.failed_material.append(material_name)
            print('Load %s failed!' % material_name)
            print('Material wavelength out of memory!')
def load_select_material(self, select_material):
    """Load only the named materials.

    Arguments:
        select_material {[str]} -- material names; each must be a key of
            the data dict in self.material_list[1].

    Returns:
        {name: {'wl': ..., 'n': ..., 'k': ...}} -- loading errors (unlike
        load_total_material) propagate to the caller.
    """
    selected_material = {}
    for name in select_material:
        path = self.material_list[1][name]['path']
        wl, n, k = self.load_material_parameter(path)
        selected_material[name] = {'wl': wl, 'n': n, 'k': k}
    return selected_material
def extract_data_nk(self, datas):
    """Convert one database record into interpolated (wl, n, k) arrays.

    Tabulated records are parsed row by row; "formula *" records are
    evaluated on a dense grid.  All wavelengths are scaled by 1000
    (presumably um -> nm; TODO confirm against the database units).

    Returns:
        (fwl, n, k) -- integer wavelength grid with 1-unit spacing over
        the tabulated span, and quadratically interpolated n/k on it.

    Fixes: the formula branch called ``list(np.arange(wavelength_range),
    0.001)`` which is a TypeError; a stray debug print and a dead
    re-computation of the coefficients were removed; row parsing now
    tolerates blank lines and repeated whitespace (which used to raise
    ValueError on ``float('')``).
    """
    datas_type = datas['DATA'][0]['type']
    wl = []
    n = []
    k = []
    if datas_type == 'tabulated nk':
        rows = datas['DATA'][0]['data'].split('\n')
        for row in rows:
            fields = row.strip().split()
            if len(fields) == 3:
                wl.append(float(fields[0]) * 1000)
                n.append(float(fields[1]))
                k.append(float(fields[2]))
    elif datas_type == 'tabulated n':
        rows = datas['DATA'][0]['data'].split('\n')
        for row in rows:
            fields = row.strip().split()
            if len(fields) == 2:
                wl.append(float(fields[0]) * 1000)
                n.append(float(fields[1]))
                k.append(0)
    elif datas_type == 'tabulated k':
        rows = datas['DATA'][0]['data'].split('\n')
        for row in rows:
            fields = row.strip().split()
            if len(fields) == 2:
                wl.append(float(fields[0]) * 1000)
                n.append(0)
                k.append(float(fields[1]))
    elif datas_type.split(' ')[0] == 'formula':
        coefficients = list(map(float, datas['DATA'][0]['coefficients'].split(' ')))
        wavelength_range = list(map(float, datas['DATA'][0]['wavelength_range'].split(' ')))
        # Sample the declared range on a 0.001-step grid (the old call
        # passed the step to list() instead of np.arange()).
        wl_tmp = np.arange(wavelength_range[0], wavelength_range[1], 0.001)
        wl = [1000 * w for w in wl_tmp]
        if datas_type == 'formula 1':
            n = [self.formula_1(w, coefficients) for w in wl_tmp]
        elif datas_type == 'formula 2':
            n = [self.cauchy_model(w, coefficients) for w in wl_tmp]
        elif datas_type == 'formula 4':
            n = [self.formula_4(w, coefficients) for w in wl_tmp]
        elif datas_type == 'formula 5':
            n = [self.formula_5(w, coefficients) for w in wl_tmp]
        elif datas_type == 'formula 6':
            n = [self.formula_6(w, coefficients) for w in wl_tmp]
        elif datas_type == 'formula 8':
            n = [self.formula_8(w, coefficients) for w in wl_tmp]
        # Formula records carry no extinction data.
        k = [0 for _ in range(len(wl))]
    # Resample everything onto an integer 1-unit grid with quadratic
    # interpolation (requires at least 3 data points).
    fwl = np.arange(math.ceil(min(wl)), int(max(wl)), 1)
    fn = interpolate.interp1d(np.array(wl), np.array(n), kind='quadratic')
    fk = interpolate.interp1d(np.array(wl), np.array(k), kind='quadratic')
    return fwl, fn(fwl), fk(fwl)
def load_material_parameter(self, path):
    """Load one material data file and return interpolated (wl, n, k).

    Arguments:
        path {str} -- file path relative to <db_path>/data

    Raises:
        ValueError: when the record's DATA section has an unsupported
            number of entries (the callers, load_total_material*, already
            treat ValueError as "failed to load"; previously this case
            fell through to an UnboundLocalError).
    """
    # Context manager closes the handle (old code leaked it); explicit
    # Loader avoids the PyYAML >= 5.1 deprecation.
    with open(os.path.join(self.db_path, 'data', path), encoding='UTF-8') as file_yml:
        datas = yaml.load(file_yml, Loader=yaml.SafeLoader)
    # 1- and 2-entry records were both parsed identically from the first
    # DATA entry, so the two branches are collapsed.
    if len(datas['DATA']) in (1, 2):
        return self.extract_data_nk(datas)
    raise ValueError('unsupported DATA length: %d' % len(datas['DATA']))
def formula_1(self, wavelength, coefficients):
"""[summary]
Arguments:
wavelength {[type]} -- [description]
coefficients {[type]} -- [description]
"""
wavelength_square = pow(wavelength, 2)
if len(coefficients) == 3:
n_square = 1 + coefficients[0] \
+ ((coefficients[1] * wavelength_square)/(wavelength_square - pow(coefficients[2], 2)))
elif len(coefficients) == 5:
n_square = 1 + coefficients[0] \
+ ((coefficients[1] * wavelength_square)/(wavelength_square - pow(coefficients[2], 2)))\
+ ((coefficients[3] * wavelength_square)/(wavelength_square - pow(coefficients[4], 2)))
elif len(coefficients) == 7:
n_square = 1 + coefficients[0] \
+ ((coefficients[1] * wavelength_square)/(wavelength_square - pow(coefficients[2], 2)))\
+ ((coefficients[3] * wavelength_square)/(wavelength_square - pow(coefficients[4], 2)))\
+ ((coefficients[5] * wavelength_square)/(wavelength_square - pow(coefficients[6], 2)))
elif len(coefficients) == 9:
n_square = 1 + coefficients[0] \
+ ((coefficients[1] * wavelength_square)/(wavelength_square - pow(coefficients[2], 2)))\
+ ((coefficients[3] * wavelength_square)/(wavelength_square - pow(coefficients[4], 2)))\
+ ((coefficients[5] * wavelength_square)/(wavelength_square - pow(coefficients[6], 2)))\
+ ((coefficients[7] * wavelength_square)/(wavelength_square - pow(coefficients[8], 2)))
elif len(coefficients) == 11:
n_square = 1 + coefficients[0] \
+ ((coefficients[1] * wavelength_square)/(wavelength_square - pow(coefficients[2], 2))) \
+ ((coefficients[3] * wavelength_square)/(wavelength_square - pow(coefficients[4], 2))) \
+ ((coefficients[5] * wavelength_square)/(wavelength_square - pow(coefficients[6], 2))) \
+ ((coefficients[7] * wavelength_square)/(wavelength_square - pow(coefficients[8], 2))) \
+ ((coefficients[9] * wavelength_square)/(wavelength_square - pow(coefficients[10], 2)))
elif len(coefficients) == 13:
n_square = 1 + coefficients[0] \
+ ((coefficients[1] * wavelength_square)/(wavelength_square - pow(coefficients[2], 2))) \
+ ((coefficients[3] * wavelength_square)/(wavelength_square - pow(coefficients[4], 2))) \
+ ((coefficients[5] * wavelength_square)/(wavelength_square - pow(coefficients[6], 2))) \
+ ((coefficients[7] * wavelength_square)/(wavelength_square - pow(coefficients[8], 2))) \
+ ((coefficients[9] * wavelength_square)/(wavelength_square - pow(coefficients[10], 2))) \
+ ((coefficients[11] * wavelength_square)/(wavelength_square - pow(coefficients[12], 2)))
elif len(coefficients) == 15:
n_square = 1 + coefficients[0] \
+ ((coefficients[1] * wavelength_square)/(wavelength_square - pow(coefficients[2], 2))) \
+ ((coefficients[3] * wavelength_square)/(wavelength_square - pow(coefficients[4], 2))) \
+ ((coefficients[5] * wavelength_square)/(wavelength_square - pow(coefficients[6], 2))) \
+ ((coefficients[7] * wavelength_square)/(wavelength_square - pow(coefficients[8], 2))) \
+ ((coefficients[9] * wavelength_square)/(wavelength_square - pow(coefficients[10], 2))) \
+ ((coefficients[11] * wavelength_square)/(wavelength_square - pow(coefficients[12], 2))) \
+ ((coefficients[13] * wavelength_square)/(wavelength_square - pow(coefficients[14], 2)))
elif len(coefficients) == 17:
n_square = 1 + coefficients[0] \
+ ((coefficients[1] * wavelength_square)/(wavelength_square - pow(coefficients[2], 2))) \
+ ((coefficients[3] * wavelength_square)/(wavelength_square - pow(coefficients[4], 2))) \
+ ((coefficients[5] * wavelength_square)/(wavelength_square - pow(coefficients[6], 2))) \
+ ((coefficients[7] * wavelength_square)/(wavelength_square - pow(coefficients[8], 2))) \
+ ((coefficients[9] * wavelength_square)/(wavelength_square - pow(coefficients[10], 2))) \
+ ((coefficients[11] * wavelength_square)/(wavelength_square - pow(coefficients[12], 2))) \
+ ((coefficients[13] * wavelength_square)/(wavelength_square - pow(coefficients[14], 2))) \
+ ((coefficients[15] * wavelength_square)/(wavelength_square - pow(coefficients[16], 2))) \
return math.sqrt(n_square)
def formula_4(self, wavelength, coefficients):
wavelength_square = pow(wavelength, 2)
if len(coefficients) == 9:
n_square = coefficients[0]
n_square += (coefficients[1] * pow(wavelength, coefficients[2])) / (wavelength_square - pow(coefficients[3], coefficients[4]))
n_square += (coefficients[5] * pow(wavelength, coefficients[6])) / (wavelength_square - pow(coefficients[7], coefficients[8]))
elif len(coefficients) == 11:
n_square = coefficients[0]
n_square += (coefficients[1] * pow(wavelength, coefficients[2])) / (wavelength_square - pow(coefficients[3], coefficients[4]))
n_square += (coefficients[5] * pow(wavelength, coefficients[6])) / (wavelength_square - pow(coefficients[7], coefficients[8]))
n_square += coefficients[9] * pow(wavelength, coefficients[10])
elif len(coefficients) == 13:
n_square = coefficients[0]
n_square += (coefficients[1] * pow(wavelength, coefficients[2])) / (wavelength_square - pow(coefficients[3], coefficients[4]))
n_square += (coefficients[5] * pow(wavelength, coefficients[6])) / (wavelength_square - pow(coefficients[7], coefficients[8]))
n_square += coefficients[9] | |
import os.path as osp
import time
from collections import defaultdict, namedtuple
from functools import lru_cache
import numpy as np
from zqy_utils import Registry, TimeCounter, flat_nested_list, load
from .table_renderer import TableRenderer
# One operating point on the detection curve: a score threshold together
# with the fp-per-image and precision observed at that threshold.
Score_Fp_Precision = namedtuple("Score_Fp_Precision",
                                ["score", "fp", "precision"])
# Axes identifying one evaluation setting:
#   iou_type: "bbox" or "segmentation"
#   iou_value (float): IoU cut-off, e.g. 0.25
#   score_type: "score" or "objectness"
#   metric_type: which range drove the thresholds ("fp" or "score")
EvalCondition = namedtuple(
    "EvalCondition", ["iou_type", "iou_value", "score_type", "metric_type"])
def condition_to_str(c):
    """Render an EvalCondition as a short human-readable tag
    (installed below as EvalCondition.__str__)."""
    assert isinstance(c, EvalCondition), "only accepts EvalCondition"
    return f"<{c.iou_type}-{c.score_type}>iou={c.iou_value:0.2f} by {c.metric_type}"
EvalCondition.__str__ = condition_to_str  # pretty-print conditions in table headers
EVAL_METHOD = Registry("EvalMethod")  # table_type name -> evaluation function
def _find_score_thresholds(scores_list,
                           fp_list,
                           image_count,
                           fp_range=(),
                           score_range=(),
                           precision_range=()):
    """
    score <-> fp <-> precision are interchangeable
    this helper function helps to convert the values given one

    Args:
        scores_list ([scores]) : for each detection
        fp_list ([bool]) : if is fp, with 1 = fp, 0 = tp
        image_count (int) : of images
        fp_range / score_range / precision_range (iterable of float):
            target values to convert (defaults are now immutable empty
            tuples; the old mutable list defaults were an anti-pattern)

    Returns:
        [Score_Fp_Precision]: one operating point per requested target,
        ordered fp_range ascending, then score_range and precision_range
        descending.
    """
    assert len(scores_list) == len(
        fp_list), "score count: {}, fp count {}, mismatch".format(
            len(scores_list), len(fp_list))
    thresholds = []
    if len(scores_list) == 0:
        # No detections at all: emit degenerate operating points so the
        # output shape matches the requested ranges.
        for fp in sorted(fp_range):
            thresholds.append(Score_Fp_Precision(0.0, fp, 0.0))
        for score in sorted(score_range, reverse=True):
            thresholds.append(Score_Fp_Precision(score, 0.0, 0.0))
        for precision in sorted(precision_range, reverse=True):
            thresholds.append(Score_Fp_Precision(0.0, 0.0, precision))
        return thresholds
    # sort scores_list in descending order
    sorted_indices = np.argsort(scores_list)[::-1]
    sorted_scores = np.array(scores_list)[sorted_indices]
    sorted_fp = np.array(fp_list)[sorted_indices]
    # np.float was removed in NumPy 1.24; builtin float is equivalent here.
    cummulative_fp = np.cumsum(sorted_fp, dtype=float)
    count_list = np.arange(len(fp_list), dtype=float) + 1.0
    precision_list = 1.0 - cummulative_fp / count_list
    for fp in sorted(fp_range):
        fp_allowed = fp * image_count
        match_positions = np.where(cummulative_fp > fp_allowed)[0]
        if len(match_positions) > 0:
            index = match_positions[0]
        else:
            # #fp_allowed > than #proposals
            index = -1
        # do not change fp value to create table from different dataset
        # fp = cummulative_fp[index] / image_count
        score = sorted_scores[index]
        precision = precision_list[index]
        thresholds.append(Score_Fp_Precision(score, fp, precision))
    for score in sorted(score_range, reverse=True):
        match_positions = np.where(score < sorted_scores)[0]
        if len(match_positions) > 0:
            index = match_positions[0]
        else:
            # score threshold is higher than all predicted scores
            index = 0
        fp = cummulative_fp[index] / image_count
        precision = precision_list[index]
        thresholds.append(Score_Fp_Precision(score, fp, precision))
    for precision in sorted(precision_range, reverse=True):
        # count precision backward to avoid trivial solution
        # where highest score is tp
        # ideally precision_list is in decreasing order
        match_positions = np.where(precision > precision_list)[0]
        if len(match_positions) > 0:
            index = match_positions[-1]
        else:
            index = np.argmax(precision_list)
        score = sorted_scores[index]
        fp = cummulative_fp[index] / image_count
        thresholds.append(Score_Fp_Precision(score, fp, precision))
    return thresholds
@lru_cache(maxsize=32)
def build_dataset_by_name(dataset_name, cfg):
    """Build (and memoize) an evaluation dataset from its catalog name.

    Cached on (dataset_name, cfg); cfg must be hashable -- the caller
    (ResultProcessor.add_dataset) patches CfgNode.__hash__ for this.
    transforms=None / is_train=False: raw samples for evaluation only.
    """
    # Imported lazily so this module can load without maskrcnn_benchmark.
    from maskrcnn_benchmark.data.build import build_dataset, import_file
    paths_catalog = import_file(
        "maskrcnn_benchmark.config.paths_catalog", cfg.PATHS_CATALOG,
        True)
    DatasetCatalog = paths_catalog.DatasetCatalog
    dataset = build_dataset([dataset_name],
                            transforms=None,
                            dataset_catalog=DatasetCatalog,
                            cfg=cfg,
                            is_train=False)
    # build_dataset returns a list even for a single name.
    dataset = dataset[0]
    return dataset
class DatasetStats(object):
    """
    Accumulator for per-dataset detection statistics.

    Parameters:
        ious_dict ({iou_type: ious_list}):
            ious_list = [ious_mat (#dt * #gt)] * #image
        best_match_dict (iou_type: best_match_score]):
            best_match_score = [highest iou] * #all_dts
        scores_dict ({score_type: scores_list}):
            scores_list = [scores (#dt)] * #image
        dt_labels_list ([dt_labels(#dt)] * #image)
        gt_labels_list ([gt_labels(#gt)] * #image)
        dt_attrib_list ([dt_attrib(#dt)] * #image)
        gt_attrib_list ([gt_attrib(#gt)] * #image)

    Note:
        #gt/#dt are number of ground_truth/detections per image
        #all_dts is number of detections of entire dataset
    """

    def __init__(self):
        self.ious_dict = defaultdict(list)
        self.best_match_dict = defaultdict(list)
        self.scores_dict = defaultdict(list)
        self.dt_labels_list = []
        self.gt_labels_list = []
        self.dt_attrib_list = []
        self.gt_attrib_list = []

    def __iadd__(self, other):
        """Merge another DatasetStats into this one in place (stats += other)."""
        assert isinstance(other, DatasetStats), "invalid merge"
        for key, ious in other.ious_dict.items():
            self.ious_dict[key].extend(ious)
            self.best_match_dict[key].extend(other.best_match_dict[key])
        for key, scores in other.scores_dict.items():
            self.scores_dict[key].extend(scores)
        for attr in ("dt_labels_list", "gt_labels_list",
                     "dt_attrib_list", "gt_attrib_list"):
            getattr(self, attr).extend(getattr(other, attr))
        return self

    def __str__(self):
        # Field name -> element count, in attribute declaration order.
        return ",".join(f"{name}: {len(value)}"
                        for name, value in self.__dict__.items())
class ResultProcessor(object):
"""
Workflow:
0. prepare per dataset dtictions (outside of this class)
1. add_dataset
2. evaluate
a. _collect_stats
b. _summarize
3. create_tables
Parameters:
datasets: {dict} key = dataset_name, value = (predictions, dataset)
"""
SUPPORTED_IOU_TYPES = ("bbox", "segmentation")
SUPPORTED_SCORE_TYPES = ("score", "objectness")
SUPPORTED_TABLE_TYPES = ("recall", "confusion", "attrib")
def _validate_types(self, types, type_name):
assert type_name in ("iou_types", "score_types", "table_types")
tmp_types = []
supported_types = getattr(self, f"SUPPORTED_{type_name.upper()}")
for _type in types:
if _type not in supported_types:
if self.verbose:
print(f"[Warning]{type_name} {_type} is invalid")
else:
tmp_types.append(_type)
if not tmp_types:
tmp_types = [supported_types[0]]
print(f"[{type_name}] is not properly set, using: {tmp_types}")
setattr(self, type_name, tuple(tmp_types))
def __init__(self,
             iou_types=("bbox", ),
             iou_range=(0.10, 0.25),
             score_types=("score", ),
             score=(0.05, 0.25, 0.5, 0.75),
             fp=(0.25, 0.50, 1.0, 2.0, 4.0, 100.0),
             table_types=("recall", ),
             included_labels=None,
             verbose=False):
    """Configure the evaluation.

    Args:
        iou_types: subset of SUPPORTED_IOU_TYPES ("bbox"/"segmentation")
        iou_range: IoU cut-offs to evaluate at
        score_types: subset of SUPPORTED_SCORE_TYPES
        score: score thresholds to report operating points for
        fp: fp-per-image targets to report operating points for
        table_types: subset of SUPPORTED_TABLE_TYPES
        included_labels: optional collection of category_ids; when set,
            detections/ground truths with other labels are filtered out
        verbose: print progress/warnings
    """
    assert score or fp, "score or fp has to be set"
    self.verbose = verbose
    self._validate_types(iou_types, "iou_types")
    self._validate_types(score_types, "score_types")
    self._validate_types(table_types, "table_types")
    self.iou_range = iou_range
    self.score = score
    self.fp = fp
    self.datasets = {}
    self.evaluated = False
    self.cfg = None
    # Bug fix: the parameter used to be discarded (always reset to None),
    # so label filtering could never be enabled from the constructor.
    self.included_labels = included_labels
    self.timer = TimeCounter(verbose=verbose)
def add_dataset(self, result_path, dataset=None, cfg=None):
    """Register one dataset's predictions for later evaluation.

    result_path must be "<dataset_name>.json"; either pass the dataset
    object itself or a cfg from which it can be built by name.  Marks the
    processor as not-yet-evaluated.
    """
    assert dataset or cfg, "both dataset and cfg are not valid"
    assert osp.exists(result_path), "result_path is not valid"
    dataset_name = osp.basename(result_path).rpartition(".json")[0]
    self.timer.tic("add_dataset-build")
    if dataset is None:
        # make CfgNode hashable at run time so lru_cache can key on it
        type(cfg).__hash__ = lambda x: hash(x.dump())
        dataset = build_dataset_by_name(dataset_name, cfg)
    else:
        # Bug fix: the assert message used to be split across two
        # statements, leaving a no-op f-string expression on its own line
        # and truncating the reported mismatch.
        assert dataset_name == dataset.name, (
            f"result_path {dataset_name} dataset {dataset.name}, mismatch")
    self.timer.toctic("add_dataset-load")
    self.cfg = cfg
    predictions = load(result_path)
    self.datasets[dataset_name] = (predictions, dataset)
    self.evaluated = False
    self.timer.toc()
def _filter_by_labels(self, items):
if self.included_labels is None:
return items
new_items = [
item for item in items
if item["category_id"] in self.included_labels
]
return new_items
def _collect_stats(self, dataset_name):
    """
    Collect all necessary stats for summarizing later.

    Args:
        dataset_name (str): key into self.datasets

    Return:
        stats (DatasetStats): per-image IoU matrices, per-detection best
        matches, scores and labels for the whole dataset
    """
    import pycocotools.mask as mask_util
    stats = DatasetStats()
    predictions, dataset = self.datasets[dataset_name]
    # Note: dt_list and gt_list are from same sample
    # it is reserved for the usecase which has multiple output
    # eg. per patient
    # for most cases, they should be list with only 1 item
    if self.verbose:
        print(dataset_name, len(predictions))
    dataset.load_gt_results()
    for uid, dt_list in predictions.items():
        try:
            # reloaded key becomes unicode
            image_id = int(uid)
        except ValueError:
            # image_id is actually image_uid
            # which is invariant against shuffling sample dropping
            image_id = dataset.get_index_from_img_uid(uid)
            if image_id is None:
                print(f"previous uid {uid} is not existed anymore")
                continue
        # Masks are only fetched when segmentation IoU is requested.
        with_mask = "segmentation" in self.iou_types
        gt_list = dataset.get_gt_results(image_id, with_mask=with_mask)
        if dataset.is_multi_output():
            assert len(dt_list) == len(gt_list), "size mismatch"
        else:
            # all single output: wrap so the zip below is uniform
            gt_list = [gt_list]
            dt_list = [dt_list]
        for dt, gt in zip(dt_list, gt_list):
            dt = self._filter_by_labels(dt)
            gt = self._filter_by_labels(gt)
            # is_crowd = 0: intersection over union
            # is_crowd = 1: intersection over detection
            iscrowd = [0 for _ in gt]
            for iou_type in self.iou_types:
                dt_rois = [obj[iou_type] for obj in dt]
                gt_rois = [obj[iou_type] for obj in gt]
                # M x N mat, where M = #dt, N = #gt
                ious_mat = mask_util.iou(dt_rois, gt_rois, iscrowd)
                # for each detection, get its highest iou;
                # if this is below the cut-off threshold, it is a fp
                # NOTE(review): the `== []` test relies on mask_util.iou
                # returning a plain [] when either side is empty -- an
                # ndarray compared to [] would not behave this way; confirm.
                if ious_mat == []:
                    # no gt or no dt: every detection is unmatched
                    best_match = [0.0 for _ in dt]
                else:
                    best_match = ious_mat.max(axis=1).tolist()
                stats.ious_dict[iou_type].append(ious_mat)
                stats.best_match_dict[iou_type].extend(best_match)
            for score_type in self.score_types:
                scores = [p[score_type] for p in dt]
                stats.scores_dict[score_type].append(scores)
            stats.dt_labels_list.append([obj["category_id"] for obj in dt])
            stats.gt_labels_list.append([obj["category_id"] for obj in gt])
    return stats
def _summarize(self, stats):
"""
given compiled stats from dataset(s), return the summarry
Args:
stats (DatasetStats): compiled stats
Return:
tpfp_result {parameter_set: tp_fp_fn_dict}:
parameter_set = (iou_type, iou, score_type, "score"/"fp")
tp_fp_fn_dict = {thresh: tp_fp_fn_counter}
confusion_result {parameter_set: confusion_dict}:
parameter_set = (iou_type, iou, score_type, "score"/"fp")
confusion_dict = {thresh: confusion_counter}
"""
parameter_dict = dict()
for iou_type in self.iou_types:
ious_list = stats.ious_dict[iou_type]
image_count = len(ious_list)
for iou in self.iou_range:
fp_list = np.array(stats.best_match_dict[iou_type]) < iou
for score_type in self.score_types:
scores_list = stats.scores_dict[score_type]
# scores_list is [[scores, ...], ], this flattens the list
all_scores_list = flat_nested_list(scores_list)
if self.fp:
# given the iou threshold + image_count
# one can accurately estimates its fp count, hence fp@xx
# precision is only approximated
# since multiple nonFP may refer to single TP
thresholds = _find_score_thresholds(
all_scores_list,
fp_list,
image_count,
fp_range=self.fp)
# given the score threshold one can accurately counts TP
# by checking if GT has any overlap > iou threshold
# theres one pitfall where multiple TPs share same detection
# this is problematic when iou threshold is low
condition = EvalCondition(iou_type, iou, score_type,
"fp")
parameter_dict[condition] = (ious_list, scores_list,
thresholds, iou,
stats.dt_labels_list,
stats.gt_labels_list)
if self.score:
thresholds = _find_score_thresholds(
all_scores_list,
fp_list,
image_count,
score_range=self.score)
condition = EvalCondition(iou_type, iou, score_type,
"score")
parameter_dict[condition] = (ious_list, scores_list,
thresholds, iou,
stats.dt_labels_list,
stats.gt_labels_list)
results_dict = defaultdict(dict)
for condition, args in parameter_dict.items():
kwargs = {
"dt_attrib_list": stats.dt_attrib_list,
"gt_attrib_list": stats.gt_attrib_list
}
for table_type in self.table_types:
eval_fn = EVAL_METHOD[table_type]
results_dict[table_type][condition] = eval_fn(*args, **kwargs)
return | |
previous=previous,
previous_only_aid=previous_only_aid,
**kwargs,
)
@register_route('/turk/detection/dynamic/', methods=['GET'])
def turk_detection_dynamic(**kwargs):
    """Serve the dynamic detection-review page for a single image (gid)."""
    ibs = current_app.ibs
    gid = request.args.get('gid', None)
    image_src = routes_ajax.image_src(gid)
    # Collect the image's annotations; bboxes are expressed in percent of
    # the image size for the front-end's relative positioning.
    width, height = ibs.get_image_sizes(gid)
    aid_list = ibs.get_image_aids(gid)
    annot_bbox_list = ibs.get_annot_bboxes(aid_list)
    annot_thetas_list = ibs.get_annot_thetas(aid_list)
    species_list = ibs.get_annot_species_texts(aid_list)
    annotation_list = []
    for aid, bbox, theta, species_text in zip(
        aid_list, annot_bbox_list, annot_thetas_list, species_list
    ):
        annotation_list.append(
            {
                'left': 100.0 * (bbox[0] / width),
                'top': 100.0 * (bbox[1] / height),
                'width': 100.0 * (bbox[2] / width),
                'height': 100.0 * (bbox[3] / height),
                'label': species_text,
                'id': aid,
                'theta': float(theta),
            }
        )
    if species_list:
        # Most common species among this image's annotations.
        species = max(set(species_list), key=species_list.count)
    elif appf.default_species(ibs) is not None:
        species = appf.default_species(ibs)
    else:
        species = KEY_DEFAULTS[SPECIES_KEY]
    callback_url = '%s?imgsetid=%s' % (url_for('submit_detection'), gid)
    return appf.template(
        'turk',
        'detection_dynamic',
        gid=gid,
        refer_aid=None,
        species=species,
        image_src=image_src,
        annotation_list=annotation_list,
        callback_url=callback_url,
        callback_method='POST',
        EMBEDDED_CSS=None,
        EMBEDDED_JAVASCRIPT=None,
        __wrapper__=False,
    )
@register_route('/turk/annotation/', methods=['GET'])
def turk_annotation(**kwargs):
    """
    Serve the annotation-review (turk) page for the next unprocessed
    annotation of the current imageset.

    CommandLine:
        python -m wbia.web.app --exec-turk_annotation --db PZ_Master1

    Example:
        >>> # SCRIPT
        >>> from wbia.other.ibsfuncs import * # NOQA
        >>> import wbia
        >>> ibs = wbia.opendb(defaultdb='PZ_Master1')
        >>> aid_list_ = ibs.find_unlabeled_name_members(suspect_yaws=True)
        >>> aid_list = ibs.filter_aids_to_quality(aid_list_, 'good', unknown_ok=False)
        >>> ibs.start_web_annot_groupreview(aid_list)
    """
    ibs = current_app.ibs
    # aid is the next annotation to review; None means the queue is empty.
    tup = appf.get_turk_annot_args(appf.imageset_annot_processed)
    (aid_list, reviewed_list, imgsetid, src_ag, dst_ag, progress, aid, previous) = tup
    review = 'review' in request.args.keys()
    finished = aid is None
    # NOTE(review): cookie values are strings, so with the int default 1 and
    # the `== 0` comparison this is False whether or not the cookie is set --
    # instructions appear never to be shown; confirm intent.
    display_instructions = request.cookies.get('ia-annotation_instructions_seen', 1) == 0
    if not finished:
        gid = ibs.get_annot_gids(aid)
        image_src = routes_ajax.annotation_src(aid)
        species = ibs.get_annot_species_texts(aid)
        viewpoint_text = ibs.get_annot_viewpoints(aid)
        viewpoint_value = appf.VIEWPOINT_MAPPING_INVERT.get(viewpoint_text, None)
        # Collapse the raw quality into the three values the template uses:
        # -1 = unknown, 1 = good (> 2), 0 = poor (<= 2).
        quality_value = ibs.get_annot_qualities(aid)
        if quality_value in [-1, None]:
            quality_value = -1
        elif quality_value > 2:
            quality_value = 1
        elif quality_value <= 2:
            quality_value = 0
        multiple_value = ibs.get_annot_multiple(aid) == 1
    else:
        # Queue exhausted: refresh derived imagesets (best effort) and
        # render an empty form.
        try:
            ibs.update_special_imagesets()
            ibs.notify_observers()
        except Exception:
            pass
        gid = None
        image_src = None
        species = None
        viewpoint_value = -1
        quality_value = -1
        multiple_value = False
    imagesettext = ibs.get_imageset_text(imgsetid)
    # Build the species chooser: nice names sorted alphabetically, the
    # current species preselected, and "Unspecified" always first.
    species_rowids = ibs._get_all_species_rowids()
    species_nice_list = ibs.get_species_nice(species_rowids)
    combined_list = sorted(zip(species_nice_list, species_rowids))
    species_nice_list = [combined[0] for combined in combined_list]
    species_rowids = [combined[1] for combined in combined_list]
    species_text_list = ibs.get_species_texts(species_rowids)
    species_selected_list = [species == species_ for species_ in species_text_list]
    species_list = list(zip(species_nice_list, species_text_list, species_selected_list))
    species_list = [('Unspecified', const.UNKNOWN, True)] + species_list
    callback_url = url_for('submit_annotation')
    return appf.template(
        'turk',
        'annotation',
        imgsetid=imgsetid,
        src_ag=src_ag,
        dst_ag=dst_ag,
        gid=gid,
        aid=aid,
        viewpoint_value=viewpoint_value,
        quality_value=quality_value,
        multiple_value=multiple_value,
        image_src=image_src,
        previous=previous,
        species_list=species_list,
        imagesettext=imagesettext,
        progress=progress,
        finished=finished,
        display_instructions=display_instructions,
        callback_url=callback_url,
        callback_method='POST',
        EMBEDDED_CSS=None,
        EMBEDDED_JAVASCRIPT=None,
        review=review,
    )
@register_route('/turk/annotation/dynamic/', methods=['GET'])
def turk_annotation_dynamic(**kwargs):
    """Serve the dynamic (embedded, unwrapped) annotation-review page for a
    single annotation id passed via the query string."""
    ibs = current_app.ibs
    aid = request.args.get('aid', None)
    imgsetid = request.args.get('imgsetid', None)
    review = 'review' in request.args.keys()
    gid = ibs.get_annot_gids(aid)
    image_src = routes_ajax.annotation_src(aid)
    species = ibs.get_annot_species_texts(aid)
    viewpoint_text = ibs.get_annot_viewpoints(aid)
    viewpoint_value = appf.VIEWPOINT_MAPPING_INVERT.get(viewpoint_text, None)
    # NOTE(review): this quality remap (-1 -> None, 0 -> 1) differs from the
    # -1/0/1 scheme used in turk_annotation above -- confirm which the
    # 'annotation_dynamic' template expects.
    quality_value = ibs.get_annot_qualities(aid)
    if quality_value == -1:
        quality_value = None
    if quality_value == 0:
        quality_value = 1
    # Build the species chooser: nice names sorted alphabetically, the
    # current species preselected, and "Unspecified" always first.
    species_rowids = ibs._get_all_species_rowids()
    species_nice_list = ibs.get_species_nice(species_rowids)
    combined_list = sorted(zip(species_nice_list, species_rowids))
    species_nice_list = [combined[0] for combined in combined_list]
    species_rowids = [combined[1] for combined in combined_list]
    species_text_list = ibs.get_species_texts(species_rowids)
    species_selected_list = [species == species_ for species_ in species_text_list]
    species_list = list(zip(species_nice_list, species_text_list, species_selected_list))
    species_list = [('Unspecified', const.UNKNOWN, True)] + species_list
    callback_url = url_for('submit_annotation')
    return appf.template(
        'turk',
        'annotation_dynamic',
        imgsetid=imgsetid,
        gid=gid,
        aid=aid,
        viewpoint_value=viewpoint_value,
        quality_value=quality_value,
        image_src=image_src,
        species_list=species_list,
        callback_url=callback_url,
        callback_method='POST',
        EMBEDDED_CSS=None,
        EMBEDDED_JAVASCRIPT=None,
        review=review,
        __wrapper__=False,
    )
@register_route('/turk/annotation/canonical/', methods=['GET'])
def turk_annotation_canonical(
    imgsetid=None, samples=200, species=None, version=1, **kwargs
):
    """Render the canonical-annotation (CA) turk review page.

    Selects up to ``samples`` random annotations (optionally restricted to an
    imageset and/or species) that still need review for the requested
    ``version`` pass and hands them to the 'canonical' template.

    Args:
        imgsetid: optional imageset to restrict the candidate annotations.
        samples: maximum number of annotations shown per page.
        species: optional species filter.
        version: which review pass to serve; 1/'set' = unreviewed,
            2/'yes' = marked YES as CA, 3/'no' = marked NO as CA.

    Raises:
        ValueError: if ``version`` is not one of the recognized values.
    """
    import random

    ibs = current_app.ibs
    if imgsetid is None:
        aid_list = ibs.get_valid_aids()
    else:
        aid_list = ibs.get_imageset_aids(imgsetid)
    if species is not None:
        aid_list = ibs.filter_annotation_set(aid_list, species=species)
    # enable_canonical = version == 1
    # aid_list = ibs.check_ggr_valid_aids(aid_list, species=species, threshold=0.75, enable_canonical=enable_canonical)
    # metadata_list = ibs.get_annot_metadata(aid_list)
    # canonical_flag_list = []
    # for metadata in metadata_list:
    #     turk = metadata.get('turk', {})
    #     canonical = turk.get('canonical', turk.get('grid', None))
    #     canonical_flag_list.append(canonical)
    canonical_flag_list = ibs.get_annot_canonical(aid_list)
    # Species-specific instructions describing what counts as a CA.
    canonical_str = None
    if species == 'zebra_grevys':
        canonical_str = (
            "Grevy's Zebra - right side + shoulder chevron, side stripes, and hip chevron"
        )
    elif species == 'zebra_plains':
        canonical_str = (
            'Plains Zebra - left side + shoulder chevron, side stripes, and entire hip'
        )
    elif species == 'giraffe_reticulated':
        canonical_str = 'Reticulated Giraffe - left side + entire body center mass, neck'
    elif species == 'giraffe_masai':
        canonical_str = 'Masai Giraffe - right side + entire body center mass, neck'
    elif species == 'turtle_sea':
        canonical_str = 'Sea Turtle - right size + entire side of head'
    elif species == 'whale_fluke':
        canonical_str = (
            'Whale Fluke - top or bottom + trailing edge entirely out of water'
        )
    # BUGFIX: normalize ``version`` once, *before* the loop.  The original
    # normalized it inside the per-flag loop, so with an empty aid_list a
    # string version survived to the '%d' formatting below (TypeError), and
    # an unrecognized version hit an UnboundLocalError on ``reviewed``.
    if version in [1, 'set']:
        version = 1
    elif version in [2, 'yes']:
        version = 2
    elif version in [3, 'no']:
        version = 3
    else:
        raise ValueError('Unrecognized canonical version: %r' % (version,))
    reviewed_list = []
    for canonical_flag in canonical_flag_list:
        if version == 1:
            # Version 1 - Annotations that are unreviewed
            reviewed = canonical_flag in [True, False]
        elif version == 2:
            # Version 2 - Annotations that are marked YES as CA
            reviewed = canonical_flag in [None, False]
        else:
            # Version 3 - Annotations that are marked NO as CA
            reviewed = canonical_flag in [None, True]
        reviewed_list.append(reviewed)
    try:
        logger.info('Total len(reviewed_list) = %d' % (len(reviewed_list),))
        progress = '%0.2f' % (100.0 * reviewed_list.count(True) / len(reviewed_list),)
    except ZeroDivisionError:
        progress = '100.0'
    COMPARE_TO_AOI = False
    if COMPARE_TO_AOI:
        # Weight candidate ordering by the AoI-two classifier confidence.
        # (Renamed from ``kwargs`` to stop shadowing the route's **kwargs.)
        aoi_config = {
            'aoi_two_weight_filepath': 'ggr2',
        }
        prediction_list = ibs.depc_annot.get_property(
            'aoi_two', aid_list, 'class', config=aoi_config
        )
        confidence_list = ibs.depc_annot.get_property(
            'aoi_two', aid_list, 'score', config=aoi_config
        )
        confidence_list = [
            confidence if prediction == 'positive' else 1.0 - confidence
            for prediction, confidence in zip(prediction_list, confidence_list)
        ]
    else:
        confidence_list = [1.0] * len(aid_list)
    # Randomly sample up to ``samples`` not-yet-reviewed annotations.
    zipped = list(zip(aid_list, reviewed_list, confidence_list))
    values_list = ut.filterfalse_items(zipped, reviewed_list)
    aid_list_ = []
    highlighted_list_ = []
    confidence_list_ = []
    while len(values_list) > 0 and len(aid_list_) < samples:
        index = random.randint(0, len(values_list) - 1)
        aid, highlighted, confidence = values_list.pop(index)
        if version == 2:
            # In the YES-review pass every shown annotation starts highlighted.
            highlighted = True
        aid_list_.append(aid)
        highlighted_list_.append(highlighted)
        confidence_list_.append(confidence)
    finished = len(aid_list_) == 0
    annotation_list = list(zip(aid_list_, highlighted_list_, confidence_list_))
    aid_list_str = ','.join(map(str, aid_list_))
    annotation_list.sort(key=lambda t: t[0])
    args = (
        url_for('submit_annotation_canonical'),
        imgsetid,
        version,
        samples,
        species,
    )
    callback_url = '%s?imgsetid=%s&version=%d&samples=%d&species=%s' % args
    return appf.template(
        'turk',
        'canonical',
        imgsetid=imgsetid,
        canonical_str=canonical_str,
        aid_list=aid_list_,
        aid_list_str=aid_list_str,
        num_aids=len(aid_list_),
        annotation_list=annotation_list,
        num_annotations=len(annotation_list),
        progress=progress,
        finished=finished,
        callback_url=callback_url,
        callback_method='POST',
    )
@register_route('/turk/splits/', methods=['GET'])
def turk_splits(aid=None, **kwargs):
    """Serve the split-review turk page listing every annotation that shares
    the given annotation's name (i.e. all candidate members of a split)."""
    ibs = current_app.ibs
    if aid is None:
        annotation_list = []
    else:
        name_rowid = ibs.get_annot_nids(aid)
        sibling_aids = ibs.get_name_aids(name_rowid)
        annotation_list = sorted(zip(sibling_aids), key=lambda row: row[0])
    callback_url = url_for('submit_splits')
    return appf.template(
        'turk',
        'splits',
        aid=aid,
        annotation_list=annotation_list,
        num_annotations=len(annotation_list),
        callback_url=callback_url,
        callback_method='POST',
    )
@register_route('/turk/part/contour/', methods=['GET'])
def turk_contour(part_rowid=None, imgsetid=None, previous=None, **kwargs):
    """Serve the part-contour turk page.

    Picks a random unreviewed part (unless ``part_rowid`` is given), loads any
    previously saved contour for it, and renders the 'contour' template with
    review progress and per-user display settings read from cookies.
    """
    ibs = current_app.ibs
    # Per-request config flags: taken from kwargs, then kwargs['config'],
    # then the defaults below.
    default_list = [
        ('temp', True),
    ]
    config_kwargs = kwargs.get('config', {})
    config = {
        key: kwargs.get(key, config_kwargs.get(key, default))
        for key, default in default_list
    }
    # Serialize the config as a query-string fragment for the template.
    config_str_list = [
        '%s=%s' % (key, 'true' if config[key] else 'false') for key in config.keys()
    ]
    config_str = '&'.join(config_str_list)
    imgsetid = None if imgsetid == '' or imgsetid == 'None' else imgsetid
    # Gather every part attached to annotations in the (optional) imageset.
    gid_list = ibs.get_valid_gids(imgsetid=imgsetid)
    aid_list = ut.flatten(ibs.get_image_aids(gid_list))
    part_rowid_list = ut.flatten(ibs.get_annot_part_rowids(aid_list))
    part_rowid_list = list(set(part_rowid_list))
    reviewed_list = appf.imageset_part_contour_processed(ibs, part_rowid_list)
    try:
        progress = '%0.2f' % (100.0 * reviewed_list.count(True) / len(reviewed_list),)
    except ZeroDivisionError:
        # No parts at all: report the task as complete.
        progress = '100.0'
    imagesettext = None if imgsetid is None else ibs.get_imageset_text(imgsetid)
    if part_rowid is None:
        # Choose a random part that has not been reviewed yet.
        part_rowid_list_ = ut.filterfalse_items(part_rowid_list, reviewed_list)
        if len(part_rowid_list_) == 0:
            part_rowid = None
        else:
            part_rowid = random.choice(part_rowid_list_)
    finished = part_rowid is None
    # NOTE(review): other turk pages compare this cookie with == 0 against a
    # default of 1; here the default 1 combined with == 1 shows instructions
    # by default -- confirm this asymmetry is intentional.
    display_instructions = request.cookies.get('ia-contour_instructions_seen', 1) == 1
    padding = 0.15
    if not finished:
        image_src = routes_ajax.part_src(part_rowid, pad=padding)
        # Get contours from part
        existing_contour_dict = ibs.get_part_contour(part_rowid)
        existing_contour = existing_contour_dict.get('contour', None)
        if existing_contour is None:
            existing_contour = {}
    else:
        image_src = None
        existing_contour = {}
    existing_contour_json = ut.to_json(existing_contour)
    # Per-user display settings, read from cookies ('1' means enabled).
    settings_key_list = [
        ('ia-contour-setting-guiderail', '0'),
    ]
    settings = {
        settings_key: request.cookies.get(settings_key, settings_default) == '1'
        for (settings_key, settings_default) in settings_key_list
    }
    callback_url = '%s?imgsetid=%s' % (url_for('submit_contour'), imgsetid)
    return appf.template(
        'turk',
        'contour',
        imgsetid=imgsetid,
        part_rowid=part_rowid,
        config_str=config_str,
        config=config,
        image_src=image_src,
        padding=padding,
        previous=previous,
        imagesettext=imagesettext,
        progress=progress,
        finished=finished,
        settings=settings,
        display_instructions=display_instructions,
        existing_contour_json=existing_contour_json,
        callback_url=callback_url,
        callback_method='POST',
    )
@register_route('/turk/species/', methods=['GET'])
def turk_species(hotkeys=8, refresh=False, previous_species_rowids=None, **kwargs):
ibs = current_app.ibs
tup = appf.get_turk_annot_args(appf.imageset_annot_processed)
(aid_list, reviewed_list, imgsetid, src_ag, dst_ag, progress, aid, previous) = tup
review = 'review' in request.args.keys()
finished = aid is None
display_instructions = request.cookies.get('ia-species_instructions_seen', 1) == 0
if not finished:
gid = ibs.get_annot_gids(aid)
image_src = routes_ajax.annotation_src(aid, resize=None)
species = ibs.get_annot_species_texts(aid)
else:
try:
ibs.update_special_imagesets()
ibs.notify_observers()
except Exception:
pass
gid = None
image_src = None
species = None
imagesettext = ibs.get_imageset_text(imgsetid)
species_rowids = ibs._get_all_species_rowids()
if previous_species_rowids is not None:
try:
for previous_species_rowid in previous_species_rowids:
assert previous_species_rowid in species_rowids
species_rowids = previous_species_rowids
except Exception:
logger.info('Error finding previous | |
= True; x = not (t is True)", "x", False
yield self.st, "t = True; x = not (t is False)", "x", True
yield self.st, "t = True; x = not (t is None)", "x", True
yield self.st, "n = None; x = not (n is True)", "x", True
yield self.st, "n = None; x = not (n is False)", "x", True
yield self.st, "n = None; x = not (n is None)", "x", False
yield self.st, "t = True; x = not (t is not True)", "x", True
yield self.st, "t = True; x = not (t is not False)", "x", False
yield self.st, "t = True; x = not (t is not None)", "x", False
yield self.st, "n = None; x = not (n is not True)", "x", False
yield self.st, "n = None; x = not (n is not False)", "x", False
yield self.st, "n = None; x = not (n is not None)", "x", True
def test_multiexpr(self):
yield self.st, "z = 2+3; x = y = z", "x,y,z", (5,5,5)
    def test_imports(self):
        """Compilation of every import-statement form.

        Each yield hands the harness a (source, expression, expected-value)
        check: the harness runs *source* and verifies *expression*.
        """
        import os
        yield self.st, "import sys", "sys.__name__", "sys"
        yield self.st, "import sys as y", "y.__name__", "sys"
        yield (self.st, "import sys, os",
               "sys.__name__, os.__name__", ("sys", "os"))
        yield (self.st, "import sys as x, os.path as y",
               "x.__name__, y.__name__", ("sys", os.path.__name__))
        yield self.st, 'import os.path', "os.path.__name__", os.path.__name__
        yield (self.st, 'import os.path, sys',
               "os.path.__name__, sys.__name__", (os.path.__name__, "sys"))
        yield (self.st, 'import sys, os.path as osp',
               "osp.__name__, sys.__name__", (os.path.__name__, "sys"))
        yield (self.st, 'import os.path as osp',
               "osp.__name__", os.path.__name__)
        yield (self.st, 'from os import path',
               "path.__name__", os.path.__name__)
        yield (self.st, 'from os import path, sep',
               "path.__name__, sep", (os.path.__name__, os.sep))
        yield (self.st, 'from os import path as p',
               "p.__name__", os.path.__name__)
        yield (self.st, 'from os import *',
               "path.__name__, sep", (os.path.__name__, os.sep))
        # NOTE: the embedded source below is Python 2 ("except ImportError, e");
        # it is compiled by the interpreter under test, not by this file.
        yield (self.st, '''
            class A(object):
                def m(self):
                    from __foo__.bar import x
            try:
                A().m()
            except ImportError, e:
                msg = str(e)
            ''', "msg", "No module named __foo__")
    def test_if_stmts(self):
        """if/elif/else branches and and/or/not short-circuit combinations."""
        yield self.st, "a = 42\nif a > 10: a += 2", "a", 44
        yield self.st, "a=5\nif 0: a=7", "a", 5
        yield self.st, "a=5\nif 1: a=7", "a", 7
        yield self.st, "a=5\nif a and not not (a<10): a=7", "a", 7
        # Three-way elif chain exercised across the whole 0..9 range.
        yield self.st, """
        lst = []
        for a in range(10):
            if a < 3:
                a += 20
            elif a > 3 and a < 8:
                a += 30
            else:
                a += 40
            lst.append(a)
        """, "lst", [20, 21, 22, 43, 34, 35, 36, 37, 48, 49]
        # Every combination of and/or/not with constant and computed operands.
        yield self.st, """
        lst = []
        for a in range(10):
            b = (a & 7) ^ 1
            if a or 1 or b: lst.append('A')
            if a or 0 or b: lst.append('B')
            if a and 1 and b: lst.append('C')
            if a and 0 and b: lst.append('D')
            if not (a or 1 or b): lst.append('-A')
            if not (a or 0 or b): lst.append('-B')
            if not (a and 1 and b): lst.append('-C')
            if not (a and 0 and b): lst.append('-D')
            if (not a) or (not 1) or (not b): lst.append('A')
            if (not a) or (not 0) or (not b): lst.append('B')
            if (not a) and (not 1) and (not b): lst.append('C')
            if (not a) and (not 0) and (not b): lst.append('D')
        """, "lst", ['A', 'B', '-C', '-D', 'A', 'B', 'A', 'B', '-C',
                     '-D', 'A', 'B', 'A', 'B', 'C', '-D', 'B', 'A', 'B',
                     'C', '-D', 'B', 'A', 'B', 'C', '-D', 'B', 'A', 'B',
                     'C', '-D', 'B', 'A', 'B', 'C', '-D', 'B', 'A', 'B',
                     'C', '-D', 'B', 'A', 'B', 'C', '-D', 'B', 'A', 'B',
                     '-C', '-D', 'A', 'B']
    def test_docstrings(self):
        """__doc__ extraction: only a *leading* string literal becomes the
        docstring; later bare strings and lambdas never do."""
        for source, expected in [
            ('''def foo(): return 1''', None),
            ('''class foo: pass''', None),
            ('''foo = lambda: 4''', None),
            ('''foo = lambda: "foo"''', None),
            ('''def foo(): 4''', None),
            ('''class foo: "foo"''', "foo"),
            ('''def foo():
                """foo docstring"""
                return 1
            ''', "foo docstring"),
            ('''def foo():
                """foo docstring"""
                a = 1
                """bar"""
                return a
            ''', "foo docstring"),
            # NOTE: "print 1" and the exec-in form below are Python 2 source,
            # compiled by the interpreter under test rather than by this file.
            ('''def foo():
                """doc"""; print 1
                a=1
            ''', "doc"),
            ('''
            class Foo(object): pass
            foo = Foo()
            exec "'moduledoc'" in foo.__dict__
            ''', "moduledoc"),
        ]:
            yield self.simple_test, source, "foo.__doc__", expected
def test_in(self):
yield self.st, "n = 5; x = n in [3,4,5]", 'x', True
yield self.st, "n = 5; x = n in [3,4,6]", 'x', False
yield self.st, "n = 5; x = n in [3,4,n]", 'x', True
yield self.st, "n = 5; x = n in [3,4,n+1]", 'x', False
yield self.st, "n = 5; x = n in (3,4,5)", 'x', True
yield self.st, "n = 5; x = n in (3,4,6)", 'x', False
yield self.st, "n = 5; x = n in (3,4,n)", 'x', True
yield self.st, "n = 5; x = n in (3,4,n+1)", 'x', False
    def test_for_loops(self):
        """for-loops over list/tuple literals, plus list comprehensions and
        generator expressions feeding sum()."""
        yield self.st, """
        total = 0
        for i in [2, 7, 5]:
            total += i
        """, 'total', 2 + 7 + 5
        yield self.st, """
        total = 0
        for i in (2, 7, 5):
            total += i
        """, 'total', 2 + 7 + 5
        # The iterable may reference the accumulator (evaluated once, up front).
        yield self.st, """
        total = 0
        for i in [2, 7, total+5]:
            total += i
        """, 'total', 2 + 7 + 5
        # Comprehensions / genexps over both literal kinds and over a variable.
        yield self.st, "x = sum([n+2 for n in [6, 1, 2]])", 'x', 15
        yield self.st, "x = sum([n+2 for n in (6, 1, 2)])", 'x', 15
        yield self.st, "k=2; x = sum([n+2 for n in [6, 1, k]])", 'x', 15
        yield self.st, "k=2; x = sum([n+2 for n in (6, 1, k)])", 'x', 15
        yield self.st, "x = sum(n+2 for n in [6, 1, 2])", 'x', 15
        yield self.st, "x = sum(n+2 for n in (6, 1, 2))", 'x', 15
        yield self.st, "k=2; x = sum(n+2 for n in [6, 1, k])", 'x', 15
        yield self.st, "k=2; x = sum(n+2 for n in (6, 1, k))", 'x', 15
    def test_closure(self):
        """Closures: nested functions capturing variables from enclosing
        scopes, including shadowed names and (Python 2) tuple parameters."""
        decl = py.code.Source("""
        def make_adder(n):
            def add(m):
                return n + m
            return add
        """)
        decl = str(decl) + "\n"
        yield self.st, decl + "x = make_adder(40)(2)", 'x', 42
        # Multiple nested functions sharing cells; inner ``f`` shadows outer.
        decl = py.code.Source("""
        def f(a, g, e, c):
            def b(n, d):
                return (a, c, d, g, n)
            def f(b, a):
                return (a, b, c, g)
            return (a, g, e, c, b, f)
        A, G, E, C, B, F = f(6, 2, 8, 5)
        A1, C1, D1, G1, N1 = B(7, 3)
        A2, B2, C2, G2 = F(1, 4)
        """)
        decl = str(decl) + "\n"
        yield self.st, decl, 'A,A1,A2,B2,C,C1,C2,D1,E,G,G1,G2,N1', \
              (6,6 ,4 ,1 ,5,5 ,5 ,3 ,8,2,2 ,2 ,7 )
        # NOTE: tuple parameters "def f((a, b)):" are Python 2-only syntax,
        # compiled by the interpreter under test, not by this file.
        decl = py.code.Source("""
        def f((a, b)):
            def g((c, d)):
                return (a, b, c, d)
            return g
        x = f((1, 2))((3, 4))
        """)
        decl = str(decl) + "\n"
        yield self.st, decl, 'x', (1, 2, 3, 4)
def test_closure_error(self):
source = """if 1:
def f(a):
del a
def x():
a
"""
with py.test.raises(SyntaxError) as excinfo:
self.run(source)
msg = excinfo.value.msg
assert msg == "Can't delete variable used in nested scopes: 'a'"
    def test_try_except_finally(self):
        """finally runs while the exception propagates, then the outer except
        catches it: x goes 5 -> 6 (finally) -> 42 (handler)."""
        yield self.simple_test, """
        try:
            x = 5
            try:
                if x > 2:
                    raise ValueError
            finally:
                x += 1
        except ValueError:
            x *= 7
        """, 'x', 42
    def test_while_loop(self):
        """while loops, including the else clause on normal (non-break) exit."""
        yield self.simple_test, """
        comments = [42]
        comment = '# foo'
        while comment[:1] == '#':
            comments[:0] = [comment]
            comment = ''
        """, 'comments', ['# foo', 42]
        # A while whose condition is immediately false still runs its else.
        yield self.simple_test, """
        while 0:
            pass
        else:
            x = 1
        """, "x", 1
def test_return_lineno(self):
# the point of this test is to check that there is no code associated
# with any line greater than 4.
# The implict return will have the line number of the last statement
# so we check that that line contains exactly the implicit return None
yield self.simple_test, """\
def ireturn_example(): # line 1
global b # line 2
if a == b: # line 3
b = a+1 # line 4
else: # line 5
if 1: pass # line 6
import dis
co = ireturn_example.func_code
linestarts = list(dis.findlinestarts(co))
addrreturn = linestarts[-1][0]
x = [addrreturn == (len(co.co_code) - 4)]
x.extend([lineno for addr, lineno in linestarts])
| |
is bounded, set model redshift to midpoint of bounds
# when doing the guess.
if ppfs is None:
ppfs = {}
if tied is None:
tied = {}
# Convert bounds/priors combinations into ppfs
if bounds is not None:
for key, val in six.iteritems(bounds):
if key in ppfs:
continue # ppfs take priority over bounds/priors
a, b = val
if priors is not None and key in priors:
# solve ppf at discrete points and return interpolating
# function
x_samples = np.linspace(0., 1., 101)
ppf_samples = sncosmo.utils.ppf(priors[key], x_samples, a, b)
f = sncosmo.utils.Interp1D(0., 1., ppf_samples)
else:
f = sncosmo.utils.Interp1D(0., 1., np.array([a, b]))
ppfs[key] = f
# NOTE: It is important that iparam_names is in the same order
# every time, otherwise results will not be reproducible, even
# with same random seed. This is because iparam_names[i] is
# matched to u[i] below and u will be in a reproducible order,
# so iparam_names must also be.
iparam_names = [key for key in vparam_names if key in ppfs]
ppflist = [ppfs[key] for key in iparam_names]
npdim = len(iparam_names) # length of u
ndim = len(vparam_names) # length of v
# Check that all param_names either have a direct prior or are tied.
for name in vparam_names:
if name in iparam_names:
continue
if name in tied:
continue
raise ValueError("Must supply ppf or bounds or tied for parameter '{}'"
.format(name))
def prior_transform(u):
d = {}
for i in range(npdim):
d[iparam_names[i]] = ppflist[i](u[i])
v = np.empty(ndim, dtype=np.float)
for i in range(ndim):
key = vparam_names[i]
if key in d:
v[i] = d[key]
else:
v[i] = tied[key](d)
return v
# Indicies of the model parameters in vparam_names
idx = np.array([model.param_names.index(name) for name in vparam_names])
def chisq(data, model,zp,zpsys):
mCol=model.color(bands[0],bands[1],zpsys,data['time'])
data=data[~np.isnan(mCol)]
mCol=mCol[~np.isnan(mCol)]
diff = data[bands[0]+'-'+bands[1]]- mCol
chi=diff/data[bands[0]+'-'+bands[1]+'_err']
chi2 = np.sum(chi ** 2)
return chi2
def loglike(parameters):
model.parameters[idx] = parameters
chisq_res=chisq(data,model,zp,zpsys)
return -0.5 * chisq_res
res = nestle.sample(loglike, prior_transform, ndim, npdim=npdim,
npoints=npoints, method=method, maxiter=maxiter,
maxcall=50, rstate=rstate,
callback=(nestle.print_progress if verbose else None))
# estimate parameters and covariance from samples
vparameters, cov = nestle.mean_and_cov(res.samples, res.weights)
# update model parameters to estimated ones.
model.set(**dict(zip(vparam_names, vparameters)))
# `res` is a nestle.Result object. Collect result into a sncosmo.Result
# object for consistency, and add more fields.
res = sncosmo.utils.Result(niter=res.niter,
ncall=res.ncall,
logz=res.logz,
logzerr=res.logzerr,
h=res.h,
samples=res.samples,
weights=res.weights,
logvol=res.logvol,
logl=res.logl,
vparam_names=copy(vparam_names),
ndof=len(data) - len(vparam_names),
bounds=bounds,
parameters=model.parameters.copy(),
covariance=cov,
errors=OrderedDict(zip(vparam_names,
np.sqrt(np.diagonal(cov)))),
param_dict=OrderedDict(zip(model.param_names,
model.parameters)))
return res, model
def _fitseries(all_args):
    """Fit time delays and magnifications for a MISN from its combined
    ("series") light curve, one band at a time, then average the per-band
    results weighted by log-evidence.

    Args:
        all_args: either the args dict itself, or a pyParz-style packed
            (curves, args) pair (curves may itself be packed with a dict of
            per-MISN parameter overrides).

    Returns:
        The curves object with ``curves.series.time_delays``/``magnifications``
        (and their errors) filled in and the best-band fit attached to
        ``curves.series.fits``.
    """
    # Unpack pyParz-style packed arguments, if given.
    if isinstance(all_args,(list,tuple,np.ndarray)):
        curves,args=all_args
        if isinstance(args,list):
            args=args[0]
        if isinstance(curves,list):
            curves,single_par_vars=curves
            for key in single_par_vars:
                args[key]=single_par_vars[key]
        args['curves']=curves
        if args['verbose']:
            print('Fitting MISN number %i...'%curves.nsn)
    else:
        args=all_args
    args['bands']=list(args['bands'])
    if not args['curves'].series.table:
        args['curves'].combine_curves(referenceImage=args['refImage'])
    if not args['seriesGrids']:
        print('Need bounds on time delay and magnification (i.e. seriesGrids undefined)')
        sys.exit(1)
    # Keep only the requested bands in the combined table.
    # BUGFIX: the original indexed args['curve'] (a key never set in this
    # function), which raised a KeyError on the first excluded band.
    for b in [x for x in np.unique(args['curves'].series.table['band']) if x not in args['bands']]:
        args['curves'].series.table=args['curves'].series.table[args['curves'].series.table['band']!=b]
    if not args['refModel']:
        if args['curves'].images['image_1'].fits is not None:
            # BUGFIX: the original passed bound_vars=bounds.keys() where
            # ``bounds`` was an undefined local (NameError); the SN parameter
            # bounds live in args['bounds'].
            try:
                args['refModel'],composite_bounds=create_composite_model(args['curves'],args['refImage'],weight='logz',bound_vars=args['bounds'].keys())
            except RuntimeError:
                args['refModel'],composite_bounds=create_composite_model(args['curves'],args['refImage'],bound_vars=args['bounds'].keys())
        else:
            raise RuntimeError("series fit had no reference model or model name.")
    elif not isinstance(args['refModel'],sncosmo.Model):
        raise RuntimeError("Your reference model needs to be an SNCosmo model object.")
    # Bounds for the per-image delay/magnification grid, centered on the
    # current series estimates; the reference image is held fixed.
    gridBounds=dict([])
    for k in args['curves'].images.keys():
        for par in args['seriesGrids'].keys():
            if par=='td' and k==args['refImage']:
                continue
            if par=='mu' and k==args['refImage']:
                continue
            gridBounds[k+'_'+par]=np.array(args['seriesGrids'][par])+args['curves'].series.meta[par][k]
    # Run the nested series fit independently in each band.
    myRes=dict([])
    for b in args['bands']:
        myRes[b]=nest_series_lc(args['curves'],
                                vparam_names=gridBounds.keys(),
                                band=b,refModel=args['refModel'],bounds=gridBounds,snBounds=args['bounds'],
                                snVparam_names=args['bounds'].keys(),ref=args['refImage'],guess_amplitude_bound=True,
                                minsnr=args.get('minsnr',5.),priors=args.get('priors',None),ppfs=args.get('ppfs',None),
                                method=args.get('nest_method','single'),maxcall=args.get('outer_maxcall',None),
                                modelcov=args.get('modelcov',None),rstate=args.get('rstate',None),
                                maxiter=args.get('outer_maxiter',100),npoints=args.get('outer_npoints',50),
                                inner_maxiter=args.get('inner_maxiter',10),inner_npoints=args.get('inner_npoints',10))
    # Average per-band results, weighted by each band's log-evidence.
    # NOTE(review): logz can be negative, which would make these weights
    # invalid for np.average -- confirm the intended weighting.
    weights=np.array([myRes[b][-2].logz for b in args['bands']])
    final_params=dict([])
    for param in myRes[args['bands'][0]][-1].param_names:
        final_params[param]=np.average([myRes[b][-1].get(param) for b in args['bands']],weights=weights)
    args['curves'].series.time_delays=dict([])
    args['curves'].series.magnifications=dict([])
    args['curves'].series.magnification_errors=dict([])
    args['curves'].series.time_delay_errors=dict([])
    for k in args['curves'].images.keys():
        args['curves'].series.time_delays[k]=np.average([myRes[b][0][k] for b in args['bands']],weights=weights)
        args['curves'].series.magnifications[k]=np.average([myRes[b][1][k] for b in args['bands']],weights=weights)
        # Errors come from the single best (highest-logz) band.
        args['curves'].series.time_delay_errors[k]=myRes[args['bands'][np.where(weights==np.max(weights))[0][0]]][2]
        args['curves'].series.magnification_errors[k]=myRes[args['bands'][np.where(weights==np.max(weights))[0][0]]][3]
    # Keep the full fit from the best band, with the averaged parameters set.
    bestRes=myRes[args['bands'][np.where(weights==np.max(weights))[0][0]]][4]
    bestMod=myRes[args['bands'][np.where(weights==np.max(weights))[0][0]]][5]
    bestMod.set(**final_params)
    args['curves'].combine_curves(time_delays=args['curves'].series.time_delays,magnifications=args['curves'].series.magnifications,referenceImage=args['refImage'])
    # BUGFIX: same args['curve'] -> args['curves'] typo as above.
    for b in [x for x in set(args['curves'].series.table['band']) if x not in args['bands']]:
        args['curves'].series.table=args['curves'].series.table[args['curves'].series.table['band']!=b]
    args['curves'].series.fits=newDict()
    args['curves'].series.fits['model']=bestMod
    args['curves'].series.fits['res']=bestRes
    return args['curves']
def create_composite_model(curves, ref, weight='chisq', bound_vars=None):
    """Build a single "composite" model from the per-image fits of *curves*.

    Parameters named in *bound_vars* are set to the weighted average across
    all images (weights taken from each image's fit ``res[weight]``, inverted
    for chi-square so better fits weigh more) and get symmetric bounds derived
    from the weighted average of their fit errors.  All other parameters are
    copied from the reference image *ref*.

    Args:
        curves: MISN object with per-image ``.fits`` (res, model, final_errs).
        ref: key of the reference image in ``curves.images``.
        weight: key into each fit's ``res`` used for the weights.
        bound_vars: iterable of parameter names to average/bound (default none).

    Returns:
        (model, bounds): a copy of the best-weighted image's model with the
        composite parameter values applied, and a dict mapping each name in
        *bound_vars* to a (lo, hi) bounds tuple.
    """
    # Avoid the mutable-default-argument pitfall of the original signature.
    if bound_vars is None:
        bound_vars = []
    # Evaluate weights over the *sorted* image keys and keep that same
    # ordering for every weighted statistic below.
    image_keys = np.sort(list(curves.images.keys()))
    weights = np.array([curves.images[im].fits.res[weight] for im in image_keys])
    if weight == 'chisq':
        # Lower chi-square means a better fit, so invert.
        weights = 1. / weights
    final_vars = dict([])
    bounds = dict([])
    for param in curves.images[ref].fits.model.param_names:
        if param in bound_vars:
            # Weighted average of the parameter across all images ...
            final_vars[param] = np.average(
                [curves.images[k].fits.model.get(param) for k in image_keys],
                weights=weights)
            # ... and symmetric bounds from the weighted average fit error,
            # shrunk by sqrt(N) as a standard error of the mean.
            err = np.average(
                [curves.images[k].fits.final_errs[param] for k in image_keys],
                weights=weights) / np.sqrt(len(weights))
            bounds[param] = (final_vars[param] - err, final_vars[param] + err)
        else:
            final_vars[param] = curves.images[ref].fits.model.get(param)
    # BUGFIX: the original selected the "best" model by indexing the
    # *insertion-ordered* key list with an argmax computed over the *sorted*
    # key list, picking the wrong image whenever those orders differ.  Use
    # the sorted ordering consistently.
    best_key = image_keys[np.where(weights == np.max(weights))[0][0]]
    final_mod = copy(curves.images[best_key].fits.model)
    final_mod.set(**final_vars)
    return (final_mod, bounds)
def nest_series_lc(curves,vparam_names,bounds,snBounds,snVparam_names,ref,guess_amplitude_bound=False,
                   minsnr=5.,refModel=False,band=None, priors=None, ppfs=None, npoints=100, method='single',
                   maxiter=None, maxcall=None, modelcov=False, rstate=None,
                   verbose=False, warn=True,inner_maxiter=10,inner_npoints=10,**kwargs):
    """Nested sampling over time delays/magnifications using the combined
    ("series") light curve of a multiply-imaged SN.

    The outer sampler varies the per-image '<image>_td'/'<image>_mu'
    parameters in ``vparam_names`` (bounded by ``bounds``); each likelihood
    evaluation recombines the curves at the proposed delays/magnifications
    and runs an inner ``nest_lc`` fit of the SN-model parameters
    ``snVparam_names`` (bounded by ``snBounds``) on the single band ``band``,
    whose log-evidence serves as the outer log-likelihood.

    Returns:
        (all_delays, all_mus, all_delay_err, all_mu_err, best_res, best_model)
        where the dicts map image name -> marginalized estimate/error and
        best_res/best_model come from the best inner fit encountered.
    """
    # experimental parameters
    tied = kwargs.get("tied", None)
    vparam_names=list(vparam_names)
    if ppfs is None:
        ppfs = {}
    if tied is None:
        tied = {}
    # Convert bounds/priors combinations into ppfs
    if bounds is not None:
        for key, val in six.iteritems(bounds):
            if key in ppfs:
                continue  # ppfs take priority over bounds/priors
            a, b = val
            if priors is not None and key in priors:
                # solve ppf at discrete points and return interpolating
                # function
                x_samples = np.linspace(0., 1., 101)
                ppf_samples = sncosmo.utils.ppf(priors[key], x_samples, a, b)
                f = sncosmo.utils.Interp1D(0., 1., ppf_samples)
            else:
                f = sncosmo.utils.Interp1D(0., 1., np.array([a, b]))
            ppfs[key] = f
    # NOTE: It is important that iparam_names is in the same order
    # every time, otherwise results will not be reproducible, even
    # with same random seed. This is because iparam_names[i] is
    # matched to u[i] below and u will be in a reproducible order,
    # so iparam_names must also be.
    # The reference image is fixed at zero delay and unit magnification.
    all_delays=dict([])
    all_mus=dict([])
    all_mu_err=dict([])
    all_delay_err=dict([])
    all_mu_err[ref]=0
    all_delay_err[ref]=0
    all_delays[ref]=0
    all_mus[ref]=1
    iparam_names = [key for key in vparam_names if key in ppfs]
    ppflist = [ppfs[key] for key in iparam_names]
    npdim = len(iparam_names)  # length of u
    ndim = len(vparam_names)  # length of v
    # Check that all param_names either have a direct prior or are tied.
    for name in vparam_names:
        if name in iparam_names:
            continue
        if name in tied:
            continue
        raise ValueError("Must supply ppf or bounds or tied for parameter '{}'"
                         .format(name))

    def prior_transform(u):
        """Map unit-cube samples to parameter space via the per-key ppfs."""
        d = {}
        for i in range(npdim):
            d[iparam_names[i]] = ppflist[i](u[i])
        # BUGFIX: the deprecated np.float alias was removed in NumPy 1.24;
        # use the builtin float dtype instead.
        v = np.empty(ndim, dtype=float)
        for i in range(ndim):
            key = vparam_names[i]
            if key in d:
                v[i] = d[key]
            else:
                v[i] = tied[key](d)
        return v

    # Track the best inner fit seen across likelihood calls.
    # NOTE(review): module-level globals make this non-reentrant and
    # thread-unsafe; kept for compatibility with the original control flow.
    global best_comb_Z
    global best_comb_Mod
    global best_comb_Res
    best_comb_Res = None
    best_comb_Mod = None
    best_comb_Z = -np.inf

    def loglike(parameters):
        """Recombine the curves at the proposed delays/mus, refit the SN
        model in the chosen band, and return the inner fit's log-evidence."""
        tempCurve=_sntd_deepcopy(curves)
        tempTds=dict([])
        tempMus=dict([])
        # Parameter names look like '<image>_td' / '<image>_mu'.
        for i in range(len(parameters)):
            if iparam_names[i][-2:]=='td':
                tempTds[iparam_names[i][0:iparam_names[i].rfind('_')]]=parameters[i]
            elif iparam_names[i][-2:]=='mu':
                tempMus[iparam_names[i][0:iparam_names[i].rfind('_')]]=parameters[i]
        tempTds[ref]=all_delays[ref]
        tempMus[ref]=all_mus[ref]
        tempCurve.combine_curves(time_delays=tempTds,magnifications=tempMus,referenceImage=ref)
        tempCurve.series.table=tempCurve.series.table[tempCurve.series.table['band']==band]
        tempRes,tempMod=nest_lc(tempCurve.series.table,refModel,
                                vparam_names=snVparam_names,bounds=snBounds,guess_amplitude_bound=False,
                                maxiter=inner_maxiter,npoints=inner_npoints)
        global best_comb_Res
        global best_comb_Mod
        global best_comb_Z
        if tempRes.logz>best_comb_Z:
            best_comb_Res=copy(tempRes)
            best_comb_Z=copy(tempRes.logz)
            best_comb_Mod=copy(tempMod)
        return(tempRes.logz)

    res = nestle.sample(loglike, prior_transform, ndim, npdim=npdim,
                        npoints=npoints, method=method, maxiter=maxiter,
                        maxcall=maxcall, rstate=rstate,
                        callback=(nestle.print_progress if verbose else None))
    # Collect the nestle result into a sncosmo.Result for consistency.
    res = sncosmo.utils.Result(niter=res.niter,
                               ncall=res.ncall,
                               logz=res.logz,
                               logzerr=res.logzerr,
                               h=res.h,
                               samples=res.samples,
                               weights=res.weights,
                               logvol=res.logvol,
                               logl=res.logl,
                               vparam_names=copy(vparam_names),
                               bounds=bounds)
    # Marginalize each delay/magnification from the weighted samples.
    pdf=_get_marginal_pdfs(res,nbins=npoints,verbose=False)
    for im in [x for x in curves.images.keys() if x!=ref]:
        all_delays[im]=pdf[im+'_td'][2]
        all_delay_err[im]=pdf[im+'_td'][3]
        all_mus[im]=pdf[im+'_mu'][2]
        all_mu_err[im]=pdf[im+'_mu'][3]
    return all_delays,all_mus,all_delay_err,all_mu_err,best_comb_Res,best_comb_Mod
def par_fit_parallel(all_args):
    """Parallel worker: nested-sampling fit for one image of a MISN.

    Args:
        all_args: (d, fitDict, args, bestFit, bestRes) as packed by
            ``_fitparallel``/pyParz, where ``d`` is the image key.

    Returns:
        [d, fitted_model, fit_result, degrees_of_freedom].
    """
    d, fitDict, args, bestFit, bestRes = all_args
    _, bestFit, bestMod, bounds = fitDict[d]
    tempTable = deepcopy(args['curves'].images[d].table)
    # Drop photometry from bands we were not asked to fit.
    for b in [x for x in np.unique(tempTable['band']) if x not in args['bands']]:
        tempTable = tempTable[tempTable['band'] != b]
    if args['flip']:
        tempTable['flux'] = np.flip(tempTable['flux'], axis=0)
    # Only guess the amplitude bound when the caller did not provide one.
    guess_amp_bounds = 'amplitude' not in bounds.keys()
    nest_res, nest_fit = _nested_wrapper(
        args['curves'], tempTable, bestFit, vparams=bestRes.vparam_names, bounds=bounds,
        priors=args.get('priors', None),
        # BUGFIX: the original passed ppfs=args.get('None'), i.e. it looked up
        # the literal key 'None' and therefore always passed None, silently
        # ignoring any user-supplied ppfs.
        ppfs=args.get('ppfs', None),
        method=args.get('nest_method', 'single'),
        maxcall=args.get('maxcall', None), modelcov=args.get('modelcov', False),
        rstate=args.get('rstate', None),
        guess_amplitude_bound=guess_amp_bounds, microlensing=args['microlensing'],
        zpsys=args['curves'].images[d].zpsys, kernel=args['kernel'],
        maxiter=args.get('maxiter', None), npoints=args.get('npoints', 100),
        nsamples=args['nMicroSamples'])
    return [d, nest_fit, nest_res, len(tempTable) - len(nest_res.vparam_names)]
def _fitparallel(all_args):
    """Fit each image of a MISN independently ("parallel" method), then
    combine the per-image posteriors into joint time delays/magnifications.

    ``all_args`` is either the args dict itself or a pyParz-style packed
    (curves, args) pair.  Returns the curves object with per-image fits,
    time delays, magnifications and their errors attached.
    """
    # Unpack pyParz-style packed arguments, if given.
    if isinstance(all_args,(list,tuple,np.ndarray)):
        curves,args=all_args
        if isinstance(args,list):
            args=args[0]
        if isinstance(curves,list):
            curves,single_par_vars=curves
            for key in single_par_vars:
                args[key]=single_par_vars[key]
        args['curves']=curves
        if args['verbose']:
            print('Fitting MISN number %i...'%curves.nsn)
    else:
        args=all_args
    fitDict=dict([])
    # Remember the user-supplied *relative* bounds; they get re-centered
    # per image below.
    if 't0' in args['bounds']:
        t0Bounds=copy(args['bounds']['t0'])
    if 'amplitude' in args['bounds']:
        ampBounds=copy(args['bounds']['amplitude'])
    for d in args['curves'].images.keys():
        #print(curves.images[d].simMeta)
        args['curve']=copy(args['curves'].images[d])
        # Keep only the requested bands for this image.
        for b in [x for x in np.unique(args['curve'].table['band']) if x not in args['bands']]:
            args['curve'].table=args['curve'].table[args['curve'].table['band']!=b]
        # Re-center the t0 bounds on the guess, or on the brightest epoch.
        if 't0' in args['bounds']:
            if args['t0_guess'] is not None:
                args['bounds']['t0']=(t0Bounds[0]+args['t0_guess'][d],t0Bounds[1]+args['t0_guess'][d])
            else:
                maxFlux=np.max(args['curve'].table['flux'])
                maxTime=args['curve'].table['time'][args['curve'].table['flux']==maxFlux]
                args['bounds']['t0']=(t0Bounds[0]+maxTime,t0Bounds[1]+maxTime)
        # Scale the amplitude bounds by the peak flux if requested.
        if 'amplitude' in args['bounds'] and args['guess_amplitude']:
            args['bounds']['amplitude']=(ampBounds[0]*np.max(args['curve'].table['flux']),ampBounds[1]*np.max(args['curve'].table['flux']))
        args['curves'].images[d].fits=newDict()
        # NOTE(review): 'True or' disables the pyParz model-fitting branch
        # below -- presumably intentional, but worth confirming.
        if True or len(args['curve'].table)>63 or len(args['mods'])==1 or args['snType']=='Ia':
            fits=[]
            for mod in args['mods']:
                if mod =='BazinSource' or isinstance(mod,BazinSource):
                    fits.append(param_fit(args,mod))
                else:
                    if len(args['mods'])==1:
                        doFit=False
                    else:
                        doFit=True
                    args['doFit']=doFit
                    fits.append(_fit_data_wrap((mod,args)))
        else:
            args['doFit']=True
            fits=pyParz.foreach(args['mods'],_fit_data,args)
        # Keep the model with the lowest chi-square for this image.
        if len(fits)>1:
            bestChisq=np.inf
            for f in fits:
                if f:
                    res=f['res']
                    mod=f['model']
                    if res.chisq <bestChisq:
                        bestChisq=res.chisq
                        bestFit=mod
                        bestRes=res
        else:
            bestFit=fits[0]['model']
            bestRes=fits[0]['res']
        fitDict[d]=[fits,bestFit,bestRes,copy(args['bounds'])]
    # If different images preferred different models, force every image onto
    # the single globally-best (lowest chi-square) model.
    if not all([fitDict[d][1]._source.name==fitDict[list(fitDict.keys())[0]][1]._source.name for d in fitDict.keys()]):
        print('All models did not match, finding best...')
        bestChisq=np.inf
        bestMod=None
        for d in fitDict.keys():
            chisq=fitDict[d][2].chisq
            if chisq<bestChisq:
                bestChisq=chisq
                bestMod=fitDict[d][1]._source.name
        for d in fitDict.keys():
            for f in fitDict[d][0]:
                if f and f['model']._source.name==bestMod:
                    fitDict[d][1]=f['model']
                    fitDict[d][2]=f['res']
                    break
    # Nested-sampling refinement per image: parallelized unless microlensing
    # is requested or we are already running inside a parallel batch.
    if args['microlensing'] is None and not args['parlist']:
        res=pyParz.foreach(list(args['curves'].images.keys()),par_fit_parallel,[fitDict,args,bestFit,bestRes],
                           min(multiprocessing.cpu_count(),len(list(args['curves'].images.keys()))))
    else:
        res=[]
        for d in args['curves'].images.keys():
            res.append(par_fit_parallel([d,fitDict,args,bestFit,bestRes]))
    # Unpack the per-image worker results: [image, model, res, dof].
    dofs={}
    resList={}
    for i in range(len(res)):
        dofs[res[i][0]]=res[i][-1]
        resList[res[i][0]]=res[i][2]
        args['curves'].images[res[i][0]].fits=newDict()
        args['curves'].images[res[i][0]].fits['model']=res[i][1]
        args['curves'].images[res[i][0]].fits['res']=res[i][2]
    # Combine the per-image posteriors into joint parameter estimates.
    joint=_joint_likelihood(resList,verbose=False)
    for d in np.sort(list(args['curves'].images.keys())):
        errs=dict([])
        if 'micro' in args['curves'].images[d].fits.res.errors.keys():
            errs['micro']=args['curves'].images[d].fits.res.errors['micro']
        else:
            errs['micro']=0.
        tempTable=copy(args['curves'].images[d].table)
        for b in [x for x in np.unique(tempTable['band']) if x not in args['bands']]:
            tempTable=tempTable[tempTable['band']!=b]
        if not np.all([x in __thetaL__ for x in joint.keys()]):# or args['microlensing'] is not None:
            # Some joint parameters are not purely shared: refit this image
            # with tight bounds derived from the joint posterior.
            # NOTE(review): 't0' gets bounds in ``bds`` but is never appended
            # to final_vparams below -- confirm that is intentional.
            bds=dict([])
            final_vparams=[]
            for p in joint.keys():
                if p in args['curves'].images[d].fits.res.vparam_names:
                    if isinstance(joint[p],dict):
                        if p=='t0':
                            bds[p]=(-3+joint[p][d][0],3+joint[p][d][0])
                        elif p!='amplitude' and p!='x0':
                            if joint[p][d][1]==0 or np.round(joint[p][d][1]/joint[p][d][0],6)==0:
                                bds[p]=(joint[p][d][0]-.05*joint[p][d][0],joint[p][d][0]+.05*joint[p][d][0])
                            else:
                                bds[p]=(joint[p][d][0]-3*joint[p][d][1],joint[p][d][0]+3*joint[p][d][1])
                            errs[p]=joint[p][d][1]
                            final_vparams.append(p)
                    else:
                        args['curves'].images[d].fits.model.set(**{p:joint[p][0]})
                        errs[p]=joint[p][1]
            finalRes,finalFit=nest_lc(tempTable,args['curves'].images[d].fits.model,final_vparams,bounds=bds,guess_amplitude_bound=True,maxiter=None,priors=args.get('priors',None))
            finalRes.ndof=dofs[d]
            args['curves'].images[d].fits['final_model']=finalFit
            args['curves'].images[d].fits['final_res']=finalRes
        else:
            # All joint parameters are shared: just set them on the model.
            for p in joint.keys():
                if p in args['curves'].images[d].fits.res.vparam_names:
                    if isinstance(joint[p],dict):
                        args['curves'].images[d].fits.model.set(**{p:joint[p][d][0]})
                        errs[p]=joint[p][d][1]
                    else:
                        args['curves'].images[d].fits.model.set(**{p:joint[p][0]})
                        errs[p]=joint[p][1]
        args['curves'].images[d].fits['final_errs']=errs
    # Translate the fits into measured delays/magnifications on the curves.
    tds,td_errs,mags,mag_errs,times,fluxes,time_errors,flux_errors=timeDelaysAndMagnifications(args['curves'])
    args['curves'].time_delays=tds
    args['curves'].time_delay_errors=td_errs
    args['curves'].magnifications=mags
    args['curves'].magnification_errors=mag_errs
    args['curves'].measurements={'t0':times,'A':fluxes,'t0_err':time_errors,'A_err':flux_errors}
    if args['showPlots']:
        for d in args['curves'].images.keys():
            tempTable=copy(args['curves'].images[d].table)
            for b in [x for x in np.unique(tempTable['band']) if x not in args['bands']]:
                tempTable=tempTable[tempTable['band']!=b]
            tempMod=copy(args['curves'].images[d].fits.model)
            sncosmo.plot_lc(tempTable,model=tempMod)
            #plt.savefig(nest_fit._source.name+'_'+tempTable['band'][0]+'_refs_'+d+'.pdf',format='pdf',overwrik4ite=True)
            #plt.savefig('example_plot_dust_image_'+str(d[-1])+'.png',format='png',overwrite=True)
            plt.show()
            plt.clf()
            plt.close()
    return args['curves']
def _micro_uncertainty(args):
    """Parallel worker: refit one GP posterior sample as a microlensing effect.

    ``args`` is ``(sample, other)`` where ``other`` packs the best-fit model,
    the raw photometry (as a plain array plus column names), the prediction
    grid, and the nested-sampling configuration.  Returns the refit ``t0`` so
    the caller can measure the scatter induced by microlensing.
    """
    sample, shared = args
    base_fit, raw_data, colnames, x_pred, vparam_names, bounds, priors = shared
    photometry = Table(raw_data, names=colnames)
    model = deepcopy(base_fit)
    # Inject this posterior sample as an achromatic magnification curve
    # (rest-frame time axis, multiplicative magnification).
    effect = AchromaticMicrolensing(x_pred / (1 + base_fit.get('z')), sample,
                                    magformat='multiply')
    model.add_effect(effect, 'microlensing', 'rest')
    _, refit = nest_lc(photometry, model, vparam_names=vparam_names, bounds=bounds,
                       guess_amplitude_bound=True, maxiter=None, npoints=200,
                       priors=priors)
    return float(refit.get('t0'))
def _nested_wrapper(curves,data,model,vparams,bounds,priors,guess_amplitude_bound,
                    microlensing,zpsys,kernel,maxiter,npoints,nsamples,ppfs, method,
                    maxcall, modelcov,
                    rstate):
    """Run nested sampling, optionally followed by a microlensing correction.

    Without microlensing this is a single ``nest_lc`` call.  With it, the
    initial fit's residuals are modeled as a microlensing curve, posterior
    samples of that curve are refit in parallel, and the scatter of the
    resulting ``t0`` values is folded into ``nest_res.errors['micro']``.
    Returns ``(result, fitted_model)``.
    """
    fit_table = deepcopy(data)
    param_names = deepcopy(vparams)
    if microlensing is None:
        # Plain fit on the untouched data table.
        best_res, best_mod = nest_lc(data, model, vparam_names=param_names,
                                     bounds=bounds, ppfs=ppfs,
                                     guess_amplitude_bound=guess_amplitude_bound,
                                     maxiter=maxiter, npoints=npoints,
                                     priors=priors, method=method,
                                     maxcall=maxcall, modelcov=modelcov,
                                     rstate=rstate)
        return (best_res, best_mod)
    nest_res, nest_fit = nest_lc(fit_table, model, vparam_names=param_names,
                                 bounds=bounds, ppfs=ppfs,
                                 guess_amplitude_bound=guess_amplitude_bound,
                                 maxiter=maxiter, npoints=npoints,
                                 priors=priors, method=method, maxcall=maxcall,
                                 modelcov=modelcov, rstate=rstate)
    micro, sigma, x_pred, y_pred, samples = fit_micro(curves, nest_res, nest_fit,
                                                      fit_table, zpsys, nsamples,
                                                      micro_type=microlensing,
                                                      kernel=kernel)
    # Refit each GP posterior sample in parallel; the spread of the recovered
    # t0 values measures the microlensing-induced timing uncertainty.
    t0s = pyParz.foreach(samples.T, _micro_uncertainty,
                         [nest_fit, np.array(fit_table), fit_table.colnames,
                          x_pred, param_names, bounds, priors])
    mu, sigma = scipy.stats.norm.fit(t0s)
    nest_res.errors['micro'] = np.sqrt(np.abs(nest_fit.get('t0') - mu)**2 + (3 * sigma)**2)
    return (nest_res, nest_fit)
def _maxFromModel(mod,band,zp,zpsys):
time=np.arange(mod.mintime(),mod.maxtime(),.1)
flux=mod.bandflux(band,time,zp,zpsys)
return (time[flux==np.max(flux)],np.max(flux))
def fit_micro(curves,res,fit,dat,zpsys,nsamples,micro_type='achromatic',kernel='RBF'):
    """Estimate an achromatic microlensing magnification curve from fit residuals.

    Ratios the observed fluxes to the best-fit model inside a window around
    peak ([-15, +40] observer-frame days), fits a Gaussian process to the
    combined per-band residuals, and wraps the GP mean prediction in an
    ``AchromaticMicrolensing`` propagation effect.

    :param curves: multiply-imaged light-curve container (unused here)
    :param res: nested-sampling result (unused here, kept for API symmetry)
    :param fit: best-fit sncosmo model
    :param dat: photometry table for the image being corrected
    :param zpsys: zero-point system of the photometry
    :param nsamples: number of GP posterior samples to draw
    :param micro_type: only ``'achromatic'`` is implemented
    :param kernel: GP kernel name; ``'RBF'`` selects an RBF kernel
    :return: ``(AchromaticMicrolensing effect, GP sigma, time grid,
              GP mean prediction, GP posterior samples)``
    """
    t0=fit.get('t0')
    fit.set(t0=t0)
    data=deepcopy(dat)
    # Work in days relative to peak, restricted to a window around it.
    data['time']-=t0
    data=data[data['time']<=40.]
    data=data[data['time']>=-15.]
    achromatic=micro_type.lower()=='achromatic'
    if achromatic:
        allResid=[]
        allErr=[]
        allTime=[]
    else:
        allResid=dict([])
        allErr=dict([])
        allTime=dict([])
    for b in np.unique(data['band']):
        tempData=data[data['band']==b]
        tempData=tempData[tempData['flux']>.1]
        tempTime=tempData['time']
        mod=fit.bandflux(b,tempTime+t0,zpsys=zpsys,zp=tempData['zp'])
        residual=tempData['flux']/mod
        # Bug fix: apply ONE NaN mask to all parallel arrays.  The previous
        # code filtered ``residual`` first and then masked ``tempTime`` with
        # the already-clean residual, leaving ``tempTime`` unfiltered and
        # mismatched in length whenever NaNs were present.
        good=~np.isnan(residual)
        tempData=tempData[good]
        tempTime=tempTime[good]
        residual=residual[good]
        if achromatic:
            allResid=np.append(allResid,residual)
            allErr=np.append(allErr,residual*tempData['fluxerr']/tempData['flux'])
            allTime=np.append(allTime,tempTime)
        else:
            allResid[b]=residual
            allErr[b]=residual*tempData['fluxerr']/tempData['flux']
            allTime[b]=tempTime
    if kernel=='RBF':
        kernel = RBF(10., (20., 50.))
    if achromatic:
        gp = GaussianProcessRegressor(kernel=kernel, alpha=allErr ** 2,
                                      n_restarts_optimizer=100)
        try:
            gp.fit(np.atleast_2d(allTime).T,allResid.ravel())
        except:
            # Best-effort fallback: drop any remaining NaN residuals and retry.
            temp=np.atleast_2d(allTime).T
            temp2=allResid.ravel()
            temp=temp[~np.isnan(temp2)]
            temp2=temp2[~np.isnan(temp2)]
            gp.fit(temp,temp2)
        X=np.atleast_2d(np.linspace(np.min(allTime), np.max(allTime), 1000)).T
        y_pred, sigma = gp.predict(X, return_std=True)
        samples=gp.sample_y(X,nsamples)
        # Pad the prediction with unity magnification (zero sigma) at the
        # model's phase limits so the effect covers the full phase range.
        tempX=X[:,0]
        tempX=np.append([fit._source._phase[0]*(1+fit.get('z'))],np.append(tempX,[fit._source._phase[-1]*(1+fit.get('z'))]))
        y_pred=np.append([1.],np.append(y_pred,[1.]))
        sigma=np.append([0.],np.append(sigma,[0.]))
        result=AchromaticMicrolensing(tempX/(1+fit.get('z')),y_pred,magformat='multiply')
    else:
        # TODO: chromatic microlensing is not implemented; reaching this
        # branch leaves ``result``/``X``/``y_pred``/``samples`` undefined and
        # the return below raises NameError.
        pass
    return result,sigma,X[:,0],y_pred[1:-1],samples
def timeDelaysAndMagnifications(curves):
    """Compute per-image peak times/fluxes and relative delays/magnifications.

    The first image (alphabetical order) is the reference: its delay is 0 and
    its magnification is 1; every other image is reported relative to it.

    :param curves: container with ``images`` (dict of fitted images) and
        ``bands`` (iterable of band names)
    :return: tuple ``(delays, delay_errs, mags, mag_errs, times, fluxes,
             time_errors, flux_errors)``, each a dict keyed by image name
    """
    times=dict([])
    fluxes=dict([])
    time_errors=dict([])
    flux_errors=dict([])
    for d in curves.images.keys():
        # Pick the first requested band that the fitted model actually covers.
        b=[x for x in curves.bands if curves.images[d].fits.model.bandoverlap(x)]
        if len(b)==0:
            print('None of your bands overlap your fit?')
            sys.exit()
        b=b[0]
        try:
            timeOfPeak=curves.images[d].fits.model.get('t0')
            band_time_errors=curves.images[d].fits.final_errs['t0']
        except Exception:
            # Model has no explicit t0 parameter: locate the peak numerically.
            # NOTE(review): ``curves.images[d]['band']`` looks like it should
            # be ``curves.images[d].table['band']`` -- confirm against the
            # curve container's API before relying on this fallback.
            timeOfPeak,peakFlux=_maxFromModel(curves.images[d].fits.model,b,
                                              curves.images[d].table['zp'][curves.images[d]['band']==b],
                                              curves.images[d].zpsys)
            band_time_errors=0
        band_flux_errors=0
        # Try the known amplitude parameter names in order of likelihood.
        for amp in ['x0','amplitude','A']:
            try:
                peakFlux=curves.images[d].fits.model.get(amp)
                band_flux_errors=curves.images[d].fits.final_errs[amp]
                success=True
                break
            except Exception:
                success=False
        if not success:
            # No amplitude parameter: fall back to the model flux at peak.
            peakFlux=curves.images[d].fits.model.bandflux(b,timeOfPeak,
                                                          zp=curves.images[d].table['zp'][curves.images[d]['band']==b],
                                                          zpsys=curves.images[d].zpsys)
            band_flux_errors=0
        band_times=timeOfPeak
        band_fluxes=peakFlux
        times[d]=band_times
        fluxes[d]=band_fluxes
        time_errors[d]=band_time_errors
        flux_errors[d]=band_flux_errors
    ims=np.sort(list(curves.images.keys()))
    delays=dict([])
    mags=dict([])
    delay_errs=dict([])
    mag_errs=dict([])
    ref1=None
    ref2=None
    ref1_err=None
    ref2_err=None
    for im in ims:
        # Robustness fix: test against None rather than truthiness -- a
        # reference time of exactly 0 is a legitimate value.
        if ref1 is None:
            ref1=times[im]
            delays[im]=0
            ref2=fluxes[im]
            mags[im]=1
            ref1_err=time_errors[im]
            ref2_err=flux_errors[im]
            delay_errs[im]=0
            mag_errs[im]=0
        else:
            delays[im]=times[im]-ref1
            mags[im]=fluxes[im]/ref2
            # Errors combine in quadrature with the reference image's errors.
            delay_errs[im]=math.sqrt(time_errors[im]**2+ref1_err**2)
            mag_errs[im]=mags[im]*math.sqrt((flux_errors[im]/fluxes[im])**2+(ref2_err/ref2)**2)
    return (delays,delay_errs,mags,mag_errs,times,fluxes,time_errors,flux_errors)
def _plot_marginal_pdfs( res, nbins=101, **kwargs):
    """Plot the 1-D marginal posterior PDF of every varied parameter in *res*.

    One subplot per parameter, annotated with mean +- std at a display
    precision chosen from the size of the standard deviation.

    :param res: sncosmo-style result object exposing ``vparam_names``
    :param nbins: number of bins passed to ``_get_marginal_pdfs``
    :param kwargs: forwarded to ``ax.plot``
    """
    # Removed unused local imports ('pyplot as pl' was never used; the
    # function draws on the module-level 'plt', and 'np' is module-level too).
    nparam = len(res.vparam_names)
    nrow, ncol = 1, nparam
    pdfdict = _get_marginal_pdfs( res, nbins )
    fig = plt.gcf()
    for parname in res.vparam_names :
        iax = res.vparam_names.index( parname )+1
        ax = fig.add_subplot( nrow, ncol, iax )
        parval, pdf, mean, std = pdfdict[parname]
        ax.plot( parval, pdf, **kwargs )
        # Pick a display precision appropriate to the parameter's spread;
        # previously this was four near-identical ax.text calls.
        if np.abs(std) >= 0.1:
            label = '%s %.1f +- %.1f' % (parname, np.round(mean, 1), np.round(std, 1))
        elif np.abs(std) >= 0.01:
            label = '%s %.2f +- %.2f' % (parname, np.round(mean, 2), np.round(std, 2))
        elif np.abs(std) >= 0.001:
            label = '%s %.3f +- %.3f' % (parname, np.round(mean, 3), np.round(std, 3))
        else:
            label = '%s %.3e +- %.3e' % (parname, mean, std)
        ax.text( 0.95, 0.95, label, ha='right', va='top', transform=ax.transAxes )
    plt.draw()
def _fit_data_wrap(args):
    """Call :func:`_fit_data`, converting a ``RuntimeError`` into ``None``.

    Used as a parallel-map worker so one failing model does not abort the
    whole batch; the failure is reported on stdout instead.
    """
    try:
        result = _fit_data(args)
    except RuntimeError:
        print('There was an issue running model {0}, skipping...'.format(args[0]))
        return None
    return result
#todo decide about multiple versions of model
def _fit_data(args):
"""
Helper function that allows parallel processing to occur.
:param args: All the arguments given from fit_data
:return: modResults: tuple (Name of current model (including version if exists),name of the sncosmo model,version
of sncosmo model,list of tuples containing index and dcurve.fit[modname] for each dcurve in curves)
"""
warnings.simplefilter("ignore")
mod=args[0]
args=args[1]
if isinstance(mod, tuple):
version = mod[1]
mod = mod[0]
else:
version = None
dust_dict={'CCM89Dust':sncosmo.CCM89Dust,'OD94Dust':sncosmo.OD94Dust,'F99Dust':sncosmo.F99Dust}
if args['dust']:
dust=dust_dict[args['dust']]()
else:
dust=[]
effect_names=args['effect_names']
effect_frames=args['effect_frames']
effects=[dust for i in range(len(effect_names))] if effect_names else []
effect_names=effect_names if effect_names else []
effect_frames=effect_frames | |
# -*- coding: utf-8 -*-
# Copyright 2017, Digital Reasoning
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import unicode_literals
import logging
from collections import OrderedDict
from django.core.cache import cache
from guardian.shortcuts import assign_perm
from rest_framework import generics
from rest_framework.filters import DjangoFilterBackend, DjangoObjectPermissionsFilter
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.serializers import ValidationError
from rest_framework.views import APIView
from stackdio.api.blueprints.models import Blueprint
from stackdio.api.cloud import filters, mixins, models, serializers
from stackdio.api.cloud.providers.base import DeleteGroupException
from stackdio.api.formulas.serializers import FormulaVersionSerializer
from stackdio.core.permissions import StackdioModelPermissions, StackdioObjectPermissions
from stackdio.core.utils import FakeQuerySet
from stackdio.core.viewsets import (
StackdioModelUserPermissionsViewSet,
StackdioModelGroupPermissionsViewSet,
StackdioObjectUserPermissionsViewSet,
StackdioObjectGroupPermissionsViewSet,
)
logger = logging.getLogger(__name__)
class CloudRootView(APIView):
    """
    Root of the cloud API. Below are all of the cloud API endpoints that
    are currently accessible. Each API will have its own documentation
    and particular parameters that may be discovered by browsing directly
    to them.
    """
    permission_classes = (IsAuthenticated,)
    def get(self, request, format=None):
        """Return an ordered mapping of endpoint names to their URLs."""
        endpoints = (
            ('providers', 'api:cloud:cloudprovider-list'),
            ('accounts', 'api:cloud:cloudaccount-list'),
            ('images', 'api:cloud:cloudimage-list'),
            ('snapshots', 'api:cloud:snapshot-list'),
            ('security_groups', 'api:cloud:securitygroup-list'),
        )
        api = OrderedDict(
            (name, reverse(view_name, request=request, format=format))
            for name, view_name in endpoints
        )
        return Response(api)
class CloudProviderListAPIView(generics.ListAPIView):
    """List cloud providers visible to the requesting user (read-only)."""
    queryset = models.CloudProvider.objects.all()
    serializer_class = serializers.CloudProviderSerializer
    permission_classes = (StackdioModelPermissions,)
    filter_backends = (DjangoObjectPermissionsFilter, DjangoFilterBackend)
    lookup_field = 'name'
class CloudProviderDetailAPIView(generics.RetrieveAPIView):
    """Retrieve a single cloud provider by its ``name`` (read-only)."""
    queryset = models.CloudProvider.objects.all()
    serializer_class = serializers.CloudProviderSerializer
    permission_classes = (StackdioObjectPermissions,)
    lookup_field = 'name'
class CloudProviderRequiredFieldsAPIView(generics.RetrieveAPIView):
    """
    This endpoint lists all the extra fields required when creating an account for this provider.
    """
    queryset = models.CloudProvider.objects.all()
    permission_classes = (StackdioObjectPermissions,)
    lookup_field = 'name'
    # Just list the required fields instead of using a serializer
    def retrieve(self, request, *args, **kwargs):
        # The provider's driver knows which account fields it requires.
        provider = self.get_object()
        driver = provider.get_driver()
        return Response(driver.get_required_fields())
class CloudProviderObjectUserPermissionsViewSet(mixins.CloudProviderPermissionsMixin,
                                                StackdioObjectUserPermissionsViewSet):
    """Manage per-user object permissions on a single cloud provider."""
    pass
class CloudProviderObjectGroupPermissionsViewSet(mixins.CloudProviderPermissionsMixin,
                                                 StackdioObjectGroupPermissionsViewSet):
    """Manage per-group object permissions on a single cloud provider."""
    pass
class CloudAccountListAPIView(generics.ListCreateAPIView):
    """List cloud accounts visible to the requesting user, or create one."""
    queryset = models.CloudAccount.objects.all()
    serializer_class = serializers.CloudAccountSerializer
    permission_classes = (StackdioModelPermissions,)
    filter_backends = (DjangoObjectPermissionsFilter, DjangoFilterBackend)
    filter_class = filters.CloudAccountFilter
    def perform_create(self, serializer):
        account = serializer.save()
        # Grant the creator every object-level permission on the new account.
        for perm in models.CloudAccount.object_permissions:
            assign_perm('cloud.%s_cloudaccount' % perm, self.request.user, account)
class CloudAccountDetailAPIView(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update, or delete a single cloud account.

    Deletion is refused while any image still references the account.
    """
    queryset = models.CloudAccount.objects.all()
    serializer_class = serializers.CloudAccountSerializer
    permission_classes = (StackdioObjectPermissions,)
    def perform_update(self, serializer):
        account = serializer.save()
        # Regenerate the account's on-disk/provider configuration after edits.
        account.update_config()
    def perform_destroy(self, instance):
        # check for images using this account before deleting
        images = [p.slug for p in instance.images.all()]
        if images:
            raise ValidationError({
                'detail': 'One or more images are making use of this account.',
                'images': images,
            })
        # ask the driver to clean up after itself since it's no longer needed
        driver = instance.get_driver()
        driver.destroy()
        instance.delete()
class CloudAccountModelUserPermissionsViewSet(StackdioModelUserPermissionsViewSet):
    """Manage model-level user permissions for cloud accounts."""
    model_cls = models.CloudAccount
class CloudAccountModelGroupPermissionsViewSet(StackdioModelGroupPermissionsViewSet):
    """Manage model-level group permissions for cloud accounts."""
    model_cls = models.CloudAccount
class CloudAccountObjectUserPermissionsViewSet(mixins.CloudAccountPermissionsMixin,
                                               StackdioObjectUserPermissionsViewSet):
    """Manage per-user object permissions on a single cloud account."""
    pass
class CloudAccountObjectGroupPermissionsViewSet(mixins.CloudAccountPermissionsMixin,
                                                StackdioObjectGroupPermissionsViewSet):
    """Manage per-group object permissions on a single cloud account."""
    pass
class GlobalOrchestrationComponentListAPIView(mixins.CloudAccountRelatedMixin,
                                              generics.ListCreateAPIView):
    """List or create global orchestration formula components for an account."""
    serializer_class = serializers.GlobalOrchestrationComponentSerializer
    def get_queryset(self):
        cloud_account = self.get_cloudaccount()
        return cloud_account.formula_components.all()
    def get_serializer_context(self):
        # The serializer needs the owning account to build/validate components.
        context = super(GlobalOrchestrationComponentListAPIView, self).get_serializer_context()
        context['content_object'] = self.get_cloudaccount()
        return context
    def perform_create(self, serializer):
        serializer.save(content_object=self.get_cloudaccount())
class GlobalOrchestrationComponentDetailAPIView(mixins.CloudAccountRelatedMixin,
                                                generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update, or delete one global orchestration component."""
    serializer_class = serializers.GlobalOrchestrationComponentSerializer
    def get_queryset(self):
        cloud_account = self.get_cloudaccount()
        return cloud_account.formula_components.all()
    def get_serializer_context(self):
        # The serializer needs the owning account to build/validate components.
        context = super(GlobalOrchestrationComponentDetailAPIView, self).get_serializer_context()
        context['content_object'] = self.get_cloudaccount()
        return context
class GlobalOrchestrationPropertiesAPIView(generics.RetrieveUpdateAPIView):
    """Retrieve or update an account's global orchestration properties."""
    queryset = models.CloudAccount.objects.all()
    serializer_class = serializers.GlobalOrchestrationPropertiesSerializer
    permission_classes = (StackdioObjectPermissions,)
class CloudAccountVPCSubnetListAPIView(mixins.CloudAccountRelatedMixin, generics.ListAPIView):
    """List the VPC subnets the account's provider driver reports."""
    serializer_class = serializers.VPCSubnetSerializer
    def get_queryset(self):
        account = self.get_cloudaccount()
        driver = account.get_driver()
        # Grab the subnets from the driver
        subnets = driver.get_vpc_subnets()
        # Sort them by name
        return sorted(subnets, key=lambda s: s.tags.get('Name'))
class CloudAccountFormulaVersionsAPIView(mixins.CloudAccountRelatedMixin,
                                         generics.ListCreateAPIView):
    """List or pin formula versions associated with a cloud account."""
    serializer_class = FormulaVersionSerializer
    def get_queryset(self):
        account = self.get_cloudaccount()
        return account.formula_versions.all()
    def perform_create(self, serializer):
        serializer.save(content_object=self.get_cloudaccount())
class CloudAccountImageListAPIView(mixins.CloudAccountRelatedMixin, generics.ListAPIView):
    """List the cloud images belonging to one account (read-only)."""
    serializer_class = serializers.CloudImageSerializer
    filter_backends = (DjangoObjectPermissionsFilter, DjangoFilterBackend)
    filter_class = filters.CloudImageFilter
    def get_queryset(self):
        cloud_account = self.get_cloudaccount()
        return cloud_account.images.all()
class CloudImageListAPIView(generics.ListCreateAPIView):
    """List cloud images visible to the requesting user, or create one."""
    queryset = models.CloudImage.objects.all()
    serializer_class = serializers.CloudImageSerializer
    permission_classes = (StackdioModelPermissions,)
    filter_backends = (DjangoObjectPermissionsFilter, DjangoFilterBackend)
    filter_class = filters.CloudImageFilter
    def perform_create(self, serializer):
        image = serializer.save()
        # Generate the image's configuration, then grant the creator every
        # object-level permission on the new image.
        image.update_config()
        for perm in models.CloudImage.object_permissions:
            assign_perm('cloud.%s_cloudimage' % perm, self.request.user, image)
class CloudImageDetailAPIView(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update, or delete a single cloud image.

    Deletion is refused while any blueprint host definition uses the image.
    """
    queryset = models.CloudImage.objects.all()
    serializer_class = serializers.CloudImageSerializer
    permission_classes = (StackdioObjectPermissions,)
    def perform_update(self, serializer):
        image = serializer.save()
        # Regenerate the image's configuration after edits.
        image.update_config()
    def perform_destroy(self, instance):
        # check for blueprint usage before deleting
        blueprints = Blueprint.objects.filter(host_definitions__cloud_image=instance).distinct()
        if blueprints:
            raise ValidationError({
                'detail': ['One or more blueprints are making use of this image.'],
                'blueprints': [b.title for b in blueprints],
            })
        instance.delete()
class CloudImageModelUserPermissionsViewSet(StackdioModelUserPermissionsViewSet):
    """Manage model-level user permissions for cloud images."""
    model_cls = models.CloudImage
class CloudImageModelGroupPermissionsViewSet(StackdioModelGroupPermissionsViewSet):
    """Manage model-level group permissions for cloud images."""
    model_cls = models.CloudImage
class CloudImageObjectUserPermissionsViewSet(mixins.CloudImagePermissionsMixin,
                                             StackdioObjectUserPermissionsViewSet):
    """Manage per-user object permissions on a single cloud image."""
    pass
class CloudImageObjectGroupPermissionsViewSet(mixins.CloudImagePermissionsMixin,
                                              StackdioObjectGroupPermissionsViewSet):
    """Manage per-group object permissions on a single cloud image."""
    pass
class SnapshotListAPIView(generics.ListCreateAPIView):
    """List snapshots visible to the requesting user, or create one."""
    queryset = models.Snapshot.objects.all()
    serializer_class = serializers.SnapshotSerializer
    permission_classes = (StackdioModelPermissions,)
    filter_backends = (DjangoObjectPermissionsFilter, DjangoFilterBackend)
    filter_class = filters.SnapshotFilter
    def perform_create(self, serializer):
        snapshot = serializer.save()
        # Grant the creator every object-level permission on the new snapshot.
        for perm in models.Snapshot.object_permissions:
            assign_perm('cloud.%s_snapshot' % perm, self.request.user, snapshot)
class SnapshotDetailAPIView(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update, or delete a single snapshot."""
    queryset = models.Snapshot.objects.all()
    serializer_class = serializers.SnapshotSerializer
    permission_classes = (StackdioObjectPermissions,)
class SnapshotModelUserPermissionsViewSet(StackdioModelUserPermissionsViewSet):
    """Manage model-level user permissions for snapshots."""
    model_cls = models.Snapshot
class SnapshotModelGroupPermissionsViewSet(StackdioModelGroupPermissionsViewSet):
    """Manage model-level group permissions for snapshots."""
    model_cls = models.Snapshot
class SnapshotObjectUserPermissionsViewSet(mixins.SnapshotPermissionsMixin,
                                           StackdioObjectUserPermissionsViewSet):
    """Manage per-user object permissions on a single snapshot."""
    pass
class SnapshotObjectGroupPermissionsViewSet(mixins.SnapshotPermissionsMixin,
                                            StackdioObjectGroupPermissionsViewSet):
    """Manage per-group object permissions on a single snapshot."""
    pass
class CloudInstanceSizeListAPIView(mixins.CloudProviderRelatedMixin, generics.ListAPIView):
    """List the instance sizes available from one cloud provider."""
    serializer_class = serializers.CloudInstanceSizeSerializer
    filter_class = filters.CloudInstanceSizeFilter
    lookup_field = 'instance_id'
    def get_queryset(self):
        cloud_provider = self.get_cloudprovider()
        return cloud_provider.instance_sizes.all()
class CloudInstanceSizeDetailAPIView(mixins.CloudProviderRelatedMixin, generics.RetrieveAPIView):
    """Retrieve one provider instance size by its ``instance_id``."""
    serializer_class = serializers.CloudInstanceSizeSerializer
    lookup_field = 'instance_id'
    def get_queryset(self):
        cloud_provider = self.get_cloudprovider()
        return cloud_provider.instance_sizes.all()
class CloudRegionListAPIView(mixins.CloudProviderRelatedMixin, generics.ListAPIView):
    """List the regions available from one cloud provider."""
    serializer_class = serializers.CloudRegionSerializer
    filter_class = filters.CloudRegionFilter
    lookup_field = 'title'
    def get_queryset(self):
        cloud_provider = self.get_cloudprovider()
        return cloud_provider.regions.all()
class CloudRegionDetailAPIView(mixins.CloudProviderRelatedMixin, generics.RetrieveAPIView):
    """Retrieve one provider region by its ``title``."""
    serializer_class = serializers.CloudRegionSerializer
    lookup_field = 'title'
    def get_queryset(self):
        cloud_provider = self.get_cloudprovider()
        return cloud_provider.regions.all()
class CloudRegionZoneListAPIView(mixins.CloudProviderRelatedMixin, generics.ListAPIView):
    """List the availability zones within one region of a provider."""
    serializer_class = serializers.CloudZoneSerializer
    filter_class = filters.CloudZoneFilter
    def get_queryset(self):
        # The region is identified by the 'title' captured in the URL.
        cloud_provider = self.get_cloudprovider()
        region = cloud_provider.regions.get(title=self.kwargs.get('title'))
        return region.zones.all()
class CloudZoneListAPIView(mixins.CloudProviderRelatedMixin, generics.ListAPIView):
    """List all availability zones across all regions of a provider."""
    serializer_class = serializers.CloudZoneSerializer
    filter_class = filters.CloudZoneFilter
    lookup_field = 'title'
    def get_queryset(self):
        return models.CloudZone.objects.filter(region__provider=self.get_cloudprovider())
class CloudZoneDetailAPIView(mixins.CloudProviderRelatedMixin, generics.RetrieveAPIView):
    """Retrieve one availability zone of a provider by its ``title``."""
    serializer_class = serializers.CloudZoneSerializer
    lookup_field = 'title'
    def get_queryset(self):
        return models.CloudZone.objects.filter(region__provider=self.get_cloudprovider())
class SecurityGroupListAPIView(generics.ListCreateAPIView):
    """
    Lists and creates new security groups.
    ### GET
    Retrieves all security groups owned by the authenticated user.
    The associated rules for each group will also be given in the
    `rules` attribute. The `active_hosts` field will also be
    updated to show the number of hosts known by stackd.io to be
    using the security group at this time, but please **note**
    that other machines in the cloud account could be using
    the same security group and stackd.io may not be aware.
    ### POST
    Creates a new security group given the following properties
    in the JSON request.
    `group_id` -- the security group ID as defined by the cloud provider.
             You may only provide either the group_id or the name, but
             not both. Using this property will **NOT** create a new group in the provider
    `name` -- The name of the security group. This will also be
              used to create the security group on the account.
              You may only provide either the group_id or the name, but
             not both. Using this property **WILL** create a new group in the provider
    `description` -- The description or purpose of the group.
    `account` -- The id of the cloud account to associate
                      this group with.
    `default` -- Boolean representing if this group, for this
                    account, is set to automatically be added
                    to all hosts launched on the account. **NOTE**
                    this property may only be set by an admin.
    """
    queryset = models.SecurityGroup.objects.all()
    serializer_class = serializers.SecurityGroupSerializer
    permission_classes = (StackdioModelPermissions,)
    filter_backends = (DjangoObjectPermissionsFilter, DjangoFilterBackend)
    filter_class = filters.SecurityGroupFilter
class SecurityGroupDetailAPIView(generics.RetrieveUpdateDestroyAPIView):
"""
Shows the detail for a security group and allows for the default
flag to be modified (for admins only.)
### GET
Retrieves the detail for the security group as defined by its
`pk` identifier in the URL. The associated `rules` and
`active_hosts` fields will be populated like with the full
list.
### PUT / PATCH
Updates an existing security group's details. Currently, only
the `default` field may be modified.
### DELETE
Removes the corresponding security group from stackd.io, as well as
from the underlying cloud account if `managed` is true.
**NOTE** that if the security group is currently being used, then
it can not be removed. You must first terminate all machines depending
on the security group and then delete it.
"""
queryset = models.SecurityGroup.objects.all()
serializer_class = serializers.SecurityGroupSerializer
permission_classes = (StackdioObjectPermissions,)
def perform_destroy(self, instance):
account = instance.account
if instance.is_managed:
# Delete from AWS. This will throw the appropriate error
# if the group is being used.
driver = account.get_driver()
try:
driver.delete_security_group(instance.name)
except DeleteGroupException as e:
if 'does not exist' in e.message:
logger.info('Security group already deleted.')
else:
raise ValidationError({
'detail': ['Could not delete this security group.', e.message]
})
# Save this before we delete
is_default = instance.is_default
# Delete the instance
instance.delete()
# update |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.