hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b7e289bda4ec6ee05c1db0eb5bd3b68dfe7f6698
| 2,317
|
py
|
Python
|
tests/inferbeddings/test_kbp_cli.py
|
issca/inferbeddings
|
80492a7aebcdcac21e758514c8af403d77e8594a
|
[
"MIT"
] | 33
|
2017-07-25T14:31:00.000Z
|
2019-03-06T09:18:00.000Z
|
tests/inferbeddings/test_kbp_cli.py
|
issca/inferbeddings
|
80492a7aebcdcac21e758514c8af403d77e8594a
|
[
"MIT"
] | 1
|
2017-08-22T13:49:30.000Z
|
2017-08-22T13:49:30.000Z
|
tests/inferbeddings/test_kbp_cli.py
|
issca/inferbeddings
|
80492a7aebcdcac21e758514c8af403d77e8594a
|
[
"MIT"
] | 9
|
2017-10-05T08:50:45.000Z
|
2019-04-18T12:40:56.000Z
|
# -*- coding: utf-8 -*-
import pytest
import subprocess
import sys
sys.setrecursionlimit(65535)
@pytest.mark.light
def test_nations_cli():
# Checking if results are still nice
cmd = ['./bin/kbp-cli.py',
'--train', 'data/nations/stratified_folds/0/nations_train.tsv.gz',
'--valid', 'data/nations/stratified_folds/0/nations_valid.tsv.gz',
'--test', 'data/nations/stratified_folds/0/nations_test.tsv.gz',
'--lr', '0.1',
'--model', 'ComplEx',
'--similarity', 'dot',
'--margin', '1',
'--embedding-size', '50',
'--nb-epochs', '10'] # 1000
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
# Hits@10 should be at least 90% even after a limited number of epochs
assert float(err.split()[-1][:-1]) > 90.0
cmd = ['./bin/kbp-cli.py',
'--train', 'data/nations/stratified_folds/0/nations_train.tsv.gz',
'--valid', 'data/nations/stratified_folds/0/nations_valid.tsv.gz',
'--test', 'data/nations/stratified_folds/0/nations_test.tsv.gz',
'--lr', '0.1',
'--model', 'TransE',
'--similarity', 'l1',
'--margin', '1',
'--embedding-size', '20',
'--nb-epochs', '50'] # 1000
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
# Hits@10 should be at least 90% even after a limited number of epochs
assert float(err.split()[-1][:-1]) > 90.0
cmd = ['./bin/kbp-cli.py',
'--train', 'data/nations/stratified_folds/0/nations_train.tsv.gz',
'--valid', 'data/nations/stratified_folds/0/nations_valid.tsv.gz',
'--test', 'data/nations/stratified_folds/0/nations_test.tsv.gz',
'--lr', '0.1',
'--model', 'DistMult',
'--similarity', 'dot',
'--margin', '1',
'--embedding-size', '50',
'--nb-epochs', '50'] # 1000
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
# Hits@10 should be at least 85% even after a limited number of epochs
assert float(err.split()[-1][:-1]) > 85.0
if __name__ == '__main__':
pytest.main([__file__])
| 36.777778
| 77
| 0.57186
| 292
| 2,317
| 4.428082
| 0.267123
| 0.076566
| 0.146172
| 0.180974
| 0.830626
| 0.830626
| 0.830626
| 0.830626
| 0.830626
| 0.770302
| 0
| 0.043133
| 0.239534
| 2,317
| 62
| 78
| 37.370968
| 0.690692
| 0.119983
| 0
| 0.680851
| 0
| 0
| 0.39803
| 0.229064
| 0
| 0
| 0
| 0
| 0.06383
| 1
| 0.021277
| false
| 0
| 0.06383
| 0
| 0.085106
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b7f9f58a5f24f28d62fbf2c57524bd6c0bd7ada4
| 247
|
py
|
Python
|
ncbi_genome_download/__init__.py
|
metagenomics/ncbi-genome-download
|
427ad8be725471b015b1ea824b17c53d756cdcbe
|
[
"Apache-2.0"
] | 1
|
2016-06-16T12:01:43.000Z
|
2016-06-16T12:01:43.000Z
|
ncbi_genome_download/__init__.py
|
metagenomics/ncbi-genome-download
|
427ad8be725471b015b1ea824b17c53d756cdcbe
|
[
"Apache-2.0"
] | null | null | null |
ncbi_genome_download/__init__.py
|
metagenomics/ncbi-genome-download
|
427ad8be725471b015b1ea824b17c53d756cdcbe
|
[
"Apache-2.0"
] | 1
|
2020-03-31T01:52:15.000Z
|
2020-03-31T01:52:15.000Z
|
__version__ = '0.1.7'
from ncbi_genome_download.core import (
download,
NCBI_URI,
supported_domains,
format_name_map,
assembly_level_map,
)
__all__ = [download, NCBI_URI, supported_domains, format_name_map, assembly_level_map]
| 24.7
| 86
| 0.757085
| 33
| 247
| 5
| 0.575758
| 0.145455
| 0.181818
| 0.290909
| 0.727273
| 0.727273
| 0.727273
| 0.727273
| 0.727273
| 0.727273
| 0
| 0.014493
| 0.161943
| 247
| 9
| 87
| 27.444444
| 0.782609
| 0
| 0
| 0
| 0
| 0
| 0.020243
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
b7fef90ff25dfdefd6e565f46d2e2ac3bbf6d0f0
| 113,835
|
py
|
Python
|
sxs/PYPostNewtonian/Code/PNEvolution.py
|
dongzesun/sxs
|
74ac9576032ddc232ff48510ba20f0a9e7116861
|
[
"MIT"
] | null | null | null |
sxs/PYPostNewtonian/Code/PNEvolution.py
|
dongzesun/sxs
|
74ac9576032ddc232ff48510ba20f0a9e7116861
|
[
"MIT"
] | null | null | null |
sxs/PYPostNewtonian/Code/PNEvolution.py
|
dongzesun/sxs
|
74ac9576032ddc232ff48510ba20f0a9e7116861
|
[
"MIT"
] | null | null | null |
# File produced automatically by PNCodeGen.ipynb
from scipy.integrate import solve_ivp
import numpy as np
from numpy import dot, cross, log, sqrt, pi
from numpy import euler_gamma as EulerGamma
from numba import jit, njit, float64, boolean
from numba.experimental import jitclass
from scipy.interpolate import InterpolatedUnivariateSpline as Spline
from scipy.special import zeta
import quaternionic
qmul = njit(quaternionic.algebra.multiply)
qexp=njit(quaternionic.algebra.exp)
qconj=njit(quaternionic.algebra.conj)
qinverse=njit(quaternionic.algebra.reciprocal)
@njit(cache=True)
def mul(A,B):
C=np.empty(4)
qmul(A,B,C)
return C
@njit(cache=True)
def exp(A):
B=np.empty(4)
qexp(A,B)
return B
@njit(cache=True)
def conjugate(A):
B=np.empty(4)
qconj(A,B)
return B
@njit(cache=True)
def inverse(A):
B=np.empty(4)
qinverse(A,B)
return B
@njit(cache=True)
def FrameFromAngularVelocity_2D_Integrand(rfrak_x, rfrak_y, Omega):
rfrakMag = np.sqrt(rfrak_x*rfrak_x+rfrak_y*rfrak_y)
rfrakDot_x = Omega[0]/2.0
rfrakDot_y = Omega[1]/2.0
if np.abs(np.sin(rfrakMag)) > 1e-12 and np.abs(np.cos(rfrakMag)) > 1e-12:
omega_v = (Omega[0]*(-rfrak_y/rfrakMag)+Omega[1]*(rfrak_x/rfrakMag))*np.tan(rfrakMag)-Omega[2]
Omega[0] += -omega_v*np.sin(2*rfrakMag)*(-rfrak_y/rfrakMag)
Omega[1] += -omega_v*np.sin(2*rfrakMag)*(rfrak_x/rfrakMag)
Omega[2] += omega_v*np.cos(2*rfrakMag)
dotTerm = (rfrak_x*Omega[0]+rfrak_y*Omega[1])/(rfrakMag*rfrakMag)
cotTerm = rfrakMag/(2*np.tan(rfrakMag))
rfrakDot_x = (Omega[0] - rfrak_x*dotTerm)*cotTerm + rfrak_x*dotTerm/2. - 0.5*Omega[2]*rfrak_y
rfrakDot_y = (Omega[1] - rfrak_y*dotTerm)*cotTerm + rfrak_y*dotTerm/2. + 0.5*Omega[2]*rfrak_x
return rfrakDot_x, rfrakDot_y
@njit(cache=True)
def FrameFromAngularVelocityIntegrand(rfrak, Omega):
rfrakMag = np.sqrt(rfrak[0] * rfrak[0] + rfrak[1] * rfrak[1] + rfrak[2] * rfrak[2])
OmegaMag = np.sqrt(Omega[0] * Omega[0] + Omega[1] * Omega[1] + Omega[2] * Omega[2])
# If the matrix is really close to the identity, return
if rfrakMag < 1e-12*OmegaMag:
return np.array([Omega[0] / 2.0, Omega[1] / 2.0, Omega[2] / 2.0])
# If the matrix is really close to singular, it's equivalent to the identity, so return
if np.abs(np.sin(rfrakMag)) < 1e-12:
return np.array([Omega[0] / 2.0, Omega[1] / 2.0, Omega[2] / 2.0])
OmegaOver2 = np.array([Omega[0] / 2.0, Omega[1] / 2.0, Omega[2] / 2.0])
rfrakHat = np.array([rfrak[0] / rfrakMag, rfrak[1] / rfrakMag, rfrak[2] / rfrakMag])
return ((OmegaOver2 - rfrakHat * np.dot(rfrakHat, OmegaOver2)) * (rfrakMag / np.tan(rfrakMag))
+ rfrakHat * np.dot(rfrakHat, OmegaOver2) + np.cross(OmegaOver2, rfrak))
ConsSpec=[('xHat', float64[:]),('yHat', float64[:]),('zHat', float64[:]),('M1', float64[:]),('M2', float64[:]),('S_chi1', float64[:]),('S_chi2', float64[:]),('M', float64[:]),('delta', float64[:]),('nu', float64[:]),('chi1chi1', float64[:]),('chi1chi2', float64[:]),('chi2chi2', float64[:]),('Fcal_0', float64[:]),('Fcal_2', float64[:]),('Fcal_3', float64[:]),('Fcal_4', float64[:]),('Fcal_5', float64[:]),('Fcal_6', float64[:]),('Fcal_lnv_6', float64[:]),('Fcal_7', float64[:]),('Fcal_8', float64[:]),('Fcal_lnv_8', float64[:]),('E_0', float64[:]),('E_2', float64[:]),('E_4', float64[:]),('E_6', float64[:]),('E_8', float64[:]),('E_lnv_8', float64[:]),('EvolveSpin1',boolean),('EvolveSpin2',boolean)]
@jitclass(ConsSpec)
class Cons:
def __init__(self,xHat,yHat,zHat,M1,M2,S_chi1,S_chi2,M,delta,nu,chi1chi1,chi1chi2,chi2chi2,Fcal_0,Fcal_2,Fcal_3,Fcal_4,Fcal_5,Fcal_6,Fcal_lnv_6,Fcal_7,Fcal_8,Fcal_lnv_8,E_0,E_2,E_4,E_6,E_8,E_lnv_8,EvolveSpin1,EvolveSpin2):
self.xHat=xHat
self.yHat=yHat
self.zHat=zHat
self.M1=M1
self.M2=M2
self.S_chi1=S_chi1
self.S_chi2=S_chi2
self.M=M
self.delta=delta
self.nu=nu
self.chi1chi1=chi1chi1
self.chi1chi2=chi1chi2
self.chi2chi2=chi2chi2
self.Fcal_0=Fcal_0
self.Fcal_2=Fcal_2
self.Fcal_3=Fcal_3
self.Fcal_4=Fcal_4
self.Fcal_5=Fcal_5
self.Fcal_6=Fcal_6
self.Fcal_lnv_6=Fcal_lnv_6
self.Fcal_7=Fcal_7
self.Fcal_8=Fcal_8
self.Fcal_lnv_8=Fcal_lnv_8
self.E_0=E_0
self.E_2=E_2
self.E_4=E_4
self.E_6=E_6
self.E_8=E_8
self.E_lnv_8=E_lnv_8
self.EvolveSpin1=EvolveSpin1
self.EvolveSpin2=EvolveSpin2
VarsSpec=[('v', float64[:]),('rfrak_chi1', float64[:]),('rfrak_chi2', float64[:]),('rfrak_frame', float64[:]),('R', float64[:]),('nHat', float64[:]),('lambdaHat', float64[:]),('ellHat', float64[:]),('R_S1', float64[:]),('R_S2', float64[:]),('chiVec1', float64[:]),('chiVec2', float64[:]),('chi1_n', float64[:]),('chi1_lambda', float64[:]),('chi1_ell', float64[:]),('chi2_n', float64[:]),('chi2_lambda', float64[:]),('chi2_ell', float64[:]),('S_ell', float64[:]),('S_n', float64[:]),('S_lambda', float64[:]),('Sigma_ell', float64[:]),('Sigma_n', float64[:]),('Sigma_lambda', float64[:]),('chi_s_ell', float64[:]),('chi_a_ell', float64[:]),('logv', float64[:]),('Fcal_coeff', float64[:]),('Fcal_SQ_4', float64[:]),('Fcal_SO_3', float64[:]),('Fcal_SO_5', float64[:]),('Fcal_SO_6', float64[:]),('Fcal_SO_7', float64[:]),('Fcal_SO_8', float64[:]),('E_SQ_4', float64[:]),('E_SO_3', float64[:]),('E_SO_5', float64[:]),('E_SO_7', float64[:])]
@jitclass(VarsSpec)
class Vars:
def __init__(self,v,rfrak_chi1,rfrak_chi2,rfrak_frame,R,nHat,lambdaHat,ellHat,R_S1,R_S2,chiVec1,chiVec2,chi1_n,chi1_lambda,chi1_ell,chi2_n,chi2_lambda,chi2_ell,S_ell,S_n,S_lambda,Sigma_ell,Sigma_n,Sigma_lambda,chi_s_ell,chi_a_ell,logv,Fcal_coeff,Fcal_SQ_4,Fcal_SO_3,Fcal_SO_5,Fcal_SO_6,Fcal_SO_7,Fcal_SO_8,E_SQ_4,E_SO_3,E_SO_5,E_SO_7):
self.v=v
self.rfrak_chi1=rfrak_chi1
self.rfrak_chi2=rfrak_chi2
self.rfrak_frame=rfrak_frame
self.R=R
self.nHat=nHat
self.lambdaHat=lambdaHat
self.ellHat=ellHat
self.R_S1=R_S1
self.R_S2=R_S2
self.chiVec1=chiVec1
self.chiVec2=chiVec2
self.chi1_n=chi1_n
self.chi1_lambda=chi1_lambda
self.chi1_ell=chi1_ell
self.chi2_n=chi2_n
self.chi2_lambda=chi2_lambda
self.chi2_ell=chi2_ell
self.S_ell=S_ell
self.S_n=S_n
self.S_lambda=S_lambda
self.Sigma_ell=Sigma_ell
self.Sigma_n=Sigma_n
self.Sigma_lambda=Sigma_lambda
self.chi_s_ell=chi_s_ell
self.chi_a_ell=chi_a_ell
self.logv=logv
self.Fcal_coeff=Fcal_coeff
self.Fcal_SQ_4=Fcal_SQ_4
self.Fcal_SO_3=Fcal_SO_3
self.Fcal_SO_5=Fcal_SO_5
self.Fcal_SO_6=Fcal_SO_6
self.Fcal_SO_7=Fcal_SO_7
self.Fcal_SO_8=Fcal_SO_8
self.E_SQ_4=E_SQ_4
self.E_SO_3=E_SO_3
self.E_SO_5=E_SO_5
self.E_SO_7=E_SO_7
@njit(cache=True)
def Initialization(Cons, xHat_i, yHat_i, zHat_i, M1_i, M2_i, v_i, S_chi1_i, S_chi2_i, rfrak_frame_i):
Cons.xHat=xHat_i
Cons.yHat=yHat_i
Cons.zHat=zHat_i
Cons.M1=np.array([M1_i])
Cons.M2=np.array([M2_i])
Cons.S_chi1=S_chi1_i
Cons.S_chi2=S_chi2_i
rfrak_chi1=np.array([0.0,0.0])
rfrak_chi2=np.array([0.0,0.0])
Cons.M=Cons.M1 + Cons.M2
Cons.delta=(Cons.M1 - Cons.M2)/Cons.M
Cons.nu=Cons.M1*Cons.M2/Cons.M**2
R_S1=exp(rfrak_chi1[0]*Cons.xHat + rfrak_chi1[1]*Cons.yHat)
R_S2=exp(rfrak_chi2[0]*Cons.xHat + rfrak_chi2[1]*Cons.yHat)
chiVec1=mul(mul(mul(Cons.S_chi1,R_S1),Cons.zHat),mul(conjugate(R_S1),conjugate(Cons.S_chi1)))
chiVec2=mul(mul(mul(Cons.S_chi2,R_S2),Cons.zHat),mul(conjugate(R_S2),conjugate(Cons.S_chi2)))
Cons.chi1chi1=np.array([dot(chiVec1[1:],chiVec1[1:])])
Cons.chi1chi2=np.array([dot(chiVec1[1:],chiVec2[1:])])
Cons.chi2chi2=np.array([dot(chiVec2[1:],chiVec2[1:])])
Cons.Fcal_0=np.array([1.0])
Cons.Fcal_2=-35*Cons.nu/12 - 1247/336
Cons.Fcal_3=np.array([4*pi])
Cons.Fcal_4=65*Cons.nu**2/18 + 9271*Cons.nu/504 - 44711/9072
Cons.Fcal_5=pi*(-583*Cons.nu/24 - 8191/672)
Cons.Fcal_6=-775*Cons.nu**3/324 - 94403*Cons.nu**2/3024 + Cons.nu*(-134543/7776 + 41*pi**2/48) - 1712*log(4)/105 - 1712*EulerGamma/105 + 16*pi**2/3 + 6643739519/69854400
Cons.Fcal_lnv_6=np.array([-1712/105])
Cons.Fcal_7=pi*(193385*Cons.nu**2/3024 + 214745*Cons.nu/1728 - 16285/504)
Cons.Fcal_8=np.array([-1369*pi**2/126 - 323105549467/3178375200 - 47385*log(3)/1568 + 232597*EulerGamma/4410 + 39931*log(2)/294])
Cons.Fcal_lnv_8=np.array([232597/4410])
Cons.E_0=np.array([1.0])
Cons.E_2=-Cons.nu/12 - 3/4
Cons.E_4=-Cons.nu**2/24 + 19*Cons.nu/8 - 27/8
Cons.E_6=-35*Cons.nu**3/5184 - 155*Cons.nu**2/96 + Cons.nu*(34445/576 - 205*pi**2/96) - 675/64
Cons.E_8=77*Cons.nu**4/31104 + 301*Cons.nu**3/1728 + Cons.nu**2*(-498449/3456 + 3157*pi**2/576) + Cons.nu*(-123671/5760 + 896*EulerGamma/15 + 9037*pi**2/1536 + 1792*log(2)/15) - 3969/128
Cons.E_lnv_8=896*Cons.nu/15
Cons.EvolveSpin1=np.linalg.norm(mul(Cons.S_chi1,conjugate(Cons.S_chi1)))>1e-8
Cons.EvolveSpin2=np.linalg.norm(mul(Cons.S_chi2,conjugate(Cons.S_chi2)))>1e-8
@njit(cache=True)
def Recalculate_0(Cons,Vars,y):
Vars.v = np.array([y[0]])
Vars.rfrak_chi1 = np.array([y[1],y[2]])
Vars.rfrak_chi2 = np.array([y[3],y[4]])
Vars.rfrak_frame = np.array([y[5],y[6],y[7]])
Vars.R = exp(Vars.rfrak_frame[0]*Cons.xHat + Vars.rfrak_frame[1]*Cons.yHat + Vars.rfrak_frame[2]*Cons.zHat)
Vars.nHat = mul(mul(Vars.R,Cons.xHat),conjugate(Vars.R))
Vars.ellHat = mul(mul(Vars.R,Cons.zHat),conjugate(Vars.R))
Vars.R_S1 = exp(Vars.rfrak_chi1[0]*Cons.xHat + Vars.rfrak_chi1[1]*Cons.yHat)
Vars.R_S2 = exp(Vars.rfrak_chi2[0]*Cons.xHat + Vars.rfrak_chi2[1]*Cons.yHat)
Vars.chiVec1 = mul(mul(mul(Cons.S_chi1,Vars.R_S1),Cons.zHat),mul(conjugate(Vars.R_S1),conjugate(Cons.S_chi1)))
Vars.chiVec2 = mul(mul(mul(Cons.S_chi2,Vars.R_S2),Cons.zHat),mul(conjugate(Vars.R_S2),conjugate(Cons.S_chi2)))
Vars.chi1_n = np.array([dot(Vars.chiVec1[1:],Vars.nHat[1:])])
Vars.chi1_ell = np.array([dot(Vars.chiVec1[1:],Vars.ellHat[1:])])
Vars.chi2_n = np.array([dot(Vars.chiVec2[1:],Vars.nHat[1:])])
Vars.chi2_ell = np.array([dot(Vars.chiVec2[1:],Vars.ellHat[1:])])
Vars.S_ell = Cons.M1**2*Vars.chi1_ell + Cons.M2**2*Vars.chi2_ell
Vars.S_n = Cons.M1**2*Vars.chi1_n + Cons.M2**2*Vars.chi2_n
Vars.Sigma_ell = Cons.M*(-Cons.M1*Vars.chi1_ell + Cons.M2*Vars.chi2_ell)
Vars.Sigma_n = Cons.M*(-Cons.M1*Vars.chi1_n + Cons.M2*Vars.chi2_n)
Vars.chi_s_ell = Vars.chi1_ell/2 + Vars.chi2_ell/2
Vars.chi_a_ell = Vars.chi1_ell/2 - Vars.chi2_ell/2
Vars.Fcal_coeff = 32*Cons.nu**2*Vars.v**10/5
@njit
def OmegaVec_chiVec_1_0(Cons,Vars):
Omega1_coeff = Vars.v**5/Cons.M
return Omega1_coeff*Vars.ellHat*(-0.75*Cons.delta + 0.5*Cons.nu + 0.75)
@njit
def OmegaVec_chiVec_2_0(Cons,Vars):
Omega2_coeff = Vars.v**5/Cons.M
return Omega2_coeff*Vars.ellHat*(0.75*Cons.delta + 0.5*Cons.nu + 0.75)
@njit
def OmegaVec_0(Cons,Vars):
a_ell_0 = 7.0*Vars.S_n + 3.0*Vars.Sigma_n*Cons.delta
gamma_PN_0 = 1.00000000000000
return Vars.ellHat*Vars.v**3/Cons.M + a_ell_0*gamma_PN_0*Vars.nHat*Vars.v**6/Cons.M**3
@njit(cache=True)
def TaylorT1_0(Cons,Vars):
Flux = Cons.Fcal_0*Vars.Fcal_coeff
dEdV = -Cons.E_0*Cons.M*Cons.nu*Vars.v
Absorption = 0
dvdt_T1 = (-Absorption - Flux)/dEdV
dydt=np.zeros(8)
[dydt[5],dydt[6],dydt[7]] = FrameFromAngularVelocityIntegrand(Vars.rfrak_frame, OmegaVec_0(Cons,Vars)[1:])
dydt[0] = dvdt_T1[0]
if(Cons.EvolveSpin1):
dydt[1], dydt[2]=FrameFromAngularVelocity_2D_Integrand(Vars.rfrak_chi1[0], Vars.rfrak_chi1[1],(mul(mul(inverse(Cons.S_chi1),OmegaVec_chiVec_1_0(Cons,Vars)),Cons.S_chi1))[1:])
else:
dydt[1], dydt[2] = 0.0, 0.0
if(Cons.EvolveSpin2):
dydt[3], dydt[4]=FrameFromAngularVelocity_2D_Integrand(Vars.rfrak_chi2[0], Vars.rfrak_chi2[1],(mul(mul(inverse(Cons.S_chi2),OmegaVec_chiVec_2_0(Cons,Vars)),Cons.S_chi2))[1:])
else:
dydt[3], dydt[4] = 0.0, 0.0
return dydt
@njit(cache=True)
def TaylorT4_0(Cons,Vars):
dvdt_T4 = -2.0*Vars.Fcal_coeff*-Cons.Fcal_0/(Cons.nu*Vars.v*2*Cons.E_0*Cons.M)
dydt=np.zeros(8)
[dydt[5],dydt[6],dydt[7]] = FrameFromAngularVelocityIntegrand(Vars.rfrak_frame, OmegaVec_0(Cons,Vars)[1:])
dydt[0] = dvdt_T4[0]
if(Cons.EvolveSpin1):
dydt[1], dydt[2]=FrameFromAngularVelocity_2D_Integrand(Vars.rfrak_chi1[0], Vars.rfrak_chi1[1],(mul(mul(inverse(Cons.S_chi1),OmegaVec_chiVec_1_0(Cons,Vars)),Cons.S_chi1))[1:])
else:
dydt[1], dydt[2] = 0.0, 0.0
if(Cons.EvolveSpin2):
dydt[3], dydt[4]=FrameFromAngularVelocity_2D_Integrand(Vars.rfrak_chi2[0], Vars.rfrak_chi2[1],(mul(mul(inverse(Cons.S_chi2),OmegaVec_chiVec_2_0(Cons,Vars)),Cons.S_chi2))[1:])
else:
dydt[3], dydt[4] = 0.0, 0.0
return dydt
@njit(cache=True)
def TaylorT5_0(Cons,Vars):
dtdv = -0.5*Cons.nu*Vars.v*2*Cons.E_0*Cons.M/(Vars.Fcal_coeff*-Cons.Fcal_0)
dvdt_T5 = 1.0/dtdv
dydt=np.zeros(8)
[dydt[5],dydt[6],dydt[7]] = FrameFromAngularVelocityIntegrand(Vars.rfrak_frame, OmegaVec_0(Cons,Vars)[1:])
dydt[0] = dvdt_T5[0]
if(Cons.EvolveSpin1):
dydt[1], dydt[2]=FrameFromAngularVelocity_2D_Integrand(Vars.rfrak_chi1[0], Vars.rfrak_chi1[1],(mul(mul(inverse(Cons.S_chi1),OmegaVec_chiVec_1_0(Cons,Vars)),Cons.S_chi1))[1:])
else:
dydt[1], dydt[2] = 0.0, 0.0
if(Cons.EvolveSpin2):
dydt[3], dydt[4]=FrameFromAngularVelocity_2D_Integrand(Vars.rfrak_chi2[0], Vars.rfrak_chi2[1],(mul(mul(inverse(Cons.S_chi2),OmegaVec_chiVec_2_0(Cons,Vars)),Cons.S_chi2))[1:])
else:
dydt[3], dydt[4] = 0.0, 0.0
return dydt
@njit(cache=True)
def Recalculate_0p50(Cons,Vars,y):
Vars.v = np.array([y[0]])
Vars.rfrak_chi1 = np.array([y[1],y[2]])
Vars.rfrak_chi2 = np.array([y[3],y[4]])
Vars.rfrak_frame = np.array([y[5],y[6],y[7]])
Vars.R = exp(Vars.rfrak_frame[0]*Cons.xHat + Vars.rfrak_frame[1]*Cons.yHat + Vars.rfrak_frame[2]*Cons.zHat)
Vars.nHat = mul(mul(Vars.R,Cons.xHat),conjugate(Vars.R))
Vars.ellHat = mul(mul(Vars.R,Cons.zHat),conjugate(Vars.R))
Vars.R_S1 = exp(Vars.rfrak_chi1[0]*Cons.xHat + Vars.rfrak_chi1[1]*Cons.yHat)
Vars.R_S2 = exp(Vars.rfrak_chi2[0]*Cons.xHat + Vars.rfrak_chi2[1]*Cons.yHat)
Vars.chiVec1 = mul(mul(mul(Cons.S_chi1,Vars.R_S1),Cons.zHat),mul(conjugate(Vars.R_S1),conjugate(Cons.S_chi1)))
Vars.chiVec2 = mul(mul(mul(Cons.S_chi2,Vars.R_S2),Cons.zHat),mul(conjugate(Vars.R_S2),conjugate(Cons.S_chi2)))
Vars.chi1_n = np.array([dot(Vars.chiVec1[1:],Vars.nHat[1:])])
Vars.chi1_ell = np.array([dot(Vars.chiVec1[1:],Vars.ellHat[1:])])
Vars.chi2_n = np.array([dot(Vars.chiVec2[1:],Vars.nHat[1:])])
Vars.chi2_ell = np.array([dot(Vars.chiVec2[1:],Vars.ellHat[1:])])
Vars.S_ell = Cons.M1**2*Vars.chi1_ell + Cons.M2**2*Vars.chi2_ell
Vars.S_n = Cons.M1**2*Vars.chi1_n + Cons.M2**2*Vars.chi2_n
Vars.Sigma_ell = Cons.M*(-Cons.M1*Vars.chi1_ell + Cons.M2*Vars.chi2_ell)
Vars.Sigma_n = Cons.M*(-Cons.M1*Vars.chi1_n + Cons.M2*Vars.chi2_n)
Vars.chi_s_ell = Vars.chi1_ell/2 + Vars.chi2_ell/2
Vars.chi_a_ell = Vars.chi1_ell/2 - Vars.chi2_ell/2
Vars.Fcal_coeff = 32*Cons.nu**2*Vars.v**10/5
@njit
def OmegaVec_chiVec_1_0p50(Cons,Vars):
Omega1_coeff = Vars.v**5/Cons.M
return Omega1_coeff*(Vars.ellHat*(-0.75*Cons.delta + 0.5*Cons.nu + 0.75) + Vars.nHat*Vars.v*(3.0*Vars.chi1_n*Cons.nu + 3.0*Cons.M2**2*Vars.chi2_n/Cons.M**2) - Cons.M2**2*Vars.chiVec2*Vars.v/Cons.M**2)
@njit
def OmegaVec_chiVec_2_0p50(Cons,Vars):
Omega2_coeff = Vars.v**5/Cons.M
return Omega2_coeff*(Vars.ellHat*(0.75*Cons.delta + 0.5*Cons.nu + 0.75) + Vars.nHat*Vars.v*(3.0*Vars.chi2_n*Cons.nu + 3.0*Cons.M1**2*Vars.chi1_n/Cons.M**2) - Cons.M1**2*Vars.chiVec1*Vars.v/Cons.M**2)
@njit
def OmegaVec_0p50(Cons,Vars):
a_ell_0 = 7.0*Vars.S_n + 3.0*Vars.Sigma_n*Cons.delta
gamma_PN_0 = 1.00000000000000
return Vars.ellHat*Vars.v**3/Cons.M + a_ell_0*gamma_PN_0*Vars.nHat*Vars.v**6/Cons.M**3
@njit(cache=True)
def TaylorT1_0p50(Cons,Vars):
Flux = Cons.Fcal_0*Vars.Fcal_coeff
dEdV = -Cons.E_0*Cons.M*Cons.nu*Vars.v
Absorption = 0
dvdt_T1 = (-Absorption - Flux)/dEdV
dydt=np.zeros(8)
[dydt[5],dydt[6],dydt[7]] = FrameFromAngularVelocityIntegrand(Vars.rfrak_frame, OmegaVec_0p50(Cons,Vars)[1:])
dydt[0] = dvdt_T1[0]
if(Cons.EvolveSpin1):
dydt[1], dydt[2]=FrameFromAngularVelocity_2D_Integrand(Vars.rfrak_chi1[0], Vars.rfrak_chi1[1],(mul(mul(inverse(Cons.S_chi1),OmegaVec_chiVec_1_0p50(Cons,Vars)),Cons.S_chi1))[1:])
else:
dydt[1], dydt[2] = 0.0, 0.0
if(Cons.EvolveSpin2):
dydt[3], dydt[4]=FrameFromAngularVelocity_2D_Integrand(Vars.rfrak_chi2[0], Vars.rfrak_chi2[1],(mul(mul(inverse(Cons.S_chi2),OmegaVec_chiVec_2_0p50(Cons,Vars)),Cons.S_chi2))[1:])
else:
dydt[3], dydt[4] = 0.0, 0.0
return dydt
@njit(cache=True)
def TaylorT4_0p50(Cons,Vars):
dvdt_T4 = -2.0*Vars.Fcal_coeff*(-Cons.Fcal_0 + 0*Vars.v - 0*-Cons.Fcal_0*Vars.v/2*Cons.E_0*Cons.M)/(Cons.nu*Vars.v*2*Cons.E_0*Cons.M)
dydt=np.zeros(8)
[dydt[5],dydt[6],dydt[7]] = FrameFromAngularVelocityIntegrand(Vars.rfrak_frame, OmegaVec_0p50(Cons,Vars)[1:])
dydt[0] = dvdt_T4[0]
if(Cons.EvolveSpin1):
dydt[1], dydt[2]=FrameFromAngularVelocity_2D_Integrand(Vars.rfrak_chi1[0], Vars.rfrak_chi1[1],(mul(mul(inverse(Cons.S_chi1),OmegaVec_chiVec_1_0p50(Cons,Vars)),Cons.S_chi1))[1:])
else:
dydt[1], dydt[2] = 0.0, 0.0
if(Cons.EvolveSpin2):
dydt[3], dydt[4]=FrameFromAngularVelocity_2D_Integrand(Vars.rfrak_chi2[0], Vars.rfrak_chi2[1],(mul(mul(inverse(Cons.S_chi2),OmegaVec_chiVec_2_0p50(Cons,Vars)),Cons.S_chi2))[1:])
else:
dydt[3], dydt[4] = 0.0, 0.0
return dydt
@njit(cache=True)
def TaylorT5_0p50(Cons,Vars):
dtdv = -0.5*Cons.nu*Vars.v*(2*Cons.E_0*Cons.M + 0*Vars.v - 0*2*Cons.E_0*Cons.M*Vars.v/-Cons.Fcal_0)/(Vars.Fcal_coeff*-Cons.Fcal_0)
dvdt_T5 = 1.0/dtdv
dydt=np.zeros(8)
[dydt[5],dydt[6],dydt[7]] = FrameFromAngularVelocityIntegrand(Vars.rfrak_frame, OmegaVec_0p50(Cons,Vars)[1:])
dydt[0] = dvdt_T5[0]
if(Cons.EvolveSpin1):
dydt[1], dydt[2]=FrameFromAngularVelocity_2D_Integrand(Vars.rfrak_chi1[0], Vars.rfrak_chi1[1],(mul(mul(inverse(Cons.S_chi1),OmegaVec_chiVec_1_0p50(Cons,Vars)),Cons.S_chi1))[1:])
else:
dydt[1], dydt[2] = 0.0, 0.0
if(Cons.EvolveSpin2):
dydt[3], dydt[4]=FrameFromAngularVelocity_2D_Integrand(Vars.rfrak_chi2[0], Vars.rfrak_chi2[1],(mul(mul(inverse(Cons.S_chi2),OmegaVec_chiVec_2_0p50(Cons,Vars)),Cons.S_chi2))[1:])
else:
dydt[3], dydt[4] = 0.0, 0.0
return dydt
@njit(cache=True)
def Recalculate_1p0(Cons,Vars,y):
Vars.v = np.array([y[0]])
Vars.rfrak_chi1 = np.array([y[1],y[2]])
Vars.rfrak_chi2 = np.array([y[3],y[4]])
Vars.rfrak_frame = np.array([y[5],y[6],y[7]])
Vars.R = exp(Vars.rfrak_frame[0]*Cons.xHat + Vars.rfrak_frame[1]*Cons.yHat + Vars.rfrak_frame[2]*Cons.zHat)
Vars.nHat = mul(mul(Vars.R,Cons.xHat),conjugate(Vars.R))
Vars.ellHat = mul(mul(Vars.R,Cons.zHat),conjugate(Vars.R))
Vars.R_S1 = exp(Vars.rfrak_chi1[0]*Cons.xHat + Vars.rfrak_chi1[1]*Cons.yHat)
Vars.R_S2 = exp(Vars.rfrak_chi2[0]*Cons.xHat + Vars.rfrak_chi2[1]*Cons.yHat)
Vars.chiVec1 = mul(mul(mul(Cons.S_chi1,Vars.R_S1),Cons.zHat),mul(conjugate(Vars.R_S1),conjugate(Cons.S_chi1)))
Vars.chiVec2 = mul(mul(mul(Cons.S_chi2,Vars.R_S2),Cons.zHat),mul(conjugate(Vars.R_S2),conjugate(Cons.S_chi2)))
Vars.chi1_n = np.array([dot(Vars.chiVec1[1:],Vars.nHat[1:])])
Vars.chi1_ell = np.array([dot(Vars.chiVec1[1:],Vars.ellHat[1:])])
Vars.chi2_n = np.array([dot(Vars.chiVec2[1:],Vars.nHat[1:])])
Vars.chi2_ell = np.array([dot(Vars.chiVec2[1:],Vars.ellHat[1:])])
Vars.S_ell = Cons.M1**2*Vars.chi1_ell + Cons.M2**2*Vars.chi2_ell
Vars.S_n = Cons.M1**2*Vars.chi1_n + Cons.M2**2*Vars.chi2_n
Vars.Sigma_ell = Cons.M*(-Cons.M1*Vars.chi1_ell + Cons.M2*Vars.chi2_ell)
Vars.Sigma_n = Cons.M*(-Cons.M1*Vars.chi1_n + Cons.M2*Vars.chi2_n)
Vars.chi_s_ell = Vars.chi1_ell/2 + Vars.chi2_ell/2
Vars.chi_a_ell = Vars.chi1_ell/2 - Vars.chi2_ell/2
Vars.Fcal_coeff = 32*Cons.nu**2*Vars.v**10/5
@njit
def OmegaVec_chiVec_1_1p0(Cons,Vars):
Omega1_coeff = Vars.v**5/Cons.M
return Omega1_coeff*(Vars.ellHat*(-0.75*Cons.delta + 0.5*Cons.nu + Vars.v**2*(Cons.delta*(0.625*Cons.nu - 0.5625) + Cons.nu*(1.25 - 0.0416666666666667*Cons.nu) + 0.5625) + 0.75) + Vars.nHat*Vars.v*(3.0*Vars.chi1_n*Cons.nu + 3.0*Cons.M2**2*Vars.chi2_n/Cons.M**2) - Cons.M2**2*Vars.chiVec2*Vars.v/Cons.M**2)
@njit
def OmegaVec_chiVec_2_1p0(Cons,Vars):
Omega2_coeff = Vars.v**5/Cons.M
return Omega2_coeff*(Vars.ellHat*(0.75*Cons.delta + 0.5*Cons.nu + Vars.v**2*(Cons.delta*(0.5625 - 0.625*Cons.nu) + Cons.nu*(1.25 - 0.0416666666666667*Cons.nu) + 0.5625) + 0.75) + Vars.nHat*Vars.v*(3.0*Vars.chi2_n*Cons.nu + 3.0*Cons.M1**2*Vars.chi1_n/Cons.M**2) - Cons.M1**2*Vars.chiVec1*Vars.v/Cons.M**2)
@njit
def OmegaVec_1p0(Cons,Vars):
a_ell_0 = 7.0*Vars.S_n + 3.0*Vars.Sigma_n*Cons.delta
gamma_PN_2 = 1.0 - 0.333333333333333*Cons.nu
gamma_PN_0 = 1.00000000000000
a_ell_2 = Vars.S_n*(-9.66666666666667*Cons.nu - 10.0) + Vars.Sigma_n*Cons.delta*(-4.5*Cons.nu - 6.0)
return Vars.ellHat*Vars.v**3/Cons.M + Vars.nHat*Vars.v**6*(a_ell_0 + a_ell_2*Vars.v**2)*(gamma_PN_0 + gamma_PN_2*Vars.v**2)/Cons.M**3
@njit(cache=True)
def TaylorT1_1p0(Cons,Vars):
Flux = Vars.Fcal_coeff*(Cons.Fcal_0 + Cons.Fcal_2*Vars.v**2)
dEdV = -Cons.M*Cons.nu*Vars.v*(Cons.E_0 + 2.0*Cons.E_2*Vars.v**2)
Absorption = 0
dvdt_T1 = (-Absorption - Flux)/dEdV
dydt=np.zeros(8)
[dydt[5],dydt[6],dydt[7]] = FrameFromAngularVelocityIntegrand(Vars.rfrak_frame, OmegaVec_1p0(Cons,Vars)[1:])
dydt[0] = dvdt_T1[0]
if(Cons.EvolveSpin1):
dydt[1], dydt[2]=FrameFromAngularVelocity_2D_Integrand(Vars.rfrak_chi1[0], Vars.rfrak_chi1[1],(mul(mul(inverse(Cons.S_chi1),OmegaVec_chiVec_1_1p0(Cons,Vars)),Cons.S_chi1))[1:])
else:
dydt[1], dydt[2] = 0.0, 0.0
if(Cons.EvolveSpin2):
dydt[3], dydt[4]=FrameFromAngularVelocity_2D_Integrand(Vars.rfrak_chi2[0], Vars.rfrak_chi2[1],(mul(mul(inverse(Cons.S_chi2),OmegaVec_chiVec_2_1p0(Cons,Vars)),Cons.S_chi2))[1:])
else:
dydt[3], dydt[4] = 0.0, 0.0
return dydt
@njit(cache=True)
def TaylorT4_1p0(Cons,Vars):
dvdt_T4 = -2.0*Vars.Fcal_coeff*(-Cons.Fcal_0 + 0*Vars.v + -Cons.Fcal_2*Vars.v**2 + (0*(--Cons.Fcal_0*Vars.v - 0*Vars.v**2) - 4*Cons.E_2*Cons.M*-Cons.Fcal_0*Vars.v**2 + 0**2*-Cons.Fcal_0*Vars.v**2/2*Cons.E_0*Cons.M)/2*Cons.E_0*Cons.M)/(Cons.nu*Vars.v*2*Cons.E_0*Cons.M)
dydt=np.zeros(8)
[dydt[5],dydt[6],dydt[7]] = FrameFromAngularVelocityIntegrand(Vars.rfrak_frame, OmegaVec_1p0(Cons,Vars)[1:])
dydt[0] = dvdt_T4[0]
if(Cons.EvolveSpin1):
dydt[1], dydt[2]=FrameFromAngularVelocity_2D_Integrand(Vars.rfrak_chi1[0], Vars.rfrak_chi1[1],(mul(mul(inverse(Cons.S_chi1),OmegaVec_chiVec_1_1p0(Cons,Vars)),Cons.S_chi1))[1:])
else:
dydt[1], dydt[2] = 0.0, 0.0
if(Cons.EvolveSpin2):
dydt[3], dydt[4]=FrameFromAngularVelocity_2D_Integrand(Vars.rfrak_chi2[0], Vars.rfrak_chi2[1],(mul(mul(inverse(Cons.S_chi2),OmegaVec_chiVec_2_1p0(Cons,Vars)),Cons.S_chi2))[1:])
else:
dydt[3], dydt[4] = 0.0, 0.0
return dydt
@njit(cache=True)
def TaylorT5_1p0(Cons,Vars):
    """Right-hand side of the TaylorT5 orbital evolution at 1.0 PN order.

    TaylorT5 Taylor-expands dt/dv = -(dE/dv)/Flux in v and inverts the
    truncated series pointwise to obtain dv/dt.
    """
    # Expanded dt/dv, truncated at 1 PN (the literal 0 factors are
    # vanishing 0.5 PN coefficients kept by the code generator).
    dtdv = -0.5*Cons.nu*Vars.v*(2*Cons.E_0*Cons.M + 0*Vars.v + 4*Cons.E_2*Cons.M*Vars.v**2 + (0*(-2*Cons.E_0*Cons.M*Vars.v - 0*Vars.v**2) - -Cons.Fcal_2*2*Cons.E_0*Cons.M*Vars.v**2 + 0**2*2*Cons.E_0*Cons.M*Vars.v**2/-Cons.Fcal_0)/-Cons.Fcal_0)/(Vars.Fcal_coeff*-Cons.Fcal_0)
    dvdt = 1.0/dtdv
    dydt = np.zeros(8)
    dydt[0] = dvdt[0]
    # Integrand for the orbital-frame rotor.
    dydt[5], dydt[6], dydt[7] = FrameFromAngularVelocityIntegrand(Vars.rfrak_frame, OmegaVec_1p0(Cons,Vars)[1:])
    # Spin-frame integrands, only when the corresponding spin evolves.
    if Cons.EvolveSpin1:
        dydt[1], dydt[2] = FrameFromAngularVelocity_2D_Integrand(Vars.rfrak_chi1[0], Vars.rfrak_chi1[1], (mul(mul(inverse(Cons.S_chi1),OmegaVec_chiVec_1_1p0(Cons,Vars)),Cons.S_chi1))[1:])
    else:
        dydt[1] = 0.0
        dydt[2] = 0.0
    if Cons.EvolveSpin2:
        dydt[3], dydt[4] = FrameFromAngularVelocity_2D_Integrand(Vars.rfrak_chi2[0], Vars.rfrak_chi2[1], (mul(mul(inverse(Cons.S_chi2),OmegaVec_chiVec_2_1p0(Cons,Vars)),Cons.S_chi2))[1:])
    else:
        dydt[3] = 0.0
        dydt[4] = 0.0
    return dydt
@njit(cache=True)
def Recalculate_1p5(Cons,Vars,y):
    """Refresh the orbit-dependent quantities in Vars from the ODE state y (1.5 PN).

    y layout: [v, rfrak_chi1 (2), rfrak_chi2 (2), rfrak_frame (3)].
    Mutates Vars in place; later assignments depend on earlier ones, so the
    statement order is significant.
    """
    # Unpack the integration state.
    Vars.v = np.array([y[0]])
    Vars.rfrak_chi1 = np.array([y[1],y[2]])
    Vars.rfrak_chi2 = np.array([y[3],y[4]])
    Vars.rfrak_frame = np.array([y[5],y[6],y[7]])
    # Orbital-frame rotor and the orthonormal triad it carries.
    Vars.R = exp(Vars.rfrak_frame[0]*Cons.xHat + Vars.rfrak_frame[1]*Cons.yHat + Vars.rfrak_frame[2]*Cons.zHat)
    Vars.nHat = mul(mul(Vars.R,Cons.xHat),conjugate(Vars.R))
    Vars.lambdaHat = mul(mul(Vars.R,Cons.yHat),conjugate(Vars.R))
    Vars.ellHat = mul(mul(Vars.R,Cons.zHat),conjugate(Vars.R))
    # Spin rotors and the two spin vectors they generate.
    Vars.R_S1 = exp(Vars.rfrak_chi1[0]*Cons.xHat + Vars.rfrak_chi1[1]*Cons.yHat)
    Vars.R_S2 = exp(Vars.rfrak_chi2[0]*Cons.xHat + Vars.rfrak_chi2[1]*Cons.yHat)
    Vars.chiVec1 = mul(mul(mul(Cons.S_chi1,Vars.R_S1),Cons.zHat),mul(conjugate(Vars.R_S1),conjugate(Cons.S_chi1)))
    Vars.chiVec2 = mul(mul(mul(Cons.S_chi2,Vars.R_S2),Cons.zHat),mul(conjugate(Vars.R_S2),conjugate(Cons.S_chi2)))
    # Spin components projected onto the (nHat, lambdaHat, ellHat) triad.
    Vars.chi1_n = np.array([dot(Vars.chiVec1[1:],Vars.nHat[1:])])
    Vars.chi1_lambda = np.array([dot(Vars.chiVec1[1:],Vars.lambdaHat[1:])])
    Vars.chi1_ell = np.array([dot(Vars.chiVec1[1:],Vars.ellHat[1:])])
    Vars.chi2_n = np.array([dot(Vars.chiVec2[1:],Vars.nHat[1:])])
    Vars.chi2_lambda = np.array([dot(Vars.chiVec2[1:],Vars.lambdaHat[1:])])
    Vars.chi2_ell = np.array([dot(Vars.chiVec2[1:],Vars.ellHat[1:])])
    # Total (S) and mass-weighted difference (Sigma) spin combinations.
    Vars.S_ell = Cons.M1**2*Vars.chi1_ell + Cons.M2**2*Vars.chi2_ell
    Vars.S_n = Cons.M1**2*Vars.chi1_n + Cons.M2**2*Vars.chi2_n
    Vars.S_lambda = Cons.M1**2*Vars.chi1_lambda + Cons.M2**2*Vars.chi2_lambda
    Vars.Sigma_ell = Cons.M*(-Cons.M1*Vars.chi1_ell + Cons.M2*Vars.chi2_ell)
    Vars.Sigma_n = Cons.M*(-Cons.M1*Vars.chi1_n + Cons.M2*Vars.chi2_n)
    Vars.Sigma_lambda = Cons.M*(-Cons.M1*Vars.chi1_lambda + Cons.M2*Vars.chi2_lambda)
    # Symmetric / antisymmetric aligned-spin combinations.
    Vars.chi_s_ell = Vars.chi1_ell/2 + Vars.chi2_ell/2
    Vars.chi_a_ell = Vars.chi1_ell/2 - Vars.chi2_ell/2
    # Orbit-dependent flux and energy coefficients at this PN order.
    Vars.Fcal_coeff = 32*Cons.nu**2*Vars.v**10/5
    Vars.Fcal_SO_3 = (-4*Vars.S_ell - 5*Vars.Sigma_ell*Cons.delta/4)/Cons.M**2
    Vars.E_SO_3 = (14*Vars.S_ell/3 + 2*Vars.Sigma_ell*Cons.delta)/Cons.M**2
@njit
def OmegaVec_chiVec_1_1p5(Cons,Vars):
    """Precession angular velocity of chiVec1 at 1.5 PN order."""
    v = Vars.v
    # Spin-orbit rotation rate about ellHat (PN polynomial in v).
    ell_part = Vars.ellHat*(-0.75*Cons.delta + 0.5*Cons.nu + v**2*(Cons.delta*(0.625*Cons.nu - 0.5625) + Cons.nu*(1.25 - 0.0416666666666667*Cons.nu) + 0.5625) + 0.75)
    # Spin-spin contributions along nHat and chiVec2.
    n_part = Vars.nHat*v*(3.0*Vars.chi1_n*Cons.nu + 3.0*Cons.M2**2*Vars.chi2_n/Cons.M**2)
    chi2_part = Cons.M2**2*Vars.chiVec2*v/Cons.M**2
    return (v**5/Cons.M)*(ell_part + n_part - chi2_part)
@njit
def OmegaVec_chiVec_2_1p5(Cons,Vars):
    """Precession angular velocity of chiVec2 at 1.5 PN order."""
    v = Vars.v
    # Spin-orbit rotation rate about ellHat (PN polynomial in v).
    ell_part = Vars.ellHat*(0.75*Cons.delta + 0.5*Cons.nu + v**2*(Cons.delta*(0.5625 - 0.625*Cons.nu) + Cons.nu*(1.25 - 0.0416666666666667*Cons.nu) + 0.5625) + 0.75)
    # Spin-spin contributions along nHat and chiVec1.
    n_part = Vars.nHat*v*(3.0*Vars.chi2_n*Cons.nu + 3.0*Cons.M1**2*Vars.chi1_n/Cons.M**2)
    chi1_part = Cons.M1**2*Vars.chiVec1*v/Cons.M**2
    return (v**5/Cons.M)*(ell_part + n_part - chi1_part)
@njit
def OmegaVec_1p5(Cons,Vars):
    """Orbital angular-velocity vector at 1.5 PN order.

    Leading ellHat rotation plus an nHat precession correction built from
    two PN series (a_ell and gamma_PN) in v.
    """
    v = Vars.v
    gamma_PN_0 = 1.00000000000000
    gamma_PN_2 = 1.0 - 0.333333333333333*Cons.nu
    gamma_PN_3 = (1.66666666666667*Vars.S_ell + Vars.Sigma_ell*Cons.delta)/Cons.M**2
    a_ell_0 = 7.0*Vars.S_n + 3.0*Vars.Sigma_n*Cons.delta
    a_ell_2 = Vars.S_n*(-9.66666666666667*Cons.nu - 10.0) + Vars.Sigma_n*Cons.delta*(-4.5*Cons.nu - 6.0)
    # Horner-form PN series for the nHat correction.
    a_ell = a_ell_0 + a_ell_2*v**2
    gamma = gamma_PN_0 + v**2*(gamma_PN_2 + gamma_PN_3*v)
    return Vars.ellHat*v**3/Cons.M + Vars.nHat*v**6*(a_ell)*(gamma)/Cons.M**3
@njit(cache=True)
def TaylorT1_1p5(Cons,Vars):
    """Right-hand side of the TaylorT1 orbital evolution at 1.5 PN order.

    State vector y = [v, rfrak_chi1 (2), rfrak_chi2 (2), rfrak_frame (3)].
    dv/dt is the ratio of the GW energy flux to -dE/dv; the remaining
    components are the frame/spin angular-velocity integrands.
    """
    v = Vars.v
    # 1.5 PN energy flux and derivative of the binding energy.
    flux = Vars.Fcal_coeff*(Cons.Fcal_0 + v**2*(Cons.Fcal_2 + v*(Cons.Fcal_3 + Vars.Fcal_SO_3)))
    dEdv = -0.5*Cons.M*Cons.nu*v*(2.0*Cons.E_0 + v**2*(4.0*Cons.E_2 + 5.0*Vars.E_SO_3*v))
    absorption = 0  # no horizon-absorption terms at this order
    dvdt = (-absorption - flux)/dEdv
    dydt = np.zeros(8)
    dydt[0] = dvdt[0]
    # Integrand for the orbital-frame rotor.
    dydt[5], dydt[6], dydt[7] = FrameFromAngularVelocityIntegrand(Vars.rfrak_frame, OmegaVec_1p5(Cons,Vars)[1:])
    # Spin-frame integrands, only when the corresponding spin evolves.
    if Cons.EvolveSpin1:
        dydt[1], dydt[2] = FrameFromAngularVelocity_2D_Integrand(Vars.rfrak_chi1[0], Vars.rfrak_chi1[1], (mul(mul(inverse(Cons.S_chi1),OmegaVec_chiVec_1_1p5(Cons,Vars)),Cons.S_chi1))[1:])
    else:
        dydt[1] = 0.0
        dydt[2] = 0.0
    if Cons.EvolveSpin2:
        dydt[3], dydt[4] = FrameFromAngularVelocity_2D_Integrand(Vars.rfrak_chi2[0], Vars.rfrak_chi2[1], (mul(mul(inverse(Cons.S_chi2),OmegaVec_chiVec_2_1p5(Cons,Vars)),Cons.S_chi2))[1:])
    else:
        dydt[3] = 0.0
        dydt[4] = 0.0
    return dydt
@njit(cache=True)
def TaylorT4_1p5(Cons,Vars):
    """Right-hand side of the TaylorT4 orbital evolution at 1.5 PN order.

    TaylorT4 re-expands dv/dt = Flux/(-dE/dv) as a single Taylor series in v
    and truncates at relative order v**3 (1.5 PN).

    Fixes two precedence bugs in the generated expression:
    * `.../2*Cons.E_0*Cons.M` divided by 2 and then *multiplied* by E_0*M
      (intended divisor: `(2*Cons.E_0*Cons.M)`);
    * `1.0*-Cons.Fcal_3 - Vars.Fcal_SO_3*Vars.v**3` left Fcal_3 without its
      v**3 factor (intended: `(-Cons.Fcal_3 - Vars.Fcal_SO_3)*Vars.v**3`).
    """
    # dv/dt = Fcal_coeff*(c0 + c2 v^2 + c3 v^3)/(nu v E_0 M) with
    # c2 = F2 - 2 E_2 F0/E_0 and c3 = F3+F_SO3 - 2.5 E_SO3 F0/E_0.
    dvdt_T4 = Vars.Fcal_coeff*(Cons.Fcal_0
        + (Cons.Fcal_2 - 2.0*Cons.E_2*Cons.Fcal_0/Cons.E_0)*Vars.v**2
        + (Cons.Fcal_3 + Vars.Fcal_SO_3 - 2.5*Vars.E_SO_3*Cons.Fcal_0/Cons.E_0)*Vars.v**3)/(Cons.nu*Vars.v*Cons.E_0*Cons.M)
    dydt = np.zeros(8)
    dydt[0] = dvdt_T4[0]
    # Integrand for the orbital-frame rotor.
    dydt[5], dydt[6], dydt[7] = FrameFromAngularVelocityIntegrand(Vars.rfrak_frame, OmegaVec_1p5(Cons,Vars)[1:])
    # Spin-frame integrands, only when the corresponding spin evolves.
    if Cons.EvolveSpin1:
        dydt[1], dydt[2] = FrameFromAngularVelocity_2D_Integrand(Vars.rfrak_chi1[0], Vars.rfrak_chi1[1], (mul(mul(inverse(Cons.S_chi1),OmegaVec_chiVec_1_1p5(Cons,Vars)),Cons.S_chi1))[1:])
    else:
        dydt[1] = 0.0
        dydt[2] = 0.0
    if Cons.EvolveSpin2:
        dydt[3], dydt[4] = FrameFromAngularVelocity_2D_Integrand(Vars.rfrak_chi2[0], Vars.rfrak_chi2[1], (mul(mul(inverse(Cons.S_chi2),OmegaVec_chiVec_2_1p5(Cons,Vars)),Cons.S_chi2))[1:])
    else:
        dydt[3] = 0.0
        dydt[4] = 0.0
    return dydt
@njit(cache=True)
def TaylorT5_1p5(Cons,Vars):
    """Right-hand side of the TaylorT5 orbital evolution at 1.5 PN order.

    TaylorT5 Taylor-expands dt/dv = -(dE/dv)/Flux in v, truncates at
    relative order v**3, and inverts pointwise to obtain dv/dt.

    Fix: in the generated expression the sum `-Cons.Fcal_3 - Vars.Fcal_SO_3`
    was not parenthesized before being multiplied by its v**3 factor, so
    Fcal_3 entered at the wrong order in v.
    """
    # dt/dv = nu v M (E_0 + d2 v^2 + d3 v^3)/(Fcal_coeff F0) with
    # d2 = 2 E_2 - E_0 F2/F0 and d3 = 2.5 E_SO3 - E_0 (F3+F_SO3)/F0.
    dtdv = Cons.nu*Vars.v*Cons.M*(Cons.E_0
        + (2.0*Cons.E_2 - Cons.E_0*Cons.Fcal_2/Cons.Fcal_0)*Vars.v**2
        + (2.5*Vars.E_SO_3 - Cons.E_0*(Cons.Fcal_3 + Vars.Fcal_SO_3)/Cons.Fcal_0)*Vars.v**3)/(Vars.Fcal_coeff*Cons.Fcal_0)
    dvdt_T5 = 1.0/dtdv
    dydt = np.zeros(8)
    dydt[0] = dvdt_T5[0]
    # Integrand for the orbital-frame rotor.
    dydt[5], dydt[6], dydt[7] = FrameFromAngularVelocityIntegrand(Vars.rfrak_frame, OmegaVec_1p5(Cons,Vars)[1:])
    # Spin-frame integrands, only when the corresponding spin evolves.
    if Cons.EvolveSpin1:
        dydt[1], dydt[2] = FrameFromAngularVelocity_2D_Integrand(Vars.rfrak_chi1[0], Vars.rfrak_chi1[1], (mul(mul(inverse(Cons.S_chi1),OmegaVec_chiVec_1_1p5(Cons,Vars)),Cons.S_chi1))[1:])
    else:
        dydt[1] = 0.0
        dydt[2] = 0.0
    if Cons.EvolveSpin2:
        dydt[3], dydt[4] = FrameFromAngularVelocity_2D_Integrand(Vars.rfrak_chi2[0], Vars.rfrak_chi2[1], (mul(mul(inverse(Cons.S_chi2),OmegaVec_chiVec_2_1p5(Cons,Vars)),Cons.S_chi2))[1:])
    else:
        dydt[3] = 0.0
        dydt[4] = 0.0
    return dydt
@njit(cache=True)
def Recalculate_2p0(Cons,Vars,y):
    """Refresh the orbit-dependent quantities in Vars from the ODE state y (2.0 PN).

    y layout: [v, rfrak_chi1 (2), rfrak_chi2 (2), rfrak_frame (3)].
    Mutates Vars in place; later assignments depend on earlier ones, so the
    statement order is significant.
    """
    # Unpack the integration state.
    Vars.v = np.array([y[0]])
    Vars.rfrak_chi1 = np.array([y[1],y[2]])
    Vars.rfrak_chi2 = np.array([y[3],y[4]])
    Vars.rfrak_frame = np.array([y[5],y[6],y[7]])
    # Orbital-frame rotor and the orthonormal triad it carries.
    Vars.R = exp(Vars.rfrak_frame[0]*Cons.xHat + Vars.rfrak_frame[1]*Cons.yHat + Vars.rfrak_frame[2]*Cons.zHat)
    Vars.nHat = mul(mul(Vars.R,Cons.xHat),conjugate(Vars.R))
    Vars.lambdaHat = mul(mul(Vars.R,Cons.yHat),conjugate(Vars.R))
    Vars.ellHat = mul(mul(Vars.R,Cons.zHat),conjugate(Vars.R))
    # Spin rotors and the two spin vectors they generate.
    Vars.R_S1 = exp(Vars.rfrak_chi1[0]*Cons.xHat + Vars.rfrak_chi1[1]*Cons.yHat)
    Vars.R_S2 = exp(Vars.rfrak_chi2[0]*Cons.xHat + Vars.rfrak_chi2[1]*Cons.yHat)
    Vars.chiVec1 = mul(mul(mul(Cons.S_chi1,Vars.R_S1),Cons.zHat),mul(conjugate(Vars.R_S1),conjugate(Cons.S_chi1)))
    Vars.chiVec2 = mul(mul(mul(Cons.S_chi2,Vars.R_S2),Cons.zHat),mul(conjugate(Vars.R_S2),conjugate(Cons.S_chi2)))
    # Spin components projected onto the (nHat, lambdaHat, ellHat) triad.
    Vars.chi1_n = np.array([dot(Vars.chiVec1[1:],Vars.nHat[1:])])
    Vars.chi1_lambda = np.array([dot(Vars.chiVec1[1:],Vars.lambdaHat[1:])])
    Vars.chi1_ell = np.array([dot(Vars.chiVec1[1:],Vars.ellHat[1:])])
    Vars.chi2_n = np.array([dot(Vars.chiVec2[1:],Vars.nHat[1:])])
    Vars.chi2_lambda = np.array([dot(Vars.chiVec2[1:],Vars.lambdaHat[1:])])
    Vars.chi2_ell = np.array([dot(Vars.chiVec2[1:],Vars.ellHat[1:])])
    # Total (S) and mass-weighted difference (Sigma) spin combinations.
    Vars.S_ell = Cons.M1**2*Vars.chi1_ell + Cons.M2**2*Vars.chi2_ell
    Vars.S_n = Cons.M1**2*Vars.chi1_n + Cons.M2**2*Vars.chi2_n
    Vars.S_lambda = Cons.M1**2*Vars.chi1_lambda + Cons.M2**2*Vars.chi2_lambda
    Vars.Sigma_ell = Cons.M*(-Cons.M1*Vars.chi1_ell + Cons.M2*Vars.chi2_ell)
    Vars.Sigma_n = Cons.M*(-Cons.M1*Vars.chi1_n + Cons.M2*Vars.chi2_n)
    Vars.Sigma_lambda = Cons.M*(-Cons.M1*Vars.chi1_lambda + Cons.M2*Vars.chi2_lambda)
    # Symmetric / antisymmetric aligned-spin combinations.
    Vars.chi_s_ell = Vars.chi1_ell/2 + Vars.chi2_ell/2
    Vars.chi_a_ell = Vars.chi1_ell/2 - Vars.chi2_ell/2
    # Orbit-dependent flux and energy coefficients at this PN order
    # (spin-squared terms enter at 2 PN).
    Vars.Fcal_coeff = 32*Cons.nu**2*Vars.v**10/5
    Vars.Fcal_SQ_4 = Cons.chi1chi1*(-89*Cons.delta/192 + 89*Cons.nu/96 - 89/192) - 103*Cons.chi1chi2*Cons.nu/48 + Cons.chi2chi2*(89*Cons.delta/192 + 89*Cons.nu/96 - 89/192) + Vars.chi_a_ell*(Vars.chi_a_ell*(287/96 - 12*Cons.nu) + 287*Vars.chi_s_ell*Cons.delta/48) + Vars.chi_s_ell**2*(Cons.nu/24 + 287/96)
    Vars.Fcal_SO_3 = (-4*Vars.S_ell - 5*Vars.Sigma_ell*Cons.delta/4)/Cons.M**2
    Vars.E_SQ_4 = -3*Vars.chi_a_ell**2/2 - 3*Vars.chi_s_ell**2/2 - Cons.delta*(Cons.chi2chi2/2 + 3*Vars.chi_a_ell*Vars.chi_s_ell) + Cons.nu*(Cons.chi1chi2 + 6*Vars.chi_a_ell**2) + (Cons.chi1chi1 + Cons.chi2chi2)*(Cons.delta - 2*Cons.nu + 1)/4
    Vars.E_SO_3 = (14*Vars.S_ell/3 + 2*Vars.Sigma_ell*Cons.delta)/Cons.M**2
@njit
def OmegaVec_chiVec_1_2p0(Cons,Vars):
    """Precession angular velocity of chiVec1 at 2.0 PN order."""
    v = Vars.v
    # Spin-orbit rotation rate about ellHat (PN polynomial in v).
    ell_part = Vars.ellHat*(-0.75*Cons.delta + 0.5*Cons.nu + v**2*(Cons.delta*(0.625*Cons.nu - 0.5625) + Cons.nu*(1.25 - 0.0416666666666667*Cons.nu) + v**2*(Cons.delta*(Cons.nu*(4.875 - 0.15625*Cons.nu) - 0.84375) + Cons.nu*(Cons.nu*(-0.0208333333333333*Cons.nu - 3.28125) + 0.1875) + 0.84375) + 0.5625) + 0.75)
    # Spin-spin contributions along nHat and chiVec2.
    n_part = Vars.nHat*v*(3.0*Vars.chi1_n*Cons.nu + 3.0*Cons.M2**2*Vars.chi2_n/Cons.M**2)
    chi2_part = Cons.M2**2*Vars.chiVec2*v/Cons.M**2
    return (v**5/Cons.M)*(ell_part + n_part - chi2_part)
@njit
def OmegaVec_chiVec_2_2p0(Cons,Vars):
    """Precession angular velocity of chiVec2 at 2.0 PN order."""
    v = Vars.v
    # Spin-orbit rotation rate about ellHat (PN polynomial in v).
    ell_part = Vars.ellHat*(0.75*Cons.delta + 0.5*Cons.nu + v**2*(Cons.delta*(0.5625 - 0.625*Cons.nu) + Cons.nu*(1.25 - 0.0416666666666667*Cons.nu) + v**2*(Cons.delta*(Cons.nu*(0.15625*Cons.nu - 4.875) + 0.84375) + Cons.nu*(Cons.nu*(-0.0208333333333333*Cons.nu - 3.28125) + 0.1875) + 0.84375) + 0.5625) + 0.75)
    # Spin-spin contributions along nHat and chiVec1.
    n_part = Vars.nHat*v*(3.0*Vars.chi2_n*Cons.nu + 3.0*Cons.M1**2*Vars.chi1_n/Cons.M**2)
    chi1_part = Cons.M1**2*Vars.chiVec1*v/Cons.M**2
    return (v**5/Cons.M)*(ell_part + n_part - chi1_part)
@njit
def OmegaVec_2p0(Cons,Vars):
    """Orbital angular-velocity vector at 2.0 PN order.

    Leading ellHat rotation plus an nHat precession correction built from
    two PN series (a_ell and gamma_PN) in v.
    """
    v = Vars.v
    gamma_PN_0 = 1.00000000000000
    gamma_PN_2 = 1.0 - 0.333333333333333*Cons.nu
    gamma_PN_3 = (1.66666666666667*Vars.S_ell + Vars.Sigma_ell*Cons.delta)/Cons.M**2
    gamma_PN_4 = 1.0 - 5.41666666666667*Cons.nu
    a_ell_0 = 7.0*Vars.S_n + 3.0*Vars.Sigma_n*Cons.delta
    a_ell_2 = Vars.S_n*(-9.66666666666667*Cons.nu - 10.0) + Vars.Sigma_n*Cons.delta*(-4.5*Cons.nu - 6.0)
    a_ell_4 = Vars.S_n*(5.77777777777778*Cons.nu**2 + 14.75*Cons.nu + 1.5) + Vars.Sigma_n*Cons.delta*(2.83333333333333*Cons.nu**2 + 9.125*Cons.nu + 1.5)
    # Horner-form PN series for the nHat correction.
    a_ell = a_ell_0 + v**2*(a_ell_2 + a_ell_4*v**2)
    gamma = gamma_PN_0 + v**2*(gamma_PN_2 + v*(gamma_PN_3 + gamma_PN_4*v))
    return Vars.ellHat*v**3/Cons.M + Vars.nHat*v**6*(a_ell)*(gamma)/Cons.M**3
@njit(cache=True)
def TaylorT1_2p0(Cons,Vars):
    """Right-hand side of the TaylorT1 orbital evolution at 2.0 PN order.

    State vector y = [v, rfrak_chi1 (2), rfrak_chi2 (2), rfrak_frame (3)].
    dv/dt is the ratio of the GW energy flux to -dE/dv; the remaining
    components are the frame/spin angular-velocity integrands.
    """
    v = Vars.v
    # 2 PN energy flux and derivative of the binding energy.
    flux = Vars.Fcal_coeff*(Cons.Fcal_0 + v**2*(Cons.Fcal_2 + v*(Cons.Fcal_3 + Vars.Fcal_SO_3 + v*(Cons.Fcal_4 + Vars.Fcal_SQ_4))))
    dEdv = -0.5*Cons.M*Cons.nu*v*(2.0*Cons.E_0 + v**2*(4.0*Cons.E_2 + v*(5.0*Vars.E_SO_3 + 6.0*v*(Cons.E_4 + Vars.E_SQ_4))))
    absorption = 0  # no horizon-absorption terms at this order
    dvdt = (-absorption - flux)/dEdv
    dydt = np.zeros(8)
    dydt[0] = dvdt[0]
    # Integrand for the orbital-frame rotor.
    dydt[5], dydt[6], dydt[7] = FrameFromAngularVelocityIntegrand(Vars.rfrak_frame, OmegaVec_2p0(Cons,Vars)[1:])
    # Spin-frame integrands, only when the corresponding spin evolves.
    if Cons.EvolveSpin1:
        dydt[1], dydt[2] = FrameFromAngularVelocity_2D_Integrand(Vars.rfrak_chi1[0], Vars.rfrak_chi1[1], (mul(mul(inverse(Cons.S_chi1),OmegaVec_chiVec_1_2p0(Cons,Vars)),Cons.S_chi1))[1:])
    else:
        dydt[1] = 0.0
        dydt[2] = 0.0
    if Cons.EvolveSpin2:
        dydt[3], dydt[4] = FrameFromAngularVelocity_2D_Integrand(Vars.rfrak_chi2[0], Vars.rfrak_chi2[1], (mul(mul(inverse(Cons.S_chi2),OmegaVec_chiVec_2_2p0(Cons,Vars)),Cons.S_chi2))[1:])
    else:
        dydt[3] = 0.0
        dydt[4] = 0.0
    return dydt
@njit(cache=True)
def TaylorT4_2p0(Cons,Vars):
    """Right-hand side of the TaylorT4 orbital evolution at 2.0 PN order.

    TaylorT4 re-expands dv/dt = Flux/(-dE/dv) as a single Taylor series in v
    and truncates at relative order v**4 (2 PN).

    Fixes precedence bugs in the generated expression: `.../2*Cons.E_0*Cons.M`
    divided by 2 and multiplied by E_0*M; sums like `-Cons.Fcal_3 -
    Vars.Fcal_SO_3` and `6*Cons.E_4*Cons.M + 6*Vars.E_SQ_4*Cons.M` were not
    parenthesized before their v-power factors; `4*Cons.E_2*Cons.M**2` squared
    only M where the squared coefficient `(4*E_2*M)**2` is required.
    """
    # dv/dt = Fcal_coeff*(c0 + c2 v^2 + c3 v^3 + c4 v^4)/(nu v E_0 M), the
    # standard geometric-series re-expansion of the flux/energy ratio.
    dvdt_T4 = Vars.Fcal_coeff*(Cons.Fcal_0
        + (Cons.Fcal_2 - 2.0*Cons.E_2*Cons.Fcal_0/Cons.E_0)*Vars.v**2
        + (Cons.Fcal_3 + Vars.Fcal_SO_3 - 2.5*Vars.E_SO_3*Cons.Fcal_0/Cons.E_0)*Vars.v**3
        + (Cons.Fcal_4 + Vars.Fcal_SQ_4 - 2.0*Cons.E_2*Cons.Fcal_2/Cons.E_0
           - 3.0*(Cons.E_4 + Vars.E_SQ_4)*Cons.Fcal_0/Cons.E_0
           + 4.0*Cons.E_2**2*Cons.Fcal_0/Cons.E_0**2)*Vars.v**4)/(Cons.nu*Vars.v*Cons.E_0*Cons.M)
    dydt = np.zeros(8)
    dydt[0] = dvdt_T4[0]
    # Integrand for the orbital-frame rotor.
    dydt[5], dydt[6], dydt[7] = FrameFromAngularVelocityIntegrand(Vars.rfrak_frame, OmegaVec_2p0(Cons,Vars)[1:])
    # Spin-frame integrands, only when the corresponding spin evolves.
    if Cons.EvolveSpin1:
        dydt[1], dydt[2] = FrameFromAngularVelocity_2D_Integrand(Vars.rfrak_chi1[0], Vars.rfrak_chi1[1], (mul(mul(inverse(Cons.S_chi1),OmegaVec_chiVec_1_2p0(Cons,Vars)),Cons.S_chi1))[1:])
    else:
        dydt[1] = 0.0
        dydt[2] = 0.0
    if Cons.EvolveSpin2:
        dydt[3], dydt[4] = FrameFromAngularVelocity_2D_Integrand(Vars.rfrak_chi2[0], Vars.rfrak_chi2[1], (mul(mul(inverse(Cons.S_chi2),OmegaVec_chiVec_2_2p0(Cons,Vars)),Cons.S_chi2))[1:])
    else:
        dydt[3] = 0.0
        dydt[4] = 0.0
    return dydt
@njit(cache=True)
def TaylorT5_2p0(Cons,Vars):
    """Right-hand side of the TaylorT5 orbital evolution at 2.0 PN order.

    TaylorT5 Taylor-expands dt/dv = -(dE/dv)/Flux in v, truncates at
    relative order v**4, and inverts pointwise to obtain dv/dt.

    Fix: in the generated expression the sums `-Cons.Fcal_3 - Vars.Fcal_SO_3`,
    `-Cons.Fcal_4 - Vars.Fcal_SQ_4` and `6*Cons.E_4*Cons.M +
    6*Vars.E_SQ_4*Cons.M` were not parenthesized before being multiplied by
    their v-power factors, putting pieces of each coefficient at the wrong
    order in v.
    """
    # dt/dv = nu v M (E_0 + d2 v^2 + d3 v^3 + d4 v^4)/(Fcal_coeff F0), the
    # standard geometric-series re-expansion of -(dE/dv)/Flux.
    dtdv = Cons.nu*Vars.v*Cons.M*(Cons.E_0
        + (2.0*Cons.E_2 - Cons.E_0*Cons.Fcal_2/Cons.Fcal_0)*Vars.v**2
        + (2.5*Vars.E_SO_3 - Cons.E_0*(Cons.Fcal_3 + Vars.Fcal_SO_3)/Cons.Fcal_0)*Vars.v**3
        + (3.0*(Cons.E_4 + Vars.E_SQ_4) - 2.0*Cons.E_2*Cons.Fcal_2/Cons.Fcal_0
           - Cons.E_0*(Cons.Fcal_4 + Vars.Fcal_SQ_4)/Cons.Fcal_0
           + Cons.E_0*Cons.Fcal_2**2/Cons.Fcal_0**2)*Vars.v**4)/(Vars.Fcal_coeff*Cons.Fcal_0)
    dvdt_T5 = 1.0/dtdv
    dydt = np.zeros(8)
    dydt[0] = dvdt_T5[0]
    # Integrand for the orbital-frame rotor.
    dydt[5], dydt[6], dydt[7] = FrameFromAngularVelocityIntegrand(Vars.rfrak_frame, OmegaVec_2p0(Cons,Vars)[1:])
    # Spin-frame integrands, only when the corresponding spin evolves.
    if Cons.EvolveSpin1:
        dydt[1], dydt[2] = FrameFromAngularVelocity_2D_Integrand(Vars.rfrak_chi1[0], Vars.rfrak_chi1[1], (mul(mul(inverse(Cons.S_chi1),OmegaVec_chiVec_1_2p0(Cons,Vars)),Cons.S_chi1))[1:])
    else:
        dydt[1] = 0.0
        dydt[2] = 0.0
    if Cons.EvolveSpin2:
        dydt[3], dydt[4] = FrameFromAngularVelocity_2D_Integrand(Vars.rfrak_chi2[0], Vars.rfrak_chi2[1], (mul(mul(inverse(Cons.S_chi2),OmegaVec_chiVec_2_2p0(Cons,Vars)),Cons.S_chi2))[1:])
    else:
        dydt[3] = 0.0
        dydt[4] = 0.0
    return dydt
@njit(cache=True)
def Recalculate_2p5(Cons,Vars,y):
    """Refresh the orbit-dependent quantities in Vars from the ODE state y (2.5 PN).

    y layout: [v, rfrak_chi1 (2), rfrak_chi2 (2), rfrak_frame (3)].
    Mutates Vars in place; later assignments depend on earlier ones, so the
    statement order is significant.
    """
    # Unpack the integration state.
    Vars.v = np.array([y[0]])
    Vars.rfrak_chi1 = np.array([y[1],y[2]])
    Vars.rfrak_chi2 = np.array([y[3],y[4]])
    Vars.rfrak_frame = np.array([y[5],y[6],y[7]])
    # Orbital-frame rotor and the orthonormal triad it carries.
    Vars.R = exp(Vars.rfrak_frame[0]*Cons.xHat + Vars.rfrak_frame[1]*Cons.yHat + Vars.rfrak_frame[2]*Cons.zHat)
    Vars.nHat = mul(mul(Vars.R,Cons.xHat),conjugate(Vars.R))
    Vars.lambdaHat = mul(mul(Vars.R,Cons.yHat),conjugate(Vars.R))
    Vars.ellHat = mul(mul(Vars.R,Cons.zHat),conjugate(Vars.R))
    # Spin rotors and the two spin vectors they generate.
    Vars.R_S1 = exp(Vars.rfrak_chi1[0]*Cons.xHat + Vars.rfrak_chi1[1]*Cons.yHat)
    Vars.R_S2 = exp(Vars.rfrak_chi2[0]*Cons.xHat + Vars.rfrak_chi2[1]*Cons.yHat)
    Vars.chiVec1 = mul(mul(mul(Cons.S_chi1,Vars.R_S1),Cons.zHat),mul(conjugate(Vars.R_S1),conjugate(Cons.S_chi1)))
    Vars.chiVec2 = mul(mul(mul(Cons.S_chi2,Vars.R_S2),Cons.zHat),mul(conjugate(Vars.R_S2),conjugate(Cons.S_chi2)))
    # Spin components projected onto the (nHat, lambdaHat, ellHat) triad.
    Vars.chi1_n = np.array([dot(Vars.chiVec1[1:],Vars.nHat[1:])])
    Vars.chi1_lambda = np.array([dot(Vars.chiVec1[1:],Vars.lambdaHat[1:])])
    Vars.chi1_ell = np.array([dot(Vars.chiVec1[1:],Vars.ellHat[1:])])
    Vars.chi2_n = np.array([dot(Vars.chiVec2[1:],Vars.nHat[1:])])
    Vars.chi2_lambda = np.array([dot(Vars.chiVec2[1:],Vars.lambdaHat[1:])])
    Vars.chi2_ell = np.array([dot(Vars.chiVec2[1:],Vars.ellHat[1:])])
    # Total (S) and mass-weighted difference (Sigma) spin combinations.
    Vars.S_ell = Cons.M1**2*Vars.chi1_ell + Cons.M2**2*Vars.chi2_ell
    Vars.S_n = Cons.M1**2*Vars.chi1_n + Cons.M2**2*Vars.chi2_n
    Vars.S_lambda = Cons.M1**2*Vars.chi1_lambda + Cons.M2**2*Vars.chi2_lambda
    Vars.Sigma_ell = Cons.M*(-Cons.M1*Vars.chi1_ell + Cons.M2*Vars.chi2_ell)
    Vars.Sigma_n = Cons.M*(-Cons.M1*Vars.chi1_n + Cons.M2*Vars.chi2_n)
    Vars.Sigma_lambda = Cons.M*(-Cons.M1*Vars.chi1_lambda + Cons.M2*Vars.chi2_lambda)
    # Symmetric / antisymmetric aligned-spin combinations.
    Vars.chi_s_ell = Vars.chi1_ell/2 + Vars.chi2_ell/2
    Vars.chi_a_ell = Vars.chi1_ell/2 - Vars.chi2_ell/2
    # Orbit-dependent flux and energy coefficients at this PN order
    # (2.5 PN spin-orbit terms Fcal_SO_5 / E_SO_5 enter here).
    Vars.Fcal_coeff = 32*Cons.nu**2*Vars.v**10/5
    Vars.Fcal_SQ_4 = Cons.chi1chi1*(-89*Cons.delta/192 + 89*Cons.nu/96 - 89/192) - 103*Cons.chi1chi2*Cons.nu/48 + Cons.chi2chi2*(89*Cons.delta/192 + 89*Cons.nu/96 - 89/192) + Vars.chi_a_ell*(Vars.chi_a_ell*(287/96 - 12*Cons.nu) + 287*Vars.chi_s_ell*Cons.delta/48) + Vars.chi_s_ell**2*(Cons.nu/24 + 287/96)
    Vars.Fcal_SO_3 = (-4*Vars.S_ell - 5*Vars.Sigma_ell*Cons.delta/4)/Cons.M**2
    Vars.Fcal_SO_5 = (Vars.S_ell*(272*Cons.nu/9 - 9/2) + Vars.Sigma_ell*Cons.delta*(43*Cons.nu/4 - 13/16))/Cons.M**2
    Vars.E_SQ_4 = -3*Vars.chi_a_ell**2/2 - 3*Vars.chi_s_ell**2/2 - Cons.delta*(Cons.chi2chi2/2 + 3*Vars.chi_a_ell*Vars.chi_s_ell) + Cons.nu*(Cons.chi1chi2 + 6*Vars.chi_a_ell**2) + (Cons.chi1chi1 + Cons.chi2chi2)*(Cons.delta - 2*Cons.nu + 1)/4
    Vars.E_SO_3 = (14*Vars.S_ell/3 + 2*Vars.Sigma_ell*Cons.delta)/Cons.M**2
    Vars.E_SO_5 = (Vars.S_ell*(11 - 61*Cons.nu/9) + Vars.Sigma_ell*Cons.delta*(3 - 10*Cons.nu/3))/Cons.M**2
@njit
def OmegaVec_chiVec_1_2p5(Cons,Vars):
    """Precession angular velocity of chiVec1 at 2.5 PN order."""
    v = Vars.v
    # Spin-orbit rotation rate about ellHat (PN polynomial in v).
    ell_part = Vars.ellHat*(-0.75*Cons.delta + 0.5*Cons.nu + v**2*(Cons.delta*(0.625*Cons.nu - 0.5625) + Cons.nu*(1.25 - 0.0416666666666667*Cons.nu) + v**2*(Cons.delta*(Cons.nu*(4.875 - 0.15625*Cons.nu) - 0.84375) + Cons.nu*(Cons.nu*(-0.0208333333333333*Cons.nu - 3.28125) + 0.1875) + 0.84375) + 0.5625) + 0.75)
    # Spin-spin contributions along nHat and chiVec2.
    n_part = Vars.nHat*v*(3.0*Vars.chi1_n*Cons.nu + 3.0*Cons.M2**2*Vars.chi2_n/Cons.M**2)
    chi2_part = Cons.M2**2*Vars.chiVec2*v/Cons.M**2
    return (v**5/Cons.M)*(ell_part + n_part - chi2_part)
@njit
def OmegaVec_chiVec_2_2p5(Cons,Vars):
    """Precession angular velocity of chiVec2 at 2.5 PN order."""
    v = Vars.v
    # Spin-orbit rotation rate about ellHat (PN polynomial in v).
    ell_part = Vars.ellHat*(0.75*Cons.delta + 0.5*Cons.nu + v**2*(Cons.delta*(0.5625 - 0.625*Cons.nu) + Cons.nu*(1.25 - 0.0416666666666667*Cons.nu) + v**2*(Cons.delta*(Cons.nu*(0.15625*Cons.nu - 4.875) + 0.84375) + Cons.nu*(Cons.nu*(-0.0208333333333333*Cons.nu - 3.28125) + 0.1875) + 0.84375) + 0.5625) + 0.75)
    # Spin-spin contributions along nHat and chiVec1.
    n_part = Vars.nHat*v*(3.0*Vars.chi2_n*Cons.nu + 3.0*Cons.M1**2*Vars.chi1_n/Cons.M**2)
    chi1_part = Cons.M1**2*Vars.chiVec1*v/Cons.M**2
    return (v**5/Cons.M)*(ell_part + n_part - chi1_part)
@njit
def OmegaVec_2p5(Cons,Vars):
    """Orbital angular-velocity vector at 2.5 PN order.

    Leading ellHat rotation plus an nHat precession correction built from
    two PN series (a_ell and gamma_PN) in v.
    """
    v = Vars.v
    gamma_PN_0 = 1.00000000000000
    gamma_PN_2 = 1.0 - 0.333333333333333*Cons.nu
    gamma_PN_3 = (1.66666666666667*Vars.S_ell + Vars.Sigma_ell*Cons.delta)/Cons.M**2
    gamma_PN_4 = 1.0 - 5.41666666666667*Cons.nu
    gamma_PN_5 = (Vars.S_ell*(0.888888888888889*Cons.nu + 3.33333333333333) + 2.0*Vars.Sigma_ell*Cons.delta)/Cons.M**2
    a_ell_0 = 7.0*Vars.S_n + 3.0*Vars.Sigma_n*Cons.delta
    a_ell_2 = Vars.S_n*(-9.66666666666667*Cons.nu - 10.0) + Vars.Sigma_n*Cons.delta*(-4.5*Cons.nu - 6.0)
    a_ell_4 = Vars.S_n*(5.77777777777778*Cons.nu**2 + 14.75*Cons.nu + 1.5) + Vars.Sigma_n*Cons.delta*(2.83333333333333*Cons.nu**2 + 9.125*Cons.nu + 1.5)
    # Horner-form PN series for the nHat correction.
    a_ell = a_ell_0 + v**2*(a_ell_2 + a_ell_4*v**2)
    gamma = gamma_PN_0 + v**2*(gamma_PN_2 + v*(gamma_PN_3 + v*(gamma_PN_4 + gamma_PN_5*v)))
    return Vars.ellHat*v**3/Cons.M + Vars.nHat*v**6*(a_ell)*(gamma)/Cons.M**3
@njit(cache=True)
def TaylorT1_2p5(Cons,Vars):
    """Right-hand side of the TaylorT1 orbital evolution at 2.5 PN order.

    State vector y = [v, rfrak_chi1 (2), rfrak_chi2 (2), rfrak_frame (3)].
    dv/dt is the ratio of the GW energy flux to -dE/dv; the remaining
    components are the frame/spin angular-velocity integrands.
    """
    v = Vars.v
    # 2.5 PN energy flux and derivative of the binding energy.
    flux = Vars.Fcal_coeff*(Cons.Fcal_0 + v**2*(Cons.Fcal_2 + v*(Cons.Fcal_3 + Vars.Fcal_SO_3 + v*(Cons.Fcal_4 + Vars.Fcal_SQ_4 + v*(Cons.Fcal_5 + Vars.Fcal_SO_5)))))
    dEdv = -0.5*Cons.M*Cons.nu*v*(2.0*Cons.E_0 + v**2*(4.0*Cons.E_2 + v*(5.0*Vars.E_SO_3 + v*(6.0*Cons.E_4 + 7.0*Vars.E_SO_5*v + 6.0*Vars.E_SQ_4))))
    absorption = 0  # no horizon-absorption terms at this order
    dvdt = (-absorption - flux)/dEdv
    dydt = np.zeros(8)
    dydt[0] = dvdt[0]
    # Integrand for the orbital-frame rotor.
    dydt[5], dydt[6], dydt[7] = FrameFromAngularVelocityIntegrand(Vars.rfrak_frame, OmegaVec_2p5(Cons,Vars)[1:])
    # Spin-frame integrands, only when the corresponding spin evolves.
    if Cons.EvolveSpin1:
        dydt[1], dydt[2] = FrameFromAngularVelocity_2D_Integrand(Vars.rfrak_chi1[0], Vars.rfrak_chi1[1], (mul(mul(inverse(Cons.S_chi1),OmegaVec_chiVec_1_2p5(Cons,Vars)),Cons.S_chi1))[1:])
    else:
        dydt[1] = 0.0
        dydt[2] = 0.0
    if Cons.EvolveSpin2:
        dydt[3], dydt[4] = FrameFromAngularVelocity_2D_Integrand(Vars.rfrak_chi2[0], Vars.rfrak_chi2[1], (mul(mul(inverse(Cons.S_chi2),OmegaVec_chiVec_2_2p5(Cons,Vars)),Cons.S_chi2))[1:])
    else:
        dydt[3] = 0.0
        dydt[4] = 0.0
    return dydt
@njit(cache=True)
def TaylorT4_2p5(Cons,Vars):
    """Right-hand side of the TaylorT4 orbital evolution at 2.5 PN order.

    TaylorT4 re-expands dv/dt = Flux/(-dE/dv) as a single Taylor series in v
    and truncates at relative order v**5 (2.5 PN).

    Fixes precedence bugs in the generated expression: divisions written as
    `.../2*Cons.E_0*Cons.M` (dividing by 2 and multiplying by E_0*M) and
    multi-term coefficients such as `-Cons.Fcal_3 - Vars.Fcal_SO_3` or
    `6*Cons.E_4*Cons.M + 6*Vars.E_SQ_4*Cons.M` that were not parenthesized
    before their v-power factors.
    """
    # dv/dt = Fcal_coeff*(c0 + c2 v^2 + ... + c5 v^5)/(nu v E_0 M), the
    # standard geometric-series re-expansion of the flux/energy ratio.
    dvdt_T4 = Vars.Fcal_coeff*(Cons.Fcal_0
        + (Cons.Fcal_2 - 2.0*Cons.E_2*Cons.Fcal_0/Cons.E_0)*Vars.v**2
        + (Cons.Fcal_3 + Vars.Fcal_SO_3 - 2.5*Vars.E_SO_3*Cons.Fcal_0/Cons.E_0)*Vars.v**3
        + (Cons.Fcal_4 + Vars.Fcal_SQ_4 - 2.0*Cons.E_2*Cons.Fcal_2/Cons.E_0
           - 3.0*(Cons.E_4 + Vars.E_SQ_4)*Cons.Fcal_0/Cons.E_0
           + 4.0*Cons.E_2**2*Cons.Fcal_0/Cons.E_0**2)*Vars.v**4
        + (Cons.Fcal_5 + Vars.Fcal_SO_5
           - 2.0*Cons.E_2*(Cons.Fcal_3 + Vars.Fcal_SO_3)/Cons.E_0
           - 2.5*Vars.E_SO_3*Cons.Fcal_2/Cons.E_0
           - 3.5*Vars.E_SO_5*Cons.Fcal_0/Cons.E_0
           + 10.0*Cons.E_2*Vars.E_SO_3*Cons.Fcal_0/Cons.E_0**2)*Vars.v**5)/(Cons.nu*Vars.v*Cons.E_0*Cons.M)
    dydt = np.zeros(8)
    dydt[0] = dvdt_T4[0]
    # Integrand for the orbital-frame rotor.
    dydt[5], dydt[6], dydt[7] = FrameFromAngularVelocityIntegrand(Vars.rfrak_frame, OmegaVec_2p5(Cons,Vars)[1:])
    # Spin-frame integrands, only when the corresponding spin evolves.
    if Cons.EvolveSpin1:
        dydt[1], dydt[2] = FrameFromAngularVelocity_2D_Integrand(Vars.rfrak_chi1[0], Vars.rfrak_chi1[1], (mul(mul(inverse(Cons.S_chi1),OmegaVec_chiVec_1_2p5(Cons,Vars)),Cons.S_chi1))[1:])
    else:
        dydt[1] = 0.0
        dydt[2] = 0.0
    if Cons.EvolveSpin2:
        dydt[3], dydt[4] = FrameFromAngularVelocity_2D_Integrand(Vars.rfrak_chi2[0], Vars.rfrak_chi2[1], (mul(mul(inverse(Cons.S_chi2),OmegaVec_chiVec_2_2p5(Cons,Vars)),Cons.S_chi2))[1:])
    else:
        dydt[3] = 0.0
        dydt[4] = 0.0
    return dydt
@njit(cache=True)
def TaylorT5_2p5(Cons,Vars):
    """Right-hand side of the TaylorT5 orbital evolution at 2.5 PN order.

    TaylorT5 Taylor-expands dt/dv = -(dE/dv)/Flux in v, truncates at
    relative order v**5, and inverts pointwise to obtain dv/dt.

    Fix: in the generated expression the multi-term coefficients
    (`-Cons.Fcal_3 - Vars.Fcal_SO_3`, `-Cons.Fcal_4 - Vars.Fcal_SQ_4`,
    `-Cons.Fcal_5 - Vars.Fcal_SO_5`, `6*Cons.E_4*Cons.M +
    6*Vars.E_SQ_4*Cons.M`) were not parenthesized before their v-power
    factors, putting pieces of each coefficient at the wrong order in v.
    """
    # dt/dv = nu v M (E_0 + d2 v^2 + ... + d5 v^5)/(Fcal_coeff F0), the
    # standard geometric-series re-expansion of -(dE/dv)/Flux.
    dtdv = Cons.nu*Vars.v*Cons.M*(Cons.E_0
        + (2.0*Cons.E_2 - Cons.E_0*Cons.Fcal_2/Cons.Fcal_0)*Vars.v**2
        + (2.5*Vars.E_SO_3 - Cons.E_0*(Cons.Fcal_3 + Vars.Fcal_SO_3)/Cons.Fcal_0)*Vars.v**3
        + (3.0*(Cons.E_4 + Vars.E_SQ_4) - 2.0*Cons.E_2*Cons.Fcal_2/Cons.Fcal_0
           - Cons.E_0*(Cons.Fcal_4 + Vars.Fcal_SQ_4)/Cons.Fcal_0
           + Cons.E_0*Cons.Fcal_2**2/Cons.Fcal_0**2)*Vars.v**4
        + (3.5*Vars.E_SO_5
           - 2.0*Cons.E_2*(Cons.Fcal_3 + Vars.Fcal_SO_3)/Cons.Fcal_0
           - 2.5*Vars.E_SO_3*Cons.Fcal_2/Cons.Fcal_0
           - Cons.E_0*(Cons.Fcal_5 + Vars.Fcal_SO_5)/Cons.Fcal_0
           + 2.0*Cons.E_0*Cons.Fcal_2*(Cons.Fcal_3 + Vars.Fcal_SO_3)/Cons.Fcal_0**2)*Vars.v**5)/(Vars.Fcal_coeff*Cons.Fcal_0)
    dvdt_T5 = 1.0/dtdv
    dydt = np.zeros(8)
    dydt[0] = dvdt_T5[0]
    # Integrand for the orbital-frame rotor.
    dydt[5], dydt[6], dydt[7] = FrameFromAngularVelocityIntegrand(Vars.rfrak_frame, OmegaVec_2p5(Cons,Vars)[1:])
    # Spin-frame integrands, only when the corresponding spin evolves.
    if Cons.EvolveSpin1:
        dydt[1], dydt[2] = FrameFromAngularVelocity_2D_Integrand(Vars.rfrak_chi1[0], Vars.rfrak_chi1[1], (mul(mul(inverse(Cons.S_chi1),OmegaVec_chiVec_1_2p5(Cons,Vars)),Cons.S_chi1))[1:])
    else:
        dydt[1] = 0.0
        dydt[2] = 0.0
    if Cons.EvolveSpin2:
        dydt[3], dydt[4] = FrameFromAngularVelocity_2D_Integrand(Vars.rfrak_chi2[0], Vars.rfrak_chi2[1], (mul(mul(inverse(Cons.S_chi2),OmegaVec_chiVec_2_2p5(Cons,Vars)),Cons.S_chi2))[1:])
    else:
        dydt[3] = 0.0
        dydt[4] = 0.0
    return dydt
@njit(cache=True)
def Recalculate_3p0(Cons,Vars,y):
    """Refresh the orbit-dependent quantities in Vars from the ODE state y (3.0 PN).

    y layout: [v, rfrak_chi1 (2), rfrak_chi2 (2), rfrak_frame (3)].
    Mutates Vars in place; later assignments depend on earlier ones, so the
    statement order is significant.
    """
    # Unpack the integration state.
    Vars.v = np.array([y[0]])
    Vars.rfrak_chi1 = np.array([y[1],y[2]])
    Vars.rfrak_chi2 = np.array([y[3],y[4]])
    Vars.rfrak_frame = np.array([y[5],y[6],y[7]])
    # Orbital-frame rotor and the orthonormal triad it carries.
    Vars.R = exp(Vars.rfrak_frame[0]*Cons.xHat + Vars.rfrak_frame[1]*Cons.yHat + Vars.rfrak_frame[2]*Cons.zHat)
    Vars.nHat = mul(mul(Vars.R,Cons.xHat),conjugate(Vars.R))
    Vars.lambdaHat = mul(mul(Vars.R,Cons.yHat),conjugate(Vars.R))
    Vars.ellHat = mul(mul(Vars.R,Cons.zHat),conjugate(Vars.R))
    # Spin rotors and the two spin vectors they generate.
    Vars.R_S1 = exp(Vars.rfrak_chi1[0]*Cons.xHat + Vars.rfrak_chi1[1]*Cons.yHat)
    Vars.R_S2 = exp(Vars.rfrak_chi2[0]*Cons.xHat + Vars.rfrak_chi2[1]*Cons.yHat)
    Vars.chiVec1 = mul(mul(mul(Cons.S_chi1,Vars.R_S1),Cons.zHat),mul(conjugate(Vars.R_S1),conjugate(Cons.S_chi1)))
    Vars.chiVec2 = mul(mul(mul(Cons.S_chi2,Vars.R_S2),Cons.zHat),mul(conjugate(Vars.R_S2),conjugate(Cons.S_chi2)))
    # Spin components projected onto the (nHat, lambdaHat, ellHat) triad.
    Vars.chi1_n = np.array([dot(Vars.chiVec1[1:],Vars.nHat[1:])])
    Vars.chi1_lambda = np.array([dot(Vars.chiVec1[1:],Vars.lambdaHat[1:])])
    Vars.chi1_ell = np.array([dot(Vars.chiVec1[1:],Vars.ellHat[1:])])
    Vars.chi2_n = np.array([dot(Vars.chiVec2[1:],Vars.nHat[1:])])
    Vars.chi2_lambda = np.array([dot(Vars.chiVec2[1:],Vars.lambdaHat[1:])])
    Vars.chi2_ell = np.array([dot(Vars.chiVec2[1:],Vars.ellHat[1:])])
    # Total (S) and mass-weighted difference (Sigma) spin combinations.
    Vars.S_ell = Cons.M1**2*Vars.chi1_ell + Cons.M2**2*Vars.chi2_ell
    Vars.S_n = Cons.M1**2*Vars.chi1_n + Cons.M2**2*Vars.chi2_n
    Vars.S_lambda = Cons.M1**2*Vars.chi1_lambda + Cons.M2**2*Vars.chi2_lambda
    Vars.Sigma_ell = Cons.M*(-Cons.M1*Vars.chi1_ell + Cons.M2*Vars.chi2_ell)
    Vars.Sigma_n = Cons.M*(-Cons.M1*Vars.chi1_n + Cons.M2*Vars.chi2_n)
    Vars.Sigma_lambda = Cons.M*(-Cons.M1*Vars.chi1_lambda + Cons.M2*Vars.chi2_lambda)
    # Symmetric / antisymmetric aligned-spin combinations.
    Vars.chi_s_ell = Vars.chi1_ell/2 + Vars.chi2_ell/2
    Vars.chi_a_ell = Vars.chi1_ell/2 - Vars.chi2_ell/2
    # log(v) is needed by the 3 PN logarithmic flux term (Fcal_lnv_6).
    Vars.logv = log(Vars.v)
    # Orbit-dependent flux and energy coefficients at this PN order
    # (3 PN spin-orbit flux term Fcal_SO_6 enters here).
    Vars.Fcal_coeff = 32*Cons.nu**2*Vars.v**10/5
    Vars.Fcal_SQ_4 = Cons.chi1chi1*(-89*Cons.delta/192 + 89*Cons.nu/96 - 89/192) - 103*Cons.chi1chi2*Cons.nu/48 + Cons.chi2chi2*(89*Cons.delta/192 + 89*Cons.nu/96 - 89/192) + Vars.chi_a_ell*(Vars.chi_a_ell*(287/96 - 12*Cons.nu) + 287*Vars.chi_s_ell*Cons.delta/48) + Vars.chi_s_ell**2*(Cons.nu/24 + 287/96)
    Vars.Fcal_SO_3 = (-4*Vars.S_ell - 5*Vars.Sigma_ell*Cons.delta/4)/Cons.M**2
    Vars.Fcal_SO_5 = (Vars.S_ell*(272*Cons.nu/9 - 9/2) + Vars.Sigma_ell*Cons.delta*(43*Cons.nu/4 - 13/16))/Cons.M**2
    Vars.Fcal_SO_6 = (-16*Vars.S_ell*pi - 31*Vars.Sigma_ell*Cons.delta*pi/6)/Cons.M**2
    Vars.E_SQ_4 = -3*Vars.chi_a_ell**2/2 - 3*Vars.chi_s_ell**2/2 - Cons.delta*(Cons.chi2chi2/2 + 3*Vars.chi_a_ell*Vars.chi_s_ell) + Cons.nu*(Cons.chi1chi2 + 6*Vars.chi_a_ell**2) + (Cons.chi1chi1 + Cons.chi2chi2)*(Cons.delta - 2*Cons.nu + 1)/4
    Vars.E_SO_3 = (14*Vars.S_ell/3 + 2*Vars.Sigma_ell*Cons.delta)/Cons.M**2
    Vars.E_SO_5 = (Vars.S_ell*(11 - 61*Cons.nu/9) + Vars.Sigma_ell*Cons.delta*(3 - 10*Cons.nu/3))/Cons.M**2
@njit
def OmegaVec_chiVec_1_3p0(Cons,Vars):
    """Precession angular velocity of chiVec1 at 3.0 PN order."""
    v = Vars.v
    # Spin-orbit rotation rate about ellHat (PN polynomial in v).
    ell_part = Vars.ellHat*(-0.75*Cons.delta + 0.5*Cons.nu + v**2*(Cons.delta*(0.625*Cons.nu - 0.5625) + Cons.nu*(1.25 - 0.0416666666666667*Cons.nu) + v**2*(Cons.delta*(Cons.nu*(4.875 - 0.15625*Cons.nu) - 0.84375) + Cons.nu*(Cons.nu*(-0.0208333333333333*Cons.nu - 3.28125) + 0.1875) + 0.84375) + 0.5625) + 0.75)
    # Spin-spin contributions along nHat and chiVec2.
    n_part = Vars.nHat*v*(3.0*Vars.chi1_n*Cons.nu + 3.0*Cons.M2**2*Vars.chi2_n/Cons.M**2)
    chi2_part = Cons.M2**2*Vars.chiVec2*v/Cons.M**2
    return (v**5/Cons.M)*(ell_part + n_part - chi2_part)
@njit
def OmegaVec_chiVec_2_3p0(Cons,Vars):
    """Precession angular velocity of chiVec2 at 3.0 PN order."""
    v = Vars.v
    # Spin-orbit rotation rate about ellHat (PN polynomial in v).
    ell_part = Vars.ellHat*(0.75*Cons.delta + 0.5*Cons.nu + v**2*(Cons.delta*(0.5625 - 0.625*Cons.nu) + Cons.nu*(1.25 - 0.0416666666666667*Cons.nu) + v**2*(Cons.delta*(Cons.nu*(0.15625*Cons.nu - 4.875) + 0.84375) + Cons.nu*(Cons.nu*(-0.0208333333333333*Cons.nu - 3.28125) + 0.1875) + 0.84375) + 0.5625) + 0.75)
    # Spin-spin contributions along nHat and chiVec1.
    n_part = Vars.nHat*v*(3.0*Vars.chi2_n*Cons.nu + 3.0*Cons.M1**2*Vars.chi1_n/Cons.M**2)
    chi1_part = Cons.M1**2*Vars.chiVec1*v/Cons.M**2
    return (v**5/Cons.M)*(ell_part + n_part - chi1_part)
@njit
def OmegaVec_3p0(Cons,Vars):
    """Orbital angular-velocity vector at 3.0 PN order.

    Leading ellHat rotation plus an nHat precession correction built from
    two PN series (a_ell and gamma_PN) in v.
    """
    v = Vars.v
    gamma_PN_0 = 1.00000000000000
    gamma_PN_2 = 1.0 - 0.333333333333333*Cons.nu
    gamma_PN_3 = (1.66666666666667*Vars.S_ell + Vars.Sigma_ell*Cons.delta)/Cons.M**2
    gamma_PN_4 = 1.0 - 5.41666666666667*Cons.nu
    gamma_PN_5 = (Vars.S_ell*(0.888888888888889*Cons.nu + 3.33333333333333) + 2.0*Vars.Sigma_ell*Cons.delta)/Cons.M**2
    gamma_PN_6 = 0.0123456790123457*Cons.nu**3 + 6.36111111111111*Cons.nu**2 - 2.98177812235564*Cons.nu + 1.0
    a_ell_0 = 7.0*Vars.S_n + 3.0*Vars.Sigma_n*Cons.delta
    a_ell_2 = Vars.S_n*(-9.66666666666667*Cons.nu - 10.0) + Vars.Sigma_n*Cons.delta*(-4.5*Cons.nu - 6.0)
    a_ell_4 = Vars.S_n*(5.77777777777778*Cons.nu**2 + 14.75*Cons.nu + 1.5) + Vars.Sigma_n*Cons.delta*(2.83333333333333*Cons.nu**2 + 9.125*Cons.nu + 1.5)
    # Horner-form PN series for the nHat correction.
    a_ell = a_ell_0 + v**2*(a_ell_2 + a_ell_4*v**2)
    gamma = gamma_PN_0 + v**2*(gamma_PN_2 + v*(gamma_PN_3 + v*(gamma_PN_4 + v*(gamma_PN_5 + gamma_PN_6*v))))
    return Vars.ellHat*v**3/Cons.M + Vars.nHat*v**6*(a_ell)*(gamma)/Cons.M**3
@njit(cache=True)
def TaylorT1_3p0(Cons,Vars):
    """TaylorT1 right-hand side at 3.0 PN order.

    Returns dydt for the 8-component state y = (v, rfrak_chi1[0:2],
    rfrak_chi2[0:2], rfrak_frame[0:3]); dv/dt is the direct ratio
    -Flux/(dE/dv).  The Flux and dEdV Horner series are copied verbatim
    from the generated original.
    """
    Flux = Vars.Fcal_coeff*(Cons.Fcal_0 + Vars.v**2*(Cons.Fcal_2 + Vars.v*(Cons.Fcal_3 + Vars.Fcal_SO_3 + Vars.v*(Cons.Fcal_4 + Vars.Fcal_SQ_4 + Vars.v*(Cons.Fcal_5 + Vars.Fcal_SO_5 + Vars.v*(Cons.Fcal_6 + Vars.Fcal_SO_6 + Cons.Fcal_lnv_6*Vars.logv))))))
    dEdV = -0.5*Cons.M*Cons.nu*Vars.v*(2.0*Cons.E_0 + Vars.v**2*(4.0*Cons.E_2 + Vars.v*(5.0*Vars.E_SO_3 + Vars.v*(6.0*Cons.E_4 + 6.0*Vars.E_SQ_4 + Vars.v*(8.0*Cons.E_6*Vars.v + 7.0*Vars.E_SO_5)))))
    Absorption = 0
    dvdt_T1 = (-Absorption - Flux)/dEdV
    dydt = np.zeros(8)
    # Frame evolution from the orbital angular velocity (vector part only).
    dydt[5], dydt[6], dydt[7] = FrameFromAngularVelocityIntegrand(Vars.rfrak_frame, OmegaVec_3p0(Cons,Vars)[1:])
    dydt[0] = dvdt_T1[0]
    # Spin-frame evolution; rotate each spin angular velocity into its own frame.
    if Cons.EvolveSpin1:
        dydt[1], dydt[2] = FrameFromAngularVelocity_2D_Integrand(Vars.rfrak_chi1[0], Vars.rfrak_chi1[1], (mul(mul(inverse(Cons.S_chi1),OmegaVec_chiVec_1_3p0(Cons,Vars)),Cons.S_chi1))[1:])
    else:
        dydt[1] = 0.0
        dydt[2] = 0.0
    if Cons.EvolveSpin2:
        dydt[3], dydt[4] = FrameFromAngularVelocity_2D_Integrand(Vars.rfrak_chi2[0], Vars.rfrak_chi2[1], (mul(mul(inverse(Cons.S_chi2),OmegaVec_chiVec_2_3p0(Cons,Vars)),Cons.S_chi2))[1:])
    else:
        dydt[3] = 0.0
        dydt[4] = 0.0
    return dydt
@njit(cache=True)
def TaylorT4_3p0(Cons,Vars):
    """TaylorT4 right-hand side at 3.0 PN order.

    dv/dt is computed by re-expanding -Flux/(dE/dv) as a single truncated
    Taylor series in v (the TaylorT4 approximant), then the remaining state
    derivatives are filled exactly as in TaylorT1_3p0.
    """
    # NOTE(review): the expression below is machine-generated (series inversion).
    # The literal `0` factors appear to be placeholders for absent series
    # coefficients.  Sub-terms such as `1.0*-Cons.Fcal_3 - Vars.Fcal_SO_3*Vars.v**3`
    # parse as (1.0*(-Cons.Fcal_3)) - (Vars.Fcal_SO_3*Vars.v**3), which looks like
    # parentheses around the coefficient sum were lost during generation --
    # confirm against the code generator before changing anything here.
    dvdt_T4 = -2.0*Vars.Fcal_coeff*(1.0*-Cons.Fcal_0 + 1.0*0*Vars.v + 1.0*-Cons.Fcal_2*Vars.v**2 + 1.0*-Cons.Fcal_3 - Vars.Fcal_SO_3*Vars.v**3 + 1.0*-Cons.Fcal_4 - Vars.Fcal_SQ_4*Vars.v**4 + 1.0*-Cons.Fcal_5 - Vars.Fcal_SO_5*Vars.v**5 + 1.0*-Cons.Fcal_6 - Vars.Fcal_SO_6 - Cons.Fcal_lnv_6*Vars.logv*Vars.v**6 + (0*(-1.0*-Cons.Fcal_0*Vars.v - 1.0*0*Vars.v**2 - 1.0*-Cons.Fcal_2*Vars.v**3 - 1.0*-Cons.Fcal_3 - Vars.Fcal_SO_3*Vars.v**4 - 1.0*-Cons.Fcal_4 - Vars.Fcal_SQ_4*Vars.v**5 - 1.0*-Cons.Fcal_5 - Vars.Fcal_SO_5*Vars.v**6) + 4*Cons.E_2*Cons.M*(-1.0*-Cons.Fcal_0*Vars.v**2 - 1.0*0*Vars.v**3 - 1.0*-Cons.Fcal_2*Vars.v**4 - 1.0*-Cons.Fcal_3 - Vars.Fcal_SO_3*Vars.v**5 - 1.0*-Cons.Fcal_4 - Vars.Fcal_SQ_4*Vars.v**6) + 5*Vars.E_SO_3*Cons.M*(-1.0*-Cons.Fcal_0*Vars.v**3 - 1.0*0*Vars.v**4 - 1.0*-Cons.Fcal_2*Vars.v**5 - 1.0*-Cons.Fcal_3 - Vars.Fcal_SO_3*Vars.v**6) + 6*Cons.E_4*Cons.M + 6*Vars.E_SQ_4*Cons.M*(-1.0*-Cons.Fcal_0*Vars.v**4 - 1.0*0*Vars.v**5 - 1.0*-Cons.Fcal_2*Vars.v**6) + 7*Vars.E_SO_5*Cons.M*(-1.0*-Cons.Fcal_0*Vars.v**5 - 1.0*0*Vars.v**6) - 1.0*8*Cons.E_6*Cons.M*-Cons.Fcal_0*Vars.v**6 + (0*(0*(1.0*-Cons.Fcal_0*Vars.v**2 + 1.0*0*Vars.v**3 + 1.0*-Cons.Fcal_2*Vars.v**4 + 1.0*-Cons.Fcal_3 - Vars.Fcal_SO_3*Vars.v**5 + 1.0*-Cons.Fcal_4 - Vars.Fcal_SQ_4*Vars.v**6) + 4*Cons.E_2*Cons.M*(2.0*-Cons.Fcal_0*Vars.v**3 + 2.0*0*Vars.v**4 + 2.0*-Cons.Fcal_2*Vars.v**5 + 2.0*-Cons.Fcal_3 - Vars.Fcal_SO_3*Vars.v**6) + 5*Vars.E_SO_3*Cons.M*(2.0*-Cons.Fcal_0*Vars.v**4 + 2.0*0*Vars.v**5 + 2.0*-Cons.Fcal_2*Vars.v**6) + 6*Cons.E_4*Cons.M + 6*Vars.E_SQ_4*Cons.M*(2.0*-Cons.Fcal_0*Vars.v**5 + 2.0*0*Vars.v**6) + 2.0*7*Vars.E_SO_5*Cons.M*-Cons.Fcal_0*Vars.v**6) + 4*Cons.E_2*Cons.M*(4*Cons.E_2*Cons.M*(1.0*-Cons.Fcal_0*Vars.v**4 + 1.0*0*Vars.v**5 + 1.0*-Cons.Fcal_2*Vars.v**6) + 5*Vars.E_SO_3*Cons.M*(2.0*-Cons.Fcal_0*Vars.v**5 + 2.0*0*Vars.v**6) + 2.0*6*Cons.E_4*Cons.M + 6*Vars.E_SQ_4*Cons.M*-Cons.Fcal_0*Vars.v**6) + 1.0*5*Vars.E_SO_3*Cons.M**2*-Cons.Fcal_0*Vars.v**6 + 
    (0*(0*(0*(-1.0*-Cons.Fcal_0*Vars.v**3 - 1.0*0*Vars.v**4 - 1.0*-Cons.Fcal_2*Vars.v**5 - 1.0*-Cons.Fcal_3 - Vars.Fcal_SO_3*Vars.v**6) + 4*Cons.E_2*Cons.M*(-3.0*-Cons.Fcal_0*Vars.v**4 - 3.0*0*Vars.v**5 - 3.0*-Cons.Fcal_2*Vars.v**6) + 5*Vars.E_SO_3*Cons.M*(-3.0*-Cons.Fcal_0*Vars.v**5 - 3.0*0*Vars.v**6) - 3.0*6*Cons.E_4*Cons.M + 6*Vars.E_SQ_4*Cons.M*-Cons.Fcal_0*Vars.v**6) + 4*Cons.E_2*Cons.M*(4*Cons.E_2*Cons.M*(-3.0*-Cons.Fcal_0*Vars.v**5 - 3.0*0*Vars.v**6) - 6.0*5*Vars.E_SO_3*Cons.M*-Cons.Fcal_0*Vars.v**6)) - 1.0*4*Cons.E_2*Cons.M**3*-Cons.Fcal_0*Vars.v**6 + (0**2*(0*(0*(1.0*-Cons.Fcal_0*Vars.v**4 + 1.0*0*Vars.v**5 + 1.0*-Cons.Fcal_2*Vars.v**6) + 4*Cons.E_2*Cons.M*(4.0*-Cons.Fcal_0*Vars.v**5 + 4.0*0*Vars.v**6) + 4.0*5*Vars.E_SO_3*Cons.M*-Cons.Fcal_0*Vars.v**6) + 6.0*4*Cons.E_2*Cons.M**2*-Cons.Fcal_0*Vars.v**6) + (0**4*(0*(-1.0*-Cons.Fcal_0*Vars.v**5 - 1.0*0*Vars.v**6) - 5.0*4*Cons.E_2*Cons.M*-Cons.Fcal_0*Vars.v**6) + 1.0*0**6*-Cons.Fcal_0*Vars.v**6/2*Cons.E_0*Cons.M)/2*Cons.E_0*Cons.M)/2*Cons.E_0*Cons.M)/2*Cons.E_0*Cons.M)/2*Cons.E_0*Cons.M)/2*Cons.E_0*Cons.M)/(Cons.nu*Vars.v*2*Cons.E_0*Cons.M)
    # Remaining state derivatives: frame rotation, then v, then spin frames.
    dydt=np.zeros(8)
    [dydt[5],dydt[6],dydt[7]] = FrameFromAngularVelocityIntegrand(Vars.rfrak_frame, OmegaVec_3p0(Cons,Vars)[1:])
    dydt[0] = dvdt_T4[0]
    if(Cons.EvolveSpin1):
        # Spin-1 angular velocity rotated into the spin-1 frame before integration.
        dydt[1], dydt[2]=FrameFromAngularVelocity_2D_Integrand(Vars.rfrak_chi1[0], Vars.rfrak_chi1[1],(mul(mul(inverse(Cons.S_chi1),OmegaVec_chiVec_1_3p0(Cons,Vars)),Cons.S_chi1))[1:])
    else:
        dydt[1], dydt[2] = 0.0, 0.0
    if(Cons.EvolveSpin2):
        dydt[3], dydt[4]=FrameFromAngularVelocity_2D_Integrand(Vars.rfrak_chi2[0], Vars.rfrak_chi2[1],(mul(mul(inverse(Cons.S_chi2),OmegaVec_chiVec_2_3p0(Cons,Vars)),Cons.S_chi2))[1:])
    else:
        dydt[3], dydt[4] = 0.0, 0.0
    return dydt
@njit(cache=True)
def TaylorT5_3p0(Cons,Vars):
    """TaylorT5 right-hand side at 3.0 PN order.

    dt/dv = (dE/dv)/(-Flux) is re-expanded as a truncated series in v and then
    inverted element-wise (dv/dt = 1/dtdv); the remaining state derivatives
    are filled exactly as in TaylorT1_3p0.
    """
    # NOTE(review): machine-generated series re-expansion; the literal `0`
    # factors appear to be placeholders for absent coefficients, and sub-terms
    # like `-Cons.Fcal_3 - Vars.Fcal_SO_3*(...)` look as if grouping parentheses
    # were lost during generation -- confirm against the generator before editing.
    dtdv = -0.5*Cons.nu*Vars.v*(1.0*2*Cons.E_0*Cons.M + 1.0*0*Vars.v + 1.0*4*Cons.E_2*Cons.M*Vars.v**2 + 1.0*5*Vars.E_SO_3*Cons.M*Vars.v**3 + 1.0*6*Cons.E_4*Cons.M + 6*Vars.E_SQ_4*Cons.M*Vars.v**4 + 1.0*7*Vars.E_SO_5*Cons.M*Vars.v**5 + 1.0*8*Cons.E_6*Cons.M*Vars.v**6 + (0*(-1.0*2*Cons.E_0*Cons.M*Vars.v - 1.0*0*Vars.v**2 - 1.0*4*Cons.E_2*Cons.M*Vars.v**3 - 1.0*5*Vars.E_SO_3*Cons.M*Vars.v**4 - 1.0*6*Cons.E_4*Cons.M + 6*Vars.E_SQ_4*Cons.M*Vars.v**5 - 1.0*7*Vars.E_SO_5*Cons.M*Vars.v**6) + -Cons.Fcal_2*(-1.0*2*Cons.E_0*Cons.M*Vars.v**2 - 1.0*0*Vars.v**3 - 1.0*4*Cons.E_2*Cons.M*Vars.v**4 - 1.0*5*Vars.E_SO_3*Cons.M*Vars.v**5 - 1.0*6*Cons.E_4*Cons.M + 6*Vars.E_SQ_4*Cons.M*Vars.v**6) + -Cons.Fcal_3 - Vars.Fcal_SO_3*(-1.0*2*Cons.E_0*Cons.M*Vars.v**3 - 1.0*0*Vars.v**4 - 1.0*4*Cons.E_2*Cons.M*Vars.v**5 - 1.0*5*Vars.E_SO_3*Cons.M*Vars.v**6) + -Cons.Fcal_4 - Vars.Fcal_SQ_4*(-1.0*2*Cons.E_0*Cons.M*Vars.v**4 - 1.0*0*Vars.v**5 - 1.0*4*Cons.E_2*Cons.M*Vars.v**6) + -Cons.Fcal_5 - Vars.Fcal_SO_5*(-1.0*2*Cons.E_0*Cons.M*Vars.v**5 - 1.0*0*Vars.v**6) - 1.0*-Cons.Fcal_6 - Vars.Fcal_SO_6 - Cons.Fcal_lnv_6*Vars.logv*2*Cons.E_0*Cons.M*Vars.v**6 + (0*(0*(1.0*2*Cons.E_0*Cons.M*Vars.v**2 + 1.0*0*Vars.v**3 + 1.0*4*Cons.E_2*Cons.M*Vars.v**4 + 1.0*5*Vars.E_SO_3*Cons.M*Vars.v**5 + 1.0*6*Cons.E_4*Cons.M + 6*Vars.E_SQ_4*Cons.M*Vars.v**6) + -Cons.Fcal_2*(2.0*2*Cons.E_0*Cons.M*Vars.v**3 + 2.0*0*Vars.v**4 + 2.0*4*Cons.E_2*Cons.M*Vars.v**5 + 2.0*5*Vars.E_SO_3*Cons.M*Vars.v**6) + -Cons.Fcal_3 - Vars.Fcal_SO_3*(2.0*2*Cons.E_0*Cons.M*Vars.v**4 + 2.0*0*Vars.v**5 + 2.0*4*Cons.E_2*Cons.M*Vars.v**6) + -Cons.Fcal_4 - Vars.Fcal_SQ_4*(2.0*2*Cons.E_0*Cons.M*Vars.v**5 + 2.0*0*Vars.v**6) + 2.0*-Cons.Fcal_5 - Vars.Fcal_SO_5*2*Cons.E_0*Cons.M*Vars.v**6) + -Cons.Fcal_2*(-Cons.Fcal_2*(1.0*2*Cons.E_0*Cons.M*Vars.v**4 + 1.0*0*Vars.v**5 + 1.0*4*Cons.E_2*Cons.M*Vars.v**6) + -Cons.Fcal_3 - Vars.Fcal_SO_3*(2.0*2*Cons.E_0*Cons.M*Vars.v**5 + 2.0*0*Vars.v**6) + 2.0*-Cons.Fcal_4 - Vars.Fcal_SQ_4*2*Cons.E_0*Cons.M*Vars.v**6) + 
    1.0*-Cons.Fcal_3 - Vars.Fcal_SO_3**2*2*Cons.E_0*Cons.M*Vars.v**6 + (0*(0*(0*(-1.0*2*Cons.E_0*Cons.M*Vars.v**3 - 1.0*0*Vars.v**4 - 1.0*4*Cons.E_2*Cons.M*Vars.v**5 - 1.0*5*Vars.E_SO_3*Cons.M*Vars.v**6) + -Cons.Fcal_2*(-3.0*2*Cons.E_0*Cons.M*Vars.v**4 - 3.0*0*Vars.v**5 - 3.0*4*Cons.E_2*Cons.M*Vars.v**6) + -Cons.Fcal_3 - Vars.Fcal_SO_3*(-3.0*2*Cons.E_0*Cons.M*Vars.v**5 - 3.0*0*Vars.v**6) - 3.0*-Cons.Fcal_4 - Vars.Fcal_SQ_4*2*Cons.E_0*Cons.M*Vars.v**6) + -Cons.Fcal_2*(-Cons.Fcal_2*(-3.0*2*Cons.E_0*Cons.M*Vars.v**5 - 3.0*0*Vars.v**6) - 6.0*-Cons.Fcal_3 - Vars.Fcal_SO_3*2*Cons.E_0*Cons.M*Vars.v**6)) - 1.0*-Cons.Fcal_2**3*2*Cons.E_0*Cons.M*Vars.v**6 + (0**2*(0*(0*(1.0*2*Cons.E_0*Cons.M*Vars.v**4 + 1.0*0*Vars.v**5 + 1.0*4*Cons.E_2*Cons.M*Vars.v**6) + -Cons.Fcal_2*(4.0*2*Cons.E_0*Cons.M*Vars.v**5 + 4.0*0*Vars.v**6) + 4.0*-Cons.Fcal_3 - Vars.Fcal_SO_3*2*Cons.E_0*Cons.M*Vars.v**6) + 6.0*-Cons.Fcal_2**2*2*Cons.E_0*Cons.M*Vars.v**6) + (0**4*(0*(-1.0*2*Cons.E_0*Cons.M*Vars.v**5 - 1.0*0*Vars.v**6) - 5.0*-Cons.Fcal_2*2*Cons.E_0*Cons.M*Vars.v**6) + 1.0*0**6*2*Cons.E_0*Cons.M*Vars.v**6/-Cons.Fcal_0)/-Cons.Fcal_0)/-Cons.Fcal_0)/-Cons.Fcal_0)/-Cons.Fcal_0)/-Cons.Fcal_0)/(Vars.Fcal_coeff*-Cons.Fcal_0)
    # Element-wise inversion: dv/dt = 1/(dt/dv).
    dvdt_T5 = 1.0/dtdv
    dydt=np.zeros(8)
    [dydt[5],dydt[6],dydt[7]] = FrameFromAngularVelocityIntegrand(Vars.rfrak_frame, OmegaVec_3p0(Cons,Vars)[1:])
    dydt[0] = dvdt_T5[0]
    if(Cons.EvolveSpin1):
        # Spin-1 angular velocity rotated into the spin-1 frame before integration.
        dydt[1], dydt[2]=FrameFromAngularVelocity_2D_Integrand(Vars.rfrak_chi1[0], Vars.rfrak_chi1[1],(mul(mul(inverse(Cons.S_chi1),OmegaVec_chiVec_1_3p0(Cons,Vars)),Cons.S_chi1))[1:])
    else:
        dydt[1], dydt[2] = 0.0, 0.0
    if(Cons.EvolveSpin2):
        dydt[3], dydt[4]=FrameFromAngularVelocity_2D_Integrand(Vars.rfrak_chi2[0], Vars.rfrak_chi2[1],(mul(mul(inverse(Cons.S_chi2),OmegaVec_chiVec_2_3p0(Cons,Vars)),Cons.S_chi2))[1:])
    else:
        dydt[3], dydt[4] = 0.0, 0.0
    return dydt
@njit(cache=True)
def Recalculate_3p5(Cons,Vars,y):
    """Refresh all state-dependent quantities in Vars from the ODE state y (3.5 PN).

    Mutates Vars in place; no return value.  State layout (from the unpacking
    below): y[0]=v, y[1:3]=rfrak_chi1, y[3:5]=rfrak_chi2, y[5:8]=rfrak_frame.
    """
    # --- Unpack the ODE state vector.
    Vars.v = np.array([y[0]])
    Vars.rfrak_chi1 = np.array([y[1],y[2]])
    Vars.rfrak_chi2 = np.array([y[3],y[4]])
    Vars.rfrak_frame = np.array([y[5],y[6],y[7]])
    # --- Frame rotor and co-rotating orthonormal triad (nHat, lambdaHat, ellHat).
    Vars.R = exp(Vars.rfrak_frame[0]*Cons.xHat + Vars.rfrak_frame[1]*Cons.yHat + Vars.rfrak_frame[2]*Cons.zHat)
    Vars.nHat = mul(mul(Vars.R,Cons.xHat),conjugate(Vars.R))
    Vars.lambdaHat = mul(mul(Vars.R,Cons.yHat),conjugate(Vars.R))
    Vars.ellHat = mul(mul(Vars.R,Cons.zHat),conjugate(Vars.R))
    # --- Spin rotors and the two spin vectors in the inertial frame.
    Vars.R_S1 = exp(Vars.rfrak_chi1[0]*Cons.xHat + Vars.rfrak_chi1[1]*Cons.yHat)
    Vars.R_S2 = exp(Vars.rfrak_chi2[0]*Cons.xHat + Vars.rfrak_chi2[1]*Cons.yHat)
    Vars.chiVec1 = mul(mul(mul(Cons.S_chi1,Vars.R_S1),Cons.zHat),mul(conjugate(Vars.R_S1),conjugate(Cons.S_chi1)))
    Vars.chiVec2 = mul(mul(mul(Cons.S_chi2,Vars.R_S2),Cons.zHat),mul(conjugate(Vars.R_S2),conjugate(Cons.S_chi2)))
    # --- Projections of each spin onto the triad ([1:] drops the scalar part).
    Vars.chi1_n = np.array([dot(Vars.chiVec1[1:],Vars.nHat[1:])])
    Vars.chi1_lambda = np.array([dot(Vars.chiVec1[1:],Vars.lambdaHat[1:])])
    Vars.chi1_ell = np.array([dot(Vars.chiVec1[1:],Vars.ellHat[1:])])
    Vars.chi2_n = np.array([dot(Vars.chiVec2[1:],Vars.nHat[1:])])
    Vars.chi2_lambda = np.array([dot(Vars.chiVec2[1:],Vars.lambdaHat[1:])])
    Vars.chi2_ell = np.array([dot(Vars.chiVec2[1:],Vars.ellHat[1:])])
    # --- Combined spin quantities (total S, mass-weighted Sigma, symmetric/antisymmetric).
    Vars.S_ell = Cons.M1**2*Vars.chi1_ell + Cons.M2**2*Vars.chi2_ell
    Vars.S_n = Cons.M1**2*Vars.chi1_n + Cons.M2**2*Vars.chi2_n
    Vars.S_lambda = Cons.M1**2*Vars.chi1_lambda + Cons.M2**2*Vars.chi2_lambda
    Vars.Sigma_ell = Cons.M*(-Cons.M1*Vars.chi1_ell + Cons.M2*Vars.chi2_ell)
    Vars.Sigma_n = Cons.M*(-Cons.M1*Vars.chi1_n + Cons.M2*Vars.chi2_n)
    Vars.Sigma_lambda = Cons.M*(-Cons.M1*Vars.chi1_lambda + Cons.M2*Vars.chi2_lambda)
    Vars.chi_s_ell = Vars.chi1_ell/2 + Vars.chi2_ell/2
    Vars.chi_a_ell = Vars.chi1_ell/2 - Vars.chi2_ell/2
    # --- Spin-dependent flux (Fcal_*) and energy (E_*) coefficients used by the
    #     3.5 PN Taylor approximants.
    Vars.logv = log(Vars.v)
    Vars.Fcal_coeff = 32*Cons.nu**2*Vars.v**10/5
    Vars.Fcal_SQ_4 = Cons.chi1chi1*(-89*Cons.delta/192 + 89*Cons.nu/96 - 89/192) - 103*Cons.chi1chi2*Cons.nu/48 + Cons.chi2chi2*(89*Cons.delta/192 + 89*Cons.nu/96 - 89/192) + Vars.chi_a_ell*(Vars.chi_a_ell*(287/96 - 12*Cons.nu) + 287*Vars.chi_s_ell*Cons.delta/48) + Vars.chi_s_ell**2*(Cons.nu/24 + 287/96)
    Vars.Fcal_SO_3 = (-4*Vars.S_ell - 5*Vars.Sigma_ell*Cons.delta/4)/Cons.M**2
    Vars.Fcal_SO_5 = (Vars.S_ell*(272*Cons.nu/9 - 9/2) + Vars.Sigma_ell*Cons.delta*(43*Cons.nu/4 - 13/16))/Cons.M**2
    Vars.Fcal_SO_6 = (-16*Vars.S_ell*pi - 31*Vars.Sigma_ell*Cons.delta*pi/6)/Cons.M**2
    Vars.Fcal_SO_7 = (Vars.S_ell*(-2810*Cons.nu**2/27 + 6172*Cons.nu/189 + 476645/6804) + Vars.Sigma_ell*Cons.delta*(-1501*Cons.nu**2/36 + 1849*Cons.nu/126 + 9535/336))/Cons.M**2
    Vars.E_SQ_4 = -3*Vars.chi_a_ell**2/2 - 3*Vars.chi_s_ell**2/2 - Cons.delta*(Cons.chi2chi2/2 + 3*Vars.chi_a_ell*Vars.chi_s_ell) + Cons.nu*(Cons.chi1chi2 + 6*Vars.chi_a_ell**2) + (Cons.chi1chi1 + Cons.chi2chi2)*(Cons.delta - 2*Cons.nu + 1)/4
    Vars.E_SO_3 = (14*Vars.S_ell/3 + 2*Vars.Sigma_ell*Cons.delta)/Cons.M**2
    Vars.E_SO_5 = (Vars.S_ell*(11 - 61*Cons.nu/9) + Vars.Sigma_ell*Cons.delta*(3 - 10*Cons.nu/3))/Cons.M**2
    Vars.E_SO_7 = (Vars.S_ell*(29*Cons.nu**2/12 - 367*Cons.nu/4 + 135/4) + Vars.Sigma_ell*Cons.delta*(5*Cons.nu**2/4 - 39*Cons.nu + 27/4))/Cons.M**2
@njit
def OmegaVec_chiVec_1_3p5(Cons,Vars):
    """Angular velocity driving the evolution of chiVec1 at 3.5 PN order.

    Identical in behavior to the generated one-liner; only split into named
    sub-terms with the same arithmetic grouping.
    """
    # Overall prefactor v^5/M.
    prefactor = Vars.v**5/Cons.M
    # PN series along ellHat.
    ell_term = Vars.ellHat*(-0.75*Cons.delta + 0.5*Cons.nu + Vars.v**2*(Cons.delta*(0.625*Cons.nu - 0.5625) + Cons.nu*(1.25 - 0.0416666666666667*Cons.nu) + Vars.v**2*(Cons.delta*(Cons.nu*(4.875 - 0.15625*Cons.nu) - 0.84375) + Cons.nu*(Cons.nu*(-0.0208333333333333*Cons.nu - 3.28125) + 0.1875) + 0.84375) + 0.5625) + 0.75)
    # Spin projections along nHat.
    n_term = Vars.nHat*Vars.v*(3.0*Vars.chi1_n*Cons.nu + 3.0*Cons.M2**2*Vars.chi2_n/Cons.M**2)
    # Direct coupling to the companion's spin vector.
    companion_term = Cons.M2**2*Vars.chiVec2*Vars.v/Cons.M**2
    return prefactor*(ell_term + n_term - companion_term)
@njit
def OmegaVec_chiVec_2_3p5(Cons,Vars):
    """Angular velocity driving the evolution of chiVec2 at 3.5 PN order.

    Identical in behavior to the generated one-liner; only split into named
    sub-terms with the same arithmetic grouping.
    """
    # Overall prefactor v^5/M.
    prefactor = Vars.v**5/Cons.M
    # PN series along ellHat.
    ell_term = Vars.ellHat*(0.75*Cons.delta + 0.5*Cons.nu + Vars.v**2*(Cons.delta*(0.5625 - 0.625*Cons.nu) + Cons.nu*(1.25 - 0.0416666666666667*Cons.nu) + Vars.v**2*(Cons.delta*(Cons.nu*(0.15625*Cons.nu - 4.875) + 0.84375) + Cons.nu*(Cons.nu*(-0.0208333333333333*Cons.nu - 3.28125) + 0.1875) + 0.84375) + 0.5625) + 0.75)
    # Spin projections along nHat.
    n_term = Vars.nHat*Vars.v*(3.0*Vars.chi2_n*Cons.nu + 3.0*Cons.M1**2*Vars.chi1_n/Cons.M**2)
    # Direct coupling to the companion's spin vector.
    companion_term = Cons.M1**2*Vars.chiVec1*Vars.v/Cons.M**2
    return prefactor*(ell_term + n_term - companion_term)
@njit
def OmegaVec_3p5(Cons,Vars):
    """Orbital angular-velocity vector at 3.5 PN order.

    Same computation as the generated original (which adds the gamma_PN_7
    coefficient over the 3.0 PN version); only the ordering/naming of the
    intermediate assignments differs.
    """
    # gamma_PN_* coefficients of the PN series in v.
    gamma_PN_0 = 1.00000000000000
    gamma_PN_2 = 1.0 - 0.333333333333333*Cons.nu
    gamma_PN_3 = (1.66666666666667*Vars.S_ell + Vars.Sigma_ell*Cons.delta)/Cons.M**2
    gamma_PN_4 = 1.0 - 5.41666666666667*Cons.nu
    gamma_PN_5 = (Vars.S_ell*(0.888888888888889*Cons.nu + 3.33333333333333) + 2.0*Vars.Sigma_ell*Cons.delta)/Cons.M**2
    gamma_PN_6 = 0.0123456790123457*Cons.nu**3 + 6.36111111111111*Cons.nu**2 - 2.98177812235564*Cons.nu + 1.0
    gamma_PN_7 = (Vars.S_ell*(-6.0*Cons.nu**2 - 10.5833333333333*Cons.nu + 5.0) - 2.66666666666667*Vars.Sigma_ell*Cons.delta*Cons.nu**2 + Vars.Sigma_ell*Cons.delta*(3.0 - 10.1666666666667*Cons.nu))/Cons.M**2
    # a_ell_* coefficients multiplying the nHat correction.
    a_ell_0 = 7.0*Vars.S_n + 3.0*Vars.Sigma_n*Cons.delta
    a_ell_2 = Vars.S_n*(-9.66666666666667*Cons.nu - 10.0) + Vars.Sigma_n*Cons.delta*(-4.5*Cons.nu - 6.0)
    a_ell_4 = Vars.S_n*(5.77777777777778*Cons.nu**2 + 14.75*Cons.nu + 1.5) + Vars.Sigma_n*Cons.delta*(2.83333333333333*Cons.nu**2 + 9.125*Cons.nu + 1.5)
    # Horner-form sub-series, grouped exactly as in the generated expression.
    a_ell = a_ell_0 + Vars.v**2*(a_ell_2 + a_ell_4*Vars.v**2)
    gamma_PN = gamma_PN_0 + Vars.v**2*(gamma_PN_2 + Vars.v*(gamma_PN_3 + Vars.v*(gamma_PN_4 + Vars.v*(gamma_PN_5 + Vars.v*(gamma_PN_6 + gamma_PN_7*Vars.v)))))
    return Vars.ellHat*Vars.v**3/Cons.M + Vars.nHat*Vars.v**6*a_ell*gamma_PN/Cons.M**3
@njit(cache=True)
def TaylorT1_3p5(Cons,Vars):
    """TaylorT1 right-hand side at 3.5 PN order.

    Returns dydt for the 8-component state y = (v, rfrak_chi1[0:2],
    rfrak_chi2[0:2], rfrak_frame[0:3]); dv/dt is the direct ratio
    -Flux/(dE/dv).  The Flux and dEdV Horner series are copied verbatim
    from the generated original.
    """
    Flux = Vars.Fcal_coeff*(Cons.Fcal_0 + Vars.v**2*(Cons.Fcal_2 + Vars.v*(Cons.Fcal_3 + Vars.Fcal_SO_3 + Vars.v*(Cons.Fcal_4 + Vars.Fcal_SQ_4 + Vars.v*(Cons.Fcal_5 + Vars.Fcal_SO_5 + Vars.v*(Cons.Fcal_6 + Vars.Fcal_SO_6 + Cons.Fcal_lnv_6*Vars.logv + Vars.v*(Cons.Fcal_7 + Vars.Fcal_SO_7)))))))
    dEdV = -0.5*Cons.M*Cons.nu*Vars.v*(2.0*Cons.E_0 + Vars.v**2*(4.0*Cons.E_2 + Vars.v*(5.0*Vars.E_SO_3 + Vars.v*(6.0*Cons.E_4 + 6.0*Vars.E_SQ_4 + Vars.v*(7.0*Vars.E_SO_5 + Vars.v*(8.0*Cons.E_6 + 9.0*Vars.E_SO_7*Vars.v))))))
    Absorption = 0
    dvdt_T1 = (-Absorption - Flux)/dEdV
    dydt = np.zeros(8)
    # Frame evolution from the orbital angular velocity (vector part only).
    dydt[5], dydt[6], dydt[7] = FrameFromAngularVelocityIntegrand(Vars.rfrak_frame, OmegaVec_3p5(Cons,Vars)[1:])
    dydt[0] = dvdt_T1[0]
    # Spin-frame evolution; rotate each spin angular velocity into its own frame.
    if Cons.EvolveSpin1:
        dydt[1], dydt[2] = FrameFromAngularVelocity_2D_Integrand(Vars.rfrak_chi1[0], Vars.rfrak_chi1[1], (mul(mul(inverse(Cons.S_chi1),OmegaVec_chiVec_1_3p5(Cons,Vars)),Cons.S_chi1))[1:])
    else:
        dydt[1] = 0.0
        dydt[2] = 0.0
    if Cons.EvolveSpin2:
        dydt[3], dydt[4] = FrameFromAngularVelocity_2D_Integrand(Vars.rfrak_chi2[0], Vars.rfrak_chi2[1], (mul(mul(inverse(Cons.S_chi2),OmegaVec_chiVec_2_3p5(Cons,Vars)),Cons.S_chi2))[1:])
    else:
        dydt[3] = 0.0
        dydt[4] = 0.0
    return dydt
@njit(cache=True)
def TaylorT4_3p5(Cons,Vars):
    """TaylorT4 right-hand side at 3.5 PN order.

    dv/dt is computed by re-expanding -Flux/(dE/dv) as a single truncated
    Taylor series in v (the TaylorT4 approximant), then the remaining state
    derivatives are filled exactly as in TaylorT1_3p5.
    """
    # NOTE(review): the expression below is machine-generated (series inversion).
    # The literal `0` factors appear to be placeholders for absent series
    # coefficients, and sub-terms like `1.0*-Cons.Fcal_3 - Vars.Fcal_SO_3*Vars.v**3`
    # look as if grouping parentheses were lost during generation -- confirm
    # against the code generator before changing anything here.
    dvdt_T4 = -2.0*Vars.Fcal_coeff*(1.0*-Cons.Fcal_0 + 1.0*0*Vars.v + 1.0*-Cons.Fcal_2*Vars.v**2 + 1.0*-Cons.Fcal_3 - Vars.Fcal_SO_3*Vars.v**3 + 1.0*-Cons.Fcal_4 - Vars.Fcal_SQ_4*Vars.v**4 + 1.0*-Cons.Fcal_5 - Vars.Fcal_SO_5*Vars.v**5 + 1.0*-Cons.Fcal_6 - Vars.Fcal_SO_6 - Cons.Fcal_lnv_6*Vars.logv*Vars.v**6 + 1.0*-Cons.Fcal_7 - Vars.Fcal_SO_7*Vars.v**7 + (0*(-1.0*-Cons.Fcal_0*Vars.v - 1.0*0*Vars.v**2 - 1.0*-Cons.Fcal_2*Vars.v**3 - 1.0*-Cons.Fcal_3 - Vars.Fcal_SO_3*Vars.v**4 - 1.0*-Cons.Fcal_4 - Vars.Fcal_SQ_4*Vars.v**5 - 1.0*-Cons.Fcal_5 - Vars.Fcal_SO_5*Vars.v**6 - 1.0*-Cons.Fcal_6 - Vars.Fcal_SO_6 - Cons.Fcal_lnv_6*Vars.logv*Vars.v**7) + 4*Cons.E_2*Cons.M*(-1.0*-Cons.Fcal_0*Vars.v**2 - 1.0*0*Vars.v**3 - 1.0*-Cons.Fcal_2*Vars.v**4 - 1.0*-Cons.Fcal_3 - Vars.Fcal_SO_3*Vars.v**5 - 1.0*-Cons.Fcal_4 - Vars.Fcal_SQ_4*Vars.v**6 - 1.0*-Cons.Fcal_5 - Vars.Fcal_SO_5*Vars.v**7) + 5*Vars.E_SO_3*Cons.M*(-1.0*-Cons.Fcal_0*Vars.v**3 - 1.0*0*Vars.v**4 - 1.0*-Cons.Fcal_2*Vars.v**5 - 1.0*-Cons.Fcal_3 - Vars.Fcal_SO_3*Vars.v**6 - 1.0*-Cons.Fcal_4 - Vars.Fcal_SQ_4*Vars.v**7) + 6*Cons.E_4*Cons.M + 6*Vars.E_SQ_4*Cons.M*(-1.0*-Cons.Fcal_0*Vars.v**4 - 1.0*0*Vars.v**5 - 1.0*-Cons.Fcal_2*Vars.v**6 - 1.0*-Cons.Fcal_3 - Vars.Fcal_SO_3*Vars.v**7) + 7*Vars.E_SO_5*Cons.M*(-1.0*-Cons.Fcal_0*Vars.v**5 - 1.0*0*Vars.v**6 - 1.0*-Cons.Fcal_2*Vars.v**7) + 8*Cons.E_6*Cons.M*(-1.0*-Cons.Fcal_0*Vars.v**6 - 1.0*0*Vars.v**7) - 1.0*9*Vars.E_SO_7*Cons.M*-Cons.Fcal_0*Vars.v**7 + (0*(0*(1.0*-Cons.Fcal_0*Vars.v**2 + 1.0*0*Vars.v**3 + 1.0*-Cons.Fcal_2*Vars.v**4 + 1.0*-Cons.Fcal_3 - Vars.Fcal_SO_3*Vars.v**5 + 1.0*-Cons.Fcal_4 - Vars.Fcal_SQ_4*Vars.v**6 + 1.0*-Cons.Fcal_5 - Vars.Fcal_SO_5*Vars.v**7) + 4*Cons.E_2*Cons.M*(2.0*-Cons.Fcal_0*Vars.v**3 + 2.0*0*Vars.v**4 + 2.0*-Cons.Fcal_2*Vars.v**5 + 2.0*-Cons.Fcal_3 - Vars.Fcal_SO_3*Vars.v**6 + 2.0*-Cons.Fcal_4 - Vars.Fcal_SQ_4*Vars.v**7) + 5*Vars.E_SO_3*Cons.M*(2.0*-Cons.Fcal_0*Vars.v**4 + 2.0*0*Vars.v**5 + 2.0*-Cons.Fcal_2*Vars.v**6 + 2.0*-Cons.Fcal_3 - 
    Vars.Fcal_SO_3*Vars.v**7) + 6*Cons.E_4*Cons.M + 6*Vars.E_SQ_4*Cons.M*(2.0*-Cons.Fcal_0*Vars.v**5 + 2.0*0*Vars.v**6 + 2.0*-Cons.Fcal_2*Vars.v**7) + 7*Vars.E_SO_5*Cons.M*(2.0*-Cons.Fcal_0*Vars.v**6 + 2.0*0*Vars.v**7) + 2.0*8*Cons.E_6*Cons.M*-Cons.Fcal_0*Vars.v**7) + 4*Cons.E_2*Cons.M*(4*Cons.E_2*Cons.M*(1.0*-Cons.Fcal_0*Vars.v**4 + 1.0*0*Vars.v**5 + 1.0*-Cons.Fcal_2*Vars.v**6 + 1.0*-Cons.Fcal_3 - Vars.Fcal_SO_3*Vars.v**7) + 5*Vars.E_SO_3*Cons.M*(2.0*-Cons.Fcal_0*Vars.v**5 + 2.0*0*Vars.v**6 + 2.0*-Cons.Fcal_2*Vars.v**7) + 6*Cons.E_4*Cons.M + 6*Vars.E_SQ_4*Cons.M*(2.0*-Cons.Fcal_0*Vars.v**6 + 2.0*0*Vars.v**7) + 2.0*7*Vars.E_SO_5*Cons.M*-Cons.Fcal_0*Vars.v**7) + 5*Vars.E_SO_3*Cons.M*(5*Vars.E_SO_3*Cons.M*(1.0*-Cons.Fcal_0*Vars.v**6 + 1.0*0*Vars.v**7) + 2.0*6*Cons.E_4*Cons.M + 6*Vars.E_SQ_4*Cons.M*-Cons.Fcal_0*Vars.v**7) + (0*(0*(0*(-1.0*-Cons.Fcal_0*Vars.v**3 - 1.0*0*Vars.v**4 - 1.0*-Cons.Fcal_2*Vars.v**5 - 1.0*-Cons.Fcal_3 - Vars.Fcal_SO_3*Vars.v**6 - 1.0*-Cons.Fcal_4 - Vars.Fcal_SQ_4*Vars.v**7) + 4*Cons.E_2*Cons.M*(-3.0*-Cons.Fcal_0*Vars.v**4 - 3.0*0*Vars.v**5 - 3.0*-Cons.Fcal_2*Vars.v**6 - 3.0*-Cons.Fcal_3 - Vars.Fcal_SO_3*Vars.v**7) + 5*Vars.E_SO_3*Cons.M*(-3.0*-Cons.Fcal_0*Vars.v**5 - 3.0*0*Vars.v**6 - 3.0*-Cons.Fcal_2*Vars.v**7) + 6*Cons.E_4*Cons.M + 6*Vars.E_SQ_4*Cons.M*(-3.0*-Cons.Fcal_0*Vars.v**6 - 3.0*0*Vars.v**7) - 3.0*7*Vars.E_SO_5*Cons.M*-Cons.Fcal_0*Vars.v**7) + 4*Cons.E_2*Cons.M*(4*Cons.E_2*Cons.M*(-3.0*-Cons.Fcal_0*Vars.v**5 - 3.0*0*Vars.v**6 - 3.0*-Cons.Fcal_2*Vars.v**7) + 5*Vars.E_SO_3*Cons.M*(-6.0*-Cons.Fcal_0*Vars.v**6 - 6.0*0*Vars.v**7) - 6.0*6*Cons.E_4*Cons.M + 6*Vars.E_SQ_4*Cons.M*-Cons.Fcal_0*Vars.v**7) - 3.0*5*Vars.E_SO_3*Cons.M**2*-Cons.Fcal_0*Vars.v**7) + 4*Cons.E_2*Cons.M**2*(4*Cons.E_2*Cons.M*(-1.0*-Cons.Fcal_0*Vars.v**6 - 1.0*0*Vars.v**7) - 3.0*5*Vars.E_SO_3*Cons.M*-Cons.Fcal_0*Vars.v**7) + (0*(0*(0*(0*(1.0*-Cons.Fcal_0*Vars.v**4 + 1.0*0*Vars.v**5 + 1.0*-Cons.Fcal_2*Vars.v**6 + 1.0*-Cons.Fcal_3 - Vars.Fcal_SO_3*Vars.v**7) + 
    4*Cons.E_2*Cons.M*(4.0*-Cons.Fcal_0*Vars.v**5 + 4.0*0*Vars.v**6 + 4.0*-Cons.Fcal_2*Vars.v**7) + 5*Vars.E_SO_3*Cons.M*(4.0*-Cons.Fcal_0*Vars.v**6 + 4.0*0*Vars.v**7) + 4.0*6*Cons.E_4*Cons.M + 6*Vars.E_SQ_4*Cons.M*-Cons.Fcal_0*Vars.v**7) + 4*Cons.E_2*Cons.M*(4*Cons.E_2*Cons.M*(6.0*-Cons.Fcal_0*Vars.v**6 + 6.0*0*Vars.v**7) + 12.0*5*Vars.E_SO_3*Cons.M*-Cons.Fcal_0*Vars.v**7)) + 4.0*4*Cons.E_2*Cons.M**3*-Cons.Fcal_0*Vars.v**7) + (0**3*(0*(0*(-1.0*-Cons.Fcal_0*Vars.v**5 - 1.0*0*Vars.v**6 - 1.0*-Cons.Fcal_2*Vars.v**7) + 4*Cons.E_2*Cons.M*(-5.0*-Cons.Fcal_0*Vars.v**6 - 5.0*0*Vars.v**7) - 5.0*5*Vars.E_SO_3*Cons.M*-Cons.Fcal_0*Vars.v**7) - 10.0*4*Cons.E_2*Cons.M**2*-Cons.Fcal_0*Vars.v**7) + (0**5*(0*(1.0*-Cons.Fcal_0*Vars.v**6 + 1.0*0*Vars.v**7) + 6.0*4*Cons.E_2*Cons.M*-Cons.Fcal_0*Vars.v**7) - 1.0*0**7*-Cons.Fcal_0*Vars.v**7/2*Cons.E_0*Cons.M)/2*Cons.E_0*Cons.M)/2*Cons.E_0*Cons.M)/2*Cons.E_0*Cons.M)/2*Cons.E_0*Cons.M)/2*Cons.E_0*Cons.M)/2*Cons.E_0*Cons.M)/(Cons.nu*Vars.v*2*Cons.E_0*Cons.M)
    # Remaining state derivatives: frame rotation, then v, then spin frames.
    dydt=np.zeros(8)
    [dydt[5],dydt[6],dydt[7]] = FrameFromAngularVelocityIntegrand(Vars.rfrak_frame, OmegaVec_3p5(Cons,Vars)[1:])
    dydt[0] = dvdt_T4[0]
    if(Cons.EvolveSpin1):
        # Spin-1 angular velocity rotated into the spin-1 frame before integration.
        dydt[1], dydt[2]=FrameFromAngularVelocity_2D_Integrand(Vars.rfrak_chi1[0], Vars.rfrak_chi1[1],(mul(mul(inverse(Cons.S_chi1),OmegaVec_chiVec_1_3p5(Cons,Vars)),Cons.S_chi1))[1:])
    else:
        dydt[1], dydt[2] = 0.0, 0.0
    if(Cons.EvolveSpin2):
        dydt[3], dydt[4]=FrameFromAngularVelocity_2D_Integrand(Vars.rfrak_chi2[0], Vars.rfrak_chi2[1],(mul(mul(inverse(Cons.S_chi2),OmegaVec_chiVec_2_3p5(Cons,Vars)),Cons.S_chi2))[1:])
    else:
        dydt[3], dydt[4] = 0.0, 0.0
    return dydt
@njit(cache=True)
def TaylorT5_3p5(Cons,Vars):
    """TaylorT5 right-hand side at 3.5 PN order.

    dt/dv = (dE/dv)/(-Flux) is re-expanded as a truncated series in v and then
    inverted element-wise (dv/dt = 1/dtdv); the remaining state derivatives
    are filled exactly as in TaylorT1_3p5.
    """
    # NOTE(review): machine-generated series re-expansion; the literal `0`
    # factors appear to be placeholders for absent coefficients, and sub-terms
    # like `-Cons.Fcal_3 - Vars.Fcal_SO_3*(...)` look as if grouping parentheses
    # were lost during generation -- confirm against the generator before editing.
    dtdv = -0.5*Cons.nu*Vars.v*(1.0*2*Cons.E_0*Cons.M + 1.0*0*Vars.v + 1.0*4*Cons.E_2*Cons.M*Vars.v**2 + 1.0*5*Vars.E_SO_3*Cons.M*Vars.v**3 + 1.0*6*Cons.E_4*Cons.M + 6*Vars.E_SQ_4*Cons.M*Vars.v**4 + 1.0*7*Vars.E_SO_5*Cons.M*Vars.v**5 + 1.0*8*Cons.E_6*Cons.M*Vars.v**6 + 1.0*9*Vars.E_SO_7*Cons.M*Vars.v**7 + (0*(-1.0*2*Cons.E_0*Cons.M*Vars.v - 1.0*0*Vars.v**2 - 1.0*4*Cons.E_2*Cons.M*Vars.v**3 - 1.0*5*Vars.E_SO_3*Cons.M*Vars.v**4 - 1.0*6*Cons.E_4*Cons.M + 6*Vars.E_SQ_4*Cons.M*Vars.v**5 - 1.0*7*Vars.E_SO_5*Cons.M*Vars.v**6 - 1.0*8*Cons.E_6*Cons.M*Vars.v**7) + -Cons.Fcal_2*(-1.0*2*Cons.E_0*Cons.M*Vars.v**2 - 1.0*0*Vars.v**3 - 1.0*4*Cons.E_2*Cons.M*Vars.v**4 - 1.0*5*Vars.E_SO_3*Cons.M*Vars.v**5 - 1.0*6*Cons.E_4*Cons.M + 6*Vars.E_SQ_4*Cons.M*Vars.v**6 - 1.0*7*Vars.E_SO_5*Cons.M*Vars.v**7) + -Cons.Fcal_3 - Vars.Fcal_SO_3*(-1.0*2*Cons.E_0*Cons.M*Vars.v**3 - 1.0*0*Vars.v**4 - 1.0*4*Cons.E_2*Cons.M*Vars.v**5 - 1.0*5*Vars.E_SO_3*Cons.M*Vars.v**6 - 1.0*6*Cons.E_4*Cons.M + 6*Vars.E_SQ_4*Cons.M*Vars.v**7) + -Cons.Fcal_4 - Vars.Fcal_SQ_4*(-1.0*2*Cons.E_0*Cons.M*Vars.v**4 - 1.0*0*Vars.v**5 - 1.0*4*Cons.E_2*Cons.M*Vars.v**6 - 1.0*5*Vars.E_SO_3*Cons.M*Vars.v**7) + -Cons.Fcal_5 - Vars.Fcal_SO_5*(-1.0*2*Cons.E_0*Cons.M*Vars.v**5 - 1.0*0*Vars.v**6 - 1.0*4*Cons.E_2*Cons.M*Vars.v**7) + -Cons.Fcal_6 - Vars.Fcal_SO_6 - Cons.Fcal_lnv_6*Vars.logv*(-1.0*2*Cons.E_0*Cons.M*Vars.v**6 - 1.0*0*Vars.v**7) - 1.0*-Cons.Fcal_7 - Vars.Fcal_SO_7*2*Cons.E_0*Cons.M*Vars.v**7 + (0*(0*(1.0*2*Cons.E_0*Cons.M*Vars.v**2 + 1.0*0*Vars.v**3 + 1.0*4*Cons.E_2*Cons.M*Vars.v**4 + 1.0*5*Vars.E_SO_3*Cons.M*Vars.v**5 + 1.0*6*Cons.E_4*Cons.M + 6*Vars.E_SQ_4*Cons.M*Vars.v**6 + 1.0*7*Vars.E_SO_5*Cons.M*Vars.v**7) + -Cons.Fcal_2*(2.0*2*Cons.E_0*Cons.M*Vars.v**3 + 2.0*0*Vars.v**4 + 2.0*4*Cons.E_2*Cons.M*Vars.v**5 + 2.0*5*Vars.E_SO_3*Cons.M*Vars.v**6 + 2.0*6*Cons.E_4*Cons.M + 6*Vars.E_SQ_4*Cons.M*Vars.v**7) + -Cons.Fcal_3 - Vars.Fcal_SO_3*(2.0*2*Cons.E_0*Cons.M*Vars.v**4 + 2.0*0*Vars.v**5 + 2.0*4*Cons.E_2*Cons.M*Vars.v**6 + 
    2.0*5*Vars.E_SO_3*Cons.M*Vars.v**7) + -Cons.Fcal_4 - Vars.Fcal_SQ_4*(2.0*2*Cons.E_0*Cons.M*Vars.v**5 + 2.0*0*Vars.v**6 + 2.0*4*Cons.E_2*Cons.M*Vars.v**7) + -Cons.Fcal_5 - Vars.Fcal_SO_5*(2.0*2*Cons.E_0*Cons.M*Vars.v**6 + 2.0*0*Vars.v**7) + 2.0*-Cons.Fcal_6 - Vars.Fcal_SO_6 - Cons.Fcal_lnv_6*Vars.logv*2*Cons.E_0*Cons.M*Vars.v**7) + -Cons.Fcal_2*(-Cons.Fcal_2*(1.0*2*Cons.E_0*Cons.M*Vars.v**4 + 1.0*0*Vars.v**5 + 1.0*4*Cons.E_2*Cons.M*Vars.v**6 + 1.0*5*Vars.E_SO_3*Cons.M*Vars.v**7) + -Cons.Fcal_3 - Vars.Fcal_SO_3*(2.0*2*Cons.E_0*Cons.M*Vars.v**5 + 2.0*0*Vars.v**6 + 2.0*4*Cons.E_2*Cons.M*Vars.v**7) + -Cons.Fcal_4 - Vars.Fcal_SQ_4*(2.0*2*Cons.E_0*Cons.M*Vars.v**6 + 2.0*0*Vars.v**7) + 2.0*-Cons.Fcal_5 - Vars.Fcal_SO_5*2*Cons.E_0*Cons.M*Vars.v**7) + -Cons.Fcal_3 - Vars.Fcal_SO_3*(-Cons.Fcal_3 - Vars.Fcal_SO_3*(1.0*2*Cons.E_0*Cons.M*Vars.v**6 + 1.0*0*Vars.v**7) + 2.0*-Cons.Fcal_4 - Vars.Fcal_SQ_4*2*Cons.E_0*Cons.M*Vars.v**7) + (0*(0*(0*(-1.0*2*Cons.E_0*Cons.M*Vars.v**3 - 1.0*0*Vars.v**4 - 1.0*4*Cons.E_2*Cons.M*Vars.v**5 - 1.0*5*Vars.E_SO_3*Cons.M*Vars.v**6 - 1.0*6*Cons.E_4*Cons.M + 6*Vars.E_SQ_4*Cons.M*Vars.v**7) + -Cons.Fcal_2*(-3.0*2*Cons.E_0*Cons.M*Vars.v**4 - 3.0*0*Vars.v**5 - 3.0*4*Cons.E_2*Cons.M*Vars.v**6 - 3.0*5*Vars.E_SO_3*Cons.M*Vars.v**7) + -Cons.Fcal_3 - Vars.Fcal_SO_3*(-3.0*2*Cons.E_0*Cons.M*Vars.v**5 - 3.0*0*Vars.v**6 - 3.0*4*Cons.E_2*Cons.M*Vars.v**7) + -Cons.Fcal_4 - Vars.Fcal_SQ_4*(-3.0*2*Cons.E_0*Cons.M*Vars.v**6 - 3.0*0*Vars.v**7) - 3.0*-Cons.Fcal_5 - Vars.Fcal_SO_5*2*Cons.E_0*Cons.M*Vars.v**7) + -Cons.Fcal_2*(-Cons.Fcal_2*(-3.0*2*Cons.E_0*Cons.M*Vars.v**5 - 3.0*0*Vars.v**6 - 3.0*4*Cons.E_2*Cons.M*Vars.v**7) + -Cons.Fcal_3 - Vars.Fcal_SO_3*(-6.0*2*Cons.E_0*Cons.M*Vars.v**6 - 6.0*0*Vars.v**7) - 6.0*-Cons.Fcal_4 - Vars.Fcal_SQ_4*2*Cons.E_0*Cons.M*Vars.v**7) - 3.0*-Cons.Fcal_3 - Vars.Fcal_SO_3**2*2*Cons.E_0*Cons.M*Vars.v**7) + -Cons.Fcal_2**2*(-Cons.Fcal_2*(-1.0*2*Cons.E_0*Cons.M*Vars.v**6 - 1.0*0*Vars.v**7) - 3.0*-Cons.Fcal_3 - 
    Vars.Fcal_SO_3*2*Cons.E_0*Cons.M*Vars.v**7) + (0*(0*(0*(0*(1.0*2*Cons.E_0*Cons.M*Vars.v**4 + 1.0*0*Vars.v**5 + 1.0*4*Cons.E_2*Cons.M*Vars.v**6 + 1.0*5*Vars.E_SO_3*Cons.M*Vars.v**7) + -Cons.Fcal_2*(4.0*2*Cons.E_0*Cons.M*Vars.v**5 + 4.0*0*Vars.v**6 + 4.0*4*Cons.E_2*Cons.M*Vars.v**7) + -Cons.Fcal_3 - Vars.Fcal_SO_3*(4.0*2*Cons.E_0*Cons.M*Vars.v**6 + 4.0*0*Vars.v**7) + 4.0*-Cons.Fcal_4 - Vars.Fcal_SQ_4*2*Cons.E_0*Cons.M*Vars.v**7) + -Cons.Fcal_2*(-Cons.Fcal_2*(6.0*2*Cons.E_0*Cons.M*Vars.v**6 + 6.0*0*Vars.v**7) + 12.0*-Cons.Fcal_3 - Vars.Fcal_SO_3*2*Cons.E_0*Cons.M*Vars.v**7)) + 4.0*-Cons.Fcal_2**3*2*Cons.E_0*Cons.M*Vars.v**7) + (0**3*(0*(0*(-1.0*2*Cons.E_0*Cons.M*Vars.v**5 - 1.0*0*Vars.v**6 - 1.0*4*Cons.E_2*Cons.M*Vars.v**7) + -Cons.Fcal_2*(-5.0*2*Cons.E_0*Cons.M*Vars.v**6 - 5.0*0*Vars.v**7) - 5.0*-Cons.Fcal_3 - Vars.Fcal_SO_3*2*Cons.E_0*Cons.M*Vars.v**7) - 10.0*-Cons.Fcal_2**2*2*Cons.E_0*Cons.M*Vars.v**7) + (0**5*(0*(1.0*2*Cons.E_0*Cons.M*Vars.v**6 + 1.0*0*Vars.v**7) + 6.0*-Cons.Fcal_2*2*Cons.E_0*Cons.M*Vars.v**7) - 1.0*0**7*2*Cons.E_0*Cons.M*Vars.v**7/-Cons.Fcal_0)/-Cons.Fcal_0)/-Cons.Fcal_0)/-Cons.Fcal_0)/-Cons.Fcal_0)/-Cons.Fcal_0)/-Cons.Fcal_0)/(Vars.Fcal_coeff*-Cons.Fcal_0)
    # Element-wise inversion: dv/dt = 1/(dt/dv).
    dvdt_T5 = 1.0/dtdv
    dydt=np.zeros(8)
    [dydt[5],dydt[6],dydt[7]] = FrameFromAngularVelocityIntegrand(Vars.rfrak_frame, OmegaVec_3p5(Cons,Vars)[1:])
    dydt[0] = dvdt_T5[0]
    if(Cons.EvolveSpin1):
        # Spin-1 angular velocity rotated into the spin-1 frame before integration.
        dydt[1], dydt[2]=FrameFromAngularVelocity_2D_Integrand(Vars.rfrak_chi1[0], Vars.rfrak_chi1[1],(mul(mul(inverse(Cons.S_chi1),OmegaVec_chiVec_1_3p5(Cons,Vars)),Cons.S_chi1))[1:])
    else:
        dydt[1], dydt[2] = 0.0, 0.0
    if(Cons.EvolveSpin2):
        dydt[3], dydt[4]=FrameFromAngularVelocity_2D_Integrand(Vars.rfrak_chi2[0], Vars.rfrak_chi2[1],(mul(mul(inverse(Cons.S_chi2),OmegaVec_chiVec_2_3p5(Cons,Vars)),Cons.S_chi2))[1:])
    else:
        dydt[3], dydt[4] = 0.0, 0.0
    return dydt
@njit(cache=True)
def Recalculate_4p0(Cons,Vars,y):
    """Refresh all state-dependent quantities in Vars from the ODE state y (4.0 PN).

    Mutates Vars in place; no return value.  Identical to Recalculate_3p5
    except that it additionally computes the 4.0 PN flux term Fcal_SO_8.
    State layout (from the unpacking below): y[0]=v, y[1:3]=rfrak_chi1,
    y[3:5]=rfrak_chi2, y[5:8]=rfrak_frame.
    """
    # --- Unpack the ODE state vector.
    Vars.v = np.array([y[0]])
    Vars.rfrak_chi1 = np.array([y[1],y[2]])
    Vars.rfrak_chi2 = np.array([y[3],y[4]])
    Vars.rfrak_frame = np.array([y[5],y[6],y[7]])
    # --- Frame rotor and co-rotating orthonormal triad (nHat, lambdaHat, ellHat).
    Vars.R = exp(Vars.rfrak_frame[0]*Cons.xHat + Vars.rfrak_frame[1]*Cons.yHat + Vars.rfrak_frame[2]*Cons.zHat)
    Vars.nHat = mul(mul(Vars.R,Cons.xHat),conjugate(Vars.R))
    Vars.lambdaHat = mul(mul(Vars.R,Cons.yHat),conjugate(Vars.R))
    Vars.ellHat = mul(mul(Vars.R,Cons.zHat),conjugate(Vars.R))
    # --- Spin rotors and the two spin vectors in the inertial frame.
    Vars.R_S1 = exp(Vars.rfrak_chi1[0]*Cons.xHat + Vars.rfrak_chi1[1]*Cons.yHat)
    Vars.R_S2 = exp(Vars.rfrak_chi2[0]*Cons.xHat + Vars.rfrak_chi2[1]*Cons.yHat)
    Vars.chiVec1 = mul(mul(mul(Cons.S_chi1,Vars.R_S1),Cons.zHat),mul(conjugate(Vars.R_S1),conjugate(Cons.S_chi1)))
    Vars.chiVec2 = mul(mul(mul(Cons.S_chi2,Vars.R_S2),Cons.zHat),mul(conjugate(Vars.R_S2),conjugate(Cons.S_chi2)))
    # --- Projections of each spin onto the triad ([1:] drops the scalar part).
    Vars.chi1_n = np.array([dot(Vars.chiVec1[1:],Vars.nHat[1:])])
    Vars.chi1_lambda = np.array([dot(Vars.chiVec1[1:],Vars.lambdaHat[1:])])
    Vars.chi1_ell = np.array([dot(Vars.chiVec1[1:],Vars.ellHat[1:])])
    Vars.chi2_n = np.array([dot(Vars.chiVec2[1:],Vars.nHat[1:])])
    Vars.chi2_lambda = np.array([dot(Vars.chiVec2[1:],Vars.lambdaHat[1:])])
    Vars.chi2_ell = np.array([dot(Vars.chiVec2[1:],Vars.ellHat[1:])])
    # --- Combined spin quantities (total S, mass-weighted Sigma, symmetric/antisymmetric).
    Vars.S_ell = Cons.M1**2*Vars.chi1_ell + Cons.M2**2*Vars.chi2_ell
    Vars.S_n = Cons.M1**2*Vars.chi1_n + Cons.M2**2*Vars.chi2_n
    Vars.S_lambda = Cons.M1**2*Vars.chi1_lambda + Cons.M2**2*Vars.chi2_lambda
    Vars.Sigma_ell = Cons.M*(-Cons.M1*Vars.chi1_ell + Cons.M2*Vars.chi2_ell)
    Vars.Sigma_n = Cons.M*(-Cons.M1*Vars.chi1_n + Cons.M2*Vars.chi2_n)
    Vars.Sigma_lambda = Cons.M*(-Cons.M1*Vars.chi1_lambda + Cons.M2*Vars.chi2_lambda)
    Vars.chi_s_ell = Vars.chi1_ell/2 + Vars.chi2_ell/2
    Vars.chi_a_ell = Vars.chi1_ell/2 - Vars.chi2_ell/2
    # --- Spin-dependent flux (Fcal_*) and energy (E_*) coefficients used by the
    #     4.0 PN Taylor approximants.
    Vars.logv = log(Vars.v)
    Vars.Fcal_coeff = 32*Cons.nu**2*Vars.v**10/5
    Vars.Fcal_SQ_4 = Cons.chi1chi1*(-89*Cons.delta/192 + 89*Cons.nu/96 - 89/192) - 103*Cons.chi1chi2*Cons.nu/48 + Cons.chi2chi2*(89*Cons.delta/192 + 89*Cons.nu/96 - 89/192) + Vars.chi_a_ell*(Vars.chi_a_ell*(287/96 - 12*Cons.nu) + 287*Vars.chi_s_ell*Cons.delta/48) + Vars.chi_s_ell**2*(Cons.nu/24 + 287/96)
    Vars.Fcal_SO_3 = (-4*Vars.S_ell - 5*Vars.Sigma_ell*Cons.delta/4)/Cons.M**2
    Vars.Fcal_SO_5 = (Vars.S_ell*(272*Cons.nu/9 - 9/2) + Vars.Sigma_ell*Cons.delta*(43*Cons.nu/4 - 13/16))/Cons.M**2
    Vars.Fcal_SO_6 = (-16*Vars.S_ell*pi - 31*Vars.Sigma_ell*Cons.delta*pi/6)/Cons.M**2
    Vars.Fcal_SO_7 = (Vars.S_ell*(-2810*Cons.nu**2/27 + 6172*Cons.nu/189 + 476645/6804) + Vars.Sigma_ell*Cons.delta*(-1501*Cons.nu**2/36 + 1849*Cons.nu/126 + 9535/336))/Cons.M**2
    # 4.0 PN spin-orbit flux term (not present in Recalculate_3p5).
    Vars.Fcal_SO_8 = (Vars.S_ell*pi*(13879*Cons.nu/72 - 3485/96) + Vars.Sigma_ell*Cons.delta*pi*(130583*Cons.nu/2016 - 7163/672))/Cons.M**2
    Vars.E_SQ_4 = -3*Vars.chi_a_ell**2/2 - 3*Vars.chi_s_ell**2/2 - Cons.delta*(Cons.chi2chi2/2 + 3*Vars.chi_a_ell*Vars.chi_s_ell) + Cons.nu*(Cons.chi1chi2 + 6*Vars.chi_a_ell**2) + (Cons.chi1chi1 + Cons.chi2chi2)*(Cons.delta - 2*Cons.nu + 1)/4
    Vars.E_SO_3 = (14*Vars.S_ell/3 + 2*Vars.Sigma_ell*Cons.delta)/Cons.M**2
    Vars.E_SO_5 = (Vars.S_ell*(11 - 61*Cons.nu/9) + Vars.Sigma_ell*Cons.delta*(3 - 10*Cons.nu/3))/Cons.M**2
    Vars.E_SO_7 = (Vars.S_ell*(29*Cons.nu**2/12 - 367*Cons.nu/4 + 135/4) + Vars.Sigma_ell*Cons.delta*(5*Cons.nu**2/4 - 39*Cons.nu + 27/4))/Cons.M**2
@njit
def OmegaVec_chiVec_1_4p0(Cons,Vars):
    """Angular velocity driving the evolution of chiVec1 at 4.0 PN order.

    Identical in behavior to the generated one-liner; only split into named
    sub-terms with the same arithmetic grouping.
    """
    # Overall prefactor v^5/M.
    prefactor = Vars.v**5/Cons.M
    # PN series along ellHat.
    ell_term = Vars.ellHat*(-0.75*Cons.delta + 0.5*Cons.nu + Vars.v**2*(Cons.delta*(0.625*Cons.nu - 0.5625) + Cons.nu*(1.25 - 0.0416666666666667*Cons.nu) + Vars.v**2*(Cons.delta*(Cons.nu*(4.875 - 0.15625*Cons.nu) - 0.84375) + Cons.nu*(Cons.nu*(-0.0208333333333333*Cons.nu - 3.28125) + 0.1875) + 0.84375) + 0.5625) + 0.75)
    # Spin projections along nHat.
    n_term = Vars.nHat*Vars.v*(3.0*Vars.chi1_n*Cons.nu + 3.0*Cons.M2**2*Vars.chi2_n/Cons.M**2)
    # Direct coupling to the companion's spin vector.
    companion_term = Cons.M2**2*Vars.chiVec2*Vars.v/Cons.M**2
    return prefactor*(ell_term + n_term - companion_term)
@njit
def OmegaVec_chiVec_2_4p0(Cons,Vars):
    """Angular velocity driving the evolution of chiVec2 at 4.0 PN order.

    Identical in behavior to the generated one-liner; only split into named
    sub-terms with the same arithmetic grouping.
    """
    # Overall prefactor v^5/M.
    prefactor = Vars.v**5/Cons.M
    # PN series along ellHat.
    ell_term = Vars.ellHat*(0.75*Cons.delta + 0.5*Cons.nu + Vars.v**2*(Cons.delta*(0.5625 - 0.625*Cons.nu) + Cons.nu*(1.25 - 0.0416666666666667*Cons.nu) + Vars.v**2*(Cons.delta*(Cons.nu*(0.15625*Cons.nu - 4.875) + 0.84375) + Cons.nu*(Cons.nu*(-0.0208333333333333*Cons.nu - 3.28125) + 0.1875) + 0.84375) + 0.5625) + 0.75)
    # Spin projections along nHat.
    n_term = Vars.nHat*Vars.v*(3.0*Vars.chi2_n*Cons.nu + 3.0*Cons.M1**2*Vars.chi1_n/Cons.M**2)
    # Direct coupling to the companion's spin vector.
    companion_term = Cons.M1**2*Vars.chiVec1*Vars.v/Cons.M**2
    return prefactor*(ell_term + n_term - companion_term)
@njit
def OmegaVec_4p0(Cons,Vars):
    """Orbital angular-velocity vector at 4.0 PN order.

    Same computation as the generated original (identical to OmegaVec_3p5 in
    the visible terms); only the ordering/naming of the intermediate
    assignments differs.
    """
    # gamma_PN_* coefficients of the PN series in v.
    gamma_PN_0 = 1.00000000000000
    gamma_PN_2 = 1.0 - 0.333333333333333*Cons.nu
    gamma_PN_3 = (1.66666666666667*Vars.S_ell + Vars.Sigma_ell*Cons.delta)/Cons.M**2
    gamma_PN_4 = 1.0 - 5.41666666666667*Cons.nu
    gamma_PN_5 = (Vars.S_ell*(0.888888888888889*Cons.nu + 3.33333333333333) + 2.0*Vars.Sigma_ell*Cons.delta)/Cons.M**2
    gamma_PN_6 = 0.0123456790123457*Cons.nu**3 + 6.36111111111111*Cons.nu**2 - 2.98177812235564*Cons.nu + 1.0
    gamma_PN_7 = (Vars.S_ell*(-6.0*Cons.nu**2 - 10.5833333333333*Cons.nu + 5.0) - 2.66666666666667*Vars.Sigma_ell*Cons.delta*Cons.nu**2 + Vars.Sigma_ell*Cons.delta*(3.0 - 10.1666666666667*Cons.nu))/Cons.M**2
    # a_ell_* coefficients multiplying the nHat correction.
    a_ell_0 = 7.0*Vars.S_n + 3.0*Vars.Sigma_n*Cons.delta
    a_ell_2 = Vars.S_n*(-9.66666666666667*Cons.nu - 10.0) + Vars.Sigma_n*Cons.delta*(-4.5*Cons.nu - 6.0)
    a_ell_4 = Vars.S_n*(5.77777777777778*Cons.nu**2 + 14.75*Cons.nu + 1.5) + Vars.Sigma_n*Cons.delta*(2.83333333333333*Cons.nu**2 + 9.125*Cons.nu + 1.5)
    # Horner-form sub-series, grouped exactly as in the generated expression.
    a_ell = a_ell_0 + Vars.v**2*(a_ell_2 + a_ell_4*Vars.v**2)
    gamma_PN = gamma_PN_0 + Vars.v**2*(gamma_PN_2 + Vars.v*(gamma_PN_3 + Vars.v*(gamma_PN_4 + Vars.v*(gamma_PN_5 + Vars.v*(gamma_PN_6 + gamma_PN_7*Vars.v)))))
    return Vars.ellHat*Vars.v**3/Cons.M + Vars.nHat*Vars.v**6*a_ell*gamma_PN/Cons.M**3
@njit(cache=True)
def TaylorT1_4p0(Cons,Vars):
    """Right-hand side of the 4.0 PN TaylorT1 evolution: dy/dt for the ODE state.

    TaylorT1 computes dv/dt = -(Flux + Absorption)/(dE/dv) directly from the
    energy-flux and binding-energy series, then fills the 8-component state
    derivative: dydt[0] = dv/dt, dydt[1:3] and dydt[3:5] the two spin-frame
    angles (zero when the corresponding spin is not evolved), dydt[5:8] the
    orbital-frame rotor components.
    """
    # GW energy flux: Horner evaluation of the PN flux series in v (with
    # log(v) terms at orders 6 and 8).
    Flux = Vars.Fcal_coeff*(Cons.Fcal_0 + Vars.v**2*(Cons.Fcal_2 + Vars.v*(Cons.Fcal_3 + Vars.Fcal_SO_3 + Vars.v*(Cons.Fcal_4 + Vars.Fcal_SQ_4 + Vars.v*(Cons.Fcal_5 + Vars.Fcal_SO_5 + Vars.v*(Cons.Fcal_6 + Vars.Fcal_SO_6 + Cons.Fcal_lnv_6*Vars.logv + Vars.v*(Cons.Fcal_7 + Vars.Fcal_SO_7 + Vars.v*(Cons.Fcal_8 + Vars.Fcal_SO_8 + Cons.Fcal_lnv_8*Vars.logv))))))))
    # Derivative of the binding energy with respect to v.
    dEdV = -0.5*Cons.M*Cons.nu*Vars.v*(2.0*Cons.E_0 + Vars.v**2*(4.0*Cons.E_2 + Vars.v*(5.0*Vars.E_SO_3 + Vars.v*(6.0*Cons.E_4 + 6.0*Vars.E_SQ_4 + Vars.v*(7.0*Vars.E_SO_5 + Vars.v*(8.0*Cons.E_6 + Vars.v*(9.0*Vars.E_SO_7 + Vars.v*(10.0*Cons.E_8 + Cons.E_lnv_8*(10.0*Vars.logv + 1.0)))))))))
    # Tidal/horizon absorption is not modelled at this order.
    Absorption = 0
    dvdt_T1 = (-Absorption - Flux)/dEdV
    dydt=np.zeros(8)
    # Frame rotor derivative from the orbital angular velocity (vector part).
    [dydt[5],dydt[6],dydt[7]] = FrameFromAngularVelocityIntegrand(Vars.rfrak_frame, OmegaVec_4p0(Cons,Vars)[1:])
    dydt[0] = dvdt_T1[0]
    if(Cons.EvolveSpin1):
        # Spin-1 precession, rotated into the chi1 frame via quaternion
        # conjugation with S_chi1.
        dydt[1], dydt[2]=FrameFromAngularVelocity_2D_Integrand(Vars.rfrak_chi1[0], Vars.rfrak_chi1[1],(mul(mul(inverse(Cons.S_chi1),OmegaVec_chiVec_1_4p0(Cons,Vars)),Cons.S_chi1))[1:])
    else:
        dydt[1], dydt[2] = 0.0, 0.0
    if(Cons.EvolveSpin2):
        # Same for spin 2.
        dydt[3], dydt[4]=FrameFromAngularVelocity_2D_Integrand(Vars.rfrak_chi2[0], Vars.rfrak_chi2[1],(mul(mul(inverse(Cons.S_chi2),OmegaVec_chiVec_2_4p0(Cons,Vars)),Cons.S_chi2))[1:])
    else:
        dydt[3], dydt[4] = 0.0, 0.0
    return dydt
@njit(cache=True)
def TaylorT4_4p0(Cons,Vars):
    """Right-hand side of the 4.0 PN TaylorT4 evolution: dy/dt for the ODE state.

    TaylorT4 re-expands the ratio -Flux/(dE/dv) as a single truncated Taylor
    series in v before evaluating it; the enormous generated expression below
    is that re-expansion.  The remaining components of dydt are filled exactly
    as in TaylorT1_4p0.

    NOTE(review): terms such as ``1.0*-Cons.Fcal_3 - Vars.Fcal_SO_3*Vars.v**3``
    parse as ``(-Fcal_3) - Fcal_SO_3*v**3``, whereas the flux in TaylorT1_4p0
    groups these as ``(Fcal_3 + Fcal_SO_3)*v**3``.  This looks like a missing
    set of parentheses from the code generator — confirm against the generator
    output before relying on TaylorT4 results.  Do not edit by hand.
    """
    dvdt_T4 = -2.0*Vars.Fcal_coeff*(1.0*-Cons.Fcal_0 + 1.0*0*Vars.v + 1.0*-Cons.Fcal_2*Vars.v**2 + 1.0*-Cons.Fcal_3 - Vars.Fcal_SO_3*Vars.v**3 + 1.0*-Cons.Fcal_4 - Vars.Fcal_SQ_4*Vars.v**4 + 1.0*-Cons.Fcal_5 - Vars.Fcal_SO_5*Vars.v**5 + 1.0*-Cons.Fcal_6 - Vars.Fcal_SO_6 - Cons.Fcal_lnv_6*Vars.logv*Vars.v**6 + 1.0*-Cons.Fcal_7 - Vars.Fcal_SO_7*Vars.v**7 + 1.0*-Cons.Fcal_8 - Vars.Fcal_SO_8 - Cons.Fcal_lnv_8*Vars.logv*Vars.v**8 + (0*(-1.0*-Cons.Fcal_0*Vars.v - 1.0*0*Vars.v**2 - 1.0*-Cons.Fcal_2*Vars.v**3 - 1.0*-Cons.Fcal_3 - Vars.Fcal_SO_3*Vars.v**4 - 1.0*-Cons.Fcal_4 - Vars.Fcal_SQ_4*Vars.v**5 - 1.0*-Cons.Fcal_5 - Vars.Fcal_SO_5*Vars.v**6 - 1.0*-Cons.Fcal_6 - Vars.Fcal_SO_6 - Cons.Fcal_lnv_6*Vars.logv*Vars.v**7 - 1.0*-Cons.Fcal_7 - Vars.Fcal_SO_7*Vars.v**8) + 4*Cons.E_2*Cons.M*(-1.0*-Cons.Fcal_0*Vars.v**2 - 1.0*0*Vars.v**3 - 1.0*-Cons.Fcal_2*Vars.v**4 - 1.0*-Cons.Fcal_3 - Vars.Fcal_SO_3*Vars.v**5 - 1.0*-Cons.Fcal_4 - Vars.Fcal_SQ_4*Vars.v**6 - 1.0*-Cons.Fcal_5 - Vars.Fcal_SO_5*Vars.v**7 - 1.0*-Cons.Fcal_6 - Vars.Fcal_SO_6 - Cons.Fcal_lnv_6*Vars.logv*Vars.v**8) + 5*Vars.E_SO_3*Cons.M*(-1.0*-Cons.Fcal_0*Vars.v**3 - 1.0*0*Vars.v**4 - 1.0*-Cons.Fcal_2*Vars.v**5 - 1.0*-Cons.Fcal_3 - Vars.Fcal_SO_3*Vars.v**6 - 1.0*-Cons.Fcal_4 - Vars.Fcal_SQ_4*Vars.v**7 - 1.0*-Cons.Fcal_5 - Vars.Fcal_SO_5*Vars.v**8) + 6*Cons.E_4*Cons.M + 6*Vars.E_SQ_4*Cons.M*(-1.0*-Cons.Fcal_0*Vars.v**4 - 1.0*0*Vars.v**5 - 1.0*-Cons.Fcal_2*Vars.v**6 - 1.0*-Cons.Fcal_3 - Vars.Fcal_SO_3*Vars.v**7 - 1.0*-Cons.Fcal_4 - Vars.Fcal_SQ_4*Vars.v**8) + 7*Vars.E_SO_5*Cons.M*(-1.0*-Cons.Fcal_0*Vars.v**5 - 1.0*0*Vars.v**6 - 1.0*-Cons.Fcal_2*Vars.v**7 - 1.0*-Cons.Fcal_3 - Vars.Fcal_SO_3*Vars.v**8) + 8*Cons.E_6*Cons.M*(-1.0*-Cons.Fcal_0*Vars.v**6 - 1.0*0*Vars.v**7 - 1.0*-Cons.Fcal_2*Vars.v**8) + 9*Vars.E_SO_7*Cons.M*(-1.0*-Cons.Fcal_0*Vars.v**7 - 1.0*0*Vars.v**8) - 1.0*10*Cons.E_8*Cons.M + Cons.E_lnv_8*Cons.M*(10*Vars.logv + 1)*-Cons.Fcal_0*Vars.v**8 + (0*(0*(1.0*-Cons.Fcal_0*Vars.v**2 + 1.0*0*Vars.v**3 + 1.0*-Cons.Fcal_2*Vars.v**4 + 1.0*-Cons.Fcal_3 - Vars.Fcal_SO_3*Vars.v**5 + 1.0*-Cons.Fcal_4 - Vars.Fcal_SQ_4*Vars.v**6 + 1.0*-Cons.Fcal_5 - Vars.Fcal_SO_5*Vars.v**7 + 1.0*-Cons.Fcal_6 - Vars.Fcal_SO_6 - Cons.Fcal_lnv_6*Vars.logv*Vars.v**8) + 4*Cons.E_2*Cons.M*(2.0*-Cons.Fcal_0*Vars.v**3 + 2.0*0*Vars.v**4 + 2.0*-Cons.Fcal_2*Vars.v**5 + 2.0*-Cons.Fcal_3 - Vars.Fcal_SO_3*Vars.v**6 + 2.0*-Cons.Fcal_4 - Vars.Fcal_SQ_4*Vars.v**7 + 2.0*-Cons.Fcal_5 - Vars.Fcal_SO_5*Vars.v**8) + 5*Vars.E_SO_3*Cons.M*(2.0*-Cons.Fcal_0*Vars.v**4 + 2.0*0*Vars.v**5 + 2.0*-Cons.Fcal_2*Vars.v**6 + 2.0*-Cons.Fcal_3 - Vars.Fcal_SO_3*Vars.v**7 + 2.0*-Cons.Fcal_4 - Vars.Fcal_SQ_4*Vars.v**8) + 6*Cons.E_4*Cons.M + 6*Vars.E_SQ_4*Cons.M*(2.0*-Cons.Fcal_0*Vars.v**5 + 2.0*0*Vars.v**6 + 2.0*-Cons.Fcal_2*Vars.v**7 + 2.0*-Cons.Fcal_3 - Vars.Fcal_SO_3*Vars.v**8) + 7*Vars.E_SO_5*Cons.M*(2.0*-Cons.Fcal_0*Vars.v**6 + 2.0*0*Vars.v**7 + 2.0*-Cons.Fcal_2*Vars.v**8) + 8*Cons.E_6*Cons.M*(2.0*-Cons.Fcal_0*Vars.v**7 + 2.0*0*Vars.v**8) + 2.0*9*Vars.E_SO_7*Cons.M*-Cons.Fcal_0*Vars.v**8) + 4*Cons.E_2*Cons.M*(4*Cons.E_2*Cons.M*(1.0*-Cons.Fcal_0*Vars.v**4 + 1.0*0*Vars.v**5 + 1.0*-Cons.Fcal_2*Vars.v**6 + 1.0*-Cons.Fcal_3 - Vars.Fcal_SO_3*Vars.v**7 + 1.0*-Cons.Fcal_4 - Vars.Fcal_SQ_4*Vars.v**8) + 5*Vars.E_SO_3*Cons.M*(2.0*-Cons.Fcal_0*Vars.v**5 + 2.0*0*Vars.v**6 + 2.0*-Cons.Fcal_2*Vars.v**7 + 2.0*-Cons.Fcal_3 - Vars.Fcal_SO_3*Vars.v**8) + 6*Cons.E_4*Cons.M + 6*Vars.E_SQ_4*Cons.M*(2.0*-Cons.Fcal_0*Vars.v**6 + 2.0*0*Vars.v**7 + 2.0*-Cons.Fcal_2*Vars.v**8) + 7*Vars.E_SO_5*Cons.M*(2.0*-Cons.Fcal_0*Vars.v**7 + 2.0*0*Vars.v**8) + 2.0*8*Cons.E_6*Cons.M*-Cons.Fcal_0*Vars.v**8) + 5*Vars.E_SO_3*Cons.M*(5*Vars.E_SO_3*Cons.M*(1.0*-Cons.Fcal_0*Vars.v**6 + 1.0*0*Vars.v**7 + 1.0*-Cons.Fcal_2*Vars.v**8) + 6*Cons.E_4*Cons.M + 6*Vars.E_SQ_4*Cons.M*(2.0*-Cons.Fcal_0*Vars.v**7 + 2.0*0*Vars.v**8) + 2.0*7*Vars.E_SO_5*Cons.M*-Cons.Fcal_0*Vars.v**8) + 1.0*6*Cons.E_4*Cons.M + 6*Vars.E_SQ_4*Cons.M**2*-Cons.Fcal_0*Vars.v**8 + (0*(0*(0*(-1.0*-Cons.Fcal_0*Vars.v**3 - 1.0*0*Vars.v**4 - 1.0*-Cons.Fcal_2*Vars.v**5 - 1.0*-Cons.Fcal_3 - Vars.Fcal_SO_3*Vars.v**6 - 1.0*-Cons.Fcal_4 - Vars.Fcal_SQ_4*Vars.v**7 - 1.0*-Cons.Fcal_5 - Vars.Fcal_SO_5*Vars.v**8) + 4*Cons.E_2*Cons.M*(-3.0*-Cons.Fcal_0*Vars.v**4 - 3.0*0*Vars.v**5 - 3.0*-Cons.Fcal_2*Vars.v**6 - 3.0*-Cons.Fcal_3 - Vars.Fcal_SO_3*Vars.v**7 - 3.0*-Cons.Fcal_4 - Vars.Fcal_SQ_4*Vars.v**8) + 5*Vars.E_SO_3*Cons.M*(-3.0*-Cons.Fcal_0*Vars.v**5 - 3.0*0*Vars.v**6 - 3.0*-Cons.Fcal_2*Vars.v**7 - 3.0*-Cons.Fcal_3 - Vars.Fcal_SO_3*Vars.v**8) + 6*Cons.E_4*Cons.M + 6*Vars.E_SQ_4*Cons.M*(-3.0*-Cons.Fcal_0*Vars.v**6 - 3.0*0*Vars.v**7 - 3.0*-Cons.Fcal_2*Vars.v**8) + 7*Vars.E_SO_5*Cons.M*(-3.0*-Cons.Fcal_0*Vars.v**7 - 3.0*0*Vars.v**8) - 3.0*8*Cons.E_6*Cons.M*-Cons.Fcal_0*Vars.v**8) + 4*Cons.E_2*Cons.M*(4*Cons.E_2*Cons.M*(-3.0*-Cons.Fcal_0*Vars.v**5 - 3.0*0*Vars.v**6 - 3.0*-Cons.Fcal_2*Vars.v**7 - 3.0*-Cons.Fcal_3 - Vars.Fcal_SO_3*Vars.v**8) + 5*Vars.E_SO_3*Cons.M*(-6.0*-Cons.Fcal_0*Vars.v**6 - 6.0*0*Vars.v**7 - 6.0*-Cons.Fcal_2*Vars.v**8) + 6*Cons.E_4*Cons.M + 6*Vars.E_SQ_4*Cons.M*(-6.0*-Cons.Fcal_0*Vars.v**7 - 6.0*0*Vars.v**8) - 6.0*7*Vars.E_SO_5*Cons.M*-Cons.Fcal_0*Vars.v**8) + 5*Vars.E_SO_3*Cons.M*(5*Vars.E_SO_3*Cons.M*(-3.0*-Cons.Fcal_0*Vars.v**7 - 3.0*0*Vars.v**8) - 6.0*6*Cons.E_4*Cons.M + 6*Vars.E_SQ_4*Cons.M*-Cons.Fcal_0*Vars.v**8)) + 4*Cons.E_2*Cons.M*(4*Cons.E_2*Cons.M*(4*Cons.E_2*Cons.M*(-1.0*-Cons.Fcal_0*Vars.v**6 - 1.0*0*Vars.v**7 - 1.0*-Cons.Fcal_2*Vars.v**8) + 5*Vars.E_SO_3*Cons.M*(-3.0*-Cons.Fcal_0*Vars.v**7 - 3.0*0*Vars.v**8) - 3.0*6*Cons.E_4*Cons.M + 6*Vars.E_SQ_4*Cons.M*-Cons.Fcal_0*Vars.v**8) - 3.0*5*Vars.E_SO_3*Cons.M**2*-Cons.Fcal_0*Vars.v**8) + (0*(0*(0*(0*(1.0*-Cons.Fcal_0*Vars.v**4 + 1.0*0*Vars.v**5 + 1.0*-Cons.Fcal_2*Vars.v**6 + 1.0*-Cons.Fcal_3 - Vars.Fcal_SO_3*Vars.v**7 + 1.0*-Cons.Fcal_4 - Vars.Fcal_SQ_4*Vars.v**8) + 4*Cons.E_2*Cons.M*(4.0*-Cons.Fcal_0*Vars.v**5 + 4.0*0*Vars.v**6 + 4.0*-Cons.Fcal_2*Vars.v**7 + 4.0*-Cons.Fcal_3 - Vars.Fcal_SO_3*Vars.v**8) + 5*Vars.E_SO_3*Cons.M*(4.0*-Cons.Fcal_0*Vars.v**6 + 4.0*0*Vars.v**7 + 4.0*-Cons.Fcal_2*Vars.v**8) + 6*Cons.E_4*Cons.M + 6*Vars.E_SQ_4*Cons.M*(4.0*-Cons.Fcal_0*Vars.v**7 + 4.0*0*Vars.v**8) + 4.0*7*Vars.E_SO_5*Cons.M*-Cons.Fcal_0*Vars.v**8) + 4*Cons.E_2*Cons.M*(4*Cons.E_2*Cons.M*(6.0*-Cons.Fcal_0*Vars.v**6 + 6.0*0*Vars.v**7 + 6.0*-Cons.Fcal_2*Vars.v**8) + 5*Vars.E_SO_3*Cons.M*(12.0*-Cons.Fcal_0*Vars.v**7 + 12.0*0*Vars.v**8) + 12.0*6*Cons.E_4*Cons.M + 6*Vars.E_SQ_4*Cons.M*-Cons.Fcal_0*Vars.v**8) + 6.0*5*Vars.E_SO_3*Cons.M**2*-Cons.Fcal_0*Vars.v**8) + 4*Cons.E_2*Cons.M**2*(4*Cons.E_2*Cons.M*(4.0*-Cons.Fcal_0*Vars.v**7 + 4.0*0*Vars.v**8) + 12.0*5*Vars.E_SO_3*Cons.M*-Cons.Fcal_0*Vars.v**8)) + 1.0*4*Cons.E_2*Cons.M**4*-Cons.Fcal_0*Vars.v**8 + (0**2*(0*(0*(0*(-1.0*-Cons.Fcal_0*Vars.v**5 - 1.0*0*Vars.v**6 - 1.0*-Cons.Fcal_2*Vars.v**7 - 1.0*-Cons.Fcal_3 - Vars.Fcal_SO_3*Vars.v**8) + 4*Cons.E_2*Cons.M*(-5.0*-Cons.Fcal_0*Vars.v**6 - 5.0*0*Vars.v**7 - 5.0*-Cons.Fcal_2*Vars.v**8) + 5*Vars.E_SO_3*Cons.M*(-5.0*-Cons.Fcal_0*Vars.v**7 - 5.0*0*Vars.v**8) - 5.0*6*Cons.E_4*Cons.M + 6*Vars.E_SQ_4*Cons.M*-Cons.Fcal_0*Vars.v**8) + 4*Cons.E_2*Cons.M*(4*Cons.E_2*Cons.M*(-10.0*-Cons.Fcal_0*Vars.v**7 - 10.0*0*Vars.v**8) - 20.0*5*Vars.E_SO_3*Cons.M*-Cons.Fcal_0*Vars.v**8)) - 10.0*4*Cons.E_2*Cons.M**3*-Cons.Fcal_0*Vars.v**8) + (0**4*(0*(0*(1.0*-Cons.Fcal_0*Vars.v**6 + 1.0*0*Vars.v**7 + 1.0*-Cons.Fcal_2*Vars.v**8) + 4*Cons.E_2*Cons.M*(6.0*-Cons.Fcal_0*Vars.v**7 + 6.0*0*Vars.v**8) + 6.0*5*Vars.E_SO_3*Cons.M*-Cons.Fcal_0*Vars.v**8) + 15.0*4*Cons.E_2*Cons.M**2*-Cons.Fcal_0*Vars.v**8) + (0**6*(0*(-1.0*-Cons.Fcal_0*Vars.v**7 - 1.0*0*Vars.v**8) - 7.0*4*Cons.E_2*Cons.M*-Cons.Fcal_0*Vars.v**8) + 1.0*0**8*-Cons.Fcal_0*Vars.v**8/2*Cons.E_0*Cons.M)/2*Cons.E_0*Cons.M)/2*Cons.E_0*Cons.M)/2*Cons.E_0*Cons.M)/2*Cons.E_0*Cons.M)/2*Cons.E_0*Cons.M)/2*Cons.E_0*Cons.M)/2*Cons.E_0*Cons.M)/(Cons.nu*Vars.v*2*Cons.E_0*Cons.M)
    dydt=np.zeros(8)
    # Frame rotor derivative from the orbital angular velocity (vector part).
    [dydt[5],dydt[6],dydt[7]] = FrameFromAngularVelocityIntegrand(Vars.rfrak_frame, OmegaVec_4p0(Cons,Vars)[1:])
    dydt[0] = dvdt_T4[0]
    if(Cons.EvolveSpin1):
        # Spin-1 precession rotated into the chi1 frame (quaternion conjugation).
        dydt[1], dydt[2]=FrameFromAngularVelocity_2D_Integrand(Vars.rfrak_chi1[0], Vars.rfrak_chi1[1],(mul(mul(inverse(Cons.S_chi1),OmegaVec_chiVec_1_4p0(Cons,Vars)),Cons.S_chi1))[1:])
    else:
        dydt[1], dydt[2] = 0.0, 0.0
    if(Cons.EvolveSpin2):
        # Same for spin 2.
        dydt[3], dydt[4]=FrameFromAngularVelocity_2D_Integrand(Vars.rfrak_chi2[0], Vars.rfrak_chi2[1],(mul(mul(inverse(Cons.S_chi2),OmegaVec_chiVec_2_4p0(Cons,Vars)),Cons.S_chi2))[1:])
    else:
        dydt[3], dydt[4] = 0.0, 0.0
    return dydt
@njit(cache=True)
def TaylorT5_4p0(Cons,Vars):
    """Right-hand side of the 4.0 PN TaylorT5 evolution: dy/dt for the ODE state.

    TaylorT5 expands dt/dv = -(dE/dv)/Flux as a truncated series in v and
    inverts it (dvdt_T5 = 1/dtdv); the generated expression below is that
    expansion.  Remaining dydt components are filled as in TaylorT1_4p0.

    NOTE(review): as in TaylorT4_4p0, terms like ``-Cons.Fcal_3 -
    Vars.Fcal_SO_3*(...)`` parse as a difference rather than the
    ``(Fcal_3 + Fcal_SO_3)`` grouping used by TaylorT1_4p0's flux — this
    looks like a code-generator precedence issue; confirm against the
    generator before relying on TaylorT5 results.  Do not edit by hand.
    """
    dtdv = -0.5*Cons.nu*Vars.v*(1.0*2*Cons.E_0*Cons.M + 1.0*0*Vars.v + 1.0*4*Cons.E_2*Cons.M*Vars.v**2 + 1.0*5*Vars.E_SO_3*Cons.M*Vars.v**3 + 1.0*6*Cons.E_4*Cons.M + 6*Vars.E_SQ_4*Cons.M*Vars.v**4 + 1.0*7*Vars.E_SO_5*Cons.M*Vars.v**5 + 1.0*8*Cons.E_6*Cons.M*Vars.v**6 + 1.0*9*Vars.E_SO_7*Cons.M*Vars.v**7 + 1.0*10*Cons.E_8*Cons.M + Cons.E_lnv_8*Cons.M*(10*Vars.logv + 1)*Vars.v**8 + (0*(-1.0*2*Cons.E_0*Cons.M*Vars.v - 1.0*0*Vars.v**2 - 1.0*4*Cons.E_2*Cons.M*Vars.v**3 - 1.0*5*Vars.E_SO_3*Cons.M*Vars.v**4 - 1.0*6*Cons.E_4*Cons.M + 6*Vars.E_SQ_4*Cons.M*Vars.v**5 - 1.0*7*Vars.E_SO_5*Cons.M*Vars.v**6 - 1.0*8*Cons.E_6*Cons.M*Vars.v**7 - 1.0*9*Vars.E_SO_7*Cons.M*Vars.v**8) + -Cons.Fcal_2*(-1.0*2*Cons.E_0*Cons.M*Vars.v**2 - 1.0*0*Vars.v**3 - 1.0*4*Cons.E_2*Cons.M*Vars.v**4 - 1.0*5*Vars.E_SO_3*Cons.M*Vars.v**5 - 1.0*6*Cons.E_4*Cons.M + 6*Vars.E_SQ_4*Cons.M*Vars.v**6 - 1.0*7*Vars.E_SO_5*Cons.M*Vars.v**7 - 1.0*8*Cons.E_6*Cons.M*Vars.v**8) + -Cons.Fcal_3 - Vars.Fcal_SO_3*(-1.0*2*Cons.E_0*Cons.M*Vars.v**3 - 1.0*0*Vars.v**4 - 1.0*4*Cons.E_2*Cons.M*Vars.v**5 - 1.0*5*Vars.E_SO_3*Cons.M*Vars.v**6 - 1.0*6*Cons.E_4*Cons.M + 6*Vars.E_SQ_4*Cons.M*Vars.v**7 - 1.0*7*Vars.E_SO_5*Cons.M*Vars.v**8) + -Cons.Fcal_4 - Vars.Fcal_SQ_4*(-1.0*2*Cons.E_0*Cons.M*Vars.v**4 - 1.0*0*Vars.v**5 - 1.0*4*Cons.E_2*Cons.M*Vars.v**6 - 1.0*5*Vars.E_SO_3*Cons.M*Vars.v**7 - 1.0*6*Cons.E_4*Cons.M + 6*Vars.E_SQ_4*Cons.M*Vars.v**8) + -Cons.Fcal_5 - Vars.Fcal_SO_5*(-1.0*2*Cons.E_0*Cons.M*Vars.v**5 - 1.0*0*Vars.v**6 - 1.0*4*Cons.E_2*Cons.M*Vars.v**7 - 1.0*5*Vars.E_SO_3*Cons.M*Vars.v**8) + -Cons.Fcal_6 - Vars.Fcal_SO_6 - Cons.Fcal_lnv_6*Vars.logv*(-1.0*2*Cons.E_0*Cons.M*Vars.v**6 - 1.0*0*Vars.v**7 - 1.0*4*Cons.E_2*Cons.M*Vars.v**8) + -Cons.Fcal_7 - Vars.Fcal_SO_7*(-1.0*2*Cons.E_0*Cons.M*Vars.v**7 - 1.0*0*Vars.v**8) - 1.0*-Cons.Fcal_8 - Vars.Fcal_SO_8 - Cons.Fcal_lnv_8*Vars.logv*2*Cons.E_0*Cons.M*Vars.v**8 + (0*(0*(1.0*2*Cons.E_0*Cons.M*Vars.v**2 + 1.0*0*Vars.v**3 + 1.0*4*Cons.E_2*Cons.M*Vars.v**4 + 1.0*5*Vars.E_SO_3*Cons.M*Vars.v**5 + 1.0*6*Cons.E_4*Cons.M + 6*Vars.E_SQ_4*Cons.M*Vars.v**6 + 1.0*7*Vars.E_SO_5*Cons.M*Vars.v**7 + 1.0*8*Cons.E_6*Cons.M*Vars.v**8) + -Cons.Fcal_2*(2.0*2*Cons.E_0*Cons.M*Vars.v**3 + 2.0*0*Vars.v**4 + 2.0*4*Cons.E_2*Cons.M*Vars.v**5 + 2.0*5*Vars.E_SO_3*Cons.M*Vars.v**6 + 2.0*6*Cons.E_4*Cons.M + 6*Vars.E_SQ_4*Cons.M*Vars.v**7 + 2.0*7*Vars.E_SO_5*Cons.M*Vars.v**8) + -Cons.Fcal_3 - Vars.Fcal_SO_3*(2.0*2*Cons.E_0*Cons.M*Vars.v**4 + 2.0*0*Vars.v**5 + 2.0*4*Cons.E_2*Cons.M*Vars.v**6 + 2.0*5*Vars.E_SO_3*Cons.M*Vars.v**7 + 2.0*6*Cons.E_4*Cons.M + 6*Vars.E_SQ_4*Cons.M*Vars.v**8) + -Cons.Fcal_4 - Vars.Fcal_SQ_4*(2.0*2*Cons.E_0*Cons.M*Vars.v**5 + 2.0*0*Vars.v**6 + 2.0*4*Cons.E_2*Cons.M*Vars.v**7 + 2.0*5*Vars.E_SO_3*Cons.M*Vars.v**8) + -Cons.Fcal_5 - Vars.Fcal_SO_5*(2.0*2*Cons.E_0*Cons.M*Vars.v**6 + 2.0*0*Vars.v**7 + 2.0*4*Cons.E_2*Cons.M*Vars.v**8) + -Cons.Fcal_6 - Vars.Fcal_SO_6 - Cons.Fcal_lnv_6*Vars.logv*(2.0*2*Cons.E_0*Cons.M*Vars.v**7 + 2.0*0*Vars.v**8) + 2.0*-Cons.Fcal_7 - Vars.Fcal_SO_7*2*Cons.E_0*Cons.M*Vars.v**8) + -Cons.Fcal_2*(-Cons.Fcal_2*(1.0*2*Cons.E_0*Cons.M*Vars.v**4 + 1.0*0*Vars.v**5 + 1.0*4*Cons.E_2*Cons.M*Vars.v**6 + 1.0*5*Vars.E_SO_3*Cons.M*Vars.v**7 + 1.0*6*Cons.E_4*Cons.M + 6*Vars.E_SQ_4*Cons.M*Vars.v**8) + -Cons.Fcal_3 - Vars.Fcal_SO_3*(2.0*2*Cons.E_0*Cons.M*Vars.v**5 + 2.0*0*Vars.v**6 + 2.0*4*Cons.E_2*Cons.M*Vars.v**7 + 2.0*5*Vars.E_SO_3*Cons.M*Vars.v**8) + -Cons.Fcal_4 - Vars.Fcal_SQ_4*(2.0*2*Cons.E_0*Cons.M*Vars.v**6 + 2.0*0*Vars.v**7 + 2.0*4*Cons.E_2*Cons.M*Vars.v**8) + -Cons.Fcal_5 - Vars.Fcal_SO_5*(2.0*2*Cons.E_0*Cons.M*Vars.v**7 + 2.0*0*Vars.v**8) + 2.0*-Cons.Fcal_6 - Vars.Fcal_SO_6 - Cons.Fcal_lnv_6*Vars.logv*2*Cons.E_0*Cons.M*Vars.v**8) + -Cons.Fcal_3 - Vars.Fcal_SO_3*(-Cons.Fcal_3 - Vars.Fcal_SO_3*(1.0*2*Cons.E_0*Cons.M*Vars.v**6 + 1.0*0*Vars.v**7 + 1.0*4*Cons.E_2*Cons.M*Vars.v**8) + -Cons.Fcal_4 - Vars.Fcal_SQ_4*(2.0*2*Cons.E_0*Cons.M*Vars.v**7 + 2.0*0*Vars.v**8) + 2.0*-Cons.Fcal_5 - Vars.Fcal_SO_5*2*Cons.E_0*Cons.M*Vars.v**8) + 1.0*-Cons.Fcal_4 - Vars.Fcal_SQ_4**2*2*Cons.E_0*Cons.M*Vars.v**8 + (0*(0*(0*(-1.0*2*Cons.E_0*Cons.M*Vars.v**3 - 1.0*0*Vars.v**4 - 1.0*4*Cons.E_2*Cons.M*Vars.v**5 - 1.0*5*Vars.E_SO_3*Cons.M*Vars.v**6 - 1.0*6*Cons.E_4*Cons.M + 6*Vars.E_SQ_4*Cons.M*Vars.v**7 - 1.0*7*Vars.E_SO_5*Cons.M*Vars.v**8) + -Cons.Fcal_2*(-3.0*2*Cons.E_0*Cons.M*Vars.v**4 - 3.0*0*Vars.v**5 - 3.0*4*Cons.E_2*Cons.M*Vars.v**6 - 3.0*5*Vars.E_SO_3*Cons.M*Vars.v**7 - 3.0*6*Cons.E_4*Cons.M + 6*Vars.E_SQ_4*Cons.M*Vars.v**8) + -Cons.Fcal_3 - Vars.Fcal_SO_3*(-3.0*2*Cons.E_0*Cons.M*Vars.v**5 - 3.0*0*Vars.v**6 - 3.0*4*Cons.E_2*Cons.M*Vars.v**7 - 3.0*5*Vars.E_SO_3*Cons.M*Vars.v**8) + -Cons.Fcal_4 - Vars.Fcal_SQ_4*(-3.0*2*Cons.E_0*Cons.M*Vars.v**6 - 3.0*0*Vars.v**7 - 3.0*4*Cons.E_2*Cons.M*Vars.v**8) + -Cons.Fcal_5 - Vars.Fcal_SO_5*(-3.0*2*Cons.E_0*Cons.M*Vars.v**7 - 3.0*0*Vars.v**8) - 3.0*-Cons.Fcal_6 - Vars.Fcal_SO_6 - Cons.Fcal_lnv_6*Vars.logv*2*Cons.E_0*Cons.M*Vars.v**8) + -Cons.Fcal_2*(-Cons.Fcal_2*(-3.0*2*Cons.E_0*Cons.M*Vars.v**5 - 3.0*0*Vars.v**6 - 3.0*4*Cons.E_2*Cons.M*Vars.v**7 - 3.0*5*Vars.E_SO_3*Cons.M*Vars.v**8) + -Cons.Fcal_3 - Vars.Fcal_SO_3*(-6.0*2*Cons.E_0*Cons.M*Vars.v**6 - 6.0*0*Vars.v**7 - 6.0*4*Cons.E_2*Cons.M*Vars.v**8) + -Cons.Fcal_4 - Vars.Fcal_SQ_4*(-6.0*2*Cons.E_0*Cons.M*Vars.v**7 - 6.0*0*Vars.v**8) - 6.0*-Cons.Fcal_5 - Vars.Fcal_SO_5*2*Cons.E_0*Cons.M*Vars.v**8) + -Cons.Fcal_3 - Vars.Fcal_SO_3*(-Cons.Fcal_3 - Vars.Fcal_SO_3*(-3.0*2*Cons.E_0*Cons.M*Vars.v**7 - 3.0*0*Vars.v**8) - 6.0*-Cons.Fcal_4 - Vars.Fcal_SQ_4*2*Cons.E_0*Cons.M*Vars.v**8)) + -Cons.Fcal_2*(-Cons.Fcal_2*(-Cons.Fcal_2*(-1.0*2*Cons.E_0*Cons.M*Vars.v**6 - 1.0*0*Vars.v**7 - 1.0*4*Cons.E_2*Cons.M*Vars.v**8) + -Cons.Fcal_3 - Vars.Fcal_SO_3*(-3.0*2*Cons.E_0*Cons.M*Vars.v**7 - 3.0*0*Vars.v**8) - 3.0*-Cons.Fcal_4 - Vars.Fcal_SQ_4*2*Cons.E_0*Cons.M*Vars.v**8) - 3.0*-Cons.Fcal_3 - Vars.Fcal_SO_3**2*2*Cons.E_0*Cons.M*Vars.v**8) + (0*(0*(0*(0*(1.0*2*Cons.E_0*Cons.M*Vars.v**4 + 1.0*0*Vars.v**5 + 1.0*4*Cons.E_2*Cons.M*Vars.v**6 + 1.0*5*Vars.E_SO_3*Cons.M*Vars.v**7 + 1.0*6*Cons.E_4*Cons.M + 6*Vars.E_SQ_4*Cons.M*Vars.v**8) + -Cons.Fcal_2*(4.0*2*Cons.E_0*Cons.M*Vars.v**5 + 4.0*0*Vars.v**6 + 4.0*4*Cons.E_2*Cons.M*Vars.v**7 + 4.0*5*Vars.E_SO_3*Cons.M*Vars.v**8) + -Cons.Fcal_3 - Vars.Fcal_SO_3*(4.0*2*Cons.E_0*Cons.M*Vars.v**6 + 4.0*0*Vars.v**7 + 4.0*4*Cons.E_2*Cons.M*Vars.v**8) + -Cons.Fcal_4 - Vars.Fcal_SQ_4*(4.0*2*Cons.E_0*Cons.M*Vars.v**7 + 4.0*0*Vars.v**8) + 4.0*-Cons.Fcal_5 - Vars.Fcal_SO_5*2*Cons.E_0*Cons.M*Vars.v**8) + -Cons.Fcal_2*(-Cons.Fcal_2*(6.0*2*Cons.E_0*Cons.M*Vars.v**6 + 6.0*0*Vars.v**7 + 6.0*4*Cons.E_2*Cons.M*Vars.v**8) + -Cons.Fcal_3 - Vars.Fcal_SO_3*(12.0*2*Cons.E_0*Cons.M*Vars.v**7 + 12.0*0*Vars.v**8) + 12.0*-Cons.Fcal_4 - Vars.Fcal_SQ_4*2*Cons.E_0*Cons.M*Vars.v**8) + 6.0*-Cons.Fcal_3 - Vars.Fcal_SO_3**2*2*Cons.E_0*Cons.M*Vars.v**8) + -Cons.Fcal_2**2*(-Cons.Fcal_2*(4.0*2*Cons.E_0*Cons.M*Vars.v**7 + 4.0*0*Vars.v**8) + 12.0*-Cons.Fcal_3 - Vars.Fcal_SO_3*2*Cons.E_0*Cons.M*Vars.v**8)) + 1.0*-Cons.Fcal_2**4*2*Cons.E_0*Cons.M*Vars.v**8 + (0**2*(0*(0*(0*(-1.0*2*Cons.E_0*Cons.M*Vars.v**5 - 1.0*0*Vars.v**6 - 1.0*4*Cons.E_2*Cons.M*Vars.v**7 - 1.0*5*Vars.E_SO_3*Cons.M*Vars.v**8) + -Cons.Fcal_2*(-5.0*2*Cons.E_0*Cons.M*Vars.v**6 - 5.0*0*Vars.v**7 - 5.0*4*Cons.E_2*Cons.M*Vars.v**8) + -Cons.Fcal_3 - Vars.Fcal_SO_3*(-5.0*2*Cons.E_0*Cons.M*Vars.v**7 - 5.0*0*Vars.v**8) - 5.0*-Cons.Fcal_4 - Vars.Fcal_SQ_4*2*Cons.E_0*Cons.M*Vars.v**8) + -Cons.Fcal_2*(-Cons.Fcal_2*(-10.0*2*Cons.E_0*Cons.M*Vars.v**7 - 10.0*0*Vars.v**8) - 20.0*-Cons.Fcal_3 - Vars.Fcal_SO_3*2*Cons.E_0*Cons.M*Vars.v**8)) - 10.0*-Cons.Fcal_2**3*2*Cons.E_0*Cons.M*Vars.v**8) + (0**4*(0*(0*(1.0*2*Cons.E_0*Cons.M*Vars.v**6 + 1.0*0*Vars.v**7 + 1.0*4*Cons.E_2*Cons.M*Vars.v**8) + -Cons.Fcal_2*(6.0*2*Cons.E_0*Cons.M*Vars.v**7 + 6.0*0*Vars.v**8) + 6.0*-Cons.Fcal_3 - Vars.Fcal_SO_3*2*Cons.E_0*Cons.M*Vars.v**8) + 15.0*-Cons.Fcal_2**2*2*Cons.E_0*Cons.M*Vars.v**8) + (0**6*(0*(-1.0*2*Cons.E_0*Cons.M*Vars.v**7 - 1.0*0*Vars.v**8) - 7.0*-Cons.Fcal_2*2*Cons.E_0*Cons.M*Vars.v**8) + 1.0*0**8*2*Cons.E_0*Cons.M*Vars.v**8/-Cons.Fcal_0)/-Cons.Fcal_0)/-Cons.Fcal_0)/-Cons.Fcal_0)/-Cons.Fcal_0)/-Cons.Fcal_0)/-Cons.Fcal_0)/-Cons.Fcal_0)/(Vars.Fcal_coeff*-Cons.Fcal_0)
    # Invert dt/dv to obtain dv/dt.
    dvdt_T5 = 1.0/dtdv
    dydt=np.zeros(8)
    # Frame rotor derivative from the orbital angular velocity (vector part).
    [dydt[5],dydt[6],dydt[7]] = FrameFromAngularVelocityIntegrand(Vars.rfrak_frame, OmegaVec_4p0(Cons,Vars)[1:])
    dydt[0] = dvdt_T5[0]
    if(Cons.EvolveSpin1):
        # Spin-1 precession rotated into the chi1 frame (quaternion conjugation).
        dydt[1], dydt[2]=FrameFromAngularVelocity_2D_Integrand(Vars.rfrak_chi1[0], Vars.rfrak_chi1[1],(mul(mul(inverse(Cons.S_chi1),OmegaVec_chiVec_1_4p0(Cons,Vars)),Cons.S_chi1))[1:])
    else:
        dydt[1], dydt[2] = 0.0, 0.0
    if(Cons.EvolveSpin2):
        # Same for spin 2.
        dydt[3], dydt[4]=FrameFromAngularVelocity_2D_Integrand(Vars.rfrak_chi2[0], Vars.rfrak_chi2[1],(mul(mul(inverse(Cons.S_chi2),OmegaVec_chiVec_2_4p0(Cons,Vars)),Cons.S_chi2))[1:])
    else:
        dydt[3], dydt[4] = 0.0, 0.0
    return dydt
class PNEv:
    """Driver for the post-Newtonian orbital evolution.

    All state (Cons, Vars, configuration flags, dispatch tables) is stored as
    class-level attributes set by Evolution(); Integrand() reads them, so only
    one evolution can run at a time.  Evolution() is the public entry point.
    """
    def Integrand(t,y):
        """ODE right-hand side passed to scipy.integrate.solve_ivp.

        Refreshes the v-dependent quantities for the current state y, then
        evaluates the selected TaylorTn approximant at the selected PN order
        (dispatch key = TaylorTn + 20*PNEvolutionOrder).  Also flips the
        termination flags when v leaves the PN-valid regime or stops growing.
        """
        PNEv.Recalculate.get(2*PNEv.PNEvolutionOrder)(PNEv.Cons,PNEv.Vars,y)
        dydt=PNEv.Taylor.get(PNEv.TaylorTn+20*PNEv.PNEvolutionOrder)(PNEv.Cons,PNEv.Vars)
        # Termination checks only apply during the main (not backward) pass.
        if PNEv.Vars.v>=1.0 and PNEv.NotForward:
            print("Beyond domain of PN validity, this is a good way to terminate.")
            PNEv.terminal1=False
        if dydt[0]<1.0e-12 and PNEv.NotForward:
            print("v is decreasing, which is not an uncommon way to stop.")
            PNEv.terminal2=False
        return dydt
    def Evolution(xHat_i, yHat_i, zHat_i, M1_i, M2_i, v_i, S_chi1_i, S_chi2_i, rfrak_frame,
        t_PNStart=False, t_PNEnd=False, PNEvolutionOrder=3.5, TaylorTn=1, StepsPerOrbit=32, ForwardInTime=True, tol=1e-8, MinStep=1e-7):
        """Integrate the PN evolution and return the solve_ivp result object.

        Initial frame vectors, masses, velocity v_i, spin rotors and frame
        rotor define the starting state.  PNEvolutionOrder selects which
        Recalculate_/Taylor* pair is used; TaylorTn selects the approximant
        (1, 4 or 5).  NOTE(review): despite its name, ForwardInTime=True
        triggers an ADDITIONAL backward-in-time integration whose samples are
        prepended to the result — confirm intended semantics with the caller.
        """
        # Initialization of constants and per-run flags.
        PNEv.terminal1=True
        PNEv.terminal2=True
        PNEv.NotForward=True
        PNEv.PNEvolutionOrder=PNEvolutionOrder
        PNEv.TaylorTn=TaylorTn
        # Dispatch tables: Recalculate keyed by 2*order; Taylor keyed by
        # TaylorTn + 20*order (e.g. 85 -> TaylorT5 at 4.0 PN).
        PNEv.Recalculate={ 0:Recalculate_0,
            1:Recalculate_0p50,
            2:Recalculate_1p0,
            3:Recalculate_1p5,
            4:Recalculate_2p0,
            5:Recalculate_2p5,
            6:Recalculate_3p0,
            7:Recalculate_3p5,
            8:Recalculate_4p0}
        PNEv.Taylor={
            1:TaylorT1_0,
            11:TaylorT1_0p50,
            21:TaylorT1_1p0,
            31:TaylorT1_1p5,
            41:TaylorT1_2p0,
            51:TaylorT1_2p5,
            61:TaylorT1_3p0,
            71:TaylorT1_3p5,
            81:TaylorT1_4p0,
            4:TaylorT4_0,
            14:TaylorT4_0p50,
            24:TaylorT4_1p0,
            34:TaylorT4_1p5,
            44:TaylorT4_2p0,
            54:TaylorT4_2p5,
            64:TaylorT4_3p0,
            74:TaylorT4_3p5,
            84:TaylorT4_4p0,
            5:TaylorT5_0,
            15:TaylorT5_0p50,
            25:TaylorT5_1p0,
            35:TaylorT5_1p5,
            45:TaylorT5_2p0,
            55:TaylorT5_2p5,
            65:TaylorT5_3p0,
            75:TaylorT5_3p5,
            85:TaylorT5_4p0}
        # Allocate the constants/variables containers with zeroed 1-element
        # arrays; Initialization() fills Cons in place.
        z=np.array([0.0])
        PNEv.Cons=Cons(z,z,z,z,z,z,z,z,z,z,z,z,z,z,z,z,z,z,z,z,z,z,z,z,z,z,z,z,z,True,True)
        PNEv.Vars=Vars(z,z,z,z,z,z,z,z,z,z,z,z,z,z,z,z,z,z,z,z,z,z,z,z,z,z,z,z,z,z,z,z,z,z,z,z,z,z)
        Initialization(PNEv.Cons,xHat_i, yHat_i, zHat_i, M1_i, M2_i, v_i, S_chi1_i, S_chi2_i, rfrak_frame)
        def terminate(t,y):
            # Event function: becomes 0 (terminal) when either flag is cleared.
            return 1.0*PNEv.terminal1*PNEv.terminal2
        terminate.terminal=True
        # Leading-order quadrupole estimate of the time to merger.
        TMerger=5.0/(256.0*PNEv.Cons.nu*v_i**8)
        TEnd=TMerger
        if t_PNEnd:
            TEnd=t_PNEnd
        time=[0.0]
        # Build the output time grid: StepsPerOrbit samples per orbit, with
        # the orbital period estimated from the time remaining to merger;
        # stop when the step would fall below MinStep.
        while time[-1]<TEnd and 2*PNEv.Cons.M*(256*PNEv.Cons.nu*(TMerger-time[-1])/5)**(3/8)/StepsPerOrbit>MinStep:
            time.append(time[-1]+(2*PNEv.Cons.M*(256*PNEv.Cons.nu*(TMerger-time[-1])/5)**(3/8)/StepsPerOrbit)[0])
        time=np.delete(time, -1)
        # Integrate
        yy=solve_ivp(PNEv.Integrand, [time[0],time[-1]], [v_i,0.0,
            0.0,0.0,0.0,rfrak_frame[0],rfrak_frame[1],rfrak_frame[2]], method='DOP853',
            t_eval=time, dense_output=True, events=terminate, rtol=tol, atol=tol)
        if ForwardInTime:
            # Backward-in-time extension: integrate toward negative times and
            # prepend the (reversed) samples to the main solution.
            PNEv.NotForward=False
            time=[0.0]
            TStart=-3*TMerger
            if t_PNStart:
                TStart=t_PNStart
            while time[-1]>TStart:
                time.append(time[-1]-(2*PNEv.Cons.M*(256*PNEv.Cons.nu*(TMerger-time[-1])/5)**(3/8)/StepsPerOrbit)[0])
            yyForward=solve_ivp(PNEv.Integrand, [time[0],time[-1]], [v_i,0.0,
                0.0,0.0,0.0,rfrak_frame[0],rfrak_frame[1],rfrak_frame[2]], method='DOP853',
                t_eval=time, dense_output=True, rtol=tol, atol=tol)
            # Stitch: drop the duplicated t=0 sample and reverse into
            # ascending time order before concatenating.
            yy.t=np.append(yyForward.t[1:][::-1],yy.t)
            data=np.empty((8,len(yy.t)))
            for i in range(8):
                data[i]=np.append(yyForward.y[i][1:][::-1],yy.y[i])
            yy.y=data
        return yy
| 91.580853
| 8,197
| 0.653586
| 25,375
| 113,835
| 2.770128
| 0.014621
| 0.083509
| 0.037728
| 0.049508
| 0.909463
| 0.902478
| 0.895863
| 0.890628
| 0.886744
| 0.884638
| 0
| 0.124883
| 0.109597
| 113,835
| 1,242
| 8,198
| 91.654589
| 0.568613
| 0.001968
| 0
| 0.694783
| 0
| 0
| 0.005088
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.065217
| false
| 0
| 0.007826
| 0.00087
| 0.132174
| 0.001739
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4d181341b8e71f92d9acf08fbae4484edf168117
| 138
|
py
|
Python
|
crawler/admin.py
|
awwong1/semscrape
|
7e7184a35d6cc96bad9a2e64ab7211b20df4dad6
|
[
"Apache-2.0"
] | null | null | null |
crawler/admin.py
|
awwong1/semscrape
|
7e7184a35d6cc96bad9a2e64ab7211b20df4dad6
|
[
"Apache-2.0"
] | null | null | null |
crawler/admin.py
|
awwong1/semscrape
|
7e7184a35d6cc96bad9a2e64ab7211b20df4dad6
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
from crawler.models import RSSEntry, RSSFeed

# Expose the crawler models in the Django admin with the default ModelAdmin.
for _model in (RSSFeed, RSSEntry):
    admin.site.register(_model)
| 23
| 44
| 0.833333
| 19
| 138
| 6.052632
| 0.578947
| 0.208696
| 0.278261
| 0.417391
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 138
| 5
| 45
| 27.6
| 0.912698
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
4da304479d3c1b84b397e6deb2a2e12b13e7f8d1
| 22,556
|
py
|
Python
|
ascat/eumetsat.py
|
wpreimes/ascat
|
31339ddb444c29a92e0085cc58e9b59e91cff967
|
[
"BSD-3-Clause"
] | null | null | null |
ascat/eumetsat.py
|
wpreimes/ascat
|
31339ddb444c29a92e0085cc58e9b59e91cff967
|
[
"BSD-3-Clause"
] | null | null | null |
ascat/eumetsat.py
|
wpreimes/ascat
|
31339ddb444c29a92e0085cc58e9b59e91cff967
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2018, TU Wien, Department of Geodesy and Geoinformation
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of TU Wien, Department of Geodesy and Geoinformation
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL TU WIEN DEPARTMENT OF GEODESY AND
# GEOINFORMATION BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Readers for data downloaded from EUMETSAT data centre (UMARF)
"""
from ascat.level2 import AscatL2SsmBufr
from ascat.level2 import AscatL2SsmBufrChunked
from ascat.level2 import AscatL2SsmNc
class AscatAL2Ssm125(AscatL2SsmBufr):
    """
    Reader for ASCAT A (Metop-A, 'M02') Level 2 soil moisture BUFR products
    on the 12.5 km swath grid, as downloaded from the EUMETSAT data centre
    (UMARF).

    Parameters
    ----------
    path: string
        path where the data is stored
    month_path_str: string, optional
        If the data is stored in subpaths per year or month then specify the string
        that should be used in datetime.datetime.strftime to get the subpath for a file.
        Default: ''
    """

    def __init__(self, path, month_path_str=''):
        # File-name patterns for this product; characters 25-39 of the name
        # carry the sensing timestamp.
        super(AscatAL2Ssm125, self).__init__(
            path,
            month_path_str=month_path_str,
            day_search_str='M02-ASCA-ASCSMR02-NA-5.0-%Y%m%d*.bfr',
            file_search_str='M02-ASCA-ASCSMR02-NA-5.0-{datetime}*.bfr',
            datetime_format='%Y%m%d%H%M%S',
            filename_datetime_format=(25, 39, '%Y%m%d%H%M%S'))
class AscatBL2Ssm125(AscatL2SsmBufr):
    """
    Reader for ASCAT B (Metop-B, 'M01') Level 2 soil moisture BUFR products
    on the 12.5 km swath grid, as downloaded from the EUMETSAT data centre
    (UMARF).

    Parameters
    ----------
    path: string
        path where the data is stored
    month_path_str: string, optional
        If the data is stored in subpaths per year or month then specify the string
        that should be used in datetime.datetime.strftime to get the subpath for a file.
        Default: ''
    """

    def __init__(self, path, month_path_str=''):
        # File-name patterns for this product; characters 25-39 of the name
        # carry the sensing timestamp.
        super(AscatBL2Ssm125, self).__init__(
            path,
            month_path_str=month_path_str,
            day_search_str='M01-ASCA-ASCSMR02-NA-5.0-%Y%m%d*.bfr',
            file_search_str='M01-ASCA-ASCSMR02-NA-5.0-{datetime}*.bfr',
            datetime_format='%Y%m%d%H%M%S',
            filename_datetime_format=(25, 39, '%Y%m%d%H%M%S'))
class AscatAL2Ssm125PDU(AscatL2SsmBufr):
    """
    Reader for ASCAT A (Metop-A) Level 2 soil moisture PDU BUFR products on
    the 12.5 km swath grid, as downloaded from the EUMETSAT data centre.

    Parameters
    ----------
    path: string
        path where the data is stored
    month_path_str: string, optional
        If the data is stored in subpaths per year or month then specify the string
        that should be used in datetime.datetime.strftime to get the subpath for a file.
        Default: ''
    """

    def __init__(self, path, month_path_str=''):
        # PDU file-name patterns; characters 63-77 of the name carry the
        # sensing timestamp.
        super(AscatAL2Ssm125PDU, self).__init__(
            path,
            month_path_str=month_path_str,
            day_search_str='W_XX-EUMETSAT-Darmstadt,SOUNDING+SATELLITE,METOPA+ASCAT_C_EUMC_%Y%m%d*_125_ssm_l2.bin',
            file_search_str='W_XX-EUMETSAT-Darmstadt,SOUNDING+SATELLITE,METOPA+ASCAT_C_EUMC_{datetime}*_125_ssm_l2.bin',
            datetime_format='%Y%m%d%H%M%S',
            filename_datetime_format=(63, 77, '%Y%m%d%H%M%S'))
class AscatAL2Ssm125PDUChunked(AscatL2SsmBufrChunked):
    """
    Reader for ASCAT A (Metop-A) Level 2 soil moisture PDU BUFR products on
    the 12.5 km swath grid, grouped into time chunks of ``chunk_minutes``.

    Parameters
    ----------
    path: string
        path where the data is stored
    month_path_str: string, optional
        If the data is stored in subpaths per year or month then specify the string
        that should be used in datetime.datetime.strftime to get the subpath for a file.
        Default: ''
    chunk_minutes: int, optional
        How many minutes should a chunk of data cover. Default: 100
    """

    def __init__(self, path, month_path_str='', chunk_minutes=100):
        # PDU file-name patterns; characters 63-77 of the name carry the
        # sensing timestamp.
        super(AscatAL2Ssm125PDUChunked, self).__init__(
            path,
            month_path_str=month_path_str,
            day_search_str='W_XX-EUMETSAT-Darmstadt,SOUNDING+SATELLITE,METOPA+ASCAT_C_EUMC_%Y%m%d*_125_ssm_l2.bin',
            file_search_str='W_XX-EUMETSAT-Darmstadt,SOUNDING+SATELLITE,METOPA+ASCAT_C_EUMC_{datetime}*_125_ssm_l2.bin',
            datetime_format='%Y%m%d%H%M%S',
            filename_datetime_format=(63, 77, '%Y%m%d%H%M%S'),
            chunk_minutes=chunk_minutes)
class AscatBL2Ssm125PDU(AscatL2SsmBufr):
    """
    Reader for ASCAT B (Metop-B) Level 2 soil moisture PDU BUFR products on
    the 12.5 km swath grid, as downloaded from the EUMETSAT data centre.

    Parameters
    ----------
    path: string
        path where the data is stored
    month_path_str: string, optional
        If the data is stored in subpaths per year or month then specify the string
        that should be used in datetime.datetime.strftime to get the subpath for a file.
        Default: ''
    """

    def __init__(self, path, month_path_str=''):
        # PDU file-name patterns; characters 63-77 of the name carry the
        # sensing timestamp.
        super(AscatBL2Ssm125PDU, self).__init__(
            path,
            month_path_str=month_path_str,
            day_search_str='W_XX-EUMETSAT-Darmstadt,SOUNDING+SATELLITE,METOPB+ASCAT_C_EUMP_%Y%m%d*_125_ssm_l2.bin',
            file_search_str='W_XX-EUMETSAT-Darmstadt,SOUNDING+SATELLITE,METOPB+ASCAT_C_EUMP_{datetime}*_125_ssm_l2.bin',
            datetime_format='%Y%m%d%H%M%S',
            filename_datetime_format=(63, 77, '%Y%m%d%H%M%S'))
class AscatBL2Ssm125PDUChunked(AscatL2SsmBufrChunked):
    """
    Reader for ASCAT B (Metop-B) Level 2 soil moisture PDU BUFR products on
    the 12.5 km swath grid, grouped into time chunks of ``chunk_minutes``.

    Parameters
    ----------
    path: string
        path where the data is stored
    month_path_str: string, optional
        If the data is stored in subpaths per year or month then specify the string
        that should be used in datetime.datetime.strftime to get the subpath for a file.
        Default: ''
    chunk_minutes: int, optional
        How many minutes should a chunk of data cover. Default: 100
    """

    def __init__(self, path, month_path_str='', chunk_minutes=100):
        # PDU file-name patterns; characters 63-77 of the name carry the
        # sensing timestamp.
        super(AscatBL2Ssm125PDUChunked, self).__init__(
            path,
            month_path_str=month_path_str,
            day_search_str='W_XX-EUMETSAT-Darmstadt,SOUNDING+SATELLITE,METOPB+ASCAT_C_EUMP_%Y%m%d*_125_ssm_l2.bin',
            file_search_str='W_XX-EUMETSAT-Darmstadt,SOUNDING+SATELLITE,METOPB+ASCAT_C_EUMP_{datetime}*_125_ssm_l2.bin',
            datetime_format='%Y%m%d%H%M%S',
            filename_datetime_format=(63, 77, '%Y%m%d%H%M%S'),
            chunk_minutes=chunk_minutes)
class AscatAL2Ssm250(AscatL2SsmBufr):
    """
    Reader for ASCAT A (Metop-A, 'M02') Level 2 soil moisture BUFR products
    on the 25.0 km swath grid, as downloaded from the EUMETSAT data centre
    (UMARF).

    Parameters
    ----------
    path: string
        path where the data is stored
    month_path_str: string, optional
        If the data is stored in subpaths per year or month then specify the string
        that should be used in datetime.datetime.strftime to get the subpath for a file.
        Default: ''
    """

    def __init__(self, path, month_path_str=''):
        # File-name patterns for this product; characters 25-39 of the name
        # carry the sensing timestamp.
        super(AscatAL2Ssm250, self).__init__(
            path,
            month_path_str=month_path_str,
            day_search_str='M02-ASCA-ASCSMO02-NA-5.0-%Y%m%d*.bfr',
            file_search_str='M02-ASCA-ASCSMO02-NA-5.0-{datetime}*.bfr',
            datetime_format='%Y%m%d%H%M%S',
            filename_datetime_format=(25, 39, '%Y%m%d%H%M%S'))
class AscatBL2Ssm250(AscatL2SsmBufr):
    """
    ASCAT B Level2 Soil Moisture at 25.0 km Swath Grid BUFR files from
    EUMETSAT.

    Parameters
    ----------
    path: string
        path where the data is stored
    month_path_str: string, optional
        If the data is stored in subpaths per year or month then specify the
        string that should be used in datetime.datetime.strftime to get the
        subpath for a file. Default: ''
    """

    def __init__(self, path, month_path_str=''):
        # One template drives both searches; the day pattern is the same
        # template with only the date portion of the timestamp filled in.
        template = 'M01-ASCA-ASCSMO02-NA-5.0-{datetime}*.bfr'
        super(AscatBL2Ssm250, self).__init__(
            path,
            month_path_str=month_path_str,
            day_search_str=template.format(datetime='%Y%m%d'),
            file_search_str=template,
            datetime_format='%Y%m%d%H%M%S',
            # Timestamp occupies characters 25-39 of the file name.
            filename_datetime_format=(25, 39, '%Y%m%d%H%M%S'))
class AscatAL2Ssm250PDU(AscatL2SsmBufr):
    """
    ASCAT A Level2 Soil Moisture at 25 km Swath Grid PDU BUFR files from
    EUMETSAT.

    Parameters
    ----------
    path: string
        path where the data is stored
    month_path_str: string, optional
        If the data is stored in subpaths per year or month then specify the
        string that should be used in datetime.datetime.strftime to get the
        subpath for a file. Default: ''
    """

    def __init__(self, path, month_path_str=''):
        # One template drives both searches; the day pattern is the same
        # template with only the date portion of the timestamp filled in.
        template = ('W_XX-EUMETSAT-Darmstadt,SOUNDING+SATELLITE,'
                    'METOPA+ASCAT_C_EUMC_{datetime}*_250_ssm_l2.bin')
        super(AscatAL2Ssm250PDU, self).__init__(
            path,
            month_path_str=month_path_str,
            day_search_str=template.format(datetime='%Y%m%d'),
            file_search_str=template,
            datetime_format='%Y%m%d%H%M%S',
            # Timestamp occupies characters 63-77 of the file name.
            filename_datetime_format=(63, 77, '%Y%m%d%H%M%S'))
class AscatAL2Ssm250PDUChunked(AscatL2SsmBufrChunked):
    """
    ASCAT A Level2 Soil Moisture at 25 km Swath Grid PDU BUFR files from
    EUMETSAT, read in time chunks.

    Parameters
    ----------
    path: string
        path where the data is stored
    month_path_str: string, optional
        If the data is stored in subpaths per year or month then specify the
        string that should be used in datetime.datetime.strftime to get the
        subpath for a file. Default: ''
    chunk_minutes: int, optional
        How many minutes should a chunk of data cover.
    """

    def __init__(self, path, month_path_str='', chunk_minutes=100):
        # One template drives both searches; the day pattern is the same
        # template with only the date portion of the timestamp filled in.
        template = ('W_XX-EUMETSAT-Darmstadt,SOUNDING+SATELLITE,'
                    'METOPA+ASCAT_C_EUMC_{datetime}*_250_ssm_l2.bin')
        super(AscatAL2Ssm250PDUChunked, self).__init__(
            path,
            month_path_str=month_path_str,
            day_search_str=template.format(datetime='%Y%m%d'),
            file_search_str=template,
            datetime_format='%Y%m%d%H%M%S',
            # Timestamp occupies characters 63-77 of the file name.
            filename_datetime_format=(63, 77, '%Y%m%d%H%M%S'),
            chunk_minutes=chunk_minutes)
class AscatBL2Ssm250PDU(AscatL2SsmBufr):
    """
    ASCAT B Level2 Soil Moisture at 25 km Swath Grid PDU BUFR files from
    EUMETSAT.

    Parameters
    ----------
    path: string
        path where the data is stored
    month_path_str: string, optional
        If the data is stored in subpaths per year or month then specify the
        string that should be used in datetime.datetime.strftime to get the
        subpath for a file. Default: ''
    """

    def __init__(self, path, month_path_str=''):
        # One template drives both searches; the day pattern is the same
        # template with only the date portion of the timestamp filled in.
        template = ('W_XX-EUMETSAT-Darmstadt,SOUNDING+SATELLITE,'
                    'METOPB+ASCAT_C_EUMP_{datetime}*_250_ssm_l2.bin')
        super(AscatBL2Ssm250PDU, self).__init__(
            path,
            month_path_str=month_path_str,
            day_search_str=template.format(datetime='%Y%m%d'),
            file_search_str=template,
            datetime_format='%Y%m%d%H%M%S',
            # Timestamp occupies characters 63-77 of the file name.
            filename_datetime_format=(63, 77, '%Y%m%d%H%M%S'))
class AscatBL2Ssm250PDUChunked(AscatL2SsmBufrChunked):
    """
    ASCAT B Level2 Soil Moisture at 25 km Swath Grid PDU BUFR files from
    EUMETSAT, read in time chunks.

    Parameters
    ----------
    path: string
        path where the data is stored
    month_path_str: string, optional
        If the data is stored in subpaths per year or month then specify the
        string that should be used in datetime.datetime.strftime to get the
        subpath for a file. Default: ''
    chunk_minutes: int, optional
        How many minutes should a chunk of data cover.
    """

    def __init__(self, path, month_path_str='', chunk_minutes=100):
        # One template drives both searches; the day pattern is the same
        # template with only the date portion of the timestamp filled in.
        template = ('W_XX-EUMETSAT-Darmstadt,SOUNDING+SATELLITE,'
                    'METOPB+ASCAT_C_EUMP_{datetime}*_250_ssm_l2.bin')
        super(AscatBL2Ssm250PDUChunked, self).__init__(
            path,
            month_path_str=month_path_str,
            day_search_str=template.format(datetime='%Y%m%d'),
            file_search_str=template,
            datetime_format='%Y%m%d%H%M%S',
            # Timestamp occupies characters 63-77 of the file name.
            filename_datetime_format=(63, 77, '%Y%m%d%H%M%S'),
            chunk_minutes=chunk_minutes)
class AscatAL2Ssm125Nc(AscatL2SsmNc):
    """
    ASCAT A Level2 Soil Moisture at 12.5 km Swath Grid NetCDF files from
    EUMETSAT.

    Parameters
    ----------
    path: string
        path where the data is stored
    month_path_str: string, optional
        If the data is stored in subpaths per year or month then specify the
        string that should be used in datetime.datetime.strftime to get the
        subpath for a file. Default: ''
    """

    def __init__(self, path, month_path_str=''):
        # One template drives both searches; the day pattern is the same
        # template with only the date portion of the timestamp filled in.
        template = ('W_XX-EUMETSAT-Darmstadt,SURFACE+SATELLITE,'
                    'METOPA+ASCAT_C_EUMP_{datetime}*_125_ssm_l2.nc')
        super(AscatAL2Ssm125Nc, self).__init__(
            path,
            month_path_str=month_path_str,
            day_search_str=template.format(datetime='%Y%m%d'),
            file_search_str=template,
            datetime_format='%Y%m%d%H%M%S',
            # Timestamp occupies characters 62-76 of the file name.
            filename_datetime_format=(62, 76, '%Y%m%d%H%M%S'))
class AscatBL2Ssm125Nc(AscatL2SsmNc):
    """
    ASCAT B Level2 Soil Moisture at 12.5 km Swath Grid NetCDF files from
    EUMETSAT.

    Parameters
    ----------
    path: string
        path where the data is stored
    month_path_str: string, optional
        If the data is stored in subpaths per year or month then specify the
        string that should be used in datetime.datetime.strftime to get the
        subpath for a file. Default: ''
    """

    def __init__(self, path, month_path_str=''):
        # One template drives both searches; the day pattern is the same
        # template with only the date portion of the timestamp filled in.
        template = ('W_XX-EUMETSAT-Darmstadt,SURFACE+SATELLITE,'
                    'METOPB+ASCAT_C_EUMP_{datetime}*_125_ssm_l2.nc')
        super(AscatBL2Ssm125Nc, self).__init__(
            path,
            month_path_str=month_path_str,
            day_search_str=template.format(datetime='%Y%m%d'),
            file_search_str=template,
            datetime_format='%Y%m%d%H%M%S',
            # Timestamp occupies characters 62-76 of the file name.
            filename_datetime_format=(62, 76, '%Y%m%d%H%M%S'))
class AscatAL2Ssm250Nc(AscatL2SsmNc):
    """
    ASCAT A Level2 Soil Moisture at 25 km Swath Grid NetCDF files from
    EUMETSAT.

    Parameters
    ----------
    path: string
        path where the data is stored
    month_path_str: string, optional
        If the data is stored in subpaths per year or month then specify the
        string that should be used in datetime.datetime.strftime to get the
        subpath for a file. Default: ''
    """

    def __init__(self, path, month_path_str=''):
        # One template drives both searches; the day pattern is the same
        # template with only the date portion of the timestamp filled in.
        template = ('W_XX-EUMETSAT-Darmstadt,SURFACE+SATELLITE,'
                    'METOPA+ASCAT_C_EUMP_{datetime}*_250_ssm_l2.nc')
        super(AscatAL2Ssm250Nc, self).__init__(
            path,
            month_path_str=month_path_str,
            day_search_str=template.format(datetime='%Y%m%d'),
            file_search_str=template,
            datetime_format='%Y%m%d%H%M%S',
            # Timestamp occupies characters 62-76 of the file name.
            filename_datetime_format=(62, 76, '%Y%m%d%H%M%S'))
class AscatBL2Ssm250Nc(AscatL2SsmNc):
    """
    ASCAT B Level2 Soil Moisture at 25 km Swath Grid NetCDF files from
    EUMETSAT.

    Parameters
    ----------
    path: string
        path where the data is stored
    month_path_str: string, optional
        If the data is stored in subpaths per year or month then specify the
        string that should be used in datetime.datetime.strftime to get the
        subpath for a file. Default: ''
    """

    def __init__(self, path, month_path_str=''):
        # BUG FIX: file_search_str previously read '{datetime}*250_ssm_l2.nc'
        # (missing the underscore before 250), disagreeing with this class's
        # own day_search_str and with every sibling class, so the per-file
        # search could never match. Deriving both patterns from one template
        # keeps them consistent.
        template = ('W_XX-EUMETSAT-Darmstadt,SURFACE+SATELLITE,'
                    'METOPB+ASCAT_C_EUMP_{datetime}*_250_ssm_l2.nc')
        super(AscatBL2Ssm250Nc, self).__init__(
            path,
            month_path_str=month_path_str,
            day_search_str=template.format(datetime='%Y%m%d'),
            file_search_str=template,
            datetime_format='%Y%m%d%H%M%S',
            # Timestamp occupies characters 62-76 of the file name.
            filename_datetime_format=(62, 76, '%Y%m%d%H%M%S'))
| 48.507527
| 117
| 0.617308
| 2,851
| 22,556
| 4.612066
| 0.08383
| 0.065708
| 0.058407
| 0.050194
| 0.870865
| 0.867214
| 0.867214
| 0.847593
| 0.842421
| 0.842421
| 0
| 0.027843
| 0.305772
| 22,556
| 464
| 118
| 48.612069
| 0.811865
| 0.345983
| 0
| 0.721311
| 0
| 0.065574
| 0.198076
| 0.170508
| 0
| 0
| 0
| 0
| 0
| 1
| 0.087432
| false
| 0
| 0.016393
| 0
| 0.191257
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4dbe147d5ee08b7d20b3ea6675d0bf8d79fb903b
| 16,697
|
py
|
Python
|
cogs/translate.py
|
mome0320/EZ-Bot
|
45bf0e1e3b81b2e0097eb630cd4561063dbbf581
|
[
"MIT"
] | 1
|
2021-04-03T13:21:29.000Z
|
2021-04-03T13:21:29.000Z
|
cogs/translate.py
|
mome0320/EZ-Bot
|
45bf0e1e3b81b2e0097eb630cd4561063dbbf581
|
[
"MIT"
] | null | null | null |
cogs/translate.py
|
mome0320/EZ-Bot
|
45bf0e1e3b81b2e0097eb630cd4561063dbbf581
|
[
"MIT"
] | null | null | null |
import discord
import asyncio
import os
from discord.ext import commands
import urllib
from urllib.request import URLError
from urllib.request import HTTPError
from urllib.request import urlopen
from urllib.request import Request, urlopen
from bs4 import BeautifulSoup
from urllib.parse import quote
import re
import warnings
import requests
import unicodedata
import json
class Translate(commands.Cog):
    """Translation commands backed by the Naver Papago NMT API.

    Each command translates the text following the command word between a
    fixed language pair (Korean/English/Chinese-simplified/Japanese) and
    replies with a Discord embed.
    """

    # Shared constants for every language pair.
    PAPAGO_URL = "https://openapi.naver.com/v1/papago/n2mt"
    FOOTER_TEXT = "Offered by NACL - Shio"
    FOOTER_ICON = "https://raw.githubusercontent.com/Shio7/EZ-Bot/master/images/Shio7.png"

    def __init__(self, client):
        self.client = client

    def _embed(self, title, description):
        """Build an embed with the bot's standard color and footer."""
        embed = discord.Embed(title=title, description=description, color=0xffffff)
        embed.set_footer(text=self.FOOTER_TEXT, icon_url=self.FOOTER_ICON)
        return embed

    async def _translate(self, ctx, source, target):
        """Translate the message text from *source* to *target* and reply.

        Parameters
        ----------
        ctx: commands.Context
            Invocation context; the text to translate is everything after
            the command word in ``ctx.message.content``.
        source, target: str
            Papago language codes (e.g. 'ko', 'en', 'ja', 'zh-CN').
        """
        reply = ctx.message.content.split(" ")
        await ctx.trigger_typing()
        if len(reply) == 1:
            # No text was supplied after the command word.
            await ctx.send(embed=self._embed(
                "에러", "단어 혹은 문장이 입력되지 않았어요. 다시한번 확인해주세요."))
            return
        # NOTE(review): the text is not URL-encoded before being placed in
        # the form body, matching the original behavior; text containing
        # '&' or '=' may be mangled — consider urllib.parse.quote.
        text = " ".join(reply[1:])
        try:
            params = "source=" + source + "&target=" + target + "&text=" + text
            request = Request(self.PAPAGO_URL)
            # client_id / client_secret are module-level API credentials.
            request.add_header("X-Naver-Client-Id", client_id)
            request.add_header("X-Naver-Client-Secret", client_secret)
            response = urlopen(request, data=params.encode("utf-8"))
            responsed_code = response.getcode()
            if responsed_code == 200:
                # Body is UTF-8 JSON; extract the translated string.
                api_result = json.loads(response.read().decode("utf-8"))
                translated_text = api_result['message']['result']["translatedText"]
                await ctx.send(embed=self._embed("번역결과", translated_text))
            else:
                # BUG FIX: the original concatenated the int status code to a
                # str, raising TypeError on every non-200 response.
                await ctx.send(embed=self._embed(
                    "에러", "에러 코드: " + str(responsed_code)))
        except HTTPError:
            await ctx.send(embed=self._embed("에러", "오류가 발생하여 번역에 실패했어요."))

    # Commands — thin wrappers that only name the language pair.

    @commands.command()
    async def 한영(self, ctx):
        """Korean -> English."""
        await self._translate(ctx, "ko", "en")

    @commands.command()
    async def 영한(self, ctx):
        """English -> Korean."""
        await self._translate(ctx, "en", "ko")

    @commands.command()
    async def 한중(self, ctx):
        """Korean -> Chinese (simplified)."""
        await self._translate(ctx, "ko", "zh-CN")

    @commands.command()
    async def 한일(self, ctx):
        """Korean -> Japanese."""
        await self._translate(ctx, "ko", "ja")

    @commands.command()
    async def 일한(self, ctx):
        """Japanese -> Korean."""
        await self._translate(ctx, "ja", "ko")

    @commands.command()
    async def 중한(self, ctx):
        """Chinese (simplified) -> Korean."""
        await self._translate(ctx, "zh-CN", "ko")
def setup(client):
    """discord.py extension entry point: register the Translate cog."""
    cog = Translate(client)
    client.add_cog(cog)
| 54.744262
| 150
| 0.579805
| 1,949
| 16,697
| 4.900975
| 0.077476
| 0.028476
| 0.042714
| 0.055276
| 0.945142
| 0.939908
| 0.934255
| 0.934255
| 0.934255
| 0.934255
| 0
| 0.013508
| 0.303887
| 16,697
| 304
| 151
| 54.924342
| 0.808311
| 0.076421
| 0
| 0.863071
| 0
| 0.099585
| 0.226224
| 0.018332
| 0
| 0
| 0.012481
| 0
| 0
| 1
| 0.008299
| false
| 0
| 0.06639
| 0
| 0.078838
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1501afce66eeda329d7efe7fd69a988362fe06e3
| 9,582
|
py
|
Python
|
src/models/cnn.py
|
hung96ad/face_classification
|
321c5c8fea38b681b9210461edeea729406ab11f
|
[
"MIT"
] | null | null | null |
src/models/cnn.py
|
hung96ad/face_classification
|
321c5c8fea38b681b9210461edeea729406ab11f
|
[
"MIT"
] | null | null | null |
src/models/cnn.py
|
hung96ad/face_classification
|
321c5c8fea38b681b9210461edeea729406ab11f
|
[
"MIT"
] | null | null | null |
from keras.layers import Activation, Convolution2D, Dropout, Conv2D, Dense, LSTM
from keras.layers import BatchNormalization
from keras.layers import GlobalAveragePooling2D
from keras.models import Sequential
from keras.layers import Flatten
from keras.models import Model
from keras.layers import Input
from keras.layers import MaxPooling2D
from keras.layers import SeparableConv2D
from keras import layers
from keras.regularizers import l2
def detect_emotion_model(input_shape, num_classes, l2_regularization=0.01, rate_dropout=0.3):
    """Build a mini-Xception style CNN for emotion classification.

    Parameters
    ----------
    input_shape: tuple
        Shape of the input images (without the batch dimension).
    num_classes: int
        Number of output classes.
    l2_regularization: float, optional
        L2 weight-decay factor applied to conv kernels.
    rate_dropout: float, optional
        Dropout rate used after each residual module and dense layer.

    Returns
    -------
    keras.models.Model
        Uncompiled model mapping images to softmax class probabilities.
    """
    regularization = l2(l2_regularization)

    # Stem: a single plain convolution.
    img_input = Input(input_shape)
    x = Conv2D(8, (3, 3), strides=(1, 1), kernel_regularizer=regularization,
               use_bias=False)(img_input)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    # Five residual separable-conv modules with 16, 32, 64, 128, 256 filters.
    for i in range(1, 6):
        filters = 8 * pow(2, i)
        # 1x1 strided conv matches the residual branch to the new shape.
        residual = Conv2D(filters, (1, 1), strides=(2, 2),
                          padding='same', use_bias=False)(x)
        residual = BatchNormalization()(residual)

        x = SeparableConv2D(filters, (3, 3), padding='same',
                            kernel_regularizer=regularization,
                            use_bias=False)(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = SeparableConv2D(filters, (3, 3), padding='same',
                            kernel_regularizer=regularization,
                            use_bias=False)(x)
        x = BatchNormalization()(x)
        x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
        x = layers.add([x, residual])
        x = Dropout(rate_dropout)(x)

    # Classifier head.
    x = GlobalAveragePooling2D()(x)
    x = Dense(128, activation='relu')(x)
    x = Dropout(rate_dropout)(x)
    x = Dense(64, activation='relu')(x)
    x = Dropout(rate_dropout)(x)
    x = Dense(32, activation='relu')(x)
    x = Dropout(rate_dropout)(x)
    # BUG FIX: the logits layer previously used activation='relu' directly
    # before the softmax, which clips all negative logits to zero and
    # degrades the softmax; the final Dense must be linear.
    x = Dense(num_classes)(x)
    output = Activation('softmax', name='predictions')(x)

    model = Model(img_input, output)
    return model
def mini_XCEPTION_base(input_shape, num_classes, l2_regularization=0.01):
    """Build a mini-Xception CNN with a fully-convolutional classifier head.

    Parameters
    ----------
    input_shape: tuple
        Shape of the input images (without the batch dimension).
    num_classes: int
        Number of output classes.
    l2_regularization: float, optional
        L2 weight-decay factor applied to conv kernels.

    Returns
    -------
    keras.models.Model
        Uncompiled model mapping images to softmax class probabilities.
    """
    regularization = l2(l2_regularization)

    # Stem: two plain 3x3 convolutions.
    img_input = Input(input_shape)
    x = Conv2D(8, (3, 3), strides=(1, 1), kernel_regularizer=regularization,
               use_bias=False)(img_input)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2D(8, (3, 3), strides=(1, 1), kernel_regularizer=regularization,
               use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    # Modules 1-4: identical residual separable-conv blocks, doubling the
    # filter count each time (16 -> 32 -> 64 -> 128).
    for filters in (16, 32, 64, 128):
        # 1x1 strided conv matches the residual branch to the new shape.
        residual = Conv2D(filters, (1, 1), strides=(2, 2),
                          padding='same', use_bias=False)(x)
        residual = BatchNormalization()(residual)

        x = SeparableConv2D(filters, (3, 3), padding='same',
                            kernel_regularizer=regularization,
                            use_bias=False)(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = SeparableConv2D(filters, (3, 3), padding='same',
                            kernel_regularizer=regularization,
                            use_bias=False)(x)
        x = BatchNormalization()(x)
        x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
        x = layers.add([x, residual])

    # Head: a per-class conv map followed by global average pooling
    # (no dense layers; the conv head is intentionally unregularized).
    x = Conv2D(num_classes, (3, 3), padding='same')(x)
    x = GlobalAveragePooling2D()(x)
    output = Activation('softmax', name='predictions')(x)

    return Model(img_input, output)
def mini_XCEPTION(input_shape, num_classes, l2_regularization=0.01):
    """Build a small XCEPTION-style CNN classifier.

    Two plain conv blocks, four depthwise-separable residual modules with
    doubling filter counts (16 -> 128), then a conv + global-average-pool
    head feeding a stack of dense layers and a softmax output.

    Args:
        input_shape: shape of the input images, e.g. (64, 64, 1).
        num_classes: number of output classes.
        l2_regularization: L2 weight-decay factor for the conv kernels.

    Returns:
        An uncompiled Keras ``Model``.
    """
    regularization = l2(l2_regularization)

    # base: two ordinary convolutions
    img_input = Input(input_shape)
    x = Conv2D(8, (3, 3), strides=(1, 1), kernel_regularizer=regularization,
               use_bias=False)(img_input)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2D(8, (3, 3), strides=(1, 1), kernel_regularizer=regularization,
               use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    # modules 1-4: separable-conv residual blocks, filters doubling each time
    for filters in (16, 32, 64, 128):
        # 1x1 strided projection so the shortcut matches the downsampled shape
        residual = Conv2D(filters, (1, 1), strides=(2, 2),
                          padding='same', use_bias=False)(x)
        residual = BatchNormalization()(residual)
        x = SeparableConv2D(filters, (3, 3), padding='same',
                            kernel_regularizer=regularization,
                            use_bias=False)(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = SeparableConv2D(filters, (3, 3), padding='same',
                            kernel_regularizer=regularization,
                            use_bias=False)(x)
        x = BatchNormalization()(x)
        x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
        x = layers.add([x, residual])

    # module 5: conv head plus dense classifier
    x = Conv2D(256, (3, 3),
               kernel_regularizer=regularization,
               padding='same')(x)
    x = BatchNormalization()(x)
    x = GlobalAveragePooling2D()(x)
    x = Dense(128, activation='relu', use_bias=False)(x)
    x = Dense(64, activation='relu', use_bias=False)(x)
    x = Dense(32, activation='relu', use_bias=False)(x)
    # NOTE(review): 'relu' on the class logits ahead of the softmax zeroes
    # every negative logit -- looks unintended, kept for behaviour parity.
    x = Dense(num_classes, activation='relu', use_bias=False)(x)
    output = Activation('softmax', name='predictions')(x)

    return Model(img_input, output)
if __name__ == "__main__":
    # Smoke test: build the mini model for 64x64 grayscale inputs, 7 classes,
    # and print its layer summary.
    input_shape = (64, 64, 1)
    num_classes = 7
    model = mini_XCEPTION(input_shape, num_classes)
    model.summary()
| 36.712644
| 93
| 0.578794
| 1,095
| 9,582
| 4.957991
| 0.07032
| 0.028366
| 0.079573
| 0.07902
| 0.895745
| 0.888746
| 0.884325
| 0.868116
| 0.813962
| 0.797016
| 0
| 0.043289
| 0.279169
| 9,582
| 260
| 94
| 36.853846
| 0.742725
| 0.027552
| 0
| 0.842365
| 0
| 0
| 0.032477
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014778
| false
| 0
| 0.054187
| 0
| 0.083744
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
151218e84ce0abae4687f630d2adbc4064100418
| 113
|
py
|
Python
|
openselfsup/version.py
|
ZHTushar23/OpenSelfSup
|
d64588438f498d1f13f8be65c0c6fc9942e1c859
|
[
"Apache-2.0"
] | 1
|
2021-02-15T19:32:04.000Z
|
2021-02-15T19:32:04.000Z
|
openselfsup/version.py
|
ZHTushar23/OpenSelfSup
|
d64588438f498d1f13f8be65c0c6fc9942e1c859
|
[
"Apache-2.0"
] | null | null | null |
openselfsup/version.py
|
ZHTushar23/OpenSelfSup
|
d64588438f498d1f13f8be65c0c6fc9942e1c859
|
[
"Apache-2.0"
] | null | null | null |
# GENERATED VERSION FILE (do not edit by hand)
# TIME: Sun May 9 13:50:54 2021
__version__ = '0.3.0+34e25ea'
# The short form is the release component of the full version string
# (everything before the '+<commit>' local tag).
short_version = __version__.split('+', 1)[0]
| 18.833333
| 32
| 0.690265
| 21
| 113
| 3.47619
| 0.714286
| 0.219178
| 0.246575
| 0.273973
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.223404
| 0.168142
| 113
| 5
| 33
| 22.6
| 0.553191
| 0.469027
| 0
| 0
| 1
| 0
| 0.315789
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1278236f603d3f4a2579341ab9d2f84e4c96fac4
| 610
|
py
|
Python
|
Algo and DSA/LeetCode-Solutions-master/Python/concatenation-of-array.py
|
Sourav692/FAANG-Interview-Preparation
|
f523e5c94d582328b3edc449ea16ac6ab28cdc81
|
[
"Unlicense"
] | 3,269
|
2018-10-12T01:29:40.000Z
|
2022-03-31T17:58:41.000Z
|
Algo and DSA/LeetCode-Solutions-master/Python/concatenation-of-array.py
|
Sourav692/FAANG-Interview-Preparation
|
f523e5c94d582328b3edc449ea16ac6ab28cdc81
|
[
"Unlicense"
] | 53
|
2018-12-16T22:54:20.000Z
|
2022-02-25T08:31:20.000Z
|
Algo and DSA/LeetCode-Solutions-master/Python/concatenation-of-array.py
|
Sourav692/FAANG-Interview-Preparation
|
f523e5c94d582328b3edc449ea16ac6ab28cdc81
|
[
"Unlicense"
] | 1,236
|
2018-10-12T02:51:40.000Z
|
2022-03-30T13:30:37.000Z
|
# Time:  O(n)
# Space: O(1)
class Solution(object):
    def getConcatenation(self, nums):
        """Double nums in place (ans[i] == ans[i + n]) and return it.

        :type nums: List[int]
        :rtype: List[int]
        """
        # += on a list is an in-place extend, so the caller's list is reused.
        nums += nums
        return nums
# Time:  O(n)
# Space: O(1)
class Solution2(object):
    def getConcatenation(self, nums):
        """Return a new list: nums followed by a copy of itself.

        :type nums: List[int]
        :rtype: List[int]
        """
        return nums + list(nums)
# Time:  O(n)
# Space: O(1)
class Solution3(object):
    def getConcatenation(self, nums):
        """Return a new list: two repetitions of nums.

        :type nums: List[int]
        :rtype: List[int]
        """
        return 2 * nums
| 17.941176
| 37
| 0.506557
| 71
| 610
| 4.352113
| 0.295775
| 0.135922
| 0.058252
| 0.106796
| 0.847896
| 0.847896
| 0.847896
| 0.789644
| 0.647249
| 0.647249
| 0
| 0.014963
| 0.342623
| 610
| 33
| 38
| 18.484848
| 0.755611
| 0.314754
| 0
| 0.3
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.3
| false
| 0
| 0
| 0
| 0.9
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
12dcc366839b52963ec568e12595110dadcbc952
| 2,659
|
py
|
Python
|
pauldepriest_Day9p1.py
|
pauldepriest/AdventOfCode2021
|
a9dca2c1c18314a9fb984e091e5c2facf177a91f
|
[
"MIT"
] | null | null | null |
pauldepriest_Day9p1.py
|
pauldepriest/AdventOfCode2021
|
a9dca2c1c18314a9fb984e091e5c2facf177a91f
|
[
"MIT"
] | null | null | null |
pauldepriest_Day9p1.py
|
pauldepriest/AdventOfCode2021
|
a9dca2c1c18314a9fb984e091e5c2facf177a91f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 3 16:26:08 2021

@author: pauldepriest
"""
import numpy

SIZE = 100  # the puzzle input is a fixed 100x100 grid of digits

# Read the height map; ocean[column, row] holds the digit at that position.
f1 = open("day9.txt", 'r')
indata = f1.readlines()
f1.close()

ocean = numpy.zeros((SIZE, SIZE), dtype=int)
basin = numpy.zeros((SIZE, SIZE), dtype=int)  # unused here; presumably kept for part 2
for row in range(SIZE):
    for column in range(SIZE):
        ocean[column, row] = int(indata[row][column])

# A low point is strictly lower than every in-bounds orthogonal neighbour;
# its risk level is height + 1.  The original spelled out nine separate
# corner/edge/interior cases; collecting the neighbours first covers all
# of them with one condition.
total = 0
basins = []
for row in range(SIZE):
    for column in range(SIZE):
        val = ocean[column, row]
        neighbors = []
        if column > 0:
            neighbors.append(ocean[column - 1, row])
        if column < SIZE - 1:
            neighbors.append(ocean[column + 1, row])
        if row > 0:
            neighbors.append(ocean[column, row - 1])
        if row < SIZE - 1:
            neighbors.append(ocean[column, row + 1])
        if all(val < n for n in neighbors):
            total += val + 1
            # BUG FIX: the original appended the undefined name 'colum',
            # which raised NameError on the first low point found.
            basins.append([column, row, 0])

print(total)
| 34.089744
| 140
| 0.49906
| 379
| 2,659
| 3.501319
| 0.14248
| 0.215524
| 0.263753
| 0.192163
| 0.83798
| 0.83798
| 0.792012
| 0.792012
| 0.792012
| 0.792012
| 0
| 0.071669
| 0.328319
| 2,659
| 77
| 141
| 34.532468
| 0.671333
| 0.044378
| 0
| 0.431373
| 0
| 0
| 0.003567
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.019608
| 0
| 0.019608
| 0.019608
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
12e762dab8c0a77851e23c875ed545a7e4108c49
| 4,528
|
py
|
Python
|
python/test/grammar_translator/testCudaMemcpy.py
|
ROCmSoftwarePlatform/gpufort
|
b3d392cf28200cd9b3b2f77689d5a81176b3ec42
|
[
"MIT"
] | 57
|
2021-10-04T19:52:55.000Z
|
2022-03-29T17:41:36.000Z
|
python/test/grammar_translator/testCudaMemcpy.py
|
odellus/gpufort
|
f24f299e0a3a55c71bec8875e443be28413c9851
|
[
"MIT"
] | 12
|
2021-09-29T11:32:59.000Z
|
2021-12-09T11:39:54.000Z
|
python/test/grammar_translator/testCudaMemcpy.py
|
ROCmSoftwarePlatform/gpufort
|
b3d392cf28200cd9b3b2f77689d5a81176b3ec42
|
[
"MIT"
] | 5
|
2021-10-05T06:16:28.000Z
|
2022-02-24T14:32:24.000Z
|
#!/usr/bin/env python3
# SPDX-License-Identifier: MIT
# Copyright (c) 2021 Advanced Micro Devices, Inc. All rights reserved.
# Smoke test: parse CUDA Fortran cudaMemcpy/cudaMemcpyAsync statements and
# check their HIP Fortran renderings.
import test
import translator.translator

# Prime the bounds grammar with a simple range expression.
# NOTE(review): 'translator.bounds' is presumably made available by the
# 'translator.translator' import above -- confirm against the package layout.
translator.bounds.parseString("(1:n)")

# Inputs: plain copy, derived-type members with a sized expression, an async
# copy with a stream argument, and a copy with an explicit memcpy-kind.
testdata ="""
cudaMemcpy(a,b,c)
cudaMemcpy(a%b%c,b%e%f,c*5)
cudaMemcpyAsync(a,b,c,stream)
cudaMemcpy(a,b,c,cudaMemcpyHostToDevice)
""".strip(" ").strip("\n").split("\n")

# First make sure every input parses at all (raises on failure).
test.run(
    expression = translator.cuf_cudamemcpy,
    testdata = testdata,
    tag = "cudaMemcpy",
    raiseException = True
)

# The two boolean f_str arguments select device residency of destination and
# source respectively (True = device), which determines the hipMemcpy kind and
# whether the argument is wrapped in c_loc(); 8 is the bytes-per-element.
# Whitespace is stripped before comparison.
v = testdata[0]
assert translator.cuf_cudamemcpy.parseString(v)[0].f_str(True,True,8).replace(" ","") == "hipMemcpy(a,b,1_8*(c)*(8),hipMemcpyDeviceToDevice)"
assert translator.cuf_cudamemcpy.parseString(v)[0].f_str(False,True,8).replace(" ","") == "hipMemcpy(c_loc(a),b,1_8*(c)*(8),hipMemcpyDeviceToHost)"
assert translator.cuf_cudamemcpy.parseString(v)[0].f_str(True,False,8).replace(" ","") == "hipMemcpy(a,c_loc(b),1_8*(c)*(8),hipMemcpyHostToDevice)"
assert translator.cuf_cudamemcpy.parseString(v)[0].f_str(False,False,8).replace(" ","") == "hipMemcpy(c_loc(a),c_loc(b),1_8*(c)*(8),hipMemcpyHostToHost)"
#print(translator.cuf_cudamemcpy.parseString(v)[0].f_str(True,True,8).replace(" ","") )
#print(translator.cuf_cudamemcpy.parseString(v)[0].f_str(False,True,8).replace(" ","") )
#print(translator.cuf_cudamemcpy.parseString(v)[0].f_str(True,False,8).replace(" ","") )
#print(translator.cuf_cudamemcpy.parseString(v)[0].f_str(False,False,8).replace(" ",""))
#
# Derived-type member accesses (%) pass through; the size expression is
# parenthesized inside the byte-count product.
v = testdata[1]
assert translator.cuf_cudamemcpy.parseString(v)[0].f_str(True,True,8).replace(" ","") == "hipMemcpy(a%b%c,b%e%f,1_8*((c*5))*(8),hipMemcpyDeviceToDevice)"
assert translator.cuf_cudamemcpy.parseString(v)[0].f_str(False,True,8).replace(" ","") == "hipMemcpy(c_loc(a%b%c),b%e%f,1_8*((c*5))*(8),hipMemcpyDeviceToHost)"
assert translator.cuf_cudamemcpy.parseString(v)[0].f_str(True,False,8).replace(" ","") == "hipMemcpy(a%b%c,c_loc(b%e%f),1_8*((c*5))*(8),hipMemcpyHostToDevice)"
assert translator.cuf_cudamemcpy.parseString(v)[0].f_str(False,False,8).replace(" ","") == "hipMemcpy(c_loc(a%b%c),c_loc(b%e%f),1_8*((c*5))*(8),hipMemcpyHostToHost)"
#print(translator.cuf_cudamemcpy.parseString(v)[0].f_str(True,True,8).replace(" ","") )
#print(translator.cuf_cudamemcpy.parseString(v)[0].f_str(False,True,8).replace(" ","") )
#print(translator.cuf_cudamemcpy.parseString(v)[0].f_str(True,False,8).replace(" ","") )
#print(translator.cuf_cudamemcpy.parseString(v)[0].f_str(False,False,8).replace(" ",""))
# Async form keeps the trailing stream argument.
v = testdata[2]
assert translator.cuf_cudamemcpy.parseString(v)[0].f_str(True,True,8).replace(" ","") == "hipMemcpyAsync(a,b,1_8*(c)*(8),hipMemcpyDeviceToDevice,stream)"
assert translator.cuf_cudamemcpy.parseString(v)[0].f_str(False,True,8).replace(" ","") == "hipMemcpyAsync(c_loc(a),b,1_8*(c)*(8),hipMemcpyDeviceToHost,stream)"
assert translator.cuf_cudamemcpy.parseString(v)[0].f_str(True,False,8).replace(" ","") == "hipMemcpyAsync(a,c_loc(b),1_8*(c)*(8),hipMemcpyHostToDevice,stream)"
assert translator.cuf_cudamemcpy.parseString(v)[0].f_str(False,False,8).replace(" ","") == "hipMemcpyAsync(c_loc(a),c_loc(b),1_8*(c)*(8),hipMemcpyHostToHost,stream)"
#print(translator.cuf_cudamemcpy.parseString(v)[0].f_str(True,True,8).replace(" ","") )
#print(translator.cuf_cudamemcpy.parseString(v)[0].f_str(False,True,8).replace(" ","") )
#print(translator.cuf_cudamemcpy.parseString(v)[0].f_str(True,False,8).replace(" ","") )
#print(translator.cuf_cudamemcpy.parseString(v)[0].f_str(False,False,8).replace(" ",""))
# An explicit kind argument in the source overrides the boolean flags: all
# four renderings keep cudaMemcpyHostToDevice and the same c_loc placement.
v = testdata[3]
assert translator.cuf_cudamemcpy.parseString(v)[0].f_str(True,True,8).replace(" ","") == "hipMemcpy(a,c_loc(b),1_8*(c)*(8),cudaMemcpyHostToDevice)"
assert translator.cuf_cudamemcpy.parseString(v)[0].f_str(False,True,8).replace(" ","") == "hipMemcpy(a,c_loc(b),1_8*(c)*(8),cudaMemcpyHostToDevice)"
assert translator.cuf_cudamemcpy.parseString(v)[0].f_str(True,False,8).replace(" ","") == "hipMemcpy(a,c_loc(b),1_8*(c)*(8),cudaMemcpyHostToDevice)"
assert translator.cuf_cudamemcpy.parseString(v)[0].f_str(False,False,8).replace(" ","") == "hipMemcpy(a,c_loc(b),1_8*(c)*(8),cudaMemcpyHostToDevice)"
#print(translator.cuf_cudamemcpy.parseString(v)[0].f_str(True,True,8).replace(" ","") )
#print(translator.cuf_cudamemcpy.parseString(v)[0].f_str(False,True,8).replace(" ","") )
#print(translator.cuf_cudamemcpy.parseString(v)[0].f_str(True,False,8).replace(" ","") )
#print(translator.cuf_cudamemcpy.parseString(v)[0].f_str(False,False,8).replace(" ",""))
print("SUCCESS")
| 71.873016
| 165
| 0.710689
| 683
| 4,528
| 4.569546
| 0.089312
| 0.137456
| 0.243191
| 0.348606
| 0.86735
| 0.86735
| 0.856456
| 0.846203
| 0.846203
| 0.825376
| 0
| 0.029673
| 0.05477
| 4,528
| 63
| 166
| 71.873016
| 0.699533
| 0.335468
| 0
| 0
| 0
| 0.111111
| 0.381605
| 0.359866
| 0
| 0
| 0
| 0
| 0.444444
| 1
| 0
| false
| 0
| 0.055556
| 0
| 0.055556
| 0.027778
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
4242fdb7e3efc5e572f9de5bdd3b4c985a93674b
| 55
|
py
|
Python
|
python/addfive.py
|
parkison/nodenopython
|
2f897476bee0e8f709aa5d8b0f7e6d34a06a83d8
|
[
"MIT"
] | null | null | null |
python/addfive.py
|
parkison/nodenopython
|
2f897476bee0e8f709aa5d8b0f7e6d34a06a83d8
|
[
"MIT"
] | null | null | null |
python/addfive.py
|
parkison/nodenopython
|
2f897476bee0e8f709aa5d8b0f7e6d34a06a83d8
|
[
"MIT"
] | null | null | null |
import sys

# Print the sum of the first two command-line arguments, parsed as floats.
# BUG FIX: the original used the Python 2 'print' statement, a SyntaxError
# under Python 3; the parenthesized form below is valid in both (a single
# parenthesized expression in Py2, the print() builtin in Py3).
print(float(sys.argv[1]) + float(sys.argv[2]))
| 18.333333
| 43
| 0.727273
| 11
| 55
| 3.636364
| 0.636364
| 0.4
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.039216
| 0.072727
| 55
| 3
| 43
| 18.333333
| 0.745098
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.5
| null | null | 0.5
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 1
|
0
| 7
|
4261d05a6fc47744d5c9bf1b83eada3b16ad4c05
| 2,779
|
py
|
Python
|
tests/plugins/test_yamlargs_filters.py
|
dsoto/dexy
|
0f2090250040c3c54c8481a16de8e476b559e87c
|
[
"MIT"
] | 136
|
2015-01-06T15:04:47.000Z
|
2021-12-21T22:52:41.000Z
|
tests/plugins/test_yamlargs_filters.py
|
dsoto/dexy
|
0f2090250040c3c54c8481a16de8e476b559e87c
|
[
"MIT"
] | 13
|
2015-01-26T14:06:58.000Z
|
2020-03-27T21:16:10.000Z
|
tests/plugins/test_yamlargs_filters.py
|
dsoto/dexy
|
0f2090250040c3c54c8481a16de8e476b559e87c
|
[
"MIT"
] | 34
|
2015-01-02T16:24:53.000Z
|
2021-11-27T05:38:30.000Z
|
from dexy.doc import Doc
from tests.utils import wrap
from dexy.wrapper import Wrapper
def test_yamlargs_with_caching():
    """First run finishes in state 'ran'; re-runs on fresh Wrapper objects
    within the same working dir hit the cache and end 'consolidated'."""
    contents = "title: My Title\n---\r\nThis is the content."

    def run_and_check(wrapper, expected_state):
        # Build and run the doc, then inspect the resulting task node.
        doc = Doc("example.txt|yamlargs", wrapper, [], contents=contents)
        wrapper.run_docs(doc)
        task = wrapper.nodes["doc:example.txt|yamlargs"]
        assert task.output_data().title() == "My Title"
        assert task.state == expected_state

    with wrap() as wrapper:
        run_and_check(wrapper, 'ran')
        run_and_check(Wrapper(), 'consolidated')
        run_and_check(Wrapper(), 'consolidated')
def test_yamlargs_no_yaml():
    """Documents without a YAML header pass through unchanged."""
    body = "This is the content."
    with wrap() as wrapper:
        doc = Doc("example.txt|yamlargs", wrapper, [], contents=body)
        wrapper.run_docs(doc)
        assert doc.output_data().as_text() == body
def test_yamlargs():
    """A YAML header sets document metadata and is stripped from the output."""
    with wrap() as wrapper:
        doc = Doc(
            "example.txt|yamlargs",
            wrapper,
            [],
            contents="title: My Title\n---\r\nThis is the content.",
        )
        wrapper.run_docs(doc)
        data = doc.output_data()
        assert data.title() == "My Title"
        assert data.as_text() == "This is the content."
YAML = """filterargs:
abc: xyz
foo: 5
"""
def test_yamlargs_filterargs():
    """Filter args from the YAML header survive caching across a fresh Wrapper."""
    contents = "%s\n---\r\nThis is the content." % YAML

    def run_and_check(wrapper):
        doc = Doc("example.txt|yamlargs|filterargs", wrapper, [], contents=contents)
        wrapper.run_docs(doc)
        output = doc.output_data().as_text()
        assert "abc: xyz" in output
        assert "foo: 5" in output

    with wrap() as wrapper:
        run_and_check(wrapper)
        run_and_check(Wrapper())
| 29.88172
| 73
| 0.517812
| 308
| 2,779
| 4.581169
| 0.149351
| 0.070872
| 0.092133
| 0.148831
| 0.865344
| 0.865344
| 0.856839
| 0.851878
| 0.851878
| 0.844791
| 0
| 0.00167
| 0.353724
| 2,779
| 92
| 74
| 30.206522
| 0.783964
| 0
| 0
| 0.74026
| 0
| 0
| 0.234257
| 0.048219
| 0
| 0
| 0
| 0
| 0.168831
| 1
| 0.051948
| false
| 0
| 0.038961
| 0
| 0.090909
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
42aaadb86dd6cfa32855f6266beb23ac68c0f1e4
| 16,538
|
py
|
Python
|
sinergym/__init__.py
|
AlejandroCN7/sinergym
|
4e89e478b5c939323e7ddf6a6ecf25a9a13251c6
|
[
"MIT"
] | null | null | null |
sinergym/__init__.py
|
AlejandroCN7/sinergym
|
4e89e478b5c939323e7ddf6a6ecf25a9a13251c6
|
[
"MIT"
] | null | null | null |
sinergym/__init__.py
|
AlejandroCN7/sinergym
|
4e89e478b5c939323e7ddf6a6ecf25a9a13251c6
|
[
"MIT"
] | null | null | null |
import os
from gym.envs.registration import register
from sinergym.utils.rewards import LinearReward
# Set __version__ in module
# Read the package version string from version.txt next to this file.
version_file = os.path.join(os.path.dirname(__file__), "version.txt")
with open(version_file, "r") as file_handler:
    __version__ = file_handler.read().strip()
#========================5ZoneAutoDXVAV========================#
# 0) Demo environment (no discrete_actions key, unlike the numbered variants)
register(
    id='Eplus-demo-v1',
    entry_point='sinergym.envs:EplusEnv',
    kwargs={
        'idf_file': '5ZoneAutoDXVAV.idf',
        'weather_file': 'USA_PA_Pittsburgh-Allegheny.County.AP.725205_TMY3.epw',
        'variables_file': 'variablesDXVAV.cfg',
        'spaces_file': '5ZoneAutoDXVAV_spaces.cfg',
        'reward': LinearReward(),
        'env_name': 'demo-v1'})

# Weather files keyed by the climate tag used in the environment ids.
_WEATHER_FILES = {
    'hot': 'USA_AZ_Davis-Monthan.AFB.722745_TMY3.epw',
    'mixed': 'USA_NY_New.York-J.F.Kennedy.Intl.AP.744860_TMY3.epw',
    'cool': 'USA_WA_Port.Angeles-William.R.Fairchild.Intl.AP.727885_TMY3.epw',
}


def _register_variant(building_tag, idf_file, variables_file, spaces_file,
                      climate, discrete, stochastic):
    """Register one EplusEnv variant following the Eplus-<name>-v1 id scheme."""
    env_name = '%s-%s-%s%s-v1' % (
        building_tag,
        climate,
        'discrete' if discrete else 'continuous',
        '-stochastic' if stochastic else '')
    kwargs = {
        'idf_file': idf_file,
        'weather_file': _WEATHER_FILES[climate],
        'variables_file': variables_file,
        'spaces_file': spaces_file,
        'discrete_actions': discrete,
        'reward': LinearReward(),
        'env_name': env_name}
    if stochastic:
        # Every stochastic variant shares the same weather variability triple.
        kwargs['weather_variability'] = (1.0, 0.0, 0.001)
    register(
        id='Eplus-' + env_name,
        entry_point='sinergym.envs:EplusEnv',
        kwargs=kwargs)


# 1-12) 5-zone building: every climate x action space x weather variability.
for _discrete in (True, False):
    for _stochastic in (False, True):
        for _climate in ('hot', 'mixed', 'cool'):
            _register_variant('5Zone', '5ZoneAutoDXVAV.idf',
                              'variablesDXVAV.cfg', '5ZoneAutoDXVAV_spaces.cfg',
                              _climate, _discrete, _stochastic)

#========================DATACENTER========================#
# 13-20) Data center: hot and mixed climates only.
for _climate in ('hot', 'mixed'):
    for _stochastic in (False, True):
        for _discrete in (True, False):
            _register_variant('datacenter',
                              '2ZoneDataCenterHVAC_wEconomizer.idf',
                              'variablesDataCenter.cfg',
                              '2ZoneDataCenterHVAC_wEconomizer_spaces.cfg',
                              _climate, _discrete, _stochastic)

#========================MULLION========================#
# 21-28) IW Mullion: mixed and cool climates only.
for _climate in ('mixed', 'cool'):
    for _discrete in (True, False):
        for _stochastic in (False, True):
            _register_variant('IWMullion', 'IW_Mullion.idf',
                              'variablesIW.cfg', 'IW_Mullion_spaces.cfg',
                              _climate, _discrete, _stochastic)
| 36.588496
| 90
| 0.643246
| 1,865
| 16,538
| 5.500268
| 0.06756
| 0.061415
| 0.042406
| 0.056541
| 0.959056
| 0.939559
| 0.93371
| 0.83564
| 0.83564
| 0.750049
| 0
| 0.037959
| 0.203531
| 16,538
| 451
| 91
| 36.669623
| 0.740814
| 0.096324
| 0
| 0.789063
| 0
| 0
| 0.569694
| 0.357929
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.007813
| 0
| 0.007813
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c4452e5ac2d94cb18455267a62ca3a93daa6881f
| 69,491
|
py
|
Python
|
kernel/linear.py
|
pan185/UnarySim
|
c03386efdbb8151f3c33f34b44d1d6a6fc960434
|
[
"MIT"
] | 1
|
2021-11-29T23:51:15.000Z
|
2021-11-29T23:51:15.000Z
|
kernel/linear.py
|
pan185/UnarySim
|
c03386efdbb8151f3c33f34b44d1d6a6fc960434
|
[
"MIT"
] | null | null | null |
kernel/linear.py
|
pan185/UnarySim
|
c03386efdbb8151f3c33f34b44d1d6a6fc960434
|
[
"MIT"
] | null | null | null |
import torch
import math
from UnarySim.stream.gen import RNG, RNGMulti, SourceGen, BSGen, BSGenMulti
from torch.cuda.amp import autocast
from UnarySim.kernel.add import FSUAdd
class FSULinear(torch.nn.Module):
    """
    Fully connected layer for unary (bit-stream) computing.

    The API mirrors ``torch.nn.Linear`` (input/output feature count, bias
    flag), with additional configuration for:
    1) accumulation mode
    2) unary data mode
    3) binary data width
    4) binary weight
    5) binary bias

    Internally it chains a parallel-counter stage (``FSULinearPC``) with an
    unary accumulator (``FSUAdd``).
    """
    def __init__(self,
                 in_features,
                 out_features,
                 bias=True,
                 binary_weight=None,
                 binary_bias=None,
                 bitwidth=8,
                 mode="bipolar",
                 scaled=True,
                 scale=None,
                 depth=12,
                 btype=torch.float,
                 rtype=torch.float,
                 stype=torch.float):
        super(FSULinear, self).__init__()
        self.stype = stype
        # Stage 1: per-cycle parallel counter over input/weight bit products.
        self.PC = FSULinearPC(in_features,
                              out_features,
                              bias=bias,
                              binary_weight=binary_weight,
                              binary_bias=binary_bias,
                              bitwidth=bitwidth,
                              mode=mode,
                              btype=btype,
                              rtype=rtype,
                              stype=stype)
        # Accumulator scale: an explicit `scale` wins; otherwise use the fan-in,
        # which is in_features plus one extra entry when bias is present
        # (bool `bias` arithmetically contributes 0 or 1). Unscaled mode uses 1.0.
        if scaled is True:
            acc_scale = in_features + bias if scale is None else scale
        else:
            acc_scale = 1.0
        # Stage 2: unary addition/accumulation across the fan-in.
        self.ACC = FSUAdd(mode=mode,
                          scaled=scaled,
                          scale=acc_scale,
                          dim=0,
                          depth=depth,
                          entry=in_features + bias,
                          stype=stype)

    @autocast()
    def forward(self, input, scale=None, entry=None):
        """Run one unary cycle: parallel-count, then accumulate along dim 0."""
        counted = self.PC(input)
        accumulated = self.ACC(counted.unsqueeze(0), scale, entry)
        return accumulated.type(self.stype)
class FSULinearPC(torch.nn.Linear):
    """
    This module is the parallel counter result of FSULinear before generating the bitstreams.

    Weights/bias are stored as binary sources; each forward call emits one
    bit-plane of the weights (via BSGen indexed by per-element RNG counters)
    and matmuls it with the current input bits.
    """
    def __init__(self,
                 in_features,
                 out_features,
                 bias=True,
                 binary_weight=None,
                 binary_bias=None,
                 bitwidth=8,
                 mode="bipolar",
                 btype=torch.float,
                 rtype=torch.float,
                 stype=torch.float):
        super(FSULinearPC, self).__init__(in_features, out_features, bias=bias)
        self.stype = stype
        self.btype = btype
        self.rtype = rtype
        self.mode = mode
        # bias indication for original linear layer
        self.has_bias = bias
        # data bit width
        self.bitwidth = bitwidth
        # random_sequence from sobol RNG
        self.rng = RNG(self.bitwidth, 1, "Sobol")()
        # define the linear weight and bias
        if binary_weight is not None:
            assert (binary_weight.size()[0], binary_weight.size()[1]) == (out_features, in_features), "Incorrect weight shape."
            self.weight.data = SourceGen(binary_weight, bitwidth=self.bitwidth, mode=mode, rtype=rtype)()
        if bias and (binary_bias is not None):
            assert binary_bias.size()[0] == out_features, "Incorrect bias shape."
            self.bias.data = SourceGen(binary_bias, bitwidth=self.bitwidth, mode=mode, rtype=rtype)()
        # define the kernel linear
        self.weight_bsg = BSGen(self.weight, self.rng, stype=stype)
        # Per-weight RNG index, advanced by the input bits each cycle.
        # NOTE(review): .unsqueeze(0) on a Parameter returns a plain tensor,
        # so this state is NOT registered with the module — confirm intended.
        self.weight_rng_idx = torch.nn.Parameter(torch.zeros_like(self.weight, dtype=torch.long), requires_grad=False).unsqueeze(0)
        if self.has_bias is True:
            self.bias_bsg = BSGen(self.bias, self.rng, stype=stype)
            self.bias_rng_idx = torch.nn.Parameter(torch.zeros_like(self.bias, dtype=torch.long), requires_grad=False)
        # if bipolar, define a kernel with inverse input, note that there is no bias required for this inverse kernel
        if self.mode == "bipolar":
            self.weight_bsg_inv = BSGen(self.weight, self.rng, stype=stype)
            self.weight_rng_idx_inv = torch.nn.Parameter(torch.zeros_like(self.weight, dtype=torch.long), requires_grad=False).unsqueeze(0)

    def FSULinear_PC(self, input):
        """One cycle of the parallel counter: input bits x current weight bits."""
        # first dim should always be batch
        batch = input.size()[0]
        # generate weight and bias bits for current cycle
        weight_bs = self.weight_bsg(self.weight_rng_idx).type(torch.float)
        if weight_bs.size()[0] != batch:
            # lazily replicate the weight bits and RNG indices per batch entry
            weight_bs = torch.cat(batch*[weight_bs], 0)
            self.weight_rng_idx = torch.cat(batch*[self.weight_rng_idx], 0)
        # advance each weight's RNG index by the corresponding input bit (0/1)
        torch.add(self.weight_rng_idx, input.unsqueeze(1).type(torch.long), out=self.weight_rng_idx)
        kernel_out = torch.empty(0, device=input.device)
        # batched (1, in) x (in, out) product of bit planes
        torch.matmul(input.unsqueeze(1).type(torch.float), weight_bs.transpose(1, 2), out=kernel_out)
        kernel_out.squeeze_(1)
        if self.has_bias is True:
            # bias contributes one extra bit stream per output feature
            bias_bs = self.bias_bsg(self.bias_rng_idx).type(torch.float)
            self.bias_rng_idx.add_(1)
            kernel_out += bias_bs.unsqueeze(0).expand_as(kernel_out)
        if self.mode == "unipolar":
            return kernel_out
        if self.mode == "bipolar":
            # bipolar also counts the complemented input against complemented
            # weight bits; no bias term on this inverse path
            # generate weight and bias bits for current cycle
            weight_bs_inv = 1 - self.weight_bsg_inv(self.weight_rng_idx_inv).type(torch.float)
            if weight_bs_inv.size()[0] != batch:
                weight_bs_inv = torch.cat(batch*[weight_bs_inv], 0)
                self.weight_rng_idx_inv = torch.cat(batch*[self.weight_rng_idx_inv], 0)
            # advance the inverse indices by the complemented input bits
            torch.add(self.weight_rng_idx_inv, 1 - input.unsqueeze(1).type(torch.long), out=self.weight_rng_idx_inv)
            kernel_out_inv = torch.empty(0, device=input.device)
            torch.matmul(1 - input.unsqueeze(1).type(torch.float), weight_bs_inv.transpose(1, 2), out=kernel_out_inv)
            kernel_out_inv.squeeze_(1)
            return kernel_out + kernel_out_inv

    @autocast()
    def forward(self, input):
        """Return this cycle's parallel-counter output, cast to stype."""
        return self.FSULinear_PC(input).type(self.stype)
class FSULinearuGEMM(torch.nn.Linear):
    """
    This module is the fully connected layer using uGEMM add implementation
    its API is similar to the parent class (input/output feature count, bias flag), except:
    1) accumulation mode
    2) unary data mode
    3) binary data width
    4) binary weight
    5) binary bias

    Unlike FSULinear, the accumulation is done in this module's own counters
    (accumulator/out_accumulator) rather than a separate FSUAdd stage.
    """
    def __init__(self,
                 in_features,
                 out_features,
                 bias=True,
                 binary_weight=None,
                 binary_bias=None,
                 bitwidth=8,
                 mode="bipolar",
                 scaled=True,
                 btype=torch.float,
                 rtype=torch.float,
                 stype=torch.float):
        super(FSULinearuGEMM, self).__init__(in_features, out_features, bias=bias)
        self.in_features = in_features
        self.out_features = out_features
        self.stype = stype
        self.btype = btype
        self.rtype = rtype
        # upper bound for accumulation counter in scaled mode
        # (fan-in, plus 1 when bias is present)
        self.acc_bound = torch.nn.Parameter(torch.zeros(1), requires_grad=False)
        self.acc_bound.add_(in_features)
        if bias is True:
            self.acc_bound.add_(1)
        self.mode = mode
        self.scaled = scaled
        # accumulation offset (only used in unscaled mode); zero for unipolar
        self.offset = torch.nn.Parameter(torch.zeros(1), requires_grad=False)
        if mode == "unipolar":
            pass
        elif mode == "bipolar":
            self.offset.add_((in_features-1)/2)
            if bias is True:
                self.offset.add_(1/2)
        else:
            raise ValueError("FSULinear mode is not implemented.")
        # bias indication for original linear layer
        self.has_bias = bias
        # data bit width
        self.bitwidth = bitwidth
        # random_sequence from sobol RNG
        self.rng = RNG(self.bitwidth, 1, "Sobol")()
        # define the linear weight and bias
        if binary_weight is not None:
            self.weight.data = SourceGen(binary_weight, bitwidth=self.bitwidth, mode=mode, rtype=rtype)()
        if bias and (binary_bias is not None):
            self.bias.data = SourceGen(binary_bias, bitwidth=self.bitwidth, mode=mode, rtype=rtype)()
        # define the kernel linear
        self.weight_bsg = BSGen(self.weight, self.rng, stype=stype)
        # NOTE(review): .unsqueeze(0) on a Parameter yields a plain tensor,
        # so this RNG-index state is not registered with the module.
        self.weight_rng_idx = torch.nn.Parameter(torch.zeros_like(self.weight, dtype=torch.long), requires_grad=False).unsqueeze(0)
        if self.has_bias is True:
            self.bias_bsg = BSGen(self.bias, self.rng, stype=stype)
            self.bias_rng_idx = torch.nn.Parameter(torch.zeros_like(self.bias, dtype=torch.long), requires_grad=False)
        # if bipolar, define a kernel with inverse input, note that there is no bias required for this inverse kernel
        if self.mode == "bipolar":
            self.weight_bsg_inv = BSGen(self.weight, self.rng, stype=stype)
            self.weight_rng_idx_inv = torch.nn.Parameter(torch.zeros_like(self.weight, dtype=torch.long), requires_grad=False).unsqueeze(0)
        # running accumulation counter across forward calls (cycles)
        self.accumulator = torch.nn.Parameter(torch.zeros(1), requires_grad=False)
        if self.scaled is False:
            # counts emitted output bits so far (unscaled comparator state)
            self.out_accumulator = torch.nn.Parameter(torch.zeros(1), requires_grad=False)

    def FSUKernel_accumulation(self, input):
        """One cycle of parallel counting (same scheme as FSULinearPC)."""
        # first dim should always be batch
        batch = input.size()[0]
        # generate weight and bias bits for current cycle
        weight_bs = self.weight_bsg(self.weight_rng_idx).type(torch.float)
        if weight_bs.size()[0] != batch:
            # lazily replicate weight bits and RNG indices per batch entry
            weight_bs = torch.cat(batch*[weight_bs], 0)
            self.weight_rng_idx = torch.cat(batch*[self.weight_rng_idx], 0)
        # advance each weight's RNG index by the corresponding input bit
        torch.add(self.weight_rng_idx, input.unsqueeze(1).type(torch.long), out=self.weight_rng_idx)
        kernel_out = torch.empty(0, device=input.device)
        torch.matmul(input.unsqueeze(1).type(torch.float), weight_bs.transpose(1, 2), out=kernel_out)
        kernel_out.squeeze_(1)
        if self.has_bias is True:
            bias_bs = self.bias_bsg(self.bias_rng_idx).type(torch.float)
            self.bias_rng_idx.add_(1)
            kernel_out += bias_bs.unsqueeze(0).expand_as(kernel_out)
        if self.mode == "unipolar":
            return kernel_out
        if self.mode == "bipolar":
            # complemented-input x complemented-weight path; no bias here
            # generate weight and bias bits for current cycle
            weight_bs_inv = 1 - self.weight_bsg_inv(self.weight_rng_idx_inv).type(torch.float)
            if weight_bs_inv.size()[0] != batch:
                weight_bs_inv = torch.cat(batch*[weight_bs_inv], 0)
                self.weight_rng_idx_inv = torch.cat(batch*[self.weight_rng_idx_inv], 0)
            torch.add(self.weight_rng_idx_inv, 1 - input.unsqueeze(1).type(torch.long), out=self.weight_rng_idx_inv)
            kernel_out_inv = torch.empty(0, device=input.device)
            torch.matmul(1 - input.unsqueeze(1).type(torch.float), weight_bs_inv.transpose(1, 2), out=kernel_out_inv)
            kernel_out_inv.squeeze_(1)
            return kernel_out + kernel_out_inv

    @autocast()
    def forward(self, input):
        """One unary cycle: count, accumulate, and emit the output bit(s)."""
        kernel_out_total = self.FSUKernel_accumulation(input)
        self.accumulator.data = self.accumulator.add(kernel_out_total)
        if self.scaled is True:
            # emit 1 whenever the counter reaches the fan-in bound, then wrap
            output = torch.ge(self.accumulator, self.acc_bound).type(torch.float)
            self.accumulator.sub_(output * self.acc_bound)
        else:
            # unscaled: compare offset-corrected input count to output count
            self.accumulator.sub_(self.offset)
            output = torch.gt(self.accumulator, self.out_accumulator).type(torch.float)
            self.out_accumulator.data = self.out_accumulator.add(output)
        return output.type(self.stype)
class GainesLinear1(torch.nn.Module):
    """
    gMUL + gADD
    this module is the fully connected layer,
    its API is similar to the parent class (input/output feature count, bias flag), except:
    1) accumulation mode
    2) unary data mode
    3) binary data width
    4) binary weight
    5) binary bias
    """
    def __init__(self,
                 in_features,
                 out_features,
                 binary_weight=None,
                 binary_bias=None,
                 bitwidth=8,
                 bias=True,
                 mode="bipolar",
                 scaled=True,
                 depth=8,
                 rng_idx=1):
        super(GainesLinear1, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        # upper bound for accumulation counter in non-scaled mode
        # (fan-in, plus 1 when bias is present)
        self.acc_bound = torch.nn.Parameter(torch.zeros(1), requires_grad=False)
        self.acc_bound.add_(in_features)
        if bias is True:
            self.acc_bound.add_(1)
        self.mode = mode
        self.scaled = scaled
        # accumulation offset; zero for unipolar
        self.offset = torch.nn.Parameter(torch.zeros(1), requires_grad=False)
        if mode == "unipolar":
            pass
        elif mode == "bipolar":
            self.offset.add_((in_features-1)/2)
            if bias is True:
                self.offset.add_(1/2)
        else:
            raise ValueError("GainesLinear1 mode is not implemented.")
        # bias indication for original linear layer
        self.has_bias = bias
        # data bit width
        self.bitwidth = bitwidth
        # random_sequence from sobol RNG
        # gMUL uses an independent RNG per input feature (RNGMulti)
        self.rng = RNGMulti(self.bitwidth, in_features, "Sobol")()
        self.rng_bias = RNG(self.bitwidth, in_features+1, "Sobol")()
        # define the convolution weight and bias
        self.buf_wght = SourceGen(binary_weight, bitwidth=self.bitwidth, mode=mode)()
        if self.has_bias is True:
            self.buf_bias = SourceGen(binary_bias, bitwidth=self.bitwidth, mode=mode)()
        # define the kernel linear
        # the kernel's weights are overwritten with bit planes every cycle
        self.kernel = torch.nn.Linear(self.in_features, self.out_features, bias=self.has_bias)
        self.buf_wght_bs = BSGenMulti(self.buf_wght, self.rng, dim=0)
        self.rng_wght_idx = torch.nn.Parameter(torch.zeros_like(self.kernel.weight, dtype=torch.long), requires_grad=False)
        if self.has_bias is True:
            self.buf_bias_bs = BSGen(self.buf_bias, self.rng_bias)
            self.rng_bias_idx = torch.nn.Parameter(torch.zeros_like(self.kernel.bias, dtype=torch.long), requires_grad=False)
        # if bipolar, define a kernel with inverse input, note that there is no bias required for this inverse kernel
        if self.mode == "bipolar":
            self.kernel_inv = torch.nn.Linear(self.in_features, self.out_features, bias=False)
        # parallel-counter value of the current cycle
        self.parallel_cnt = torch.nn.Parameter(torch.zeros(1, dtype=torch.long), requires_grad=False)
        if self.scaled is True:
            # gADD scaled mode: compare the count against a random threshold
            self.rng_scale = RNG(round(math.log2(self.acc_bound.item())), (rng_idx+5)%1111, "Sobol")()
            self.rng_scale_idx = torch.nn.Parameter(torch.zeros(1, dtype=torch.long), requires_grad=False)
        elif self.scaled is False:
            # non-scaled mode: saturating up/down counter of 2**depth states,
            # initialized at half range
            self.input_cnt = self.acc_bound.item()
            self.max = torch.nn.Parameter(torch.ones(1, dtype=torch.long).fill_(2**depth-1), requires_grad=False)
            self.half_max = torch.nn.Parameter(torch.ones(1, dtype=torch.long).fill_(2**(depth-1)), requires_grad=False)
            self.cnt = torch.nn.Parameter(torch.zeros(1, dtype=torch.long).fill_(2**(depth-1)), requires_grad=False)

    def GainesKernel_accumulation(self, input):
        """One cycle of gMUL: load current bit planes into kernel, then matmul."""
        # generate weight and bias bits for current cycle
        self.kernel.weight.data = self.buf_wght_bs(self.rng_wght_idx).type(torch.float)
        self.rng_wght_idx.add_(1)
        if self.has_bias is True:
            self.kernel.bias.data = self.buf_bias_bs(self.rng_bias_idx).type(torch.float)
            self.rng_bias_idx.add_(1)
        kernel_out = self.kernel(input.type(torch.float))
        if self.mode == "unipolar":
            return kernel_out
        if self.mode == "bipolar":
            # complemented path: (1 - weight bits) applied to (1 - input bits)
            self.kernel_inv.weight.data = 1 - self.kernel.weight.data
            kernel_out_inv = self.kernel_inv(1 - input.type(torch.float))
            return kernel_out + kernel_out_inv

    def forward(self, input):
        """One unary cycle: parallel count, then gADD to produce output bits."""
        self.parallel_cnt.data = self.GainesKernel_accumulation(input).type(torch.long)
        if self.scaled is True:
            # stochastic scaling: compare count to a rotating random threshold
            output = torch.ge(self.parallel_cnt.data, self.rng_scale[self.rng_scale_idx%len(self.rng_scale)])
            self.rng_scale_idx.add_(1)
        else:
            if self.mode == "unipolar":
                output = torch.gt(self.parallel_cnt, 0)
            elif self.mode == "bipolar":
                # map count to signed delta, update saturating counter,
                # emit 1 while the counter is above half range
                self.parallel_cnt.mul_(2).sub_(self.input_cnt)
                self.cnt.data = self.cnt.add(self.parallel_cnt).clamp(0, self.max.item())
                output = torch.gt(self.cnt, self.half_max)
        return output.type(torch.int8)
class GainesLinear2(torch.nn.Module):
    """
    gMUL + uADD
    this module is the fully connected layer,
    its API is similar to the parent class (input/output feature count, bias flag), except:
    1) accumulation mode
    2) unary data mode
    3) binary data width
    4) binary weight
    5) binary bias

    Multiplication follows the Gaines scheme (per-feature RNGs loaded into a
    torch.nn.Linear kernel each cycle); accumulation uses the unary counter
    scheme (accumulator/out_accumulator), as in FSULinearuGEMM.
    """
    def __init__(self,
                 in_features,
                 out_features,
                 binary_weight=None,
                 binary_bias=None,
                 bitwidth=8,
                 bias=True,
                 mode="bipolar",
                 scaled=True,
                 depth=8,
                 rng_idx=1):
        super(GainesLinear2, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        # upper bound for accumulation counter in non-scaled mode
        # (fan-in, plus 1 when bias is present)
        self.acc_bound = torch.nn.Parameter(torch.zeros(1), requires_grad=False)
        self.acc_bound.add_(in_features)
        if bias is True:
            self.acc_bound.add_(1)
        self.mode = mode
        self.scaled = scaled
        # accumulation offset; zero for unipolar
        self.offset = torch.nn.Parameter(torch.zeros(1), requires_grad=False)
        if mode == "unipolar":
            pass
        elif mode == "bipolar":
            self.offset.add_((in_features-1)/2)
            if bias is True:
                self.offset.add_(1/2)
        else:
            raise ValueError("GainesLinear2 mode is not implemented.")
        # bias indication for original linear layer
        self.has_bias = bias
        # data bit width
        self.bitwidth = bitwidth
        # random_sequence from sobol RNG
        # gMUL uses an independent RNG per input feature (RNGMulti)
        self.rng = RNGMulti(self.bitwidth, in_features, "Sobol")()
        self.rng_bias = RNG(self.bitwidth, in_features+1, "Sobol")()
        # define the convolution weight and bias
        self.buf_wght = SourceGen(binary_weight, bitwidth=self.bitwidth, mode=mode)()
        if self.has_bias is True:
            self.buf_bias = SourceGen(binary_bias, bitwidth=self.bitwidth, mode=mode)()
        # define the kernel linear
        # the kernel's weights are overwritten with bit planes every cycle
        self.kernel = torch.nn.Linear(self.in_features, self.out_features, bias=self.has_bias)
        self.buf_wght_bs = BSGenMulti(self.buf_wght, self.rng, dim=0)
        self.rng_wght_idx = torch.nn.Parameter(torch.zeros_like(self.kernel.weight, dtype=torch.long), requires_grad=False)
        if self.has_bias is True:
            self.buf_bias_bs = BSGen(self.buf_bias, self.rng_bias)
            self.rng_bias_idx = torch.nn.Parameter(torch.zeros_like(self.kernel.bias, dtype=torch.long), requires_grad=False)
        # if bipolar, define a kernel with inverse input, note that there is no bias required for this inverse kernel
        if self.mode == "bipolar":
            self.kernel_inv = torch.nn.Linear(self.in_features, self.out_features, bias=False)
        # running accumulation counter across forward calls (cycles)
        self.accumulator = torch.nn.Parameter(torch.zeros(1), requires_grad=False)
        if self.scaled is False:
            # counts emitted output bits so far (unscaled comparator state)
            self.out_accumulator = torch.nn.Parameter(torch.zeros(1), requires_grad=False)

    def GainesKernel_accumulation(self, input):
        """One cycle of gMUL: load current bit planes into kernel, then matmul."""
        # generate weight and bias bits for current cycle
        self.kernel.weight.data = self.buf_wght_bs(self.rng_wght_idx).type(torch.float)
        self.rng_wght_idx.add_(1)
        if self.has_bias is True:
            self.kernel.bias.data = self.buf_bias_bs(self.rng_bias_idx).type(torch.float)
            self.rng_bias_idx.add_(1)
        kernel_out = self.kernel(input.type(torch.float))
        if self.mode == "unipolar":
            return kernel_out
        if self.mode == "bipolar":
            # complemented path: (1 - weight bits) applied to (1 - input bits)
            self.kernel_inv.weight.data = 1 - self.kernel.weight.data
            kernel_out_inv = self.kernel_inv(1 - input.type(torch.float))
            return kernel_out + kernel_out_inv

    def forward(self, input):
        """One unary cycle: count, accumulate (uADD), and emit output bits."""
        # Accumulate this cycle's parallel-counter output. This statement was
        # previously duplicated verbatim in both branches below; hoisting it is
        # behavior-identical and removes the duplication.
        self.accumulator.data = self.accumulator.add(self.GainesKernel_accumulation(input))
        if self.scaled is True:
            # emit 1 whenever the counter reaches the fan-in bound, then wrap
            output = torch.ge(self.accumulator, self.acc_bound).type(torch.float)
            self.accumulator.sub_(output * self.acc_bound)
        else:
            # unscaled: compare offset-corrected input count to output count
            self.accumulator.sub_(self.offset)
            output = torch.gt(self.accumulator, self.out_accumulator).type(torch.float)
            self.out_accumulator.data = self.out_accumulator.add(output)
        return output.type(torch.int8)
class GainesLinear3(torch.nn.Module):
    """
    uMUL + gADD: this version will not work well, due to same rng is used in uMUL, the accumulation
    will be inaccurate.
    this module is the fully connected layer,
    its API is similar to the parent class (input/output feature count, bias flag), except:
    1) accumulation mode
    2) unary data mode
    3) binary data width
    4) binary weight
    5) binary bias
    """
    def __init__(self,
                 in_features,
                 out_features,
                 binary_weight=None,
                 binary_bias=None,
                 bitwidth=8,
                 bias=True,
                 mode="bipolar",
                 scaled=True,
                 depth=8,
                 rng_idx=1):
        super(GainesLinear3, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        # upper bound for accumulation counter in non-scaled mode
        # (fan-in, plus 1 when bias is present)
        self.acc_bound = torch.nn.Parameter(torch.zeros(1), requires_grad=False)
        self.acc_bound.add_(in_features)
        if bias is True:
            self.acc_bound.add_(1)
        self.mode = mode
        self.scaled = scaled
        # accumulation offset; zero for unipolar
        self.offset = torch.nn.Parameter(torch.zeros(1), requires_grad=False)
        if mode == "unipolar":
            pass
        elif mode == "bipolar":
            self.offset.add_((in_features-1)/2)
            if bias is True:
                self.offset.add_(1/2)
        else:
            raise ValueError("GainesLinear3 mode is not implemented.")
        # bias indication for original linear layer
        self.has_bias = bias
        # data bit width
        self.bitwidth = bitwidth
        # random_sequence from sobol RNG
        # uMUL: a single shared RNG (the source of the documented inaccuracy)
        self.rng = RNG(self.bitwidth, 1, "Sobol")()
        # define the convolution weight and bias
        self.buf_wght = SourceGen(binary_weight, bitwidth=self.bitwidth, mode=mode)()
        if self.has_bias is True:
            self.buf_bias = SourceGen(binary_bias, bitwidth=self.bitwidth, mode=mode)()
        # define the kernel linear
        # the kernel's weights are overwritten with bit planes every cycle
        self.kernel = torch.nn.Linear(self.in_features, self.out_features, bias=self.has_bias)
        self.buf_wght_bs = BSGen(self.buf_wght, self.rng)
        self.rng_wght_idx = torch.nn.Parameter(torch.zeros_like(self.kernel.weight, dtype=torch.long), requires_grad=False)
        if self.has_bias is True:
            self.buf_bias_bs = BSGen(self.buf_bias, self.rng)
            self.rng_bias_idx = torch.nn.Parameter(torch.zeros_like(self.kernel.bias, dtype=torch.long), requires_grad=False)
        # if bipolar, define a kernel with inverse input, note that there is no bias required for this inverse kernel
        if self.mode == "bipolar":
            self.kernel_inv = torch.nn.Linear(self.in_features, self.out_features, bias=False)
            self.buf_wght_bs_inv = BSGen(self.buf_wght, self.rng)
            self.rng_wght_idx_inv = torch.nn.Parameter(torch.zeros_like(self.kernel_inv.weight, dtype=torch.long), requires_grad=False)
        # parallel-counter value of the current cycle
        self.parallel_cnt = torch.nn.Parameter(torch.zeros(1, dtype=torch.long), requires_grad=False)
        if self.scaled is True:
            # gADD scaled mode: compare the count against a random threshold
            self.rng_scale = RNG(round(math.log2(self.acc_bound.item())), (rng_idx+5)%1111, "Sobol")()
            self.rng_scale_idx = torch.nn.Parameter(torch.zeros(1, dtype=torch.long), requires_grad=False)
        elif self.scaled is False:
            # non-scaled mode: saturating up/down counter of 2**depth states,
            # initialized at half range
            self.input_cnt = self.acc_bound.item()
            self.max = torch.nn.Parameter(torch.ones(1, dtype=torch.long).fill_(2**depth-1), requires_grad=False)
            self.half_max = torch.nn.Parameter(torch.ones(1, dtype=torch.long).fill_(2**(depth-1)), requires_grad=False)
            self.cnt = torch.nn.Parameter(torch.zeros(1, dtype=torch.long).fill_(2**(depth-1)), requires_grad=False)

    def GainesKernel_accumulation(self, input):
        """One cycle of uMUL: weight RNG indices advance with the input bits."""
        # generate weight and bias bits for current cycle
        self.kernel.weight.data = self.buf_wght_bs(self.rng_wght_idx).type(torch.float)
        # uMUL: advance each weight's index by its input bit (0/1)
        self.rng_wght_idx.add_(input.type(torch.long))
        if self.has_bias is True:
            self.kernel.bias.data = self.buf_bias_bs(self.rng_bias_idx).type(torch.float)
            self.rng_bias_idx.add_(1)
        kernel_out = self.kernel(input.type(torch.float))
        if self.mode == "unipolar":
            return kernel_out
        if self.mode == "bipolar":
            # complemented path; inverse indices advance with complemented bits
            self.kernel_inv.weight.data = 1 - self.buf_wght_bs_inv(self.rng_wght_idx_inv).type(torch.float)
            self.rng_wght_idx_inv.add_(1 - input.type(torch.long))
            kernel_out_inv = self.kernel_inv(1 - input.type(torch.float))
            return kernel_out + kernel_out_inv

    def forward(self, input):
        """One unary cycle: parallel count, then gADD to produce output bits."""
        self.parallel_cnt.data = self.GainesKernel_accumulation(input).type(torch.long)
        if self.scaled is True:
            # stochastic scaling: compare count to a rotating random threshold
            output = torch.ge(self.parallel_cnt.data, self.rng_scale[self.rng_scale_idx%len(self.rng_scale)])
            self.rng_scale_idx.add_(1)
        else:
            if self.mode == "unipolar":
                output = torch.gt(self.parallel_cnt, 0)
            elif self.mode == "bipolar":
                # map count to signed delta, update saturating counter,
                # emit 1 while the counter is above half range
                self.parallel_cnt.mul_(2).sub_(self.input_cnt)
                self.cnt.data = self.cnt.add(self.parallel_cnt).clamp(0, self.max.item())
                output = torch.gt(self.cnt, self.half_max)
        return output.type(torch.int8)
class GainesLinear4(torch.nn.Module):
    """
    gMUL + gADD,
    this module is the same as GainesLinear1, except the rng is lfsr
    this module is the fully connected layer,
    its API is similar to the parent class (input/output feature count, bias flag), except:
    1) accumulation mode
    2) unary data mode
    3) binary data width
    4) binary weight
    5) binary bias
    """
    def __init__(self,
                 in_features,
                 out_features,
                 binary_weight=None,
                 binary_bias=None,
                 bitwidth=8,
                 bias=True,
                 mode="bipolar",
                 scaled=True,
                 depth=8,
                 rng_idx=1):
        super(GainesLinear4, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        # upper bound for accumulation counter in non-scaled mode
        # (fan-in, plus 1 when bias is present)
        self.acc_bound = torch.nn.Parameter(torch.zeros(1), requires_grad=False)
        self.acc_bound.add_(in_features)
        if bias is True:
            self.acc_bound.add_(1)
        self.mode = mode
        self.scaled = scaled
        # accumulation offset; zero for unipolar
        self.offset = torch.nn.Parameter(torch.zeros(1), requires_grad=False)
        if mode == "unipolar":
            pass
        elif mode == "bipolar":
            self.offset.add_((in_features-1)/2)
            if bias is True:
                self.offset.add_(1/2)
        else:
            raise ValueError("GainesLinear4 mode is not implemented.")
        # bias indication for original linear layer
        self.has_bias = bias
        # data bit width
        self.bitwidth = bitwidth
        # random_sequence from sobol RNG
        # (comment predates the change: this variant actually uses LFSR RNGs)
        self.rng = RNGMulti(self.bitwidth, in_features, "LFSR")()
        self.rng_bias = RNG(self.bitwidth, in_features+1, "LFSR")()
        # define the convolution weight and bias
        self.buf_wght = SourceGen(binary_weight, bitwidth=self.bitwidth, mode=mode)()
        if self.has_bias is True:
            self.buf_bias = SourceGen(binary_bias, bitwidth=self.bitwidth, mode=mode)()
        # define the kernel linear
        # the kernel's weights are overwritten with bit planes every cycle
        self.kernel = torch.nn.Linear(self.in_features, self.out_features, bias=self.has_bias)
        self.buf_wght_bs = BSGenMulti(self.buf_wght, self.rng, dim=0)
        self.rng_wght_idx = torch.nn.Parameter(torch.zeros_like(self.kernel.weight, dtype=torch.long), requires_grad=False)
        if self.has_bias is True:
            self.buf_bias_bs = BSGen(self.buf_bias, self.rng_bias)
            self.rng_bias_idx = torch.nn.Parameter(torch.zeros_like(self.kernel.bias, dtype=torch.long), requires_grad=False)
        # if bipolar, define a kernel with inverse input, note that there is no bias required for this inverse kernel
        if self.mode == "bipolar":
            self.kernel_inv = torch.nn.Linear(self.in_features, self.out_features, bias=False)
        # parallel-counter value of the current cycle
        self.parallel_cnt = torch.nn.Parameter(torch.zeros(1, dtype=torch.long), requires_grad=False)
        if self.scaled is True:
            # gADD scaled mode: compare the count against a random threshold
            self.rng_scale = RNG(round(math.log2(self.acc_bound.item())), (rng_idx+5)%1111, "LFSR")()
            self.rng_scale_idx = torch.nn.Parameter(torch.zeros(1, dtype=torch.long), requires_grad=False)
        elif self.scaled is False:
            # non-scaled mode: saturating up/down counter of 2**depth states,
            # initialized at half range
            self.input_cnt = self.acc_bound.item()
            self.max = torch.nn.Parameter(torch.ones(1, dtype=torch.long).fill_(2**depth-1), requires_grad=False)
            self.half_max = torch.nn.Parameter(torch.ones(1, dtype=torch.long).fill_(2**(depth-1)), requires_grad=False)
            self.cnt = torch.nn.Parameter(torch.zeros(1, dtype=torch.long).fill_(2**(depth-1)), requires_grad=False)

    def GainesKernel_accumulation(self, input):
        """One cycle of gMUL: load current bit planes into kernel, then matmul."""
        # generate weight and bias bits for current cycle
        self.kernel.weight.data = self.buf_wght_bs(self.rng_wght_idx).type(torch.float)
        self.rng_wght_idx.add_(1)
        if self.has_bias is True:
            self.kernel.bias.data = self.buf_bias_bs(self.rng_bias_idx).type(torch.float)
            self.rng_bias_idx.add_(1)
        kernel_out = self.kernel(input.type(torch.float))
        if self.mode == "unipolar":
            return kernel_out
        if self.mode == "bipolar":
            # complemented path: (1 - weight bits) applied to (1 - input bits)
            self.kernel_inv.weight.data = 1 - self.kernel.weight.data
            kernel_out_inv = self.kernel_inv(1 - input.type(torch.float))
            return kernel_out + kernel_out_inv

    def forward(self, input):
        """One unary cycle: parallel count, then gADD to produce output bits."""
        self.parallel_cnt.data = self.GainesKernel_accumulation(input).type(torch.long)
        if self.scaled is True:
            # stochastic scaling: compare count to a rotating random threshold
            output = torch.ge(self.parallel_cnt.data, self.rng_scale[self.rng_scale_idx%len(self.rng_scale)])
            self.rng_scale_idx.add_(1)
        else:
            if self.mode == "unipolar":
                output = torch.gt(self.parallel_cnt, 0)
            elif self.mode == "bipolar":
                # map count to signed delta, update saturating counter,
                # emit 1 while the counter is above half range
                self.parallel_cnt.mul_(2).sub_(self.input_cnt)
                self.cnt.data = self.cnt.add(self.parallel_cnt).clamp(0, self.max.item())
                output = torch.gt(self.cnt, self.half_max)
        return output.type(torch.int8)
# the commented FSULinearSA and FSULinearSAFunction are cycle accurate implementations
# class FSULinearSA(torch.nn.Linear):
# """
# this module is the fully connected layer, with binary input and binary output
# its API is similar to the parent class (input/output feature count, bias flag), except:
# 1) binary data scale factor
# 2) binary weight
# 3) binary bias
# 4) mac cycle
# """
# def __init__(self,
# in_features,
# out_features,
# bias=True,
# binary_weight=None,
# binary_bias=None,
# input_format=(1, 3, 4),
# weight_format=(1, 3, 4),
# cycle=128):
# super(FSULinearSA, self).__init__(in_features, out_features, bias)
# # weight and bias
# if binary_weight is not None:
# self.weight.data = binary_weight
# if bias and (binary_bias is not None):
# self.bias.data = binary_bias
# # input format
# self.input_format = input_format
# # weight format
# self.weight_format = weight_format
# # mac computing cycle
# self.cycle = min(cycle, 2**(input_format[1] + input_format[2]), 2**(weight_format[1] + weight_format[2]))
# # bitwidth of rng
# self.bitwidth = (self.cycle - 1).bit_length()
# assert cycle == 2**self.bitwidth, "Input cycle count is not power of 2."
# # random_sequence from sobol RNG
# self.rng = RNG(self.bitwidth, 1, "Sobol")()
# @autocast()
# def forward(self, input):
# # See the autograd section for explanation of what happens here.
# return FSULinearSAFunction.apply(input, self.weight, self.bias, self.input_format, self.weight_format, self.cycle, self.bitwidth, self.rng)
# # Inherit from Function
# class FSULinearSAFunction(torch.autograd.Function):
# # Note that both forward and backward are @staticmethods
# @staticmethod
# # bias is an optional argument
# def forward(ctx, input, weight, bias=None,
# input_format=(1, 3, 4),
# weight_format=(1, 3, 4),
# cycle=128,
# bitwidth=7,
# rng=None):
# ctx.save_for_backward(input, weight, bias)
# # first dim should always be batch
# batch = input.size()[0]
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # input bsg prepare
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # scale input to range [0, 1]
# scaled_abs_input = torch.zeros(0, device=input.device)
# torch.abs((input >> input_format[1]), out=scaled_abs_input)
# # generate src, valued 0~2^bitwidth-1
# buf_input = torch.zeros(0, device=input.device)
# buf_input = scaled_abs_input << bitwidth
# buf_input.unsqueeze_(1)
# # rng index
# rng_input_idx = torch.zeros(1, dtype=torch.long, device=input.device)
# # sign for accumulation
# sign_input = torch.zeros(0, device=input.device)
# torch.sign(input, out=sign_input)
# sign_input.unsqueeze_(1)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # weight bsg prepare
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # scale weight to range [0, 1]
# scaled_abs_wght = torch.zeros(0, device=weight.device)
# torch.abs((weight >> weight_format[1]), out=scaled_abs_wght)
# # generate src with batch, valued 0~2^bitwidth-1
# buf_wght_no_batch = torch.zeros(0, device=weight.device)
# buf_wght_no_batch = scaled_abs_wght << bitwidth
# buf_wght_no_batch.unsqueeze_(0)
# buf_wght = torch.zeros(0, device=weight.device)
# torch.cat(batch*[buf_wght_no_batch], 0, out=buf_wght)
# # rng index
# rng_wght_idx = torch.zeros(0, device=weight.device)
# torch.zeros(buf_wght.size(), out=rng_wght_idx, device=weight.device)
# # sign for accumulation
# sign_wght_no_batch = torch.zeros(0, device=weight.device)
# torch.sign(weight, out=sign_wght_no_batch)
# sign_wght_no_batch.unsqueeze_(0)
# sign_wght = torch.zeros(0, device=weight.device)
# torch.cat(batch*[sign_wght_no_batch], 0, out=sign_wght)
# mm_out = torch.zeros(0, device=input.device)
# output = torch.zeros(input.matmul(weight.t()).size(), device=input.device).unsqueeze_(1)
# input_b_unsign = torch.zeros(0, device=input.device)
# input_b = torch.zeros(0, device=input.device)
# wght_b_unsign = torch.zeros(0, device=weight.device)
# wght_b = torch.zeros(0, device=weight.device)
# wght_rand = torch.zeros(0, device=weight.device)
# for c in range(cycle):
# rng_input_idx.fill_(c)
# torch.gt(buf_input, rng[rng_input_idx], out=input_b_unsign)
# torch.mul(input_b_unsign.type(torch.float), sign_input, out=input_b)
# torch.gt(buf_wght, rng[rng_wght_idx.type(torch.long)], out=wght_b_unsign)
# torch.mul(wght_b_unsign.type(torch.float), sign_wght, out=wght_b)
# torch.add(rng_wght_idx, input_b_unsign.type(torch.float), out=rng_wght_idx)
# torch.baddbmm(output, input_b, wght_b.transpose(1, 2), out=output)
# output = (((output >> bitwidth) << input_format[1]) << weight_format[1]).squeeze_(1)
# if bias is not None:
# output += bias.unsqueeze(0).expand_as(output)
# return output
# # This function has only a single output, so it gets only one gradient
# @staticmethod
# def backward(ctx, grad_output):
# # This is a pattern that is very convenient - at the top of backward
# # unpack saved_tensors and initialize all gradients w.r.t. inputs to
# # None. Thanks to the fact that additional trailing Nones are
# # ignored, the return statement is simple even when the function has
# # optional inputs.
# input, weight, bias = ctx.saved_tensors
# grad_input = grad_weight = grad_bias = None
# # These needs_input_grad checks are optional and there only to
# # improve efficiency. If you want to make your code simpler, you can
# # skip them. Returning gradients for inputs that don't require it is
# # not an error.
# if ctx.needs_input_grad[0]:
# grad_input = grad_output.matmul(weight)
# if ctx.needs_input_grad[1]:
# grad_weight = grad_output.t().matmul(input)
# if bias is not None and ctx.needs_input_grad[2]:
# grad_bias = grad_output.sum(0)
# return grad_input, grad_weight, grad_bias, None, None, None, None, None
# the HUBLinear and HUBLinearFunction are parallel implementations
class HUBLinear(torch.nn.Linear):
    """
    this module is the fully connected layer, with binary input and binary output
    its API is similar to the parent class (input/output feature count, bias flag), except:
    1) binary data scale factor
    2) binary weight
    3) binary bias
    4) mac cycle
    This cycle is the mac cycle using unipolar umul, i.e., half the bipolar umul.
    As such, cycle = 2 ^ (bitwidth - 1).
    """
    def __init__(self,
                 in_features,
                 out_features,
                 bias=True,
                 binary_weight=None,
                 binary_bias=None,
                 rng="Sobol",
                 cycle=128,
                 rounding="round"):
        # binary_weight/binary_bias: optional pre-quantized tensors loaded in place
        # rng: RNG family for the input stream; the weight stream is always Sobol
        # cycle: mac computing cycle count; rounding: mode for deriving shift amounts
        super(HUBLinear, self).__init__(in_features, out_features, bias)
        # weight and bias
        if binary_weight is not None:
            self.weight.data = binary_weight
        if bias and (binary_bias is not None):
            self.bias.data = binary_bias
        # mac computing cycle
        self.cycle = cycle
        # bitwidth of rng: minimum bits whose code range covers `cycle` values
        self.bitwidth = (self.cycle - 1).bit_length()
        # random_sequence from sobol RNG
        # NOTE(review): RNG is a project-local generator class; calling the
        # instance yields the full random sequence tensor -- see its definition.
        self.irng = RNG(self.bitwidth, 1, rng)()
        self.wrng = RNG(self.bitwidth, 1, "Sobol")()
        # generate the value map for mul using current rng
        # dim 0 is input index
        # the tensor input value is the actual value produced by the rng
        self.input_map = torch.nn.Parameter(torch.empty(cycle), requires_grad=False)
        input_val_cycle = torch.empty(0)
        torch.cat(cycle*[torch.arange(cycle, dtype=torch.float).unsqueeze(1)], 1, out=input_val_cycle)
        input_bit_cycle = torch.empty(0)
        torch.gt(input_val_cycle, self.irng.unsqueeze(0), out=input_bit_cycle)
        # popcount across the cycle: number of rng samples each input level exceeds
        self.input_map.data = torch.sum(input_bit_cycle, 1).squeeze_().type(torch.long)
        # dim 0 is input index, dim 1 is weight index
        # the tensor value is the actual weight value produced by the rng, under a specific input and weight
        self.wght_map = torch.nn.Parameter(torch.empty(cycle, cycle), requires_grad=False)
        wght_bit_cycle = torch.empty(0)
        torch.gt(input_val_cycle, self.wrng.unsqueeze(0), out=wght_bit_cycle)
        for c in range(cycle):
            # only the first input_map[c] weight bits are observed within the cycle
            self.wght_map.data[c] = torch.sum(wght_bit_cycle[:, 0:self.input_map.data[c]], 1).squeeze_()
        # rounding mode
        self.rounding = rounding
        # per-forward shift amounts, recomputed on every call in forward()
        self.rshift_input = None
        self.rshift_wght = None
        self.rshift_output = None
    @autocast()
    def forward(self, input):
        # See the autograd section for explanation of what happens here.
        with torch.no_grad():
            # align data to the rng bitwidth via the log2 of the max magnitude
            input_max_int = input.abs().max().log2()
            wght_max_int = self.weight.abs().max().log2()
            if self.rounding == "round":
                input_max_int = input_max_int.round()
                wght_max_int = wght_max_int.round()
            elif self.rounding == "floor":
                input_max_int = input_max_int.floor()
                wght_max_int = wght_max_int.floor()
            elif self.rounding == "ceil":
                input_max_int = input_max_int.ceil()
                wght_max_int = wght_max_int.ceil()
            # shifts normalize input/weight into the rng code range; the output
            # shift undoes the combined scaling
            self.rshift_input = input_max_int - self.bitwidth
            self.rshift_wght = wght_max_int - self.bitwidth
            self.rshift_output = self.bitwidth - input_max_int - wght_max_int
        return HUBLinearFunction.apply(input, self.weight, self.bias, self.rshift_input, self.rshift_wght, self.rshift_output, self.cycle, self.wght_map)
class HUBLinear_flex(torch.nn.Linear):
    """
    this module is the fully connected layer, with binary input and binary output
    its API is similar to the parent class (input/output feature count, bias flag), except:
    1) binary data scale factor
    2) binary weight
    3) binary bias
    4) bitwidth now has to be a tuple for (input, weight)
    Note: No et supported
    """
    def __init__(self,
                 in_features,
                 out_features,
                 bias=True,
                 binary_weight=None,
                 binary_bias=None,
                 rng="Sobol",
                 bitwidth=None,
                 rounding="round"):
        # binary_weight/binary_bias: optional pre-quantized tensors loaded in place
        # rng: RNG family for the (temporal) input stream; weight stream is Sobol
        # bitwidth: REQUIRED (input, weight) total-bitwidth tuple
        super(HUBLinear_flex, self).__init__(in_features, out_features, bias)
        # weight and bias
        if binary_weight is not None:
            self.weight.data = binary_weight
        if bias and (binary_bias is not None):
            self.bias.data = binary_bias
        # effective (magnitude) bitwidth of each operand: total bits minus sign
        if isinstance(bitwidth, tuple):
            self.bw_input, self.bw_wght = (bitwidth[0]-1, bitwidth[1]-1)
        else:
            # fixed: message previously had a typo ("explict") and named a
            # non-existent class ("HUBLinearFlex")
            raise ValueError("HUBLinear_flex layer only supports explicit bitwidth tuple assignment.")
        # whichever stream has the smaller bitwidth is repeated so that both
        # bitstreams are `cycle` long for the population count
        ratio = int(2**max(self.bw_wght, self.bw_input) / 2**min(self.bw_wght, self.bw_input))
        self.max_bw = max(self.bw_wght, self.bw_input)
        cycle = 2 ** self.max_bw
        input_repeat = 1
        wght_repeat = 1
        if self.bw_input > self.bw_wght:
            wght_repeat = ratio
        elif self.bw_input < self.bw_wght:
            input_repeat = ratio
        # random_sequence from sobol RNG (RNG is a project-local generator class)
        self.irng = RNG(self.bw_input, 1, rng)().repeat(input_repeat)    # temporal input
        self.wrng = RNG(self.bw_wght, 1, "Sobol")().repeat(wght_repeat)  # rate weight
        # generate the value map for mul using current rng
        # dim 0 is input index
        # the tensor input value is the actual value produced by the rng
        self.input_map = torch.nn.Parameter(torch.empty(cycle), requires_grad=False)
        input_val_cycle = torch.empty(0)
        torch.cat(cycle*[torch.arange(cycle, dtype=torch.float).unsqueeze(1)], 1, out=input_val_cycle)
        input_bit_cycle = torch.empty(0)
        torch.gt(input_val_cycle, self.irng.unsqueeze(0), out=input_bit_cycle)
        # popcount across the cycle: number of rng samples each input level exceeds
        self.input_map.data = torch.sum(input_bit_cycle, 1).squeeze_().type(torch.long)
        # dim 0 is input index, dim 1 is weight index
        # the tensor value is the actual weight value produced by the rng, under a specific input and weight
        self.wght_map = torch.nn.Parameter(torch.empty(cycle, cycle), requires_grad=False)
        wght_bit_cycle = torch.empty(0)
        torch.gt(input_val_cycle, self.wrng.unsqueeze(0), out=wght_bit_cycle)
        for c in range(cycle):
            # only the first input_map[c] weight bits are observed within the cycle
            self.wght_map.data[c] = torch.sum(wght_bit_cycle[:, 0:self.input_map.data[c]], 1).squeeze_()
        # rounding mode
        self.rounding = rounding
        # per-forward shift amounts, recomputed on every call in forward()
        self.rshift_input = None
        self.rshift_wght = None
        self.rshift_output = None
    @autocast()
    def forward(self, input):
        # See the autograd section for explanation of what happens here.
        with torch.no_grad():
            # align data via the log2 of the max magnitude, per the rounding mode
            input_max_int = input.abs().max().log2()
            wght_max_int = self.weight.abs().max().log2()
            if self.rounding == "round":
                input_max_int = input_max_int.round()
                wght_max_int = wght_max_int.round()
            elif self.rounding == "floor":
                input_max_int = input_max_int.floor()
                wght_max_int = wght_max_int.floor()
            elif self.rounding == "ceil":
                input_max_int = input_max_int.ceil()
                wght_max_int = wght_max_int.ceil()
            # normalize each operand to its own bitwidth; the output shift is
            # referenced to the larger (max_bw) code range
            self.rshift_input = input_max_int - self.bw_input
            self.rshift_wght = wght_max_int - self.bw_wght
            self.rshift_output = self.max_bw - input_max_int - wght_max_int
        return HUBLinearFunction_flex.apply(input, self.weight, self.bias, self.rshift_input, self.rshift_wght, self.rshift_output, self.bw_input, self.bw_wght, self.wght_map)
# Inherit from Function
class HUBLinearFunction_flex(torch.autograd.Function):
    """Autograd function for HUBLinear_flex.

    forward() emulates the hybrid unary-binary MAC through the precomputed
    `wght_map` lookup table; backward() returns plain linear-layer gradients
    (straight-through with respect to the quantization).
    """
    # Note that both forward and backward are @staticmethods
    @staticmethod
    # bias is an optional argument
    def forward(ctx, input, weight, bias=None,
                rshift_input=3,
                rshift_wght=3,
                rshift_output=3,
                eff_input_bitwidth=None,
                eff_wght_bitwidth=None,
                wght_map=None):
        # save full-precision tensors for the backward pass
        ctx.save_for_backward(input, weight, bias)
        # first dim should always be batch
        batch = input.size()[0]
        # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
        # input preparation
        # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
        # scale input to range 0~2^bitwidth-1
        # NOTE(review): `>>` on the (possibly float) input relies on torch's
        # shift semantics for that dtype, and rshift_input may be negative
        # (acting as a left shift) -- confirm against the torch version in use.
        buf_input = torch.empty(0, dtype=torch.long, device=input.device)
        torch.abs((input >> rshift_input).unsqueeze_(1).type(torch.long), out=buf_input)
        torch.clamp(buf_input, 0, 2**eff_input_bitwidth-1, out=buf_input)
        # actual input: its sign
        act_input = torch.empty(0, device=input.device)
        torch.sign(input, out=act_input)
        act_input.unsqueeze_(1)
        # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
        # weight preparation
        # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
        # scale weight with batch to range 0~2^bitwidth-1
        buf_wght_no_batch = torch.empty(0, dtype=torch.long, device=weight.device)
        torch.abs((weight >> rshift_wght).unsqueeze_(0).type(torch.long), out=buf_wght_no_batch)
        torch.clamp(buf_wght_no_batch, 0, 2**eff_wght_bitwidth-1, out=buf_wght_no_batch)
        # replicate the quantized weight codes along the batch dimension
        buf_wght = torch.empty(0, dtype=torch.long, device=weight.device)
        torch.cat(batch*[buf_wght_no_batch], 0, out=buf_wght)
        # get actual weight for calculation
        sign_wght_no_batch = torch.empty(0, device=weight.device)
        torch.sign(weight, out=sign_wght_no_batch)
        sign_wght_no_batch.unsqueeze_(0)
        act_wght = torch.empty(0, device=weight.device)
        torch.cat(batch*[sign_wght_no_batch], 0, out=act_wght)
        # look up the emulated unary product magnitude and restore the sign
        torch.mul(wght_map[buf_input, buf_wght], act_wght, out=act_wght)
        output = torch.empty(0, device=weight.device)
        torch.matmul(act_input, act_wght.transpose(1, 2), out=output)
        # undo the input/weight scaling and drop the singleton matmul dim
        output = (output >> rshift_output).squeeze_(1)
        if bias is not None:
            output += bias.unsqueeze(0).expand_as(output)
        return output
    # This function has only a single output, so it gets only one gradient
    @staticmethod
    def backward(ctx, grad_output):
        # This is a pattern that is very convenient - at the top of backward
        # unpack saved_tensors and initialize all gradients w.r.t. inputs to
        # None. Thanks to the fact that additional trailing Nones are
        # ignored, the return statement is simple even when the function has
        # optional inputs.
        input, weight, bias = ctx.saved_tensors
        grad_input = grad_weight = grad_bias = None
        # These needs_input_grad checks are optional and there only to
        # improve efficiency. If you want to make your code simpler, you can
        # skip them. Returning gradients for inputs that don't require it is
        # not an error.
        if ctx.needs_input_grad[0]:
            grad_input = grad_output.matmul(weight)
        if ctx.needs_input_grad[1]:
            grad_weight = grad_output.t().matmul(input)
        if bias is not None and ctx.needs_input_grad[2]:
            grad_bias = grad_output.sum(0)
        # one gradient slot per forward() argument; non-tensor args get None
        return grad_input, grad_weight, grad_bias, None, None, None, None, None, None
# Inherit from Function
class HUBLinearFunction(torch.autograd.Function):
    """Autograd function for HUBLinear.

    forward() emulates the hybrid unary-binary MAC through the precomputed
    `wght_map` lookup table (codes clamped to `cycle` levels); backward()
    returns plain linear-layer gradients (straight-through).
    """
    # Note that both forward and backward are @staticmethods
    @staticmethod
    # bias is an optional argument
    def forward(ctx, input, weight, bias=None,
                rshift_input=3,
                rshift_wght=3,
                rshift_output=3,
                cycle=128,
                wght_map=None):
        # save full-precision tensors for the backward pass
        ctx.save_for_backward(input, weight, bias)
        # first dim should always be batch
        batch = input.size()[0]
        # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
        # input preparation
        # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
        # scale input to range 0~2^bitwidth-1
        # NOTE(review): `>>` on the (possibly float) input relies on torch's
        # shift semantics for that dtype, and rshift_input may be negative
        # (acting as a left shift) -- confirm against the torch version in use.
        buf_input = torch.empty(0, dtype=torch.long, device=input.device)
        torch.abs((input >> rshift_input).unsqueeze_(1).type(torch.long), out=buf_input)
        torch.clamp(buf_input, 0, cycle-1, out=buf_input)
        # actual input: its sign
        act_input = torch.empty(0, device=input.device)
        torch.sign(input, out=act_input)
        act_input.unsqueeze_(1)
        # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
        # weight preparation
        # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
        # scale weight with batch to range 0~2^bitwidth-1
        buf_wght_no_batch = torch.empty(0, dtype=torch.long, device=weight.device)
        torch.abs((weight >> rshift_wght).unsqueeze_(0).type(torch.long), out=buf_wght_no_batch)
        torch.clamp(buf_wght_no_batch, 0, cycle-1, out=buf_wght_no_batch)
        # replicate the quantized weight codes along the batch dimension
        buf_wght = torch.empty(0, dtype=torch.long, device=weight.device)
        torch.cat(batch*[buf_wght_no_batch], 0, out=buf_wght)
        # get actual weight for calculation
        sign_wght_no_batch = torch.empty(0, device=weight.device)
        torch.sign(weight, out=sign_wght_no_batch)
        sign_wght_no_batch.unsqueeze_(0)
        act_wght = torch.empty(0, device=weight.device)
        torch.cat(batch*[sign_wght_no_batch], 0, out=act_wght)
        # look up the emulated unary product magnitude and restore the sign
        torch.mul(wght_map[buf_input, buf_wght], act_wght, out=act_wght)
        output = torch.empty(0, device=weight.device)
        torch.matmul(act_input, act_wght.transpose(1, 2), out=output)
        # undo the input/weight scaling and drop the singleton matmul dim
        output = (output >> rshift_output).squeeze_(1)
        if bias is not None:
            output += bias.unsqueeze(0).expand_as(output)
        return output
    # This function has only a single output, so it gets only one gradient
    @staticmethod
    def backward(ctx, grad_output):
        # This is a pattern that is very convenient - at the top of backward
        # unpack saved_tensors and initialize all gradients w.r.t. inputs to
        # None. Thanks to the fact that additional trailing Nones are
        # ignored, the return statement is simple even when the function has
        # optional inputs.
        input, weight, bias = ctx.saved_tensors
        grad_input = grad_weight = grad_bias = None
        # These needs_input_grad checks are optional and there only to
        # improve efficiency. If you want to make your code simpler, you can
        # skip them. Returning gradients for inputs that don't require it is
        # not an error.
        if ctx.needs_input_grad[0]:
            grad_input = grad_output.matmul(weight)
        if ctx.needs_input_grad[1]:
            grad_weight = grad_output.t().matmul(input)
        if bias is not None and ctx.needs_input_grad[2]:
            grad_bias = grad_output.sum(0)
        # one gradient slot per forward() argument; non-tensor args get None
        return grad_input, grad_weight, grad_bias, None, None, None, None, None
class TlutLinear(torch.nn.Linear):
    """
    this module is the fully connected layer, with binary input and binary output
    its API is similar to the parent class (input/output feature count, bias flag), except:
    1) binary data scale factor
    2) binary weight
    3) binary bias
    4) mac cycle
    """
    def __init__(self,
                 in_features,
                 out_features,
                 bias=True,
                 binary_weight=None,
                 binary_bias=None,
                 cycle=16,
                 bitwidth=8,
                 rounding="round"):
        # binary_weight/binary_bias: optional pre-quantized tensors loaded in place
        # cycle: early-termination cycle count (None disables input clamping)
        # bitwidth: REQUIRED (input, weight) total-bitwidth tuple
        super(TlutLinear, self).__init__(in_features, out_features, bias)
        # weight and bias
        if binary_weight is not None:
            self.weight.data = binary_weight
        if bias and (binary_bias is not None):
            self.bias.data = binary_bias
        # bitwidth of the magnitude part of each operand (total bits minus sign)
        if isinstance(bitwidth, tuple):
            self.bw_input, self.bw_wght = (bitwidth[0]-1, bitwidth[1]-1)
        else:
            raise ValueError("Specify bitwidth tuple explicitly.")
        # max abs value
        self.max_abs_input = 2**self.bw_input
        self.max_abs_wght = 2**self.bw_wght
        # rounding mode
        self.rounding = rounding
        # Early termination cycle
        self.cycle = cycle
        # shift amounts, computed lazily on the first forward pass
        self.rshift_input = None
        self.rshift_wght = None
        self.rshift_output = None
    @autocast()
    def forward(self, input):
        # Preparing quantization/round config
        with torch.no_grad():
            # Preparing input shift value (once, from the first batch seen)
            if self.rshift_input is None:
                input_max_int = input.abs().max().log2()
                if self.rounding == "round":
                    input_max_int = input_max_int.round()
                elif self.rounding == "floor":
                    input_max_int = input_max_int.floor()
                elif self.rounding == "ceil":
                    input_max_int = input_max_int.ceil()
                self.rshift_input = input_max_int - self.bw_input
            # Preparing weight shift value (once)
            if self.rshift_wght is None:
                wght_max_int = self.weight.abs().max().log2()
                if self.rounding == "round":
                    wght_max_int = wght_max_int.round()
                elif self.rounding == "floor":
                    wght_max_int = wght_max_int.floor()
                elif self.rounding == "ceil":
                    wght_max_int = wght_max_int.ceil()
                self.rshift_wght = wght_max_int - self.bw_wght
            # Preparing output shift value: undo the combined operand scaling
            if self.rshift_output is None:
                self.rshift_output = 0 - self.rshift_input - self.rshift_wght
            # Preparing input clamp value based on cycle.
            # Fix: the original first stored 2**bw_input and then unconditionally
            # overwrote it (dead store), and compared with `!= None`. The clamp
            # is active only when the early-termination cycle is tighter than
            # the input code range; otherwise no clamp is applied.
            if self.cycle is not None and self.cycle < 2**self.bw_input - 1:
                self.input_clamp_val = self.cycle
            else:
                self.input_clamp_val = None
        return TlutLinearFunction.apply(input, self.weight, self.bias, self.rshift_input, self.rshift_wght, self.rshift_output, self.max_abs_input, self.max_abs_wght, self.input_clamp_val)
# Inherit from Function
class TlutLinearFunction(torch.autograd.Function):
    """Autograd function for TlutLinear.

    forward() shifts, rounds and clamps both operands to fixed-point codes,
    multiplies them with a matmul, then shifts the product back. backward()
    returns plain linear-layer gradients (straight-through).
    """
    # Note that both forward and backward are @staticmethods
    @staticmethod
    # bias is an optional argument
    def forward(ctx, input, weight, bias=None,
                rshift_input=3,
                rshift_wght=3,
                rshift_output=3,
                max_abs_input=128,
                max_abs_wght=128,
                input_clamp_val=None):
        # save full-precision tensors for the backward pass
        ctx.save_for_backward(input, weight, bias)
        # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
        # input preparation
        # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
        # round input to (bot, top); the early-termination clamp overrides the
        # nominal input range when provided
        if input_clamp_val != None: max_abs_input = input_clamp_val
        bot_input = 0 - max_abs_input
        top_input = max_abs_input - 1
        # NOTE(review): `>>` on a float tensor relies on torch's shift semantics
        # for that dtype, and the shift amounts may be negative -- confirm
        # against the torch version in use.
        input_round = torch.empty(0, device=input.device)
        torch.round(input >> rshift_input, out=input_round)
        torch.clamp(input_round.unsqueeze_(1), bot_input, top_input, out=input_round)
        # print(f"Input clamped to {bot_input}, {top_input}")
        # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
        # weight preparation
        # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
        # round weight to (bot, top)
        bot_wght = 0 - max_abs_wght
        top_wght = max_abs_wght - 1
        wght_round = torch.empty(0, device=input.device)
        torch.round(weight >> rshift_wght, out=wght_round)
        torch.clamp(wght_round.unsqueeze_(0), bot_wght, top_wght, out=wght_round)
        output = torch.empty(0, device=weight.device)
        torch.matmul(input_round, wght_round.transpose(1, 2), out=output)
        # rescale the product and drop the singleton matmul dim
        output = (output >> rshift_output).squeeze_(1)
        if bias is not None:
            output += bias.unsqueeze(0).expand_as(output)
        return output
    # This function has only a single output, so it gets only one gradient
    @staticmethod
    def backward(ctx, grad_output):
        # This is a pattern that is very convenient - at the top of backward
        # unpack saved_tensors and initialize all gradients w.r.t. inputs to
        # None. Thanks to the fact that additional trailing Nones are
        # ignored, the return statement is simple even when the function has
        # optional inputs.
        input, weight, bias = ctx.saved_tensors
        grad_input = grad_weight = grad_bias = None
        # These needs_input_grad checks are optional and there only to
        # improve efficiency. If you want to make your code simpler, you can
        # skip them. Returning gradients for inputs that don't require it is
        # not an error.
        if ctx.needs_input_grad[0]:
            grad_input = grad_output.matmul(weight)
        if ctx.needs_input_grad[1]:
            grad_weight = grad_output.t().matmul(input)
        if bias is not None and ctx.needs_input_grad[2]:
            grad_bias = grad_output.sum(0)
        # one gradient slot per forward() argument; non-tensor args get None
        return grad_input, grad_weight, grad_bias, None, None, None, None, None, None
class FxpLinear(torch.nn.Linear):
    """Fixed-point fully connected layer with binary input and binary output.

    API mirrors torch.nn.Linear (feature counts, bias flag) plus:
    1) binary data scale factor
    2) binary weight
    3) binary bias
    4) mac cycle
    `bitwidth` may be a (input, weight) tuple or a single total budget that is
    split according to `keep_res`/`more_res`.
    """
    def __init__(self,
                 in_features,
                 out_features,
                 bias=True,
                 binary_weight=None,
                 binary_bias=None,
                 bitwidth=8,
                 keep_res="input",  # keep the resolution of input/output
                 more_res="input",  # assign more resolution to input/weight
                 rounding="round"):
        super(FxpLinear, self).__init__(in_features, out_features, bias)
        # load optional pre-quantized parameters in place
        if binary_weight is not None:
            self.weight.data = binary_weight
        if bias and (binary_bias is not None):
            self.bias.data = binary_bias
        self.keep_res = keep_res
        # resolve the effective (input, weight) magnitude bitwidths
        if isinstance(bitwidth, tuple):
            self.bw_input = bitwidth[0] - 1
            self.bw_wght = bitwidth[1] - 1
            if keep_res == "output":
                self.bw_target_output = max(bitwidth)
        elif keep_res == "input":
            # full budget for both operands
            self.bw_input = self.bw_wght = bitwidth - 1
        elif keep_res == "output":
            # split the budget between the operands
            if bitwidth % 2 == 0:
                self.bw_input = self.bw_wght = bitwidth // 2 - 1
            elif more_res == "input":
                self.bw_input = (bitwidth + 1) // 2 - 1
                self.bw_wght = (bitwidth - 1) // 2 - 1
            elif more_res == "weight":
                self.bw_input = (bitwidth - 1) // 2 - 1
                self.bw_wght = (bitwidth + 1) // 2 - 1
            else:
                raise ValueError("more_res should be either 'input' or 'weight' when bitwidth is not a tuple and keep_res is 'output'.")
        else:
            raise ValueError("keep_res should be either 'input' or 'output' when bitwidth is not a tuple.")
        # max abs value representable by each operand
        self.max_abs_input = 2 ** self.bw_input
        self.max_abs_wght = 2 ** self.bw_wght
        # rounding mode for deriving shift amounts
        self.rounding = rounding
        # shift amounts, computed lazily on the first forward pass
        self.rshift_input = None
        self.rshift_wght = None
        self.rshift_output = None
    @autocast()
    def forward(self, input):
        # See the autograd section for explanation of what happens here.
        with torch.no_grad():
            def quantize_exp(raw_log2):
                # reduce a log2 magnitude to an integer exponent per the mode
                if self.rounding == "round":
                    return raw_log2.round()
                if self.rounding == "floor":
                    return raw_log2.floor()
                if self.rounding == "ceil":
                    return raw_log2.ceil()
                return raw_log2
            if self.rshift_input is None:
                self.rshift_input = quantize_exp(input.abs().max().log2()) - self.bw_input
            if self.rshift_wght is None:
                self.rshift_wght = quantize_exp(self.weight.abs().max().log2()) - self.bw_wght
            if self.rshift_output is None:
                self.rshift_output = 0 - self.rshift_input - self.rshift_wght
        result = FxpLinearFunction.apply(input, self.weight, self.bias, self.rshift_input, self.rshift_wght, self.rshift_output, self.max_abs_input, self.max_abs_wght)
        if self.keep_res == "input":
            return result
        # keep_res == "output": trim the result back to the target resolution
        extra_rshift = self.rshift_output - self.bw_target_output
        return (result >> extra_rshift).round() << extra_rshift
# Inherit from Function
class FxpLinearFunction(torch.autograd.Function):
    """Autograd function for FxpLinear.

    forward() shifts, rounds and clamps both operands to fixed-point codes,
    multiplies them with a matmul, then shifts the product back. backward()
    returns plain linear-layer gradients (straight-through).
    """
    # Note that both forward and backward are @staticmethods
    @staticmethod
    # bias is an optional argument
    def forward(ctx, input, weight, bias=None,
                rshift_input=3,
                rshift_wght=3,
                rshift_output=3,
                max_abs_input=128,
                max_abs_wght=128):
        # save full-precision tensors for the backward pass
        ctx.save_for_backward(input, weight, bias)
        # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
        # input preparation
        # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
        # round input to (bot, top)
        # NOTE(review): `>>` on a float tensor relies on torch's shift semantics
        # for that dtype, and the shift amounts may be negative -- confirm
        # against the torch version in use.
        bot_input = 0 - max_abs_input
        top_input = max_abs_input - 1
        input_round = torch.empty(0, device=input.device)
        torch.round(input >> rshift_input, out=input_round)
        torch.clamp(input_round.unsqueeze_(1), bot_input, top_input, out=input_round)
        # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
        # weight preparation
        # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
        # round weight to (bot, top)
        bot_wght = 0 - max_abs_wght
        top_wght = max_abs_wght - 1
        wght_round = torch.empty(0, device=input.device)
        torch.round(weight >> rshift_wght, out=wght_round)
        torch.clamp(wght_round.unsqueeze_(0), bot_wght, top_wght, out=wght_round)
        output = torch.empty(0, device=weight.device)
        torch.matmul(input_round, wght_round.transpose(1, 2), out=output)
        # rescale the product and drop the singleton matmul dim
        output = (output >> rshift_output).squeeze_(1)
        if bias is not None:
            output += bias.unsqueeze(0).expand_as(output)
        return output
    # This function has only a single output, so it gets only one gradient
    @staticmethod
    def backward(ctx, grad_output):
        # This is a pattern that is very convenient - at the top of backward
        # unpack saved_tensors and initialize all gradients w.r.t. inputs to
        # None. Thanks to the fact that additional trailing Nones are
        # ignored, the return statement is simple even when the function has
        # optional inputs.
        input, weight, bias = ctx.saved_tensors
        grad_input = grad_weight = grad_bias = None
        # These needs_input_grad checks are optional and there only to
        # improve efficiency. If you want to make your code simpler, you can
        # skip them. Returning gradients for inputs that don't require it is
        # not an error.
        if ctx.needs_input_grad[0]:
            grad_input = grad_output.matmul(weight)
        if ctx.needs_input_grad[1]:
            grad_weight = grad_output.t().matmul(input)
        if bias is not None and ctx.needs_input_grad[2]:
            grad_bias = grad_output.sum(0)
        # one gradient slot per forward() argument; non-tensor args get None
        return grad_input, grad_weight, grad_bias, None, None, None, None, None
| 43.540727
| 188
| 0.592868
| 8,884
| 69,491
| 4.450923
| 0.042323
| 0.012569
| 0.019422
| 0.025492
| 0.892949
| 0.878964
| 0.861034
| 0.851348
| 0.84371
| 0.831116
| 0
| 0.011852
| 0.304255
| 69,491
| 1,596
| 189
| 43.540727
| 0.806011
| 0.247428
| 0
| 0.850325
| 0
| 0.001085
| 0.020278
| 0
| 0
| 0
| 0
| 0
| 0.002169
| 1
| 0.039046
| false
| 0.006508
| 0.005423
| 0.001085
| 0.095445
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c4631c1ea7438a29c59868eb4685799fec4c3957
| 150,723
|
py
|
Python
|
huaweicloud-sdk-apig/huaweicloudsdkapig/v2/apig_client.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | 1
|
2021-04-16T07:59:28.000Z
|
2021-04-16T07:59:28.000Z
|
huaweicloud-sdk-apig/huaweicloudsdkapig/v2/apig_client.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | null | null | null |
huaweicloud-sdk-apig/huaweicloudsdkapig/v2/apig_client.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | 1
|
2022-01-17T02:24:18.000Z
|
2022-01-17T02:24:18.000Z
|
# coding: utf-8
from __future__ import absolute_import
import datetime
import re
import importlib
import six
from huaweicloudsdkcore.client import Client, ClientBuilder
from huaweicloudsdkcore.exceptions import exceptions
from huaweicloudsdkcore.utils import http_utils
from huaweicloudsdkcore.sdk_stream_request import SdkStreamRequest
class ApigClient(Client):
"""
:param configuration: .Configuration object for this client
:param pool_threads: The number of threads to use for async requests
to the API. More threads means more concurrent API requests.
"""
PRIMITIVE_TYPES = (float, bool, bytes, six.text_type) + six.integer_types
NATIVE_TYPES_MAPPING = {
'int': int,
'long': int if six.PY3 else long,
'float': float,
'str': str,
'bool': bool,
'date': datetime.date,
'datetime': datetime.datetime,
'object': object,
}
    def __init__(self):
        """Initialize the client and register the APIG v2 model package."""
        super(ApigClient, self).__init__()
        # models are resolved dynamically from the versioned model package
        self.model_package = importlib.import_module("huaweicloudsdkapig.v2.model")
        self.preset_headers = {'User-Agent': 'HuaweiCloud-SDK-Python'}
@classmethod
def new_builder(cls, clazz=None):
if clazz is None:
return ClientBuilder(cls)
if clazz.__name__ != "ApigClient":
raise TypeError("client type error, support client type is ApigClient")
return ClientBuilder(clazz)
    def associate_certificate_v2(self, request):
        """Bind a domain certificate.

        If an API is defined with the HTTPS request protocol, an SSL
        certificate must be added to its independent domain name. This
        operation binds a certificate to the specified domain.

        :param AssociateCertificateV2Request request
        :return: AssociateCertificateV2Response
        """
        # delegate to the _with_http_info variant, which performs the HTTP call
        return self.associate_certificate_v2_with_http_info(request)
def associate_certificate_v2_with_http_info(self, request):
"""绑定域名证书
如果创建API时,“定义API请求”使用HTTPS请求协议,那么在独立域名中需要添加SSL证书。 本章节主要介绍为特定域名绑定证书。
:param AssociateCertificateV2Request request
:return: AssociateCertificateV2Response
"""
all_params = ['instance_id', 'domain_id', 'group_id', 'associate_certificate_v2_request_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'instance_id' in local_var_params:
path_params['instance_id'] = local_var_params['instance_id']
if 'domain_id' in local_var_params:
path_params['domain_id'] = local_var_params['domain_id']
if 'group_id' in local_var_params:
path_params['group_id'] = local_var_params['group_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json;charset=UTF-8'])
auth_settings = []
return self.call_api(
resource_path='/v2/{project_id}/apigw/instances/{instance_id}/api-groups/{group_id}/domains/{domain_id}/certificate',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='AssociateCertificateV2Response',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
    def associate_domain_v2(self, request):
        """Bind a domain name.

        A user-defined domain name takes effect only after it is CNAMEd to the
        API group's subdomain (see the DNS user manual on adding CNAME record
        sets: https://support.huaweicloud.com/usermanual-dns/dns_usermanual_0010.html).
        Each API group can bind at most 5 domain names; once bound, APIs can be
        called through the custom domain name.

        :param AssociateDomainV2Request request
        :return: AssociateDomainV2Response
        """
        # delegate to the _with_http_info variant, which performs the HTTP call
        return self.associate_domain_v2_with_http_info(request)
def associate_domain_v2_with_http_info(self, request):
"""绑定域名
用户自定义的域名,需要CNAME到API分组的子域名上才能生效,具体方法请参见[增加CNAME类型记录集](https://support.huaweicloud.com/usermanual-dns/dns_usermanual_0010.html)。 每个API分组下最多可绑定5个域名。绑定域名后,用户可通过自定义域名调用API。
:param AssociateDomainV2Request request
:return: AssociateDomainV2Response
"""
all_params = ['instance_id', 'group_id', 'associate_domain_v2_request_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'instance_id' in local_var_params:
path_params['instance_id'] = local_var_params['instance_id']
if 'group_id' in local_var_params:
path_params['group_id'] = local_var_params['group_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json;charset=UTF-8'])
auth_settings = []
return self.call_api(
resource_path='/v2/{project_id}/apigw/instances/{instance_id}/api-groups/{group_id}/domains',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='AssociateDomainV2Response',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
    def associate_signature_key_v2(self, request):
        """Bind a signature key.

        A signature key takes effect only after being bound to an API. Once
        bound, API Gateway signs backend requests with the key so the backend
        service can verify the request origin. A key can be bound to one or
        more published APIs; the same API published to different environments
        may use different keys, but an API published to a given environment
        can have only one signature key bound.

        :param AssociateSignatureKeyV2Request request
        :return: AssociateSignatureKeyV2Response
        """
        # delegate to the _with_http_info variant, which performs the HTTP call
        return self.associate_signature_key_v2_with_http_info(request)
def associate_signature_key_v2_with_http_info(self, request):
"""绑定签名密钥
签名密钥创建后,需要绑定到API才能生效。 将签名密钥绑定到API后,则API网关请求后端服务时就会使用这个签名密钥进行加密签名,后端服务可以校验这个签名来验证请求来源。 将指定的签名密钥绑定到一个或多个已发布的API上。同一个API发布到不同的环境可以绑定不同的签名密钥;一个API在发布到特定环境后只能绑定一个签名密钥。
:param AssociateSignatureKeyV2Request request
:return: AssociateSignatureKeyV2Response
"""
all_params = ['instance_id', 'associate_signature_key_v2_request_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'instance_id' in local_var_params:
path_params['instance_id'] = local_var_params['instance_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json;charset=UTF-8'])
auth_settings = []
return self.call_api(
resource_path='/v2/{project_id}/apigw/instances/{instance_id}/sign-bindings',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='AssociateSignatureKeyV2Response',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
    def create_environment_v2(self, request):
        """Create an environment.

        In production, an API provider typically runs multiple environments
        (development, test, production, ...) and may freely publish an API to
        any of them for callers to use. The API version, request address and
        even request messages may differ per environment. API Gateway provides
        multi-environment management so tenants can model real scenarios at
        low cost.

        :param CreateEnvironmentV2Request request
        :return: CreateEnvironmentV2Response
        """
        # delegate to the _with_http_info variant, which performs the HTTP call
        return self.create_environment_v2_with_http_info(request)
def create_environment_v2_with_http_info(self, request):
    """Create an environment.

    API providers commonly run several environments (development, test,
    production, ...) and publish API versions to them independently; this
    call creates one such environment on the given gateway instance.

    :param CreateEnvironmentV2Request request
    :return: CreateEnvironmentV2Response
    """
    # Collect only the attributes that are actually set on the request.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    query_params = []
    header_params = {}
    form_params = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    if isinstance(request, SdkStreamRequest):
        # Stream requests carry their payload as a file stream instead.
        body_params = request.get_file_stream()

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json;charset=UTF-8'])
    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/envs',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='CreateEnvironmentV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def create_environment_variable_v2(self, request):
    """Create an environment variable.

    Variables are defined on an API group and referenced in API definitions
    as #name#; at call time the gateway substitutes the value configured for
    the target environment.

    :param CreateEnvironmentVariableV2Request request
    :return: CreateEnvironmentVariableV2Response
    """
    http_info = self.create_environment_variable_v2_with_http_info(request)
    return http_info
def create_environment_variable_v2_with_http_info(self, request):
    """Create an environment variable.

    Variables are defined on an API group and referenced in API definitions
    as #name#; at call time the gateway substitutes the value configured for
    the target environment. Variable names are case-sensitive and must be
    unique per group and environment.

    :param CreateEnvironmentVariableV2Request request
    :return: CreateEnvironmentVariableV2Response
    """
    # Collect only the attributes that are actually set on the request.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    query_params = []
    header_params = {}
    form_params = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    if isinstance(request, SdkStreamRequest):
        # Stream requests carry their payload as a file stream instead.
        body_params = request.get_file_stream()

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json;charset=UTF-8'])
    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/env-variables',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='CreateEnvironmentVariableV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def create_request_throttling_policy_v2(self, request):
    """Create a request throttling policy.

    A throttling policy caps how many times an API may be called within a
    given period.

    :param CreateRequestThrottlingPolicyV2Request request
    :return: CreateRequestThrottlingPolicyV2Response
    """
    http_info = self.create_request_throttling_policy_v2_with_http_info(request)
    return http_info
def create_request_throttling_policy_v2_with_http_info(self, request):
    """Create a request throttling policy.

    A throttling policy caps how many times an API may be called within a
    given period; providers can tune it to the backend's capacity and load.

    :param CreateRequestThrottlingPolicyV2Request request
    :return: CreateRequestThrottlingPolicyV2Response
    """
    # Collect only the attributes that are actually set on the request.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    query_params = []
    header_params = {}
    form_params = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    if isinstance(request, SdkStreamRequest):
        # Stream requests carry their payload as a file stream instead.
        body_params = request.get_file_stream()

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json;charset=UTF-8'])
    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/throttles',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='CreateRequestThrottlingPolicyV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def create_signature_key_v2(self, request):
    """Create a signature key.

    When bound to an API, the gateway signs requests to the backend with the
    key so the backend can verify that requests really came from the gateway.

    :param CreateSignatureKeyV2Request request
    :return: CreateSignatureKeyV2Response
    """
    http_info = self.create_signature_key_v2_with_http_info(request)
    return http_info
def create_signature_key_v2_with_http_info(self, request):
    """Create a signature key.

    When bound to an API, the gateway signs requests to the backend with the
    key; the backend can verify the signature and reject requests that did
    not originate from the gateway.

    :param CreateSignatureKeyV2Request request
    :return: CreateSignatureKeyV2Response
    """
    # Collect only the attributes that are actually set on the request.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    query_params = []
    header_params = {}
    form_params = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    if isinstance(request, SdkStreamRequest):
        # Stream requests carry their payload as a file stream instead.
        body_params = request.get_file_stream()

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json;charset=UTF-8'])
    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/signs',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='CreateSignatureKeyV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def create_special_throttling_configuration_v2(self, request):
    """Create a special throttling configuration.

    Adds a per-app or per-tenant override to a throttling policy, giving that
    object its own request limit instead of the policy default.

    :param CreateSpecialThrottlingConfigurationV2Request request
    :return: CreateSpecialThrottlingConfigurationV2Response
    """
    http_info = self.create_special_throttling_configuration_v2_with_http_info(request)
    return http_info
def create_special_throttling_configuration_v2_with_http_info(self, request):
    """Create a special throttling configuration.

    Adds a per-app or per-tenant override to a throttling policy, giving that
    object its own request limit instead of the policy default.

    :param CreateSpecialThrottlingConfigurationV2Request request
    :return: CreateSpecialThrottlingConfigurationV2Response
    """
    # Collect only the attributes that are actually set on the request.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {key: local_var_params[key]
                   for key in ('instance_id', 'throttle_id')
                   if key in local_var_params}

    query_params = []
    header_params = {}
    form_params = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    if isinstance(request, SdkStreamRequest):
        # Stream requests carry their payload as a file stream instead.
        body_params = request.get_file_stream()

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json;charset=UTF-8'])
    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/throttles/{throttle_id}/throttle-specials',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='CreateSpecialThrottlingConfigurationV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def delete_environment_v2(self, request):
    """Delete an environment.

    APIs published to the environment become inaccessible, which may affect
    a significant number of applications and users.

    :param DeleteEnvironmentV2Request request
    :return: DeleteEnvironmentV2Response
    """
    http_info = self.delete_environment_v2_with_http_info(request)
    return http_info
def delete_environment_v2_with_http_info(self, request):
    """Delete an environment.

    APIs published to the environment become inaccessible, which may affect
    a significant number of applications and users; make sure users have been
    notified or a forced take-down is intended.

    :param DeleteEnvironmentV2Request request
    :return: DeleteEnvironmentV2Response
    """
    # Collect only the attributes that are actually set on the request.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {key: local_var_params[key]
                   for key in ('instance_id', 'env_id')
                   if key in local_var_params}

    query_params = []
    header_params = {}
    form_params = {}

    body_params = None
    if isinstance(request, SdkStreamRequest):
        # Stream requests carry their payload as a file stream.
        body_params = request.get_file_stream()

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])
    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/envs/{env_id}',
        method='DELETE',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='DeleteEnvironmentV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def delete_environment_variable_v2(self, request):
    """Delete the specified environment variable.

    :param DeleteEnvironmentVariableV2Request request
    :return: DeleteEnvironmentVariableV2Response
    """
    http_info = self.delete_environment_variable_v2_with_http_info(request)
    return http_info
def delete_environment_variable_v2_with_http_info(self, request):
    """Delete the specified environment variable.

    :param DeleteEnvironmentVariableV2Request request
    :return: DeleteEnvironmentVariableV2Response
    """
    # Collect only the attributes that are actually set on the request.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {key: local_var_params[key]
                   for key in ('instance_id', 'env_variable_id')
                   if key in local_var_params}

    query_params = []
    header_params = {}
    form_params = {}

    body_params = None
    if isinstance(request, SdkStreamRequest):
        # Stream requests carry their payload as a file stream.
        body_params = request.get_file_stream()

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])
    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/env-variables/{env_variable_id}',
        method='DELETE',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='DeleteEnvironmentVariableV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def delete_request_throttling_policy_v2(self, request):
    """Delete a throttling policy together with all of its API bindings.

    :param DeleteRequestThrottlingPolicyV2Request request
    :return: DeleteRequestThrottlingPolicyV2Response
    """
    http_info = self.delete_request_throttling_policy_v2_with_http_info(request)
    return http_info
def delete_request_throttling_policy_v2_with_http_info(self, request):
    """Delete a throttling policy together with all of its API bindings.

    :param DeleteRequestThrottlingPolicyV2Request request
    :return: DeleteRequestThrottlingPolicyV2Response
    """
    # Collect only the attributes that are actually set on the request.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {key: local_var_params[key]
                   for key in ('instance_id', 'throttle_id')
                   if key in local_var_params}

    query_params = []
    header_params = {}
    form_params = {}

    body_params = None
    if isinstance(request, SdkStreamRequest):
        # Stream requests carry their payload as a file stream.
        body_params = request.get_file_stream()

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])
    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/throttles/{throttle_id}',
        method='DELETE',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='DeleteRequestThrottlingPolicyV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def delete_signature_key_v2(self, request):
    """Delete a signature key.

    All of the key's bindings are removed as well, so the key stops taking
    effect.

    :param DeleteSignatureKeyV2Request request
    :return: DeleteSignatureKeyV2Response
    """
    http_info = self.delete_signature_key_v2_with_http_info(request)
    return http_info
def delete_signature_key_v2_with_http_info(self, request):
    """Delete a signature key.

    All of the key's bindings are removed as well, so the key stops taking
    effect.

    :param DeleteSignatureKeyV2Request request
    :return: DeleteSignatureKeyV2Response
    """
    # Collect only the attributes that are actually set on the request.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {key: local_var_params[key]
                   for key in ('instance_id', 'sign_id')
                   if key in local_var_params}

    query_params = []
    header_params = {}
    form_params = {}

    body_params = None
    if isinstance(request, SdkStreamRequest):
        # Stream requests carry their payload as a file stream.
        body_params = request.get_file_stream()

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])
    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/signs/{sign_id}',
        method='DELETE',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='DeleteSignatureKeyV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def delete_special_throttling_configuration_v2(self, request):
    """Delete one special configuration of a throttling policy.

    :param DeleteSpecialThrottlingConfigurationV2Request request
    :return: DeleteSpecialThrottlingConfigurationV2Response
    """
    http_info = self.delete_special_throttling_configuration_v2_with_http_info(request)
    return http_info
def delete_special_throttling_configuration_v2_with_http_info(self, request):
    """Delete one special configuration of a throttling policy.

    :param DeleteSpecialThrottlingConfigurationV2Request request
    :return: DeleteSpecialThrottlingConfigurationV2Response
    """
    # Collect only the attributes that are actually set on the request.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {key: local_var_params[key]
                   for key in ('instance_id', 'throttle_id', 'strategy_id')
                   if key in local_var_params}

    query_params = []
    header_params = {}
    form_params = {}

    body_params = None
    if isinstance(request, SdkStreamRequest):
        # Stream requests carry their payload as a file stream.
        body_params = request.get_file_stream()

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])
    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/throttles/{throttle_id}/throttle-specials/{strategy_id}',
        method='DELETE',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='DeleteSpecialThrottlingConfigurationV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def disassociate_certificate_v2(self, request):
    """Delete the certificate bound to a domain name.

    Use when the certificate is no longer needed or has expired.

    :param DisassociateCertificateV2Request request
    :return: DisassociateCertificateV2Response
    """
    http_info = self.disassociate_certificate_v2_with_http_info(request)
    return http_info
def disassociate_certificate_v2_with_http_info(self, request):
    """Delete the certificate bound to a domain name.

    Use when the certificate is no longer needed or has expired.

    :param DisassociateCertificateV2Request request
    :return: DisassociateCertificateV2Response
    """
    # Collect only the attributes that are actually set on the request.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {key: local_var_params[key]
                   for key in ('instance_id', 'domain_id', 'group_id',
                               'certificate_id')
                   if key in local_var_params}

    query_params = []
    header_params = {}
    form_params = {}

    body_params = None
    if isinstance(request, SdkStreamRequest):
        # Stream requests carry their payload as a file stream.
        body_params = request.get_file_stream()

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])
    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/api-groups/{group_id}/domains/{domain_id}/certificate/{certificate_id}',
        method='DELETE',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='DisassociateCertificateV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def disassociate_domain_v2(self, request):
    """Unbind a custom domain name from an API group.

    :param DisassociateDomainV2Request request
    :return: DisassociateDomainV2Response
    """
    http_info = self.disassociate_domain_v2_with_http_info(request)
    return http_info
def disassociate_domain_v2_with_http_info(self, request):
    """Unbind a custom domain name from an API group.

    :param DisassociateDomainV2Request request
    :return: DisassociateDomainV2Response
    """
    # Collect only the attributes that are actually set on the request.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {key: local_var_params[key]
                   for key in ('instance_id', 'domain_id', 'group_id')
                   if key in local_var_params}

    query_params = []
    header_params = {}
    form_params = {}

    body_params = None
    if isinstance(request, SdkStreamRequest):
        # Stream requests carry their payload as a file stream.
        body_params = request.get_file_stream()

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])
    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/api-groups/{group_id}/domains/{domain_id}',
        method='DELETE',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='DisassociateDomainV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def disassociate_signature_key_v2(self, request):
    """Unbind a signature key from an API.

    :param DisassociateSignatureKeyV2Request request
    :return: DisassociateSignatureKeyV2Response
    """
    http_info = self.disassociate_signature_key_v2_with_http_info(request)
    return http_info
def disassociate_signature_key_v2_with_http_info(self, request):
    """Unbind a signature key from an API.

    :param DisassociateSignatureKeyV2Request request
    :return: DisassociateSignatureKeyV2Response
    """
    # Collect only the attributes that are actually set on the request.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {key: local_var_params[key]
                   for key in ('instance_id', 'sign_bindings_id')
                   if key in local_var_params}

    query_params = []
    header_params = {}
    form_params = {}

    body_params = None
    if isinstance(request, SdkStreamRequest):
        # Stream requests carry their payload as a file stream.
        body_params = request.get_file_stream()

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])
    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/sign-bindings/{sign_bindings_id}',
        method='DELETE',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='DisassociateSignatureKeyV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_api_groups_quantities_v2(self, request):
    """Query a summary of the tenant's API groups.

    :param ListApiGroupsQuantitiesV2Request request
    :return: ListApiGroupsQuantitiesV2Response
    """
    http_info = self.list_api_groups_quantities_v2_with_http_info(request)
    return http_info
def list_api_groups_quantities_v2_with_http_info(self, request):
    """Query a summary of the tenant's API groups.

    :param ListApiGroupsQuantitiesV2Request request
    :return: ListApiGroupsQuantitiesV2Response
    """
    # Collect only the attributes that are actually set on the request.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    query_params = []
    header_params = {}
    form_params = {}

    body_params = None
    if isinstance(request, SdkStreamRequest):
        # Stream requests carry their payload as a file stream.
        body_params = request.get_file_stream()

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])
    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/resources/outline/groups',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ListApiGroupsQuantitiesV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_api_quantities_v2(self, request):
    """Query a summary of the tenant's APIs.

    Reports how many APIs are published to the RELEASE environment and how
    many are not.

    :param ListApiQuantitiesV2Request request
    :return: ListApiQuantitiesV2Response
    """
    http_info = self.list_api_quantities_v2_with_http_info(request)
    return http_info
def list_api_quantities_v2_with_http_info(self, request):
    """Query a summary of the tenant's APIs.

    Reports how many APIs are published to the RELEASE environment and how
    many are not.

    :param ListApiQuantitiesV2Request request
    :return: ListApiQuantitiesV2Response
    """
    # Collect only the attributes that are actually set on the request.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    query_params = []
    header_params = {}
    form_params = {}

    body_params = None
    if isinstance(request, SdkStreamRequest):
        # Stream requests carry their payload as a file stream.
        body_params = request.get_file_stream()

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])
    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/resources/outline/apis',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ListApiQuantitiesV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_apis_binded_to_signature_key_v2(self, request):
    """List the APIs already bound to a signature key.

    :param ListApisBindedToSignatureKeyV2Request request
    :return: ListApisBindedToSignatureKeyV2Response
    """
    http_info = self.list_apis_binded_to_signature_key_v2_with_http_info(request)
    return http_info
def list_apis_binded_to_signature_key_v2_with_http_info(self, request):
    """List the APIs already bound to a signature key.

    :param ListApisBindedToSignatureKeyV2Request request
    :return: ListApisBindedToSignatureKeyV2Response
    """
    # Collect only the attributes that are actually set on the request.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    # Optional filters are forwarded as query parameters, in a fixed order.
    query_params = []
    for key in ('sign_id', 'env_id', 'api_id', 'api_name', 'group_id',
                'offset', 'limit'):
        if key in local_var_params:
            query_params.append((key, local_var_params[key]))

    header_params = {}
    form_params = {}

    body_params = None
    if isinstance(request, SdkStreamRequest):
        # Stream requests carry their payload as a file stream.
        body_params = request.get_file_stream()

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])
    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/sign-bindings/binded-apis',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ListApisBindedToSignatureKeyV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_apis_not_bound_with_signature_key_v2(self, request):
    """List the published APIs not yet bound to the signature key.

    Unpublished APIs are not included.

    :param ListApisNotBoundWithSignatureKeyV2Request request
    :return: ListApisNotBoundWithSignatureKeyV2Response
    """
    http_info = self.list_apis_not_bound_with_signature_key_v2_with_http_info(request)
    return http_info
def list_apis_not_bound_with_signature_key_v2_with_http_info(self, request):
    """List the published APIs not yet bound to the signature key.

    Unpublished APIs are not included.

    :param ListApisNotBoundWithSignatureKeyV2Request request
    :return: ListApisNotBoundWithSignatureKeyV2Response
    """
    # Collect only the attributes that are actually set on the request.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    # Optional filters are forwarded as query parameters, in a fixed order.
    query_params = []
    for key in ('sign_id', 'env_id', 'api_id', 'api_name', 'group_id',
                'offset', 'limit'):
        if key in local_var_params:
            query_params.append((key, local_var_params[key]))

    header_params = {}
    form_params = {}

    body_params = None
    if isinstance(request, SdkStreamRequest):
        # Stream requests carry their payload as a file stream.
        body_params = request.get_file_stream()

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])
    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/sign-bindings/unbinded-apis',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ListApisNotBoundWithSignatureKeyV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_app_quantities_v2(self, request):
    """Query a summary of the tenant's apps.

    Reports how many apps have API access authorization and how many do not.

    :param ListAppQuantitiesV2Request request
    :return: ListAppQuantitiesV2Response
    """
    http_info = self.list_app_quantities_v2_with_http_info(request)
    return http_info
def list_app_quantities_v2_with_http_info(self, request):
    """Query a summary of the tenant's apps.

    Reports how many apps have API access authorization and how many do not.

    :param ListAppQuantitiesV2Request request
    :return: ListAppQuantitiesV2Response
    """
    # Collect only the attributes that are actually set on the request.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    query_params = []
    header_params = {}
    form_params = {}

    body_params = None
    if isinstance(request, SdkStreamRequest):
        # Stream requests carry their payload as a file stream.
        body_params = request.get_file_stream()

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])
    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/resources/outline/apps',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ListAppQuantitiesV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_environment_variables_v2(self, request):
    """Query the environment variable list.

    Lists all environment variables defined under an API group.

    :param ListEnvironmentVariablesV2Request request
    :return: ListEnvironmentVariablesV2Response
    """
    return self.list_environment_variables_v2_with_http_info(request)
def list_environment_variables_v2_with_http_info(self, request):
    """Query the environment variable list.

    Lists all environment variables defined under an API group.

    :param ListEnvironmentVariablesV2Request request
    :return: ListEnvironmentVariablesV2Response
    """
    # Collect only the attributes actually set on the request object.
    local_var_params = {
        attr: getattr(request, attr)
        for attr in request.attribute_map
        if hasattr(request, attr)
    }

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    # Keep the documented query-parameter order.
    query_params = []
    for key in ('group_id', 'env_id', 'variable_name', 'offset', 'limit',
                'precise_search'):
        if key in local_var_params:
            query_params.append((key, local_var_params[key]))

    header_params = {}
    form_params = {}

    body_params = None
    # Streaming requests carry their payload as a file stream.
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])
    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/env-variables',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ListEnvironmentVariablesV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_environments_v2(self, request):
    """Query the environment list.

    Lists all environments matching the given criteria.

    :param ListEnvironmentsV2Request request
    :return: ListEnvironmentsV2Response
    """
    return self.list_environments_v2_with_http_info(request)
def list_environments_v2_with_http_info(self, request):
    """Query the environment list.

    Lists all environments matching the given criteria.

    :param ListEnvironmentsV2Request request
    :return: ListEnvironmentsV2Response
    """
    # Collect only the attributes actually set on the request object.
    local_var_params = {
        attr: getattr(request, attr)
        for attr in request.attribute_map
        if hasattr(request, attr)
    }

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    # Keep the documented query-parameter order.
    query_params = []
    for key in ('name', 'offset', 'limit'):
        if key in local_var_params:
            query_params.append((key, local_var_params[key]))

    header_params = {}
    form_params = {}

    body_params = None
    # Streaming requests carry their payload as a file stream.
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])
    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/envs',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ListEnvironmentsV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_request_throttling_policy_v2(self, request):
    """Query the request throttling policy list.

    Lists information about all request throttling policies.

    :param ListRequestThrottlingPolicyV2Request request
    :return: ListRequestThrottlingPolicyV2Response
    """
    return self.list_request_throttling_policy_v2_with_http_info(request)
def list_request_throttling_policy_v2_with_http_info(self, request):
    """Query the request throttling policy list.

    Lists information about all request throttling policies.

    :param ListRequestThrottlingPolicyV2Request request
    :return: ListRequestThrottlingPolicyV2Response
    """
    # Collect only the attributes actually set on the request object.
    local_var_params = {
        attr: getattr(request, attr)
        for attr in request.attribute_map
        if hasattr(request, attr)
    }

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    # Keep the documented query-parameter order.
    query_params = []
    for key in ('id', 'name', 'offset', 'limit', 'precise_search'):
        if key in local_var_params:
            query_params.append((key, local_var_params[key]))

    header_params = {}
    form_params = {}

    body_params = None
    # Streaming requests carry their payload as a file stream.
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])
    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/throttles',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ListRequestThrottlingPolicyV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_signature_keys_binded_to_api_v2(self, request):
    """Query the signature keys bound to an API.

    Lists the signature keys bound to a given API. Each API should have
    at most one signature key bound per environment.

    :param ListSignatureKeysBindedToApiV2Request request
    :return: ListSignatureKeysBindedToApiV2Response
    """
    return self.list_signature_keys_binded_to_api_v2_with_http_info(request)
def list_signature_keys_binded_to_api_v2_with_http_info(self, request):
    """Query the signature keys bound to an API.

    Lists the signature keys bound to a given API. Each API should have
    at most one signature key bound per environment.

    :param ListSignatureKeysBindedToApiV2Request request
    :return: ListSignatureKeysBindedToApiV2Response
    """
    # Collect only the attributes actually set on the request object.
    local_var_params = {
        attr: getattr(request, attr)
        for attr in request.attribute_map
        if hasattr(request, attr)
    }

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    # Keep the documented query-parameter order.
    query_params = []
    for key in ('api_id', 'sign_id', 'sign_name', 'env_id', 'offset',
                'limit'):
        if key in local_var_params:
            query_params.append((key, local_var_params[key]))

    header_params = {}
    form_params = {}

    body_params = None
    # Streaming requests carry their payload as a file stream.
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])
    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/sign-bindings/binded-signs',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ListSignatureKeysBindedToApiV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_signature_keys_v2(self, request):
    """Query the signature key list.

    Lists information about all signature keys.

    :param ListSignatureKeysV2Request request
    :return: ListSignatureKeysV2Response
    """
    return self.list_signature_keys_v2_with_http_info(request)
def list_signature_keys_v2_with_http_info(self, request):
    """Query the signature key list.

    Lists information about all signature keys.

    :param ListSignatureKeysV2Request request
    :return: ListSignatureKeysV2Response
    """
    # Collect only the attributes actually set on the request object.
    local_var_params = {
        attr: getattr(request, attr)
        for attr in request.attribute_map
        if hasattr(request, attr)
    }

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    # Keep the documented query-parameter order.
    query_params = []
    for key in ('id', 'name', 'offset', 'limit', 'precise_search'):
        if key in local_var_params:
            query_params.append((key, local_var_params[key]))

    header_params = {}
    form_params = {}

    body_params = None
    # Streaming requests carry their payload as a file stream.
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])
    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/signs',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ListSignatureKeysV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_special_throttling_configurations_v2(self, request):
    """Query the excluded (special) throttling configuration list.

    Lists the special configurations defined for a request throttling
    policy.

    :param ListSpecialThrottlingConfigurationsV2Request request
    :return: ListSpecialThrottlingConfigurationsV2Response
    """
    return self.list_special_throttling_configurations_v2_with_http_info(request)
def list_special_throttling_configurations_v2_with_http_info(self, request):
    """Query the excluded (special) throttling configuration list.

    Lists the special configurations defined for a request throttling
    policy.

    :param ListSpecialThrottlingConfigurationsV2Request request
    :return: ListSpecialThrottlingConfigurationsV2Response
    """
    # Collect only the attributes actually set on the request object.
    local_var_params = {
        attr: getattr(request, attr)
        for attr in request.attribute_map
        if hasattr(request, attr)
    }

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']
    if 'throttle_id' in local_var_params:
        path_params['throttle_id'] = local_var_params['throttle_id']

    # Keep the documented query-parameter order.
    query_params = []
    for key in ('object_type', 'app_name', 'offset', 'limit'):
        if key in local_var_params:
            query_params.append((key, local_var_params[key]))

    header_params = {}
    form_params = {}

    body_params = None
    # Streaming requests carry their payload as a file stream.
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])
    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/throttles/{throttle_id}/throttle-specials',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ListSpecialThrottlingConfigurationsV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def show_details_of_domain_name_certificate_v2(self, request):
    """Query the domain name certificate.

    Shows details of the certificate bound to a domain name.

    :param ShowDetailsOfDomainNameCertificateV2Request request
    :return: ShowDetailsOfDomainNameCertificateV2Response
    """
    return self.show_details_of_domain_name_certificate_v2_with_http_info(request)
def show_details_of_domain_name_certificate_v2_with_http_info(self, request):
    """Query the domain name certificate.

    Shows details of the certificate bound to a domain name.

    :param ShowDetailsOfDomainNameCertificateV2Request request
    :return: ShowDetailsOfDomainNameCertificateV2Response
    """
    # Collect only the attributes actually set on the request object.
    local_var_params = {
        attr: getattr(request, attr)
        for attr in request.attribute_map
        if hasattr(request, attr)
    }

    collection_formats = {}

    path_params = {}
    for key in ('instance_id', 'domain_id', 'group_id', 'certificate_id'):
        if key in local_var_params:
            path_params[key] = local_var_params[key]

    query_params = []
    header_params = {}
    form_params = {}

    body_params = None
    # Streaming requests carry their payload as a file stream.
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])
    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/api-groups/{group_id}/domains/{domain_id}/certificate/{certificate_id}',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ShowDetailsOfDomainNameCertificateV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def show_details_of_environment_variable_v2(self, request):
    """Query environment variable details.

    Shows the details of a specified environment variable.

    :param ShowDetailsOfEnvironmentVariableV2Request request
    :return: ShowDetailsOfEnvironmentVariableV2Response
    """
    return self.show_details_of_environment_variable_v2_with_http_info(request)
def show_details_of_environment_variable_v2_with_http_info(self, request):
    """Query environment variable details.

    Shows the details of a specified environment variable.

    :param ShowDetailsOfEnvironmentVariableV2Request request
    :return: ShowDetailsOfEnvironmentVariableV2Response
    """
    # Collect only the attributes actually set on the request object.
    local_var_params = {
        attr: getattr(request, attr)
        for attr in request.attribute_map
        if hasattr(request, attr)
    }

    collection_formats = {}

    path_params = {}
    for key in ('instance_id', 'env_variable_id'):
        if key in local_var_params:
            path_params[key] = local_var_params[key]

    query_params = []
    header_params = {}
    form_params = {}

    body_params = None
    # Streaming requests carry their payload as a file stream.
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])
    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/env-variables/{env_variable_id}',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ShowDetailsOfEnvironmentVariableV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def show_details_of_request_throttling_policy_v2(self, request):
    """Query request throttling policy details.

    Shows the details of a specified request throttling policy.

    :param ShowDetailsOfRequestThrottlingPolicyV2Request request
    :return: ShowDetailsOfRequestThrottlingPolicyV2Response
    """
    return self.show_details_of_request_throttling_policy_v2_with_http_info(request)
def show_details_of_request_throttling_policy_v2_with_http_info(self, request):
    """Query request throttling policy details.

    Shows the details of a specified request throttling policy.

    :param ShowDetailsOfRequestThrottlingPolicyV2Request request
    :return: ShowDetailsOfRequestThrottlingPolicyV2Response
    """
    # Collect only the attributes actually set on the request object.
    local_var_params = {
        attr: getattr(request, attr)
        for attr in request.attribute_map
        if hasattr(request, attr)
    }

    collection_formats = {}

    path_params = {}
    for key in ('instance_id', 'throttle_id'):
        if key in local_var_params:
            path_params[key] = local_var_params[key]

    query_params = []
    header_params = {}
    form_params = {}

    body_params = None
    # Streaming requests carry their payload as a file stream.
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])
    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/throttles/{throttle_id}',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ShowDetailsOfRequestThrottlingPolicyV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def update_environment_v2(self, request):
    """Update an environment.

    Modifies a specified environment. Only the name and remark
    attributes can be changed; all other attributes are immutable.

    :param UpdateEnvironmentV2Request request
    :return: UpdateEnvironmentV2Response
    """
    return self.update_environment_v2_with_http_info(request)
def update_environment_v2_with_http_info(self, request):
    """Update an environment.

    Modifies a specified environment. Only the name and remark
    attributes can be changed; all other attributes are immutable.

    :param UpdateEnvironmentV2Request request
    :return: UpdateEnvironmentV2Response
    """
    # Collect only the attributes actually set on the request object.
    local_var_params = {
        attr: getattr(request, attr)
        for attr in request.attribute_map
        if hasattr(request, attr)
    }

    collection_formats = {}

    path_params = {}
    for key in ('instance_id', 'env_id'):
        if key in local_var_params:
            path_params[key] = local_var_params[key]

    query_params = []
    header_params = {}
    form_params = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    # Streaming requests carry their payload as a file stream.
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json;charset=UTF-8'])
    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/envs/{env_id}',
        method='PUT',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='UpdateEnvironmentV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def update_request_throttling_policy_v2(self, request):
    """Update a request throttling policy.

    Modifies the details of a specified request throttling policy.

    :param UpdateRequestThrottlingPolicyV2Request request
    :return: UpdateRequestThrottlingPolicyV2Response
    """
    return self.update_request_throttling_policy_v2_with_http_info(request)
def update_request_throttling_policy_v2_with_http_info(self, request):
    """Update a request throttling policy.

    Modifies the details of a specified request throttling policy.

    :param UpdateRequestThrottlingPolicyV2Request request
    :return: UpdateRequestThrottlingPolicyV2Response
    """
    # Collect only the attributes actually set on the request object.
    local_var_params = {
        attr: getattr(request, attr)
        for attr in request.attribute_map
        if hasattr(request, attr)
    }

    collection_formats = {}

    path_params = {}
    for key in ('instance_id', 'throttle_id'):
        if key in local_var_params:
            path_params[key] = local_var_params[key]

    query_params = []
    header_params = {}
    form_params = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    # Streaming requests carry their payload as a file stream.
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json;charset=UTF-8'])
    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/throttles/{throttle_id}',
        method='PUT',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='UpdateRequestThrottlingPolicyV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def update_signature_key_v2(self, request):
    """Update a signature key.

    Modifies the details of a specified signature key.

    :param UpdateSignatureKeyV2Request request
    :return: UpdateSignatureKeyV2Response
    """
    return self.update_signature_key_v2_with_http_info(request)
def update_signature_key_v2_with_http_info(self, request):
    """Update a signature key.

    Modifies the details of a specified signature key.

    :param UpdateSignatureKeyV2Request request
    :return: UpdateSignatureKeyV2Response
    """
    # Collect only the attributes actually set on the request object.
    local_var_params = {
        attr: getattr(request, attr)
        for attr in request.attribute_map
        if hasattr(request, attr)
    }

    collection_formats = {}

    path_params = {}
    for key in ('instance_id', 'sign_id'):
        if key in local_var_params:
            path_params[key] = local_var_params[key]

    query_params = []
    header_params = {}
    form_params = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    # Streaming requests carry their payload as a file stream.
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json;charset=UTF-8'])
    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/signs/{sign_id}',
        method='PUT',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='UpdateSignatureKeyV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def update_special_throttling_configuration_v2(self, request):
    """Update an excluded (special) throttling configuration.

    Modifies one special configuration under a request throttling
    policy.

    :param UpdateSpecialThrottlingConfigurationV2Request request
    :return: UpdateSpecialThrottlingConfigurationV2Response
    """
    return self.update_special_throttling_configuration_v2_with_http_info(request)
def update_special_throttling_configuration_v2_with_http_info(self, request):
    """Update an excluded (special) throttling configuration.

    Modifies one special configuration under a request throttling
    policy.

    :param UpdateSpecialThrottlingConfigurationV2Request request
    :return: UpdateSpecialThrottlingConfigurationV2Response
    """
    # Collect only the attributes actually set on the request object.
    local_var_params = {
        attr: getattr(request, attr)
        for attr in request.attribute_map
        if hasattr(request, attr)
    }

    collection_formats = {}

    path_params = {}
    for key in ('instance_id', 'throttle_id', 'strategy_id'):
        if key in local_var_params:
            path_params[key] = local_var_params[key]

    query_params = []
    header_params = {}
    form_params = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    # Streaming requests carry their payload as a file stream.
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json;charset=UTF-8'])
    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/throttles/{throttle_id}/throttle-specials/{strategy_id}',
        method='PUT',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='UpdateSpecialThrottlingConfigurationV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def associate_request_throttling_policy_v2(self, request):
    """Bind a request throttling policy to an API.

    Applying a throttling policy to an API limits all access to it:
    once the number of calls within the configured period exceeds the
    policy's maximum, further requests are rejected, protecting the
    backend from traffic spikes. Binding requires specifying the
    environment in which the policy takes effect; an API published to
    different environments can use different policies, but within one
    environment only one default throttling policy can be bound.

    :param AssociateRequestThrottlingPolicyV2Request request
    :return: AssociateRequestThrottlingPolicyV2Response
    """
    return self.associate_request_throttling_policy_v2_with_http_info(request)
def associate_request_throttling_policy_v2_with_http_info(self, request):
    """Bind a request throttling policy to an API.

    Applying a throttling policy to an API limits all access to it;
    the binding must specify the environment in which the policy takes
    effect. Within one environment an API can have only one default
    throttling policy bound.

    :param AssociateRequestThrottlingPolicyV2Request request
    :return: AssociateRequestThrottlingPolicyV2Response
    """
    # Collect only the attributes actually set on the request object.
    local_var_params = {
        attr: getattr(request, attr)
        for attr in request.attribute_map
        if hasattr(request, attr)
    }

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    query_params = []
    header_params = {}
    form_params = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    # Streaming requests carry their payload as a file stream.
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json;charset=UTF-8'])
    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/throttle-bindings',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='AssociateRequestThrottlingPolicyV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def batch_disassociate_throttling_policy_v2(self, request):
    """Unbind request throttling policies in batches.

    Removes the bindings between APIs and request throttling policies
    in a single batch operation.

    :param BatchDisassociateThrottlingPolicyV2Request request
    :return: BatchDisassociateThrottlingPolicyV2Response
    """
    return self.batch_disassociate_throttling_policy_v2_with_http_info(request)
def batch_disassociate_throttling_policy_v2_with_http_info(self, request):
    """Unbind request throttling policies in batches.

    Removes the bindings between APIs and request throttling policies
    in a single batch operation.

    :param BatchDisassociateThrottlingPolicyV2Request request
    :return: BatchDisassociateThrottlingPolicyV2Response
    """
    # Collect only the attributes actually set on the request object.
    local_var_params = {
        attr: getattr(request, attr)
        for attr in request.attribute_map
        if hasattr(request, attr)
    }

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    query_params = []
    if 'action' in local_var_params:
        query_params.append(('action', local_var_params['action']))

    header_params = {}
    form_params = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    # Streaming requests carry their payload as a file stream.
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json;charset=UTF-8'])
    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/throttle-bindings',
        method='PUT',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='BatchDisassociateThrottlingPolicyV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def create_api_group_v2(self, request):
    """Create an API group.

    An API group is the management unit for APIs and is equivalent to a
    service entry point. Creating a group returns a subdomain name used
    as the access entry. APIs within one group should be related.

    :param CreateApiGroupV2Request request
    :return: CreateApiGroupV2Response
    """
    return self.create_api_group_v2_with_http_info(request)
def create_api_group_v2_with_http_info(self, request):
    """Create an API group.

    An API group is the management unit for APIs and is equivalent to a
    service entry point. Creating a group returns a subdomain name used
    as the access entry.

    :param CreateApiGroupV2Request request
    :return: CreateApiGroupV2Response
    """
    # Collect only the attributes actually set on the request object.
    local_var_params = {
        attr: getattr(request, attr)
        for attr in request.attribute_map
        if hasattr(request, attr)
    }

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    query_params = []
    header_params = {}
    form_params = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    # Streaming requests carry their payload as a file stream.
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json;charset=UTF-8'])
    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/api-groups',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='CreateApiGroupV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def create_api_v2(self, request):
    """Register an API.

    Adds an API, i.e. one concrete service interface. An API has two
    parts: the consumer-facing definition of how to call it, and the
    provider-facing definition of how the gateway reaches the real
    backend. Supported backend types: conventional HTTP/HTTPS web
    backends, FunctionGraph functions, and MOCK.

    :param CreateApiV2Request request
    :return: CreateApiV2Response
    """
    return self.create_api_v2_with_http_info(request)
def create_api_v2_with_http_info(self, request):
    """Register an API.

    Adds an API, i.e. one concrete service interface, comprising the
    consumer-facing call definition and the provider-facing backend
    definition (HTTP/HTTPS web backend, FunctionGraph, or MOCK).

    :param CreateApiV2Request request
    :return: CreateApiV2Response
    """
    # Collect only the attributes actually set on the request object.
    local_var_params = {
        attr: getattr(request, attr)
        for attr in request.attribute_map
        if hasattr(request, attr)
    }

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    query_params = []
    header_params = {}
    form_params = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    # Streaming requests carry their payload as a file stream.
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json;charset=UTF-8'])
    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/apis',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='CreateApiV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def create_or_delete_publish_record_for_api_v2(self, request):
    """Publish an API or take it offline.

    Publishing deploys an API to a specified environment; an API can
    only be called after being published, and only in that environment.
    Taking it offline removes it from a published environment, after
    which it can no longer be called there.

    :param CreateOrDeletePublishRecordForApiV2Request request
    :return: CreateOrDeletePublishRecordForApiV2Response
    """
    return self.create_or_delete_publish_record_for_api_v2_with_http_info(request)
def create_or_delete_publish_record_for_api_v2_with_http_info(self, request):
    """Publish an API or take it offline.

    Publishing deploys an API to a specified environment; an API can be
    called only after it has been published, and only in that environment.
    Taking an API offline removes it from a published environment, after
    which it can no longer be called.

    :param CreateOrDeletePublishRecordForApiV2Request request
    :return: CreateOrDeletePublishRecordForApiV2Response
    """
    # Collect only the attributes that are actually set on the request.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    query_params = []

    header_params = {}
    form_params = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    # A streaming request supplies its payload as a file stream instead.
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json;charset=UTF-8'])

    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/apis/action',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='CreateOrDeletePublishRecordForApiV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def delete_api_group_v2(self, request):
    """Delete an API group.

    Deleting a group also deletes every resource directly or indirectly
    associated with it — APIs, custom domain names, SSL certificates,
    listing information, all API authorizations under the group,
    orchestrations, whitelist configuration, enhanced authentication,
    etc. — and unbinds external domain names from the subdomain
    (depending on the domain's CNAME setup).

    Thin wrapper that delegates to the ``*_with_http_info`` variant.

    :param DeleteApiGroupV2Request request
    :return: DeleteApiGroupV2Response
    """
    return self.delete_api_group_v2_with_http_info(request)
def delete_api_group_v2_with_http_info(self, request):
    """Delete an API group.

    Deleting a group also deletes every resource directly or indirectly
    associated with it (APIs, custom domains, SSL certificates, listing
    information, authorizations, orchestrations, whitelists, enhanced
    authentication, etc.) and unbinds external domain names from the
    subdomain (depending on the domain's CNAME setup).

    :param DeleteApiGroupV2Request request
    :return: DeleteApiGroupV2Response
    """
    # Collect only the attributes that are actually set on the request.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']
    if 'group_id' in local_var_params:
        path_params['group_id'] = local_var_params['group_id']

    query_params = []

    header_params = {}
    form_params = {}

    body_params = None
    # A streaming request supplies its payload as a file stream.
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/api-groups/{group_id}',
        method='DELETE',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='DeleteApiGroupV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def delete_api_v2(self, request):
    """Delete an API.

    Deletes the specified API together with all of its related resources
    and bindings, such as publication records, bound backend services,
    and app authorizations.

    Thin wrapper that delegates to the ``*_with_http_info`` variant.

    :param DeleteApiV2Request request
    :return: DeleteApiV2Response
    """
    return self.delete_api_v2_with_http_info(request)
def delete_api_v2_with_http_info(self, request):
    """Delete an API.

    Deletes the specified API together with all of its related resources
    and bindings, such as publication records, bound backend services,
    and app authorizations.

    :param DeleteApiV2Request request
    :return: DeleteApiV2Response
    """
    # Collect only the attributes that are actually set on the request.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']
    if 'api_id' in local_var_params:
        path_params['api_id'] = local_var_params['api_id']

    query_params = []

    header_params = {}
    form_params = {}

    body_params = None
    # A streaming request supplies its payload as a file stream.
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/apis/{api_id}',
        method='DELETE',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='DeleteApiV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def disassociate_request_throttling_policy_v2(self, request):
    """Unbind a request throttling policy from an API.

    Thin wrapper that delegates to the ``*_with_http_info`` variant.

    :param DisassociateRequestThrottlingPolicyV2Request request
    :return: DisassociateRequestThrottlingPolicyV2Response
    """
    return self.disassociate_request_throttling_policy_v2_with_http_info(request)
def disassociate_request_throttling_policy_v2_with_http_info(self, request):
    """Unbind a request throttling policy from an API.

    :param DisassociateRequestThrottlingPolicyV2Request request
    :return: DisassociateRequestThrottlingPolicyV2Response
    """
    # Collect only the attributes that are actually set on the request.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']
    if 'throttle_binding_id' in local_var_params:
        path_params['throttle_binding_id'] = local_var_params['throttle_binding_id']

    query_params = []

    header_params = {}
    form_params = {}

    body_params = None
    # A streaming request supplies its payload as a file stream.
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/throttle-bindings/{throttle_binding_id}',
        method='DELETE',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='DisassociateRequestThrottlingPolicyV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_api_groups_v2(self, request):
    """Query the API group list.

    A tenant sees all groups under that tenant; an administrator sees
    the groups of all tenants.

    Thin wrapper that delegates to the ``*_with_http_info`` variant.

    :param ListApiGroupsV2Request request
    :return: ListApiGroupsV2Response
    """
    return self.list_api_groups_v2_with_http_info(request)
def list_api_groups_v2_with_http_info(self, request):
    """Query the API group list.

    A tenant sees all groups under that tenant; an administrator sees
    the groups of all tenants.

    :param ListApiGroupsV2Request request
    :return: ListApiGroupsV2Response
    """
    # Collect only the attributes that are actually set on the request.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    query_params = []
    # The tuple preserves the original query-string parameter order.
    for key in ('id', 'name', 'offset', 'limit', 'precise_search'):
        if key in local_var_params:
            query_params.append((key, local_var_params[key]))

    header_params = {}
    form_params = {}

    body_params = None
    # A streaming request supplies its payload as a file stream.
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/api-groups',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ListApiGroupsV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_apis_binded_to_request_throttling_policy_v2(self, request):
    """Query the APIs bound to a request throttling policy.

    Thin wrapper that delegates to the ``*_with_http_info`` variant.

    :param ListApisBindedToRequestThrottlingPolicyV2Request request
    :return: ListApisBindedToRequestThrottlingPolicyV2Response
    """
    return self.list_apis_binded_to_request_throttling_policy_v2_with_http_info(request)
def list_apis_binded_to_request_throttling_policy_v2_with_http_info(self, request):
    """Query the APIs bound to a request throttling policy.

    :param ListApisBindedToRequestThrottlingPolicyV2Request request
    :return: ListApisBindedToRequestThrottlingPolicyV2Response
    """
    # Collect only the attributes that are actually set on the request.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    query_params = []
    # The tuple preserves the original query-string parameter order.
    for key in ('throttle_id', 'env_id', 'group_id', 'api_id',
                'api_name', 'offset', 'limit'):
        if key in local_var_params:
            query_params.append((key, local_var_params[key]))

    header_params = {}
    form_params = {}

    body_params = None
    # A streaming request supplies its payload as a file stream.
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/throttle-bindings/binded-apis',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ListApisBindedToRequestThrottlingPolicyV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_apis_unbinded_to_request_throttling_policy_v2(self, request):
    """Query the self-owned APIs not bound to a request throttling policy.

    Only published APIs are returned; unpublished APIs are not listed.

    Thin wrapper that delegates to the ``*_with_http_info`` variant.

    :param ListApisUnbindedToRequestThrottlingPolicyV2Request request
    :return: ListApisUnbindedToRequestThrottlingPolicyV2Response
    """
    return self.list_apis_unbinded_to_request_throttling_policy_v2_with_http_info(request)
def list_apis_unbinded_to_request_throttling_policy_v2_with_http_info(self, request):
    """Query the self-owned APIs not bound to a request throttling policy.

    Only published APIs are returned; unpublished APIs are not listed.

    :param ListApisUnbindedToRequestThrottlingPolicyV2Request request
    :return: ListApisUnbindedToRequestThrottlingPolicyV2Response
    """
    # Collect only the attributes that are actually set on the request.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    query_params = []
    # The tuple preserves the original query-string parameter order.
    for key in ('throttle_id', 'env_id', 'group_id', 'api_id',
                'api_name', 'offset', 'limit'):
        if key in local_var_params:
            query_params.append((key, local_var_params[key]))

    header_params = {}
    form_params = {}

    body_params = None
    # A streaming request supplies its payload as a file stream.
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/throttle-bindings/unbinded-apis',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ListApisUnbindedToRequestThrottlingPolicyV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_apis_v2(self, request):
    """Query the API list.

    Returns API details and publication information, but not backend
    service information.

    Thin wrapper that delegates to the ``*_with_http_info`` variant.

    :param ListApisV2Request request
    :return: ListApisV2Response
    """
    return self.list_apis_v2_with_http_info(request)
def list_apis_v2_with_http_info(self, request):
    """Query the API list.

    Returns API details and publication information, but not backend
    service information.

    :param ListApisV2Request request
    :return: ListApisV2Response
    """
    # Collect only the attributes that are actually set on the request.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    query_params = []
    # The tuple preserves the original query-string parameter order.
    for key in ('id', 'name', 'group_id', 'req_protocol', 'req_method',
                'req_uri', 'auth_type', 'env_id', 'type', 'offset',
                'limit', 'precise_search'):
        if key in local_var_params:
            query_params.append((key, local_var_params[key]))

    header_params = {}
    form_params = {}

    body_params = None
    # A streaming request supplies its payload as a file stream.
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/apis',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ListApisV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_request_throttling_policies_binded_to_api_v2(self, request):
    """Query the request throttling policies bound to an API.

    Each environment should have at most one throttling policy bound.

    Thin wrapper that delegates to the ``*_with_http_info`` variant.

    :param ListRequestThrottlingPoliciesBindedToApiV2Request request
    :return: ListRequestThrottlingPoliciesBindedToApiV2Response
    """
    return self.list_request_throttling_policies_binded_to_api_v2_with_http_info(request)
def list_request_throttling_policies_binded_to_api_v2_with_http_info(self, request):
    """Query the request throttling policies bound to an API.

    Each environment should have at most one throttling policy bound.

    :param ListRequestThrottlingPoliciesBindedToApiV2Request request
    :return: ListRequestThrottlingPoliciesBindedToApiV2Response
    """
    # Collect only the attributes that are actually set on the request.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    query_params = []
    # The tuple preserves the original query-string parameter order.
    for key in ('api_id', 'throttle_id', 'throttle_name', 'env_id',
                'offset', 'limit'):
        if key in local_var_params:
            query_params.append((key, local_var_params[key]))

    header_params = {}
    form_params = {}

    body_params = None
    # A streaming request supplies its payload as a file stream.
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/throttle-bindings/binded-throttles',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ListRequestThrottlingPoliciesBindedToApiV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def show_details_of_api_group_v2(self, request):
    """Query details of the specified API group.

    Thin wrapper that delegates to the ``*_with_http_info`` variant.

    :param ShowDetailsOfApiGroupV2Request request
    :return: ShowDetailsOfApiGroupV2Response
    """
    return self.show_details_of_api_group_v2_with_http_info(request)
def show_details_of_api_group_v2_with_http_info(self, request):
    """Query details of the specified API group.

    :param ShowDetailsOfApiGroupV2Request request
    :return: ShowDetailsOfApiGroupV2Response
    """
    # Collect only the attributes that are actually set on the request.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']
    if 'group_id' in local_var_params:
        path_params['group_id'] = local_var_params['group_id']

    query_params = []

    header_params = {}
    form_params = {}

    body_params = None
    # A streaming request supplies its payload as a file stream.
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/api-groups/{group_id}',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ShowDetailsOfApiGroupV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def show_details_of_api_v2(self, request):
    """Query details of the specified API.

    Thin wrapper that delegates to the ``*_with_http_info`` variant.

    :param ShowDetailsOfApiV2Request request
    :return: ShowDetailsOfApiV2Response
    """
    return self.show_details_of_api_v2_with_http_info(request)
def show_details_of_api_v2_with_http_info(self, request):
    """Query details of the specified API.

    :param ShowDetailsOfApiV2Request request
    :return: ShowDetailsOfApiV2Response
    """
    # Collect only the attributes that are actually set on the request.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']
    if 'api_id' in local_var_params:
        path_params['api_id'] = local_var_params['api_id']

    query_params = []

    header_params = {}
    form_params = {}

    body_params = None
    # A streaming request supplies its payload as a file stream.
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/apis/{api_id}',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ShowDetailsOfApiV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def update_api_group_v2(self, request):
    """Modify an API group.

    Only the name and remark attributes can be modified; all other
    attributes are immutable.

    Thin wrapper that delegates to the ``*_with_http_info`` variant.

    :param UpdateApiGroupV2Request request
    :return: UpdateApiGroupV2Response
    """
    return self.update_api_group_v2_with_http_info(request)
def update_api_group_v2_with_http_info(self, request):
    """Modify an API group.

    Only the name and remark attributes can be modified; all other
    attributes are immutable.

    :param UpdateApiGroupV2Request request
    :return: UpdateApiGroupV2Response
    """
    # Collect only the attributes that are actually set on the request.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']
    if 'group_id' in local_var_params:
        path_params['group_id'] = local_var_params['group_id']

    query_params = []

    header_params = {}
    form_params = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    # A streaming request supplies its payload as a file stream instead.
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json;charset=UTF-8'])

    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/api-groups/{group_id}',
        method='PUT',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='UpdateApiGroupV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def update_api_v2(self, request):
    """Modify an API.

    Modifies the specified API's information, including its backend
    service information.

    Thin wrapper that delegates to the ``*_with_http_info`` variant.

    :param UpdateApiV2Request request
    :return: UpdateApiV2Response
    """
    return self.update_api_v2_with_http_info(request)
def update_api_v2_with_http_info(self, request):
    """Modify an API.

    Modifies the specified API's information, including its backend
    service information.

    :param UpdateApiV2Request request
    :return: UpdateApiV2Response
    """
    # Collect only the attributes that are actually set on the request.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']
    if 'api_id' in local_var_params:
        path_params['api_id'] = local_var_params['api_id']

    query_params = []

    header_params = {}
    form_params = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    # A streaming request supplies its payload as a file stream instead.
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json;charset=UTF-8'])

    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/apis/{api_id}',
        method='PUT',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='UpdateApiV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def canceling_authorization_v2(self, request):
    """Cancel an authorization.

    Cancels the authorization of an API to an app. Afterwards the app
    can no longer call that API.

    Thin wrapper that delegates to the ``*_with_http_info`` variant.

    :param CancelingAuthorizationV2Request request
    :return: CancelingAuthorizationV2Response
    """
    return self.canceling_authorization_v2_with_http_info(request)
def canceling_authorization_v2_with_http_info(self, request):
    """Cancel an authorization.

    Cancels the authorization of an API to an app. Afterwards the app
    can no longer call that API.

    :param CancelingAuthorizationV2Request request
    :return: CancelingAuthorizationV2Response
    """
    # Collect only the attributes that are actually set on the request.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']
    if 'app_auth_id' in local_var_params:
        path_params['app_auth_id'] = local_var_params['app_auth_id']

    query_params = []

    header_params = {}
    form_params = {}

    body_params = None
    # A streaming request supplies its payload as a file stream.
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/app-auths/{app_auth_id}',
        method='DELETE',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='CancelingAuthorizationV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def check_app_v2(self, request):
    """Verify an app.

    Checks whether an app exists; callers other than the app's owner may
    use this to confirm the app is real. Only the app's basic information
    (id, name, remark) is returned; other details are not shown.

    Thin wrapper that delegates to the ``*_with_http_info`` variant.

    :param CheckAppV2Request request
    :return: CheckAppV2Response
    """
    return self.check_app_v2_with_http_info(request)
def check_app_v2_with_http_info(self, request):
    """Verify an app.

    Checks whether an app exists; callers other than the app's owner may
    use this to confirm the app is real. Only the app's basic information
    (id, name, remark) is returned; other details are not shown.

    :param CheckAppV2Request request
    :return: CheckAppV2Response
    """
    # Collect only the attributes that are actually set on the request.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']
    if 'app_id' in local_var_params:
        path_params['app_id'] = local_var_params['app_id']

    query_params = []

    header_params = {}
    form_params = {}

    body_params = None
    # A streaming request supplies its payload as a file stream.
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/apps/validation/{app_id}',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='CheckAppV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def create_an_app_v2(self, request):
    """Create an app.

    An app is an identity that can access APIs; once an API is
    authorized to an app, the app can call it. This creates one app.

    Thin wrapper that delegates to the ``*_with_http_info`` variant.

    :param CreateAnAppV2Request request
    :return: CreateAnAppV2Response
    """
    return self.create_an_app_v2_with_http_info(request)
def create_an_app_v2_with_http_info(self, request):
    """Create an app.

    An app is an identity that can access APIs; once an API is
    authorized to an app, the app can call it. This creates one app.

    :param CreateAnAppV2Request request
    :return: CreateAnAppV2Response
    """
    # Collect only the attributes that are actually set on the request.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    query_params = []

    header_params = {}
    form_params = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    # A streaming request supplies its payload as a file stream instead.
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json;charset=UTF-8'])

    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/apps',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='CreateAnAppV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def create_authorizing_apps_v2(self, request):
    """Authorize an app.

    A newly created app cannot call APIs yet. To call an API in an
    environment, the API must first be authorized to the app in that
    environment; after authorization the app can call the API there.

    Thin wrapper that delegates to the ``*_with_http_info`` variant.

    :param CreateAuthorizingAppsV2Request request
    :return: CreateAuthorizingAppsV2Response
    """
    return self.create_authorizing_apps_v2_with_http_info(request)
def create_authorizing_apps_v2_with_http_info(self, request):
    """Authorize an app.

    A newly created app cannot call APIs yet. To call an API in an
    environment, the API must first be authorized to the app in that
    environment; after authorization the app can call the API there.

    :param CreateAuthorizingAppsV2Request request
    :return: CreateAuthorizingAppsV2Response
    """
    # Collect only the attributes that are actually set on the request.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    query_params = []

    header_params = {}
    form_params = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    # A streaming request supplies its payload as a file stream instead.
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json;charset=UTF-8'])

    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/app-auths',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='CreateAuthorizingAppsV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def delete_app_v2(self, request):
    """Delete an app.

    Deletes the specified app; afterwards it can no longer call any API.
    Apps created automatically by the cloud marketplace cannot be deleted.

    Thin wrapper that delegates to the ``*_with_http_info`` variant.

    :param DeleteAppV2Request request
    :return: DeleteAppV2Response
    """
    return self.delete_app_v2_with_http_info(request)
def delete_app_v2_with_http_info(self, request):
    """Delete an app.

    Deletes the specified app; afterwards it can no longer call any API.
    Apps created automatically by the cloud marketplace cannot be deleted.

    :param DeleteAppV2Request request
    :return: DeleteAppV2Response
    """
    # Collect only the attributes that are actually set on the request.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']
    if 'app_id' in local_var_params:
        path_params['app_id'] = local_var_params['app_id']

    query_params = []

    header_params = {}
    form_params = {}

    body_params = None
    # A streaming request supplies its payload as a file stream.
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/apps/{app_id}',
        method='DELETE',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='DeleteAppV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_apis_binded_to_app_v2(self, request):
    """List the APIs bound to an APP.

    Queries the list of APIs that the APP is already bound to.

    :param ListApisBindedToAppV2Request request
    :return: ListApisBindedToAppV2Response
    """
    http_info = self.list_apis_binded_to_app_v2_with_http_info(request)
    return http_info
def list_apis_binded_to_app_v2_with_http_info(self, request):
    """List the APIs bound to an APP.

    Queries the list of APIs that the APP is already bound to.

    :param ListApisBindedToAppV2Request request
    :return: ListApisBindedToAppV2Response
    """
    # Collect only the attributes that were actually set on the request.
    local_var_params = {
        attr: getattr(request, attr)
        for attr in request.attribute_map
        if hasattr(request, attr)
    }

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    # Preserve the generator's query-parameter order.
    query_params = []
    for name in ('app_id', 'api_id', 'api_name', 'group_id',
                 'group_name', 'env_id', 'offset', 'limit'):
        if name in local_var_params:
            query_params.append((name, local_var_params[name]))

    header_params = {}
    form_params = {}

    # A stream request sends its raw file stream as the request body.
    body_params = None
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/app-auths/binded-apis',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ListApisBindedToAppV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_apis_unbinded_to_app_v2(self, request):
    """List the APIs not bound to an APP.

    Queries the APIs in a given environment that the APP is not bound
    to, including self-owned APIs and APIs purchased from the cloud
    marketplace.

    :param ListApisUnbindedToAppV2Request request
    :return: ListApisUnbindedToAppV2Response
    """
    http_info = self.list_apis_unbinded_to_app_v2_with_http_info(request)
    return http_info
def list_apis_unbinded_to_app_v2_with_http_info(self, request):
    """List the APIs not bound to an APP.

    Queries the APIs in a given environment that the APP is not bound
    to, including self-owned APIs and APIs purchased from the cloud
    marketplace.

    :param ListApisUnbindedToAppV2Request request
    :return: ListApisUnbindedToAppV2Response
    """
    # Collect only the attributes that were actually set on the request.
    local_var_params = {
        attr: getattr(request, attr)
        for attr in request.attribute_map
        if hasattr(request, attr)
    }

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    # Preserve the generator's query-parameter order.
    query_params = []
    for name in ('app_id', 'env_id', 'group_id', 'api_id',
                 'api_name', 'offset', 'limit'):
        if name in local_var_params:
            query_params.append((name, local_var_params[name]))

    header_params = {}
    form_params = {}

    # A stream request sends its raw file stream as the request body.
    body_params = None
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/app-auths/unbinded-apis',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ListApisUnbindedToAppV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_apps_binded_to_api_v2(self, request):
    """List the APPs bound to an API.

    Queries the list of APPs that the API is bound to.

    :param ListAppsBindedToApiV2Request request
    :return: ListAppsBindedToApiV2Response
    """
    http_info = self.list_apps_binded_to_api_v2_with_http_info(request)
    return http_info
def list_apps_binded_to_api_v2_with_http_info(self, request):
    """List the APPs bound to an API.

    Queries the list of APPs that the API is bound to.

    :param ListAppsBindedToApiV2Request request
    :return: ListAppsBindedToApiV2Response
    """
    # Collect only the attributes that were actually set on the request.
    local_var_params = {
        attr: getattr(request, attr)
        for attr in request.attribute_map
        if hasattr(request, attr)
    }

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    # Preserve the generator's query-parameter order.
    query_params = []
    for name in ('api_id', 'app_name', 'app_id', 'env_id', 'offset', 'limit'):
        if name in local_var_params:
            query_params.append((name, local_var_params[name]))

    header_params = {}
    form_params = {}

    # A stream request sends its raw file stream as the request body.
    body_params = None
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/app-auths/binded-apps',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ListAppsBindedToApiV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_apps_v2(self, request):
    """List APPs.

    Queries the APP list.

    :param ListAppsV2Request request
    :return: ListAppsV2Response
    """
    http_info = self.list_apps_v2_with_http_info(request)
    return http_info
def list_apps_v2_with_http_info(self, request):
    """List APPs.

    Queries the APP list.

    :param ListAppsV2Request request
    :return: ListAppsV2Response
    """
    # Collect only the attributes that were actually set on the request.
    local_var_params = {
        attr: getattr(request, attr)
        for attr in request.attribute_map
        if hasattr(request, attr)
    }

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    # Preserve the generator's query-parameter order.
    query_params = []
    for name in ('id', 'name', 'status', 'app_key', 'creator',
                 'offset', 'limit', 'precise_search'):
        if name in local_var_params:
            query_params.append((name, local_var_params[name]))

    header_params = {}
    form_params = {}

    # A stream request sends its raw file stream as the request body.
    body_params = None
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/apps',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ListAppsV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def resetting_app_secret_v2(self, request):
    """Reset an APP secret.

    Resets the secret of the specified APP.

    :param ResettingAppSecretV2Request request
    :return: ResettingAppSecretV2Response
    """
    http_info = self.resetting_app_secret_v2_with_http_info(request)
    return http_info
def resetting_app_secret_v2_with_http_info(self, request):
    """Reset an APP secret.

    Resets the secret of the specified APP.

    :param ResettingAppSecretV2Request request
    :return: ResettingAppSecretV2Response
    """
    # Collect only the attributes that were actually set on the request.
    local_var_params = {
        attr: getattr(request, attr)
        for attr in request.attribute_map
        if hasattr(request, attr)
    }

    collection_formats = {}

    # Placeholders substituted into the resource path below.
    path_params = {}
    for name in ('instance_id', 'app_id'):
        if name in local_var_params:
            path_params[name] = local_var_params[name]

    query_params = []
    header_params = {}
    form_params = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    # A stream request overrides the body with its raw file stream.
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json;charset=UTF-8'])

    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/apps/secret/{app_id}',
        method='PUT',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ResettingAppSecretV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def show_details_of_app_v2(self, request):
    """Show APP details.

    Queries detailed information about the specified APP.

    :param ShowDetailsOfAppV2Request request
    :return: ShowDetailsOfAppV2Response
    """
    http_info = self.show_details_of_app_v2_with_http_info(request)
    return http_info
def show_details_of_app_v2_with_http_info(self, request):
    """Show APP details.

    Queries detailed information about the specified APP.

    :param ShowDetailsOfAppV2Request request
    :return: ShowDetailsOfAppV2Response
    """
    # Collect only the attributes that were actually set on the request.
    local_var_params = {
        attr: getattr(request, attr)
        for attr in request.attribute_map
        if hasattr(request, attr)
    }

    collection_formats = {}

    # Placeholders substituted into the resource path below.
    path_params = {}
    for name in ('instance_id', 'app_id'):
        if name in local_var_params:
            path_params[name] = local_var_params[name]

    query_params = []
    header_params = {}
    form_params = {}

    # A stream request sends its raw file stream as the request body.
    body_params = None
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/apps/{app_id}',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ShowDetailsOfAppV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def update_app_v2(self, request):
    """Update an APP.

    Modifies the specified APP. Only name and remark are modifiable;
    when user-defined key/secret support is enabled, app_key and
    app_secret can also be modified. Other attributes are read-only.

    :param UpdateAppV2Request request
    :return: UpdateAppV2Response
    """
    http_info = self.update_app_v2_with_http_info(request)
    return http_info
def update_app_v2_with_http_info(self, request):
    """Update an APP.

    Modifies the specified APP. Only name and remark are modifiable;
    when user-defined key/secret support is enabled, app_key and
    app_secret can also be modified. Other attributes are read-only.

    :param UpdateAppV2Request request
    :return: UpdateAppV2Response
    """
    # Collect only the attributes that were actually set on the request.
    local_var_params = {
        attr: getattr(request, attr)
        for attr in request.attribute_map
        if hasattr(request, attr)
    }

    collection_formats = {}

    # Placeholders substituted into the resource path below.
    path_params = {}
    for name in ('instance_id', 'app_id'):
        if name in local_var_params:
            path_params[name] = local_var_params[name]

    query_params = []
    header_params = {}
    form_params = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    # A stream request overrides the body with its raw file stream.
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json;charset=UTF-8'])

    auth_settings = []

    return self.call_api(
        resource_path='/v2/{project_id}/apigw/instances/{instance_id}/apps/{app_id}',
        method='PUT',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='UpdateAppV2Response',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def call_api(self, resource_path, method, path_params=None, query_params=None, header_params=None, body=None,
             post_params=None, response_type=None, response_headers=None, auth_settings=None,
             collection_formats=None, request_type=None):
    """Make the HTTP request and return the deserialized data.

    :param resource_path: Path to method endpoint.
    :param method: Method to call.
    :param path_params: Path parameters in the url.
    :param query_params: Query parameters in the url.
    :param header_params: Header parameters to be placed in the request header.
    :param body: Request body.
    :param post_params dict: Request post form parameters,
        for `application/x-www-form-urlencoded`, `multipart/form-data`.
    :param auth_settings list: Auth Settings names for the request.
    :param response_type: Response data type.
    :param response_headers: Header should be added to response data.
    :param collection_formats: dict of collection formats for path, query,
        header, and post parameters.
    :param request_type: Request data type.
    :return: The response, returned directly.
    """
    request_kwargs = dict(
        method=method,
        resource_path=resource_path,
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body,
        post_params=post_params,
        response_type=response_type,
        response_headers=response_headers,
        collection_formats=collection_formats,
        request_type=request_type,
    )
    # NOTE(review): auth_settings is accepted but not forwarded —
    # presumably do_http_request resolves authentication itself; confirm.
    return self.do_http_request(**request_kwargs)
| 34.255227
| 487
| 0.649158
| 15,271
| 150,723
| 5.989785
| 0.04466
| 0.052301
| 0.091527
| 0.041281
| 0.941456
| 0.934503
| 0.91969
| 0.904712
| 0.881469
| 0.79848
| 0
| 0.006368
| 0.262342
| 150,723
| 4,399
| 488
| 34.263014
| 0.816337
| 0.142991
| 0
| 0.8495
| 0
| 0.002309
| 0.138685
| 0.063365
| 0
| 0
| 0
| 0
| 0
| 1
| 0.049654
| false
| 0
| 0.003849
| 0
| 0.104311
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c48b7102cd83adb7faf937a0eccded10d95f90c1
| 29
|
py
|
Python
|
tests/resources/missing_types/missing_types.py
|
lleites/topyn
|
69e2bd100e71bb0323adadb857aea724647f456e
|
[
"MIT"
] | 10
|
2019-11-21T22:25:34.000Z
|
2022-01-13T13:44:54.000Z
|
tests/resources/missing_types/missing_types.py
|
lleites/topyn
|
69e2bd100e71bb0323adadb857aea724647f456e
|
[
"MIT"
] | null | null | null |
tests/resources/missing_types/missing_types.py
|
lleites/topyn
|
69e2bd100e71bb0323adadb857aea724647f456e
|
[
"MIT"
] | null | null | null |
def function():
    """Return the constant 0 (minimal fixture body)."""
    result = 0
    return result
| 9.666667
| 15
| 0.62069
| 4
| 29
| 4.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.047619
| 0.275862
| 29
| 2
| 16
| 14.5
| 0.809524
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
673cade9e206d3d5975d305b2232b0faeb1d3dcf
| 5,404
|
py
|
Python
|
bookings/email_templates.py
|
tintinnabulate/cautious-potato
|
85c74beb55ac01f51bf5f7b7f3bbd0b028e072a6
|
[
"MIT"
] | null | null | null |
bookings/email_templates.py
|
tintinnabulate/cautious-potato
|
85c74beb55ac01f51bf5f7b7f3bbd0b028e072a6
|
[
"MIT"
] | null | null | null |
bookings/email_templates.py
|
tintinnabulate/cautious-potato
|
85c74beb55ac01f51bf5f7b7f3bbd0b028e072a6
|
[
"MIT"
] | null | null | null |
def get_pseudo_pdf_attachment_body():
    """Return the HTML fragment that stands in for the booking
    terms-and-conditions PDF attachment."""
    terms_fragment = u"""\
<hr>
<h4>Booking Terms & Conditions</h4>
<small>
<p>Our booking terms and conditions are listed below. Please read these
carefully as they form the basis of the contract between us.</p>
<p><b>I confirm that I have read and accept the booking conditions.</b></p>
<p>Signed............................................................</p>
<p>Date...............................................................</p>
"""
    return terms_fragment
def get_booking_confirmation_email_body(data_dict):
    """Return the HTML body of the booking-confirmation email that also
    requests full payment.

    The template uses printf-style placeholders, so ``data_dict`` must
    provide: first_name, property, number_of_nights (int), arrival_date,
    departure_date, booking_reference and gross (int); a missing key
    raises KeyError from the ``%`` formatting.
    """
    # cid:image1 / cid:image2 reference inline MIME images — presumably
    # attached by the sending code; verify against the caller.
    return u"""\
<p><a href=""><img src="cid:image1"></a></p>
<p>Dear %(first_name)s,<br><br>
Thank you for choosing us and the lovely %(property)s apartment.</p>
<p>We are writing to confirm that we have received your booking for %(number_of_nights)d night(s) from %(arrival_date)s to %(departure_date)s.</p>
<p>Your booking reference is %(booking_reference)s (please use this with all payments made to us).</p>
<p>Your arrival time can be any time after 3pm but not later than 10pm on %(arrival_date)s. Please let us know your arrival time so we can meet you with the keys. Your checkout time will be anytime before 11am on %(departure_date)s. Please leave the keys in the key bowl in the hall.</p>
<p>If you have not already done so please can you provide the following information:<br>
<ol>
<li>Contact telephone number</li>
<li>Expected time of arrival</li>
</ol></p>
<p>We now require full payment of GBP%(gross)d.</p>
<p>Please use one of the payment methods below always including your booking reference %(booking_reference)s. As soon as we confirm the payment we will arrange details for your arrival.</p>
<p>Payment Details:<br>
<pre>
UK Bank Transfer (please put you booking reference in the transfer reference)
</pre>
</p>
<p>Attached is a copy of our terms and conditions. Please make sure you have read these as they form the basis for the contract between us.</p>
<p>Please do not hesitate to use the contact information below for any further correspondence or questions you may have.</p>
<p>We very much look forward to your arrival here and hope you enjoy your stay with us.</p>
<p>Kind regards,<br><br>
Foo</p>
<p><a href=""><img src="cid:image2"></a></p>
<p>Foo 01010 101010</p><br>
<p>Address1<br>
Address2<br>
Address3</p>
<p><font color="green">Please consider the environment before printing this email</font></p>
<p><small>This email and its attachments are intended for the addressee only and may be confidential or the subject of legal privilege. This email cannot be distributed by the receiver to anyone else other than the addressee without the permission of the sender. If this email and its attachments have come to you by mistake please delete from your hard drive, and please contact us.</small></p>
""" % data_dict
def get_booking_confirmation_email_no_payment_body(data_dict):
    """Return the HTML body of the booking-confirmation email without
    any payment request.

    The template uses printf-style placeholders, so ``data_dict`` must
    provide: first_name, property, number_of_nights (int), arrival_date,
    departure_date and booking_reference; a missing key raises KeyError
    from the ``%`` formatting.
    """
    # cid:image1 / cid:image2 reference inline MIME images — presumably
    # attached by the sending code; verify against the caller.
    return u"""\
<p><a href=""><img src="cid:image1"></a></p>
<p>Dear %(first_name)s,<br><br>
Thank you for choosing us and the lovely %(property)s apartment.</p>
<p>We are writing to confirm that we have received your booking for %(number_of_nights)d night(s) from %(arrival_date)s to %(departure_date)s.</p>
<p>Your booking reference is %(booking_reference)s.</p>
<p>Your arrival time can be any time after 3pm but not later than 10pm on %(arrival_date)s. Please let us know your arrival time so we can meet you with the keys. Your checkout time will be anytime before 11am on %(departure_date)s. Please leave the keys in the key bowl in the hall.</p>
<p>If you have not already done so please can you provide the following information:<br>
<ol>
<li>Contact telephone number</li>
<li>Expected time of arrival</li>
</ol></p>
<p>Attached is a copy of our terms and conditions. Please make sure you have read these as they form the basis for the contract between us.</p>
<p>Please do not hesitate to use the contact information below for any further correspondence or questions you may have.</p>
<p>We very much look forward to your arrival here and hope you enjoy your stay with us.</p>
<p>Kind regards,<br><br>
Foo</p>
<p><a href=""><img src="cid:image2"></a></p>
<p><font color="green">Please consider the environment before printing this email</font></p>
<p><small>This email and its attachments are intended for the addressee only and may be confidential or the subject of legal privilege. This email cannot be distributed by the receiver to anyone else other than the addressee without the permission of the sender. If this email and its attachments have come to you by mistake please delete from your hard drive, and please contact us.</small></p>
""" % data_dict
| 55.142857
| 409
| 0.623612
| 810
| 5,404
| 4.117284
| 0.245679
| 0.018591
| 0.007196
| 0.010795
| 0.826987
| 0.803598
| 0.797002
| 0.797002
| 0.797002
| 0.797002
| 0
| 0.007559
| 0.265544
| 5,404
| 97
| 410
| 55.71134
| 0.832703
| 0
| 0
| 0.621212
| 0
| 0.257576
| 0.954108
| 0.079756
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0
| 0.045455
| 0.090909
| 0.030303
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
676aae76d4eec519e449aee0ffcca8cbc1349165
| 2,793
|
py
|
Python
|
.github/tests/test_mr_generate_summary.py
|
Next-Trends/rasa
|
c06dc26b3a57dd1114b60aebcc9ccd3bbb8308d7
|
[
"Apache-2.0"
] | 1
|
2022-01-29T09:09:54.000Z
|
2022-01-29T09:09:54.000Z
|
.github/tests/test_mr_generate_summary.py
|
Next-Trends/rasa
|
c06dc26b3a57dd1114b60aebcc9ccd3bbb8308d7
|
[
"Apache-2.0"
] | 30
|
2022-02-07T01:20:20.000Z
|
2022-03-28T12:20:50.000Z
|
.github/tests/test_mr_generate_summary.py
|
Next-Trends/rasa
|
c06dc26b3a57dd1114b60aebcc9ccd3bbb8308d7
|
[
"Apache-2.0"
] | 1
|
2022-03-12T09:24:53.000Z
|
2022-03-12T09:24:53.000Z
|
import sys
sys.path.append(".github/scripts")
from mr_generate_summary import combine_result # noqa: E402
# Baseline fixture: one dataset ("financial-demo") with a single config
# entry; every test below combines it with a second result.
RESULT1 = {
    "financial-demo": {
        "BERT + DIET(bow) + ResponseSelector(bow)": {
            "Entity Prediction": {"macro avg": {"f1-score": 0.7333333333333333,}},
            "test_run_time": "47s",
        }
    }
}
def test_same_ds_different_config():
    """Two results for the same dataset but different configs merge into
    one dataset entry holding both configs, regardless of argument order."""
    sparse_result = {
        "financial-demo": {
            "Sparse + DIET(bow) + ResponseSelector(bow)": {
                "Entity Prediction": {"macro avg": {"f1-score": 0.88}},
                "test_run_time": "47s",
            }
        }
    }
    expected_combined = {
        "financial-demo": {
            "BERT + DIET(bow) + ResponseSelector(bow)": {
                "Entity Prediction": {"macro avg": {"f1-score": 0.7333333333333333}},
                "test_run_time": "47s",
            },
            "Sparse + DIET(bow) + ResponseSelector(bow)": {
                "Entity Prediction": {"macro avg": {"f1-score": 0.88}},
                "test_run_time": "47s",
            },
        }
    }
    # The merge must be symmetric in its arguments.
    for left, right in ((RESULT1, sparse_result), (sparse_result, RESULT1)):
        assert combine_result(left, right) == expected_combined
def test_different_ds_same_config():
    """Results for two different datasets merge side by side, keeping one
    top-level entry per dataset, regardless of argument order."""
    carbon_result = {
        "Carbon Bot": {
            "Sparse + DIET(bow) + ResponseSelector(bow)": {
                "Entity Prediction": {"macro avg": {"f1-score": 0.88}},
                "test_run_time": "47s",
            }
        }
    }
    expected_combined = {
        "financial-demo": {
            "BERT + DIET(bow) + ResponseSelector(bow)": {
                "Entity Prediction": {"macro avg": {"f1-score": 0.7333333333333333}},
                "test_run_time": "47s",
            },
        },
        "Carbon Bot": {
            "Sparse + DIET(bow) + ResponseSelector(bow)": {
                "Entity Prediction": {"macro avg": {"f1-score": 0.88}},
                "test_run_time": "47s",
            }
        },
    }
    # The merge must be symmetric in its arguments.
    for left, right in ((RESULT1, carbon_result), (carbon_result, RESULT1)):
        assert combine_result(left, right) == expected_combined
def test_start_empty():
    """Combining with an empty result leaves the non-empty side unchanged,
    regardless of argument order."""
    empty_result = {}
    expected_combined = {
        "financial-demo": {
            "BERT + DIET(bow) + ResponseSelector(bow)": {
                "Entity Prediction": {"macro avg": {"f1-score": 0.7333333333333333}},
                "test_run_time": "47s",
            },
        }
    }
    for left, right in ((RESULT1, empty_result), (empty_result, RESULT1)):
        assert combine_result(left, right) == expected_combined
| 30.032258
| 86
| 0.544576
| 258
| 2,793
| 5.678295
| 0.189922
| 0.114676
| 0.125597
| 0.14198
| 0.875085
| 0.875085
| 0.875085
| 0.875085
| 0.875085
| 0.875085
| 0
| 0.06423
| 0.314357
| 2,793
| 92
| 87
| 30.358696
| 0.700783
| 0.00358
| 0
| 0.623377
| 1
| 0
| 0.299533
| 0.06041
| 0
| 0
| 0
| 0
| 0.077922
| 1
| 0.038961
| false
| 0
| 0.025974
| 0
| 0.064935
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
67ac13b5c09d15285bb3512725f9a8735c54b3d5
| 97,210
|
py
|
Python
|
iriusrisk-python-client-lib/iriusrisk_python_client_lib/api/libraries_api.py
|
iriusrisk/iriusrisk-python-client-lib
|
4912706cd1e5c0bc555dbc7da02fb64cbeab3b18
|
[
"Apache-2.0"
] | null | null | null |
iriusrisk-python-client-lib/iriusrisk_python_client_lib/api/libraries_api.py
|
iriusrisk/iriusrisk-python-client-lib
|
4912706cd1e5c0bc555dbc7da02fb64cbeab3b18
|
[
"Apache-2.0"
] | null | null | null |
iriusrisk-python-client-lib/iriusrisk_python_client_lib/api/libraries_api.py
|
iriusrisk/iriusrisk-python-client-lib
|
4912706cd1e5c0bc555dbc7da02fb64cbeab3b18
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
IriusRisk API
Products API # noqa: E501
OpenAPI spec version: 1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from iriusrisk_python_client_lib.api_client import ApiClient
class LibrariesApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
    """Store the client used for all HTTP calls, constructing a default
    ApiClient when none is supplied."""
    self.api_client = ApiClient() if api_client is None else api_client
def libraries_library_ref_delete(self, api_token, library_ref, **kwargs):  # noqa: E501
    """Deletes a Library  # noqa: E501

    Deletes a library. Conditions to be able to perform the action: - To have the permission **LIBRARY_UPDATE** granted. # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.libraries_library_ref_delete(api_token, library_ref, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str api_token: Authentication token (required)
    :param str library_ref: Reference for library (required)
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both return whatever the helper returns:
    # the deserialized data, or the request thread when async_req=True.
    return self.libraries_library_ref_delete_with_http_info(
        api_token, library_ref, **kwargs)  # noqa: E501
def libraries_library_ref_delete_with_http_info(self, api_token, library_ref, **kwargs):  # noqa: E501
    """Deletes a Library  # noqa: E501

    Deletes a library. Conditions to be able to perform the action: - To have the permission **LIBRARY_UPDATE** granted. # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.libraries_library_ref_delete_with_http_info(api_token, library_ref, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str api_token: Authentication token (required)
    :param str library_ref: Reference for library (required)
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    # Names accepted either positionally or through **kwargs.
    all_params = [
        'api_token', 'library_ref',
        'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout',
    ]  # noqa: E501

    params = {'api_token': api_token, 'library_ref': library_ref}
    for key, val in six.iteritems(kwargs):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method libraries_library_ref_delete" % key
            )
        params[key] = val

    # Both required parameters must be present and non-None.
    for required in ('api_token', 'library_ref'):
        if params.get(required) is None:
            raise ValueError(
                "Missing the required parameter `%s` when calling "
                "`libraries_library_ref_delete`" % required
            )  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'library_ref' in params:
        path_params['libraryRef'] = params['library_ref']  # noqa: E501

    query_params = []

    header_params = {}
    if 'api_token' in params:
        header_params['api-token'] = params['api_token']  # noqa: E501

    form_params = []
    local_var_files = {}
    body_params = None

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/libraries/{libraryRef}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def libraries_library_ref_get(self, api_token, library_ref, **kwargs):  # noqa: E501
    """Gets library details  # noqa: E501

    Gets the library details. Conditions to be able to perform the action: - To have the permission **LIBRARY_UPDATE** granted. # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.libraries_library_ref_get(api_token, library_ref, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str api_token: Authentication token (required)
    :param str library_ref: Reference for library (required)
    :return: Library
        If the method is called asynchronously,
        returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both return whatever the helper returns:
    # the deserialized Library, or the request thread when async_req=True.
    return self.libraries_library_ref_get_with_http_info(
        api_token, library_ref, **kwargs)  # noqa: E501
def libraries_library_ref_get_with_http_info(self, api_token, library_ref, **kwargs):  # noqa: E501
    """Gets library details  # noqa: E501

    Gets the library details. Conditions to be able to perform the action:
    - To have the permission **LIBRARY_UPDATE** granted.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.libraries_library_ref_get_with_http_info(api_token, library_ref, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str api_token: Authentication token (required)
    :param str library_ref: Reference for library (required)
    :return: Library
             If the method is called asynchronously,
             returns the request thread.
    """
    # Names accepted by this endpoint; any other keyword argument is rejected.
    all_params = ['api_token', 'library_ref']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Snapshot the named arguments ('self', 'api_token', 'library_ref',
    # 'kwargs', 'all_params') into a plain dict, then fold in the validated
    # **kwargs entries. The dict is used only as a mapping from here on.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method libraries_library_ref_get" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'api_token' is set
    if ('api_token' not in params or
            params['api_token'] is None):
        raise ValueError("Missing the required parameter `api_token` when calling `libraries_library_ref_get`")  # noqa: E501
    # verify the required parameter 'library_ref' is set
    if ('library_ref' not in params or
            params['library_ref'] is None):
        raise ValueError("Missing the required parameter `library_ref` when calling `libraries_library_ref_get`")  # noqa: E501

    collection_formats = {}

    # Substituted into the '{libraryRef}' segment of the resource path.
    path_params = {}
    if 'library_ref' in params:
        path_params['libraryRef'] = params['library_ref']  # noqa: E501

    query_params = []

    # Authentication is carried in the 'api-token' request header.
    header_params = {}
    if 'api_token' in params:
        header_params['api-token'] = params['api_token']  # noqa: E501

    form_params = []
    local_var_files = {}

    # GET request: no request body is sent.
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/libraries/{libraryRef}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='Library',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def libraries_library_ref_riskpatterns_post(self, api_token, library_ref, create_risk_pattern_request_body, **kwargs):  # noqa: E501
    """Creates new Risk Pattern.  # noqa: E501

    Creates new Risk Pattern. Conditions to be able to perform the action:
    - To have the permission **LIBRARY_UPDATE** granted.  # noqa: E501

    This method is synchronous by default; pass async_req=True to get a
    request thread back instead of the decoded response.

    >>> thread = api.libraries_library_ref_riskpatterns_post(api_token, library_ref, create_risk_pattern_request_body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str api_token: Authentication token (required)
    :param str library_ref: Reference for library (required)
    :param CreateRiskPatternRequestBody create_risk_pattern_request_body: JSON data that contains information of the fields (required)
    :return: RiskPattern
             If the method is called asynchronously,
             returns the request thread.
    """
    # Convenience wrapper: always request the deserialized payload only.
    kwargs['_return_http_data_only'] = True
    # Sync and async paths are the same delegate call; with async_req=True
    # the *_with_http_info method already returns the request thread.
    return self.libraries_library_ref_riskpatterns_post_with_http_info(
        api_token, library_ref, create_risk_pattern_request_body, **kwargs)  # noqa: E501
def libraries_library_ref_riskpatterns_post_with_http_info(self, api_token, library_ref, create_risk_pattern_request_body, **kwargs):  # noqa: E501
    """Creates new Risk Pattern  # noqa: E501

    Creates new Risk Pattern. Conditions to be able to perform the action:
    - To have the permission **LIBRARY_UPDATE** granted.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.libraries_library_ref_riskpatterns_post_with_http_info(api_token, library_ref, create_risk_pattern_request_body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str api_token: Authentication token (required)
    :param str library_ref: Reference for library (required)
    :param CreateRiskPatternRequestBody create_risk_pattern_request_body: JSON data that contains information of the fields (required)
    :return: RiskPattern
             If the method is called asynchronously,
             returns the request thread.
    """
    # Names accepted by this endpoint; any other keyword argument is rejected.
    all_params = ['api_token', 'library_ref', 'create_risk_pattern_request_body']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Snapshot the named arguments into a plain dict, then fold in the
    # validated **kwargs entries. The dict is used only as a mapping.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method libraries_library_ref_riskpatterns_post" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'api_token' is set
    if ('api_token' not in params or
            params['api_token'] is None):
        raise ValueError("Missing the required parameter `api_token` when calling `libraries_library_ref_riskpatterns_post`")  # noqa: E501
    # verify the required parameter 'library_ref' is set
    if ('library_ref' not in params or
            params['library_ref'] is None):
        raise ValueError("Missing the required parameter `library_ref` when calling `libraries_library_ref_riskpatterns_post`")  # noqa: E501
    # verify the required parameter 'create_risk_pattern_request_body' is set
    if ('create_risk_pattern_request_body' not in params or
            params['create_risk_pattern_request_body'] is None):
        raise ValueError("Missing the required parameter `create_risk_pattern_request_body` when calling `libraries_library_ref_riskpatterns_post`")  # noqa: E501

    collection_formats = {}

    # Substituted into the '{libraryRef}' segment of the resource path.
    path_params = {}
    if 'library_ref' in params:
        path_params['libraryRef'] = params['library_ref']  # noqa: E501

    query_params = []

    # Authentication is carried in the 'api-token' request header.
    header_params = {}
    if 'api_token' in params:
        header_params['api-token'] = params['api_token']  # noqa: E501

    form_params = []
    local_var_files = {}

    # The request body is the risk-pattern creation payload.
    body_params = None
    if 'create_risk_pattern_request_body' in params:
        body_params = params['create_risk_pattern_request_body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/libraries/{libraryRef}/riskpatterns', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='RiskPattern',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def libraries_library_ref_riskpatterns_risk_pattern_ref_countermeasures_post(self, api_token, library_ref, risk_pattern_ref, create_countermeasure_library_request_body, **kwargs):  # noqa: E501
    """Creates new countermeasure in a risk pattern.  # noqa: E501

    Creates new countermeasure in a risk pattern. Conditions to be able to
    perform the action: - To have the permission **LIBRARY_UPDATE**
    granted.  # noqa: E501

    This method is synchronous by default; pass async_req=True to get a
    request thread back instead of the decoded response.

    >>> thread = api.libraries_library_ref_riskpatterns_risk_pattern_ref_countermeasures_post(api_token, library_ref, risk_pattern_ref, create_countermeasure_library_request_body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str api_token: Authentication token (required)
    :param str library_ref: Reference for library (required)
    :param str risk_pattern_ref: Reference for Risk Pattern (required)
    :param ControlCommand create_countermeasure_library_request_body: JSON data that contains information of the fields (required)
    :return: LibraryControl
             If the method is called asynchronously,
             returns the request thread.
    """
    # Convenience wrapper: always request the deserialized payload only.
    kwargs['_return_http_data_only'] = True
    # Sync and async paths are the same delegate call; with async_req=True
    # the *_with_http_info method already returns the request thread.
    return self.libraries_library_ref_riskpatterns_risk_pattern_ref_countermeasures_post_with_http_info(
        api_token, library_ref, risk_pattern_ref,
        create_countermeasure_library_request_body, **kwargs)  # noqa: E501
def libraries_library_ref_riskpatterns_risk_pattern_ref_countermeasures_post_with_http_info(self, api_token, library_ref, risk_pattern_ref, create_countermeasure_library_request_body, **kwargs):  # noqa: E501
    """Creates new countermeasure in a risk pattern  # noqa: E501

    Creates new countermeasure in a risk pattern. Conditions to be able to
    perform the action: - To have the permission **LIBRARY_UPDATE**
    granted.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.libraries_library_ref_riskpatterns_risk_pattern_ref_countermeasures_post_with_http_info(api_token, library_ref, risk_pattern_ref, create_countermeasure_library_request_body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str api_token: Authentication token (required)
    :param str library_ref: Reference for library (required)
    :param str risk_pattern_ref: Reference for Risk Pattern (required)
    :param ControlCommand create_countermeasure_library_request_body: JSON data that contains information of the fields (required)
    :return: LibraryControl
             If the method is called asynchronously,
             returns the request thread.
    """
    # Names accepted by this endpoint; any other keyword argument is rejected.
    all_params = ['api_token', 'library_ref', 'risk_pattern_ref', 'create_countermeasure_library_request_body']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Snapshot the named arguments into a plain dict, then fold in the
    # validated **kwargs entries. The dict is used only as a mapping.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method libraries_library_ref_riskpatterns_risk_pattern_ref_countermeasures_post" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'api_token' is set
    if ('api_token' not in params or
            params['api_token'] is None):
        raise ValueError("Missing the required parameter `api_token` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_countermeasures_post`")  # noqa: E501
    # verify the required parameter 'library_ref' is set
    if ('library_ref' not in params or
            params['library_ref'] is None):
        raise ValueError("Missing the required parameter `library_ref` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_countermeasures_post`")  # noqa: E501
    # verify the required parameter 'risk_pattern_ref' is set
    if ('risk_pattern_ref' not in params or
            params['risk_pattern_ref'] is None):
        raise ValueError("Missing the required parameter `risk_pattern_ref` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_countermeasures_post`")  # noqa: E501
    # verify the required parameter 'create_countermeasure_library_request_body' is set
    if ('create_countermeasure_library_request_body' not in params or
            params['create_countermeasure_library_request_body'] is None):
        raise ValueError("Missing the required parameter `create_countermeasure_library_request_body` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_countermeasures_post`")  # noqa: E501

    collection_formats = {}

    # Substituted into the '{libraryRef}' and '{riskPatternRef}' path segments.
    path_params = {}
    if 'library_ref' in params:
        path_params['libraryRef'] = params['library_ref']  # noqa: E501
    if 'risk_pattern_ref' in params:
        path_params['riskPatternRef'] = params['risk_pattern_ref']  # noqa: E501

    query_params = []

    # Authentication is carried in the 'api-token' request header.
    header_params = {}
    if 'api_token' in params:
        header_params['api-token'] = params['api_token']  # noqa: E501

    form_params = []
    local_var_files = {}

    # The request body is the countermeasure creation payload.
    body_params = None
    if 'create_countermeasure_library_request_body' in params:
        body_params = params['create_countermeasure_library_request_body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/libraries/{libraryRef}/riskpatterns/{riskPatternRef}/countermeasures', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='LibraryControl',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def libraries_library_ref_riskpatterns_risk_pattern_ref_delete(self, api_token, library_ref, risk_pattern_ref, **kwargs):  # noqa: E501
    """Deletes a Risk Pattern.  # noqa: E501

    Deletes a Risk Pattern. Conditions to be able to perform the action:
    - To have the permission **LIBRARY_UPDATE** granted.  # noqa: E501

    This method is synchronous by default; pass async_req=True to get a
    request thread back instead of the decoded response.

    >>> thread = api.libraries_library_ref_riskpatterns_risk_pattern_ref_delete(api_token, library_ref, risk_pattern_ref, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str api_token: Authentication token (required)
    :param str library_ref: Reference for library (required)
    :param str risk_pattern_ref: Reference for Risk Pattern (required)
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Convenience wrapper: always request the deserialized payload only.
    kwargs['_return_http_data_only'] = True
    # Sync and async paths are the same delegate call; with async_req=True
    # the *_with_http_info method already returns the request thread.
    return self.libraries_library_ref_riskpatterns_risk_pattern_ref_delete_with_http_info(
        api_token, library_ref, risk_pattern_ref, **kwargs)  # noqa: E501
def libraries_library_ref_riskpatterns_risk_pattern_ref_delete_with_http_info(self, api_token, library_ref, risk_pattern_ref, **kwargs):  # noqa: E501
    """Deletes a Risk Pattern  # noqa: E501

    Deletes a Risk Pattern. Conditions to be able to perform the action:
    - To have the permission **LIBRARY_UPDATE** granted.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.libraries_library_ref_riskpatterns_risk_pattern_ref_delete_with_http_info(api_token, library_ref, risk_pattern_ref, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str api_token: Authentication token (required)
    :param str library_ref: Reference for library (required)
    :param str risk_pattern_ref: Reference for Risk Pattern (required)
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Names accepted by this endpoint; any other keyword argument is rejected.
    all_params = ['api_token', 'library_ref', 'risk_pattern_ref']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Snapshot the named arguments into a plain dict, then fold in the
    # validated **kwargs entries. The dict is used only as a mapping.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method libraries_library_ref_riskpatterns_risk_pattern_ref_delete" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'api_token' is set
    if ('api_token' not in params or
            params['api_token'] is None):
        raise ValueError("Missing the required parameter `api_token` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_delete`")  # noqa: E501
    # verify the required parameter 'library_ref' is set
    if ('library_ref' not in params or
            params['library_ref'] is None):
        raise ValueError("Missing the required parameter `library_ref` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_delete`")  # noqa: E501
    # verify the required parameter 'risk_pattern_ref' is set
    if ('risk_pattern_ref' not in params or
            params['risk_pattern_ref'] is None):
        raise ValueError("Missing the required parameter `risk_pattern_ref` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_delete`")  # noqa: E501

    collection_formats = {}

    # Substituted into the '{libraryRef}' and '{riskPatternRef}' path segments.
    path_params = {}
    if 'library_ref' in params:
        path_params['libraryRef'] = params['library_ref']  # noqa: E501
    if 'risk_pattern_ref' in params:
        path_params['riskPatternRef'] = params['risk_pattern_ref']  # noqa: E501

    query_params = []

    # Authentication is carried in the 'api-token' request header.
    header_params = {}
    if 'api_token' in params:
        header_params['api-token'] = params['api_token']  # noqa: E501

    form_params = []
    local_var_files = {}

    # DELETE request: no body is sent, although the generator still emits a
    # Content-Type header below.
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/libraries/{libraryRef}/riskpatterns/{riskPatternRef}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def libraries_library_ref_riskpatterns_risk_pattern_ref_get(self, api_token, library_ref, risk_pattern_ref, **kwargs):  # noqa: E501
    """Gets Risk Pattern details.  # noqa: E501

    Gets Risk Pattern details. Conditions to be able to perform the action:
    - To have the permission **LIBRARY_UPDATE** granted.  # noqa: E501

    This method is synchronous by default; pass async_req=True to get a
    request thread back instead of the decoded response.

    >>> thread = api.libraries_library_ref_riskpatterns_risk_pattern_ref_get(api_token, library_ref, risk_pattern_ref, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str api_token: Authentication token (required)
    :param str library_ref: Reference for library (required)
    :param str risk_pattern_ref: Reference for Risk Pattern (required)
    :return: RiskPattern
             If the method is called asynchronously,
             returns the request thread.
    """
    # Convenience wrapper: always request the deserialized payload only.
    kwargs['_return_http_data_only'] = True
    # Sync and async paths are the same delegate call; with async_req=True
    # the *_with_http_info method already returns the request thread.
    return self.libraries_library_ref_riskpatterns_risk_pattern_ref_get_with_http_info(
        api_token, library_ref, risk_pattern_ref, **kwargs)  # noqa: E501
def libraries_library_ref_riskpatterns_risk_pattern_ref_get_with_http_info(self, api_token, library_ref, risk_pattern_ref, **kwargs):  # noqa: E501
    """Gets Risk Pattern details  # noqa: E501

    Gets Risk Pattern details. Conditions to be able to perform the action:
    - To have the permission **LIBRARY_UPDATE** granted.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.libraries_library_ref_riskpatterns_risk_pattern_ref_get_with_http_info(api_token, library_ref, risk_pattern_ref, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str api_token: Authentication token (required)
    :param str library_ref: Reference for library (required)
    :param str risk_pattern_ref: Reference for Risk Pattern (required)
    :return: RiskPattern
             If the method is called asynchronously,
             returns the request thread.
    """
    # Names accepted by this endpoint; any other keyword argument is rejected.
    all_params = ['api_token', 'library_ref', 'risk_pattern_ref']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Snapshot the named arguments into a plain dict, then fold in the
    # validated **kwargs entries. The dict is used only as a mapping.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method libraries_library_ref_riskpatterns_risk_pattern_ref_get" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'api_token' is set
    if ('api_token' not in params or
            params['api_token'] is None):
        raise ValueError("Missing the required parameter `api_token` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_get`")  # noqa: E501
    # verify the required parameter 'library_ref' is set
    if ('library_ref' not in params or
            params['library_ref'] is None):
        raise ValueError("Missing the required parameter `library_ref` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_get`")  # noqa: E501
    # verify the required parameter 'risk_pattern_ref' is set
    if ('risk_pattern_ref' not in params or
            params['risk_pattern_ref'] is None):
        raise ValueError("Missing the required parameter `risk_pattern_ref` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_get`")  # noqa: E501

    collection_formats = {}

    # Substituted into the '{libraryRef}' and '{riskPatternRef}' path segments.
    path_params = {}
    if 'library_ref' in params:
        path_params['libraryRef'] = params['library_ref']  # noqa: E501
    if 'risk_pattern_ref' in params:
        path_params['riskPatternRef'] = params['risk_pattern_ref']  # noqa: E501

    query_params = []

    # Authentication is carried in the 'api-token' request header.
    header_params = {}
    if 'api_token' in params:
        header_params['api-token'] = params['api_token']  # noqa: E501

    form_params = []
    local_var_files = {}

    # GET request: no body is sent, although the generator still emits a
    # Content-Type header below.
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/libraries/{libraryRef}/riskpatterns/{riskPatternRef}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='RiskPattern',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_post(self, api_token, library_ref, risk_pattern_ref, create_use_case_library_request_body, **kwargs):  # noqa: E501
    """Creates new use case in a library.  # noqa: E501

    Creates new use case in a library. Conditions to be able to perform the
    action: - To have the permission **LIBRARY_UPDATE** granted.  # noqa: E501

    This method is synchronous by default; pass async_req=True to get a
    request thread back instead of the decoded response.

    >>> thread = api.libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_post(api_token, library_ref, risk_pattern_ref, create_use_case_library_request_body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str api_token: Authentication token (required)
    :param str library_ref: Reference for library (required)
    :param str risk_pattern_ref: Reference for Risk Pattern (required)
    :param CreateUseCaseLibraryRequestBody create_use_case_library_request_body: JSON data that contains information of the fields (required)
    :return: LibraryUseCase
             If the method is called asynchronously,
             returns the request thread.
    """
    # Convenience wrapper: always request the deserialized payload only.
    kwargs['_return_http_data_only'] = True
    # Sync and async paths are the same delegate call; with async_req=True
    # the *_with_http_info method already returns the request thread.
    return self.libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_post_with_http_info(
        api_token, library_ref, risk_pattern_ref,
        create_use_case_library_request_body, **kwargs)  # noqa: E501
def libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_post_with_http_info(self, api_token, library_ref, risk_pattern_ref, create_use_case_library_request_body, **kwargs):  # noqa: E501
    """Creates new use case in a library.  # noqa: E501

    Creates new use case in a library. Conditions to be able to perform the
    action: - To have the permission **LIBRARY_UPDATE** granted.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_post_with_http_info(api_token, library_ref, risk_pattern_ref, create_use_case_library_request_body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str api_token: Authentication token (required)
    :param str library_ref: Reference for library (required)
    :param str risk_pattern_ref: Reference for Risk Pattern (required)
    :param CreateUseCaseLibraryRequestBody create_use_case_library_request_body: JSON data that contains information of the fields (required)
    :return: LibraryUseCase
             If the method is called asynchronously,
             returns the request thread.
    """
    # Names accepted by this endpoint; any other keyword argument is rejected.
    all_params = ['api_token', 'library_ref', 'risk_pattern_ref', 'create_use_case_library_request_body']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Snapshot the named arguments into a plain dict, then fold in the
    # validated **kwargs entries. The dict is used only as a mapping.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_post" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'api_token' is set
    if ('api_token' not in params or
            params['api_token'] is None):
        raise ValueError("Missing the required parameter `api_token` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_post`")  # noqa: E501
    # verify the required parameter 'library_ref' is set
    if ('library_ref' not in params or
            params['library_ref'] is None):
        raise ValueError("Missing the required parameter `library_ref` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_post`")  # noqa: E501
    # verify the required parameter 'risk_pattern_ref' is set
    if ('risk_pattern_ref' not in params or
            params['risk_pattern_ref'] is None):
        raise ValueError("Missing the required parameter `risk_pattern_ref` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_post`")  # noqa: E501
    # verify the required parameter 'create_use_case_library_request_body' is set
    if ('create_use_case_library_request_body' not in params or
            params['create_use_case_library_request_body'] is None):
        raise ValueError("Missing the required parameter `create_use_case_library_request_body` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_post`")  # noqa: E501

    collection_formats = {}

    # Substituted into the '{libraryRef}' and '{riskPatternRef}' path segments.
    path_params = {}
    if 'library_ref' in params:
        path_params['libraryRef'] = params['library_ref']  # noqa: E501
    if 'risk_pattern_ref' in params:
        path_params['riskPatternRef'] = params['risk_pattern_ref']  # noqa: E501

    query_params = []

    # Authentication is carried in the 'api-token' request header.
    header_params = {}
    if 'api_token' in params:
        header_params['api-token'] = params['api_token']  # noqa: E501

    form_params = []
    local_var_files = {}

    # The request body is the use-case creation payload.
    body_params = None
    if 'create_use_case_library_request_body' in params:
        body_params = params['create_use_case_library_request_body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/libraries/{libraryRef}/riskpatterns/{riskPatternRef}/usecases', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='LibraryUseCase',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_post(self, api_token, library_ref, risk_pattern_ref, use_case_ref, create_threat_library_request_body, **kwargs):  # noqa: E501
    """Creates a new threat in a library.  # noqa: E501

    Creates a new threat in a library. Conditions to be able to perform the
    action: - To have the permission **LIBRARY_UPDATE** granted.  # noqa: E501

    This method is synchronous by default; pass async_req=True to get a
    request thread back instead of the decoded response.

    >>> thread = api.libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_post(api_token, library_ref, risk_pattern_ref, use_case_ref, create_threat_library_request_body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str api_token: Authentication token (required)
    :param str library_ref: Reference for library (required)
    :param str risk_pattern_ref: Reference for Risk Pattern (required)
    :param str use_case_ref: Reference for Use Case (required)
    :param CreateThreatLibraryRequestBody create_threat_library_request_body: JSON data that contains information of the fields (required)
    :return: LibraryThreat
             If the method is called asynchronously,
             returns the request thread.
    """
    # Convenience wrapper: always request the deserialized payload only.
    kwargs['_return_http_data_only'] = True
    # Sync and async paths are the same delegate call; with async_req=True
    # the *_with_http_info method already returns the request thread.
    return self.libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_post_with_http_info(
        api_token, library_ref, risk_pattern_ref, use_case_ref,
        create_threat_library_request_body, **kwargs)  # noqa: E501
    def libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_post_with_http_info(self, api_token, library_ref, risk_pattern_ref, use_case_ref, create_threat_library_request_body, **kwargs): # noqa: E501
        """Creates a new threat in a library. # noqa: E501

        Creates a new threat in a library. Conditions to be able to perform the action: - To have the permission **LIBRARY_UPDATE** granted. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_post_with_http_info(api_token, library_ref, risk_pattern_ref, use_case_ref, create_threat_library_request_body, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str api_token: Authentication token (required)
        :param str library_ref: Reference for library (required)
        :param str risk_pattern_ref: Reference for Risk Pattern (required)
        :param str use_case_ref: Reference for Use Case (required)
        :param CreateThreatLibraryRequestBody create_threat_library_request_body: JSON data that contains information of the fields (required)
        :raises TypeError: if an unexpected keyword argument is supplied
        :raises ValueError: if any required parameter is missing or None
        :return: LibraryThreat
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['api_token', 'library_ref', 'risk_pattern_ref', 'use_case_ref', 'create_threat_library_request_body'] # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # locals() snapshots the named arguments at this point; recognized
        # kwargs are validated against all_params and merged in below.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_post" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'api_token' is set
        if ('api_token' not in params or
                params['api_token'] is None):
            raise ValueError("Missing the required parameter `api_token` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_post`") # noqa: E501
        # verify the required parameter 'library_ref' is set
        if ('library_ref' not in params or
                params['library_ref'] is None):
            raise ValueError("Missing the required parameter `library_ref` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_post`") # noqa: E501
        # verify the required parameter 'risk_pattern_ref' is set
        if ('risk_pattern_ref' not in params or
                params['risk_pattern_ref'] is None):
            raise ValueError("Missing the required parameter `risk_pattern_ref` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_post`") # noqa: E501
        # verify the required parameter 'use_case_ref' is set
        if ('use_case_ref' not in params or
                params['use_case_ref'] is None):
            raise ValueError("Missing the required parameter `use_case_ref` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_post`") # noqa: E501
        # verify the required parameter 'create_threat_library_request_body' is set
        if ('create_threat_library_request_body' not in params or
                params['create_threat_library_request_body'] is None):
            raise ValueError("Missing the required parameter `create_threat_library_request_body` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_post`") # noqa: E501
        collection_formats = {}
        # Path placeholders are filled from the snake_case params.
        path_params = {}
        if 'library_ref' in params:
            path_params['libraryRef'] = params['library_ref'] # noqa: E501
        if 'risk_pattern_ref' in params:
            path_params['riskPatternRef'] = params['risk_pattern_ref'] # noqa: E501
        if 'use_case_ref' in params:
            path_params['useCaseRef'] = params['use_case_ref'] # noqa: E501
        query_params = []
        # The API token travels as the `api-token` request header.
        header_params = {}
        if 'api_token' in params:
            header_params['api-token'] = params['api_token'] # noqa: E501
        form_params = []
        local_var_files = {}
        body_params = None
        if 'create_threat_library_request_body' in params:
            body_params = params['create_threat_library_request_body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
            ['application/json']) # noqa: E501
        # Authentication setting
        auth_settings = [] # noqa: E501
        return self.api_client.call_api(
            '/libraries/{libraryRef}/riskpatterns/{riskPatternRef}/usecases/{useCaseRef}/threats', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='LibraryThreat', # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_countermeasures_put(self, api_token, library_ref, risk_pattern_ref, use_case_ref, threat_ref, associate_countermeasure_threat_library_request_body, **kwargs): # noqa: E501
"""Associates a countermeasure to a threat in a risk pattern. # noqa: E501
Associates a countermeasure to a threat in a risk pattern. Conditions to be able to perform the action: - To have the permission **LIBRARY_UPDATE** granted. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_countermeasures_put(api_token, library_ref, risk_pattern_ref, use_case_ref, threat_ref, associate_countermeasure_threat_library_request_body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str api_token: Authentication token (required)
:param str library_ref: Reference for library (required)
:param str risk_pattern_ref: Reference for Risk Pattern (required)
:param str use_case_ref: Reference for Use Case (required)
:param str threat_ref: Reference for Threat (required)
:param AssociateCountermeasureThreatLibraryRequestBody associate_countermeasure_threat_library_request_body: JSON data that contains information of the fields (required)
:return: LibraryControl
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_countermeasures_put_with_http_info(api_token, library_ref, risk_pattern_ref, use_case_ref, threat_ref, associate_countermeasure_threat_library_request_body, **kwargs) # noqa: E501
else:
(data) = self.libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_countermeasures_put_with_http_info(api_token, library_ref, risk_pattern_ref, use_case_ref, threat_ref, associate_countermeasure_threat_library_request_body, **kwargs) # noqa: E501
return data
    def libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_countermeasures_put_with_http_info(self, api_token, library_ref, risk_pattern_ref, use_case_ref, threat_ref, associate_countermeasure_threat_library_request_body, **kwargs): # noqa: E501
        """Associates a countermeasure to a threat in a risk pattern. # noqa: E501

        Associates a countermeasure to a threat in a risk pattern. Conditions to be able to perform the action: - To have the permission **LIBRARY_UPDATE** granted. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_countermeasures_put_with_http_info(api_token, library_ref, risk_pattern_ref, use_case_ref, threat_ref, associate_countermeasure_threat_library_request_body, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str api_token: Authentication token (required)
        :param str library_ref: Reference for library (required)
        :param str risk_pattern_ref: Reference for Risk Pattern (required)
        :param str use_case_ref: Reference for Use Case (required)
        :param str threat_ref: Reference for Threat (required)
        :param AssociateCountermeasureThreatLibraryRequestBody associate_countermeasure_threat_library_request_body: JSON data that contains information of the fields (required)
        :raises TypeError: if an unexpected keyword argument is supplied
        :raises ValueError: if any required parameter is missing or None
        :return: LibraryControl
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['api_token', 'library_ref', 'risk_pattern_ref', 'use_case_ref', 'threat_ref', 'associate_countermeasure_threat_library_request_body'] # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # locals() snapshots the named arguments at this point; recognized
        # kwargs are validated against all_params and merged in below.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_countermeasures_put" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'api_token' is set
        if ('api_token' not in params or
                params['api_token'] is None):
            raise ValueError("Missing the required parameter `api_token` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_countermeasures_put`") # noqa: E501
        # verify the required parameter 'library_ref' is set
        if ('library_ref' not in params or
                params['library_ref'] is None):
            raise ValueError("Missing the required parameter `library_ref` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_countermeasures_put`") # noqa: E501
        # verify the required parameter 'risk_pattern_ref' is set
        if ('risk_pattern_ref' not in params or
                params['risk_pattern_ref'] is None):
            raise ValueError("Missing the required parameter `risk_pattern_ref` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_countermeasures_put`") # noqa: E501
        # verify the required parameter 'use_case_ref' is set
        if ('use_case_ref' not in params or
                params['use_case_ref'] is None):
            raise ValueError("Missing the required parameter `use_case_ref` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_countermeasures_put`") # noqa: E501
        # verify the required parameter 'threat_ref' is set
        if ('threat_ref' not in params or
                params['threat_ref'] is None):
            raise ValueError("Missing the required parameter `threat_ref` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_countermeasures_put`") # noqa: E501
        # verify the required parameter 'associate_countermeasure_threat_library_request_body' is set
        if ('associate_countermeasure_threat_library_request_body' not in params or
                params['associate_countermeasure_threat_library_request_body'] is None):
            raise ValueError("Missing the required parameter `associate_countermeasure_threat_library_request_body` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_countermeasures_put`") # noqa: E501
        collection_formats = {}
        # Path placeholders are filled from the snake_case params.
        path_params = {}
        if 'library_ref' in params:
            path_params['libraryRef'] = params['library_ref'] # noqa: E501
        if 'risk_pattern_ref' in params:
            path_params['riskPatternRef'] = params['risk_pattern_ref'] # noqa: E501
        if 'use_case_ref' in params:
            path_params['useCaseRef'] = params['use_case_ref'] # noqa: E501
        if 'threat_ref' in params:
            path_params['threatRef'] = params['threat_ref'] # noqa: E501
        query_params = []
        # The API token travels as the `api-token` request header.
        header_params = {}
        if 'api_token' in params:
            header_params['api-token'] = params['api_token'] # noqa: E501
        form_params = []
        local_var_files = {}
        body_params = None
        if 'associate_countermeasure_threat_library_request_body' in params:
            body_params = params['associate_countermeasure_threat_library_request_body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
            ['application/json']) # noqa: E501
        # Authentication setting
        auth_settings = [] # noqa: E501
        return self.api_client.call_api(
            '/libraries/{libraryRef}/riskpatterns/{riskPatternRef}/usecases/{useCaseRef}/threats/{threatRef}/countermeasures', 'PUT',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='LibraryControl', # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_weaknesses_put(self, api_token, library_ref, risk_pattern_ref, use_case_ref, threat_ref, associate_weakness_threat_library_request_body, **kwargs): # noqa: E501
"""Associates weakness to a threat in a risk pattern. # noqa: E501
Associates weakness to a threat in a risk pattern. Conditions to be able to perform the action: - To have the permission **LIBRARY_UPDATE** granted. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_weaknesses_put(api_token, library_ref, risk_pattern_ref, use_case_ref, threat_ref, associate_weakness_threat_library_request_body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str api_token: Authentication token (required)
:param str library_ref: Reference for library (required)
:param str risk_pattern_ref: Reference for Risk Pattern (required)
:param str use_case_ref: Reference for Use Case (required)
:param str threat_ref: Reference for Threat (required)
:param AssociateWeaknessThreatLibraryRequestBody associate_weakness_threat_library_request_body: JSON data that contains information of the fields (required)
:return: LibraryWeakness
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_weaknesses_put_with_http_info(api_token, library_ref, risk_pattern_ref, use_case_ref, threat_ref, associate_weakness_threat_library_request_body, **kwargs) # noqa: E501
else:
(data) = self.libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_weaknesses_put_with_http_info(api_token, library_ref, risk_pattern_ref, use_case_ref, threat_ref, associate_weakness_threat_library_request_body, **kwargs) # noqa: E501
return data
    def libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_weaknesses_put_with_http_info(self, api_token, library_ref, risk_pattern_ref, use_case_ref, threat_ref, associate_weakness_threat_library_request_body, **kwargs): # noqa: E501
        """Associates weakness to a threat in a risk pattern. # noqa: E501

        Associates weakness to a threat in a risk pattern. Conditions to be able to perform the action: - To have the permission **LIBRARY_UPDATE** granted. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_weaknesses_put_with_http_info(api_token, library_ref, risk_pattern_ref, use_case_ref, threat_ref, associate_weakness_threat_library_request_body, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str api_token: Authentication token (required)
        :param str library_ref: Reference for library (required)
        :param str risk_pattern_ref: Reference for Risk Pattern (required)
        :param str use_case_ref: Reference for Use Case (required)
        :param str threat_ref: Reference for Threat (required)
        :param AssociateWeaknessThreatLibraryRequestBody associate_weakness_threat_library_request_body: JSON data that contains information of the fields (required)
        :raises TypeError: if an unexpected keyword argument is supplied
        :raises ValueError: if any required parameter is missing or None
        :return: LibraryWeakness
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['api_token', 'library_ref', 'risk_pattern_ref', 'use_case_ref', 'threat_ref', 'associate_weakness_threat_library_request_body'] # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # locals() snapshots the named arguments at this point; recognized
        # kwargs are validated against all_params and merged in below.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_weaknesses_put" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'api_token' is set
        if ('api_token' not in params or
                params['api_token'] is None):
            raise ValueError("Missing the required parameter `api_token` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_weaknesses_put`") # noqa: E501
        # verify the required parameter 'library_ref' is set
        if ('library_ref' not in params or
                params['library_ref'] is None):
            raise ValueError("Missing the required parameter `library_ref` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_weaknesses_put`") # noqa: E501
        # verify the required parameter 'risk_pattern_ref' is set
        if ('risk_pattern_ref' not in params or
                params['risk_pattern_ref'] is None):
            raise ValueError("Missing the required parameter `risk_pattern_ref` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_weaknesses_put`") # noqa: E501
        # verify the required parameter 'use_case_ref' is set
        if ('use_case_ref' not in params or
                params['use_case_ref'] is None):
            raise ValueError("Missing the required parameter `use_case_ref` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_weaknesses_put`") # noqa: E501
        # verify the required parameter 'threat_ref' is set
        if ('threat_ref' not in params or
                params['threat_ref'] is None):
            raise ValueError("Missing the required parameter `threat_ref` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_weaknesses_put`") # noqa: E501
        # verify the required parameter 'associate_weakness_threat_library_request_body' is set
        if ('associate_weakness_threat_library_request_body' not in params or
                params['associate_weakness_threat_library_request_body'] is None):
            raise ValueError("Missing the required parameter `associate_weakness_threat_library_request_body` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_weaknesses_put`") # noqa: E501
        collection_formats = {}
        # Path placeholders are filled from the snake_case params.
        path_params = {}
        if 'library_ref' in params:
            path_params['libraryRef'] = params['library_ref'] # noqa: E501
        if 'risk_pattern_ref' in params:
            path_params['riskPatternRef'] = params['risk_pattern_ref'] # noqa: E501
        if 'use_case_ref' in params:
            path_params['useCaseRef'] = params['use_case_ref'] # noqa: E501
        if 'threat_ref' in params:
            path_params['threatRef'] = params['threat_ref'] # noqa: E501
        query_params = []
        # The API token travels as the `api-token` request header.
        header_params = {}
        if 'api_token' in params:
            header_params['api-token'] = params['api_token'] # noqa: E501
        form_params = []
        local_var_files = {}
        body_params = None
        if 'associate_weakness_threat_library_request_body' in params:
            body_params = params['associate_weakness_threat_library_request_body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
            ['application/json']) # noqa: E501
        # Authentication setting
        auth_settings = [] # noqa: E501
        return self.api_client.call_api(
            '/libraries/{libraryRef}/riskpatterns/{riskPatternRef}/usecases/{useCaseRef}/threats/{threatRef}/weaknesses', 'PUT',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='LibraryWeakness', # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_weaknesses_weakness_ref_countermeasures_put(self, api_token, library_ref, risk_pattern_ref, use_case_ref, threat_ref, weakness_ref, associate_countermeasure_weakness_library_request_body, **kwargs): # noqa: E501
"""Associates a countermeasure to a weakness in a risk pattern. # noqa: E501
Associates a countermeasure to a weakness in a risk pattern. Conditions to be able to perform the action: - To have the permission **LIBRARY_UPDATE** granted. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_weaknesses_weakness_ref_countermeasures_put(api_token, library_ref, risk_pattern_ref, use_case_ref, threat_ref, weakness_ref, associate_countermeasure_weakness_library_request_body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str api_token: Authentication token (required)
:param str library_ref: Reference for library (required)
:param str risk_pattern_ref: Reference for Risk Pattern (required)
:param str use_case_ref: Reference for Use Case (required)
:param str threat_ref: Reference for Threat (required)
:param str weakness_ref: Reference for Weakness (required)
:param AssociateCountermeasureWeaknessLibraryRequestBody associate_countermeasure_weakness_library_request_body: JSON data that contains information of the fields (required)
:return: LibraryControl
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_weaknesses_weakness_ref_countermeasures_put_with_http_info(api_token, library_ref, risk_pattern_ref, use_case_ref, threat_ref, weakness_ref, associate_countermeasure_weakness_library_request_body, **kwargs) # noqa: E501
else:
(data) = self.libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_weaknesses_weakness_ref_countermeasures_put_with_http_info(api_token, library_ref, risk_pattern_ref, use_case_ref, threat_ref, weakness_ref, associate_countermeasure_weakness_library_request_body, **kwargs) # noqa: E501
return data
    def libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_weaknesses_weakness_ref_countermeasures_put_with_http_info(self, api_token, library_ref, risk_pattern_ref, use_case_ref, threat_ref, weakness_ref, associate_countermeasure_weakness_library_request_body, **kwargs): # noqa: E501
        """Associates a countermeasure to a weakness in a risk pattern. # noqa: E501

        Associates a countermeasure to a weakness in a risk pattern. Conditions to be able to perform the action: - To have the permission **LIBRARY_UPDATE** granted. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_weaknesses_weakness_ref_countermeasures_put_with_http_info(api_token, library_ref, risk_pattern_ref, use_case_ref, threat_ref, weakness_ref, associate_countermeasure_weakness_library_request_body, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str api_token: Authentication token (required)
        :param str library_ref: Reference for library (required)
        :param str risk_pattern_ref: Reference for Risk Pattern (required)
        :param str use_case_ref: Reference for Use Case (required)
        :param str threat_ref: Reference for Threat (required)
        :param str weakness_ref: Reference for Weakness (required)
        :param AssociateCountermeasureWeaknessLibraryRequestBody associate_countermeasure_weakness_library_request_body: JSON data that contains information of the fields (required)
        :raises TypeError: if an unexpected keyword argument is supplied
        :raises ValueError: if any required parameter is missing or None
        :return: LibraryControl
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['api_token', 'library_ref', 'risk_pattern_ref', 'use_case_ref', 'threat_ref', 'weakness_ref', 'associate_countermeasure_weakness_library_request_body'] # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # locals() snapshots the named arguments at this point; recognized
        # kwargs are validated against all_params and merged in below.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_weaknesses_weakness_ref_countermeasures_put" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'api_token' is set
        if ('api_token' not in params or
                params['api_token'] is None):
            raise ValueError("Missing the required parameter `api_token` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_weaknesses_weakness_ref_countermeasures_put`") # noqa: E501
        # verify the required parameter 'library_ref' is set
        if ('library_ref' not in params or
                params['library_ref'] is None):
            raise ValueError("Missing the required parameter `library_ref` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_weaknesses_weakness_ref_countermeasures_put`") # noqa: E501
        # verify the required parameter 'risk_pattern_ref' is set
        if ('risk_pattern_ref' not in params or
                params['risk_pattern_ref'] is None):
            raise ValueError("Missing the required parameter `risk_pattern_ref` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_weaknesses_weakness_ref_countermeasures_put`") # noqa: E501
        # verify the required parameter 'use_case_ref' is set
        if ('use_case_ref' not in params or
                params['use_case_ref'] is None):
            raise ValueError("Missing the required parameter `use_case_ref` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_weaknesses_weakness_ref_countermeasures_put`") # noqa: E501
        # verify the required parameter 'threat_ref' is set
        if ('threat_ref' not in params or
                params['threat_ref'] is None):
            raise ValueError("Missing the required parameter `threat_ref` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_weaknesses_weakness_ref_countermeasures_put`") # noqa: E501
        # verify the required parameter 'weakness_ref' is set
        if ('weakness_ref' not in params or
                params['weakness_ref'] is None):
            raise ValueError("Missing the required parameter `weakness_ref` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_weaknesses_weakness_ref_countermeasures_put`") # noqa: E501
        # verify the required parameter 'associate_countermeasure_weakness_library_request_body' is set
        if ('associate_countermeasure_weakness_library_request_body' not in params or
                params['associate_countermeasure_weakness_library_request_body'] is None):
            raise ValueError("Missing the required parameter `associate_countermeasure_weakness_library_request_body` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_weaknesses_weakness_ref_countermeasures_put`") # noqa: E501
        collection_formats = {}
        # Path placeholders are filled from the snake_case params.
        path_params = {}
        if 'library_ref' in params:
            path_params['libraryRef'] = params['library_ref'] # noqa: E501
        if 'risk_pattern_ref' in params:
            path_params['riskPatternRef'] = params['risk_pattern_ref'] # noqa: E501
        if 'use_case_ref' in params:
            path_params['useCaseRef'] = params['use_case_ref'] # noqa: E501
        if 'threat_ref' in params:
            path_params['threatRef'] = params['threat_ref'] # noqa: E501
        if 'weakness_ref' in params:
            path_params['weaknessRef'] = params['weakness_ref'] # noqa: E501
        query_params = []
        # The API token travels as the `api-token` request header.
        header_params = {}
        if 'api_token' in params:
            header_params['api-token'] = params['api_token'] # noqa: E501
        form_params = []
        local_var_files = {}
        body_params = None
        if 'associate_countermeasure_weakness_library_request_body' in params:
            body_params = params['associate_countermeasure_weakness_library_request_body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
            ['application/json']) # noqa: E501
        # Authentication setting
        auth_settings = [] # noqa: E501
        return self.api_client.call_api(
            '/libraries/{libraryRef}/riskpatterns/{riskPatternRef}/usecases/{useCaseRef}/threats/{threatRef}/weaknesses/{weaknessRef}/countermeasures', 'PUT',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='LibraryControl', # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def libraries_library_ref_riskpatterns_risk_pattern_ref_weaknesses_post(self, api_token, library_ref, risk_pattern_ref, create_weakness_library_request_body, **kwargs): # noqa: E501
"""Creates a new weakness in a risk pattern # noqa: E501
Creates a new Weakness in a risk pattern. Conditions to be able to perform the action: - To have the permission **LIBRARY_UPDATE** granted. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.libraries_library_ref_riskpatterns_risk_pattern_ref_weaknesses_post(api_token, library_ref, risk_pattern_ref, create_weakness_library_request_body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str api_token: Authentication token (required)
:param str library_ref: Reference for library (required)
:param str risk_pattern_ref: Reference for Risk Pattern (required)
:param CreateWeaknessLibraryRequestBody create_weakness_library_request_body: JSON data that contains information of the fields (required)
:return: LibraryWeakness
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.libraries_library_ref_riskpatterns_risk_pattern_ref_weaknesses_post_with_http_info(api_token, library_ref, risk_pattern_ref, create_weakness_library_request_body, **kwargs) # noqa: E501
else:
(data) = self.libraries_library_ref_riskpatterns_risk_pattern_ref_weaknesses_post_with_http_info(api_token, library_ref, risk_pattern_ref, create_weakness_library_request_body, **kwargs) # noqa: E501
return data
def libraries_library_ref_riskpatterns_risk_pattern_ref_weaknesses_post_with_http_info(self, api_token, library_ref, risk_pattern_ref, create_weakness_library_request_body, **kwargs):  # noqa: E501
    """Creates a new weakness in a risk pattern  # noqa: E501

    Creates a new Weakness in a risk pattern. Conditions to be able to perform the action: - To have the permission **LIBRARY_UPDATE** granted.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.libraries_library_ref_riskpatterns_risk_pattern_ref_weaknesses_post_with_http_info(api_token, library_ref, risk_pattern_ref, create_weakness_library_request_body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str api_token: Authentication token (required)
    :param str library_ref: Reference for library (required)
    :param str risk_pattern_ref: Reference for Risk Pattern (required)
    :param CreateWeaknessLibraryRequestBody create_weakness_library_request_body: JSON data that contains information of the fields (required)
    :return: LibraryWeakness
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword names this operation accepts: its parameters plus the common
    # per-request options understood by api_client.call_api.
    all_params = ['api_token', 'library_ref', 'risk_pattern_ref', 'create_weakness_library_request_body']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    # Fold **kwargs into params, rejecting unknown keywords early.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method libraries_library_ref_riskpatterns_risk_pattern_ref_weaknesses_post" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'api_token' is set
    if ('api_token' not in params or
            params['api_token'] is None):
        raise ValueError("Missing the required parameter `api_token` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_weaknesses_post`")  # noqa: E501
    # verify the required parameter 'library_ref' is set
    if ('library_ref' not in params or
            params['library_ref'] is None):
        raise ValueError("Missing the required parameter `library_ref` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_weaknesses_post`")  # noqa: E501
    # verify the required parameter 'risk_pattern_ref' is set
    if ('risk_pattern_ref' not in params or
            params['risk_pattern_ref'] is None):
        raise ValueError("Missing the required parameter `risk_pattern_ref` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_weaknesses_post`")  # noqa: E501
    # verify the required parameter 'create_weakness_library_request_body' is set
    if ('create_weakness_library_request_body' not in params or
            params['create_weakness_library_request_body'] is None):
        raise ValueError("Missing the required parameter `create_weakness_library_request_body` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_weaknesses_post`")  # noqa: E501

    collection_formats = {}

    # Route templating: {libraryRef} and {riskPatternRef} in the URL path.
    path_params = {}
    if 'library_ref' in params:
        path_params['libraryRef'] = params['library_ref']  # noqa: E501
    if 'risk_pattern_ref' in params:
        path_params['riskPatternRef'] = params['risk_pattern_ref']  # noqa: E501

    query_params = []

    # The API token travels in the `api-token` request header.
    header_params = {}
    if 'api_token' in params:
        header_params['api-token'] = params['api_token']  # noqa: E501

    form_params = []
    local_var_files = {}

    # The request body is the weakness description object (serialised to
    # JSON by api_client).
    body_params = None
    if 'create_weakness_library_request_body' in params:
        body_params = params['create_weakness_library_request_body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/libraries/{libraryRef}/riskpatterns/{riskPatternRef}/weaknesses', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='LibraryWeakness',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def libraries_post(self, api_token, create_library_request_body, **kwargs):  # noqa: E501
    """Creates a Library  # noqa: E501

    Creates a new empty Library. Conditions to be able to perform the action: - To have the permission **LIBRARY_UPDATE** granted.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.libraries_post(api_token, create_library_request_body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str api_token: Authentication token (required)
    :param CreateLibraryRequestBody create_library_request_body: JSON data that contains information of the fields (required)
    :return: Library
             If the method is called asynchronously,
             returns the request thread.
    """
    # This wrapper always surfaces only the decoded payload, never the
    # (data, status, headers) triple.
    kwargs['_return_http_data_only'] = True
    # Delegation covers both modes: with async_req set the *_with_http_info
    # variant hands back the request thread, otherwise the decoded data.
    return self.libraries_post_with_http_info(
        api_token, create_library_request_body, **kwargs)  # noqa: E501
def libraries_post_with_http_info(self, api_token, create_library_request_body, **kwargs):  # noqa: E501
    """Creates a Library  # noqa: E501

    Creates a new empty Library. Conditions to be able to perform the action: - To have the permission **LIBRARY_UPDATE** granted.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.libraries_post_with_http_info(api_token, create_library_request_body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str api_token: Authentication token (required)
    :param CreateLibraryRequestBody create_library_request_body: JSON data that contains information of the fields (required)
    :return: Library
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword names this operation accepts: its parameters plus the common
    # per-request options understood by api_client.call_api.
    all_params = ['api_token', 'create_library_request_body']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    # Fold **kwargs into params, rejecting unknown keywords early.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method libraries_post" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'api_token' is set
    if ('api_token' not in params or
            params['api_token'] is None):
        raise ValueError("Missing the required parameter `api_token` when calling `libraries_post`")  # noqa: E501
    # verify the required parameter 'create_library_request_body' is set
    if ('create_library_request_body' not in params or
            params['create_library_request_body'] is None):
        raise ValueError("Missing the required parameter `create_library_request_body` when calling `libraries_post`")  # noqa: E501

    collection_formats = {}

    path_params = {}

    query_params = []

    # The API token travels in the `api-token` request header.
    header_params = {}
    if 'api_token' in params:
        header_params['api-token'] = params['api_token']  # noqa: E501

    form_params = []
    local_var_files = {}

    # The request body is the library description object (serialised to
    # JSON by api_client).
    body_params = None
    if 'create_library_request_body' in params:
        body_params = params['create_library_request_body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/libraries', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='Library',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def products_upload_post(self, api_token, ref, name, file_name, **kwargs):  # noqa: E501
    """Creates a new product, library or template from a XML file upload.  # noqa: E501

    Creates a new product, library or template from a XML file upload. Conditions to be able to perform the action: - To have the permission **PRODUCT_CREATE** granted allows to create a product. - To have the permission **LIBRARY_UPDATE** granted allows to create a library. - To have the permission **TEMPLATE_UPDATE** granted allows to create a template.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.products_upload_post(api_token, ref, name, file_name, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str api_token: Authentication token (required)
    :param str ref: Product ref (required)
    :param str name: Product name (required)
    :param file file_name: File to upload in XML format (required)
    :param str type: Product type - STANDARD (By default), TEMPLATE or LIBRARY
    :return: ProductShort
             If the method is called asynchronously,
             returns the request thread.
    """
    # This wrapper always surfaces only the decoded payload, never the
    # (data, status, headers) triple.
    kwargs['_return_http_data_only'] = True
    # Delegation covers both modes: with async_req set the *_with_http_info
    # variant hands back the request thread, otherwise the decoded data.
    return self.products_upload_post_with_http_info(
        api_token, ref, name, file_name, **kwargs)  # noqa: E501
def products_upload_post_with_http_info(self, api_token, ref, name, file_name, **kwargs):  # noqa: E501
    """Creates a new product, library or template from a XML file upload.  # noqa: E501

    Creates a new product, library or template from a XML file upload. Conditions to be able to perform the action: - To have the permission **PRODUCT_CREATE** granted allows to create a product. - To have the permission **LIBRARY_UPDATE** granted allows to create a library. - To have the permission **TEMPLATE_UPDATE** granted allows to create a template.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.products_upload_post_with_http_info(api_token, ref, name, file_name, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str api_token: Authentication token (required)
    :param str ref: Product ref (required)
    :param str name: Product name (required)
    :param file file_name: File to upload in XML format (required)
    :param str type: Product type - STANDARD (By default), TEMPLATE or LIBRARY
    :return: ProductShort
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword names this operation accepts: its parameters (including the
    # optional `type`) plus the common per-request options.
    all_params = ['api_token', 'ref', 'name', 'file_name', 'type']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    # Fold **kwargs into params, rejecting unknown keywords early.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method products_upload_post" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'api_token' is set
    if ('api_token' not in params or
            params['api_token'] is None):
        raise ValueError("Missing the required parameter `api_token` when calling `products_upload_post`")  # noqa: E501
    # verify the required parameter 'ref' is set
    if ('ref' not in params or
            params['ref'] is None):
        raise ValueError("Missing the required parameter `ref` when calling `products_upload_post`")  # noqa: E501
    # verify the required parameter 'name' is set
    if ('name' not in params or
            params['name'] is None):
        raise ValueError("Missing the required parameter `name` when calling `products_upload_post`")  # noqa: E501
    # verify the required parameter 'file_name' is set
    if ('file_name' not in params or
            params['file_name'] is None):
        raise ValueError("Missing the required parameter `file_name` when calling `products_upload_post`")  # noqa: E501

    collection_formats = {}

    path_params = {}

    query_params = []

    # The API token travels in the `api-token` request header.
    header_params = {}
    if 'api_token' in params:
        header_params['api-token'] = params['api_token']  # noqa: E501

    # Multipart form fields; `type` is only sent when the caller supplied it.
    form_params = []
    local_var_files = {}
    if 'ref' in params:
        form_params.append(('ref', params['ref']))  # noqa: E501
    if 'name' in params:
        form_params.append(('name', params['name']))  # noqa: E501
    if 'type' in params:
        form_params.append(('type', params['type']))  # noqa: E501
    # The uploaded XML file goes in the multipart part named `fileName`.
    if 'file_name' in params:
        local_var_files['fileName'] = params['file_name']  # noqa: E501

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['multipart/form-data'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/products/upload', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='ProductShort',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
| 55.707736
| 376
| 0.685516
| 11,861
| 97,210
| 5.272827
| 0.017789
| 0.050047
| 0.053725
| 0.057003
| 0.987288
| 0.985002
| 0.980477
| 0.975984
| 0.969684
| 0.954478
| 0
| 0.012054
| 0.238782
| 97,210
| 1,744
| 377
| 55.739679
| 0.833117
| 0.348153
| 0
| 0.762397
| 1
| 0.001033
| 0.305022
| 0.151221
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029959
| false
| 0
| 0.004132
| 0
| 0.078512
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
67b1bf9fbddf80ee8e44c04aac2dd1052361de34
| 2,163
|
py
|
Python
|
mistree/tests/test_construct.py
|
knaidoo29/MiSTree
|
20ef822ca349d2cc8118bbeca277713f03e10cd2
|
[
"MIT"
] | 27
|
2019-07-03T08:01:10.000Z
|
2022-03-29T09:28:58.000Z
|
mistree/tests/test_construct.py
|
knaidoo29/MiSTree
|
20ef822ca349d2cc8118bbeca277713f03e10cd2
|
[
"MIT"
] | 15
|
2019-09-12T03:56:20.000Z
|
2021-12-14T22:27:44.000Z
|
mistree/tests/test_construct.py
|
knaidoo29/MiSTree
|
20ef822ca349d2cc8118bbeca277713f03e10cd2
|
[
"MIT"
] | 15
|
2019-07-03T05:00:20.000Z
|
2022-03-03T19:37:32.000Z
|
import numpy as np
import mistree as mist
def test_construct_mst_2d():
    # An MST over N points always has exactly N-1 edges.
    xs = np.random.random_sample(100)
    ys = np.random.random_sample(100)
    lengths, _, _, _ = mist.construct_mst(xs, ys, two_dimensions=True)
    assert len(lengths) == 99
def test_construct_mst_3d():
    # Same N-1 edge-count invariant, but for a 3D point set.
    xs = np.random.random_sample(100)
    ys = np.random.random_sample(100)
    zs = np.random.random_sample(100)
    lengths, _, _, _, _ = mist.construct_mst(xs, ys, z=zs, two_dimensions=False)
    assert len(lengths) == 99
def test_construct_mst_k_neighbours():
    """The k-nearest-neighbour restriction should reproduce the full MST here.

    Compares the sorted edge lengths of the unrestricted tree against those
    built with k_neighbours=30 on the same random 2D point set.
    """
    x = np.random.random_sample(100)
    y = np.random.random_sample(100)
    edge_length, edge_x, edge_y, edge_index = mist.construct_mst(x, y, two_dimensions=True)
    # Fix: second unpack previously named this 'edge_2', breaking the
    # edge_x2/edge_y2/... naming pattern used for the other outputs.
    edge_length2, edge_x2, edge_y2, edge_index2 = mist.construct_mst(x, y, k_neighbours=30, two_dimensions=True)
    condition = np.where(np.sort(edge_length) == np.sort(edge_length2))[0]
    assert len(condition) == len(edge_length)
def test_construct_mst_2d_scale_cut():
    # With a scale cut applied, every surviving edge must be at least that long.
    xs = np.random.random_sample(100)
    ys = np.random.random_sample(100)
    lengths, _, _, _, _ = mist.construct_mst(
        xs, ys, two_dimensions=True, scale_cut_length=0.01)
    kept = np.where(lengths >= 0.01)[0]
    assert len(kept) == len(lengths)
def test_construct_mst_3d_scale_cut():
    # 3D variant of the scale-cut check: no surviving edge below the cut.
    xs = np.random.random_sample(100)
    ys = np.random.random_sample(100)
    zs = np.random.random_sample(100)
    lengths, _, _, _, _, _ = mist.construct_mst(
        xs, ys, z=zs, two_dimensions=False, scale_cut_length=0.01)
    kept = np.where(lengths >= 0.01)[0]
    assert len(kept) == len(lengths)
def test_construct_mst_tomo_scale_cut():
    # Points on the unit sphere (tomographic case) with a 0.2 scale cut:
    # every surviving edge must be at least that long.
    phi = 360. * np.random.random_sample(100)
    theta = 180. * np.random.random_sample(100)
    xs, ys, zs = mist.spherical_2_unit_sphere(phi, theta)
    lengths, _, _, _, _, _ = mist.construct_mst(
        xs, ys, z=zs, two_dimensions=False, scale_cut_length=0.2)
    kept = np.where(lengths >= 0.2)[0]
    assert len(kept) == len(lengths)
| 40.811321
| 147
| 0.72908
| 362
| 2,163
| 4.049724
| 0.146409
| 0.109141
| 0.133697
| 0.190996
| 0.850614
| 0.78854
| 0.770123
| 0.748295
| 0.736699
| 0.682128
| 0
| 0.045603
| 0.148405
| 2,163
| 52
| 148
| 41.596154
| 0.750271
| 0
| 0
| 0.55
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15
| 1
| 0.15
| false
| 0
| 0.05
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e1fe4252da4b502e5f8822155ad0b8cf62b008e5
| 6,423
|
py
|
Python
|
misc/python_sealog/lowerings.py
|
WHOIGit/ndsf-sealog-server
|
e57843e3e23a924ccf6fc1ef1e40d92f36a3b612
|
[
"MIT"
] | 4
|
2019-10-29T21:53:13.000Z
|
2021-12-02T00:38:42.000Z
|
misc/python_sealog/lowerings.py
|
WHOIGit/ndsf-sealog-server
|
e57843e3e23a924ccf6fc1ef1e40d92f36a3b612
|
[
"MIT"
] | 14
|
2020-05-28T16:39:30.000Z
|
2021-05-22T06:01:40.000Z
|
misc/python_sealog/lowerings.py
|
WHOIGit/ndsf-sealog-server
|
e57843e3e23a924ccf6fc1ef1e40d92f36a3b612
|
[
"MIT"
] | 1
|
2020-01-31T00:00:42.000Z
|
2020-01-31T00:00:42.000Z
|
#!/usr/bin/env python3
'''
FILE: lowerings.py
DESCRIPTION: This script contains the wrapper functions for the sealog-
server lowering routes.
BUGS:
NOTES:
AUTHOR: Webb Pinner
COMPANY: OceanDataTools.org
VERSION: 0.1
CREATED: 2021-01-01
REVISION:
LICENSE INFO: This code is licensed under MIT license (see LICENSE.txt for details)
Copyright (C) OceanDataTools.org 2021
'''
import json
import logging
import requests
from .settings import API_SERVER_URL, HEADERS, LOWERINGS_API_PATH
def get_lowering_uid_by_id(lowering_id, api_server_url=API_SERVER_URL, headers=HEADERS):
    '''
    Return the UID for a lowering record based on the lowering_id.
    '''
    try:
        response = requests.get(
            api_server_url + LOWERINGS_API_PATH + '?lowering_id=' + lowering_id,
            headers=headers)
        # A 200 reply carries a json array; the first entry is the match.
        if response.status_code == 200:
            return json.loads(response.text)[0]['id']
    except Exception as error:
        # Log, then hand the original exception back to the caller.
        logging.error(str(error))
        raise error
    return None
def get_lowerings(export_format='json', api_server_url=API_SERVER_URL, headers=HEADERS):
    '''
    Return all lowering records. Records come back as json objects by
    default; pass export_format='csv' to receive csv text instead.
    '''
    try:
        response = requests.get(
            api_server_url + LOWERINGS_API_PATH + '?format=' + export_format,
            headers=headers)
        # 200 -> payload, 404 -> empty result of the matching type,
        # any other status (or unknown format) falls through to None.
        if response.status_code == 200:
            if export_format == 'json':
                return json.loads(response.text)
            if export_format == 'csv':
                return response.text
        elif response.status_code == 404:
            if export_format == 'json':
                return []
            if export_format == 'csv':
                return ""
    except Exception as error:
        logging.error(str(error))
        raise error
    return None
def get_lowering_uids_by_cruise(cruise_uid, api_server_url=API_SERVER_URL, headers=HEADERS):
    '''
    Return the lowering UIDs for the given cruise_uid

    On HTTP 200 this returns a generator of 'id' values; on HTTP 404 an
    empty list; on any other status, None.
    '''
    try:
        url = api_server_url + LOWERINGS_API_PATH + '/bycruise/' + cruise_uid
        req = requests.get(url, headers=headers)
        if req.status_code == 200:
            lowerings = json.loads(req.text)
            # NOTE(review): the 200 branch yields a generator while the 404
            # branch returns a list -- callers should rely on iterability only.
            return (lowering['id'] for lowering in lowerings)
        if req.status_code == 404:
            return []
    except Exception as error:
        # Log, then re-raise so the caller sees the original exception.
        logging.error(str(error))
        raise error
    return None
def get_lowering_ids_by_cruise(cruise_uid, api_server_url=API_SERVER_URL, headers=HEADERS):
    '''
    Return the lowering_ids for the given cruise_uid

    On HTTP 200 this returns a generator of 'lowering_id' values; on HTTP
    404 an empty list; on any other status, None.
    '''
    try:
        url = api_server_url + LOWERINGS_API_PATH + '/bycruise/' + cruise_uid
        req = requests.get(url, headers=headers)
        if req.status_code == 200:
            lowerings = json.loads(req.text)
            # NOTE(review): the 200 branch yields a generator while the 404
            # branch returns a list -- callers should rely on iterability only.
            return (lowering['lowering_id'] for lowering in lowerings)
        if req.status_code == 404:
            return []
    except Exception as error:
        # Log, then re-raise so the caller sees the original exception.
        logging.error(str(error))
        raise error
    return None
def get_lowering(lowering_uid, export_format='json', api_server_url=API_SERVER_URL, headers=HEADERS):
    '''
    Return a lowering record based on the lowering_uid. The record comes
    back as a json object by default; pass export_format='csv' for csv text.
    '''
    try:
        response = requests.get(
            api_server_url + LOWERINGS_API_PATH + '/' + lowering_uid + '?format=' + export_format,
            headers=headers)
        # Only a 200 reply yields a result; anything else returns None.
        if response.status_code == 200:
            if export_format == 'json':
                return json.loads(response.text)
            if export_format == 'csv':
                return response.text
    except Exception as error:
        # Log, then hand the original exception back to the caller.
        logging.error(str(error))
        raise error
    return None
def get_lowering_by_id(lowering_id, export_format='json', api_server_url=API_SERVER_URL, headers=HEADERS):
    '''
    Return the lowering record based on the lowering_id. The record comes
    back as a json object by default; pass export_format='csv' for csv text.
    '''
    try:
        response = requests.get(
            api_server_url + LOWERINGS_API_PATH + '?lowering_id=' + lowering_id + '&format=' + export_format,
            headers=headers)
        # A 200 json reply carries an array; the first entry is the match.
        if response.status_code == 200:
            if export_format == 'json':
                return json.loads(response.text)[0]
            if export_format == 'csv':
                return response.text
    except Exception as error:
        # Log, then hand the original exception back to the caller.
        logging.error(str(error))
        raise error
    return None
def get_lowerings_by_cruise(cruise_uid, export_format='json', api_server_url=API_SERVER_URL, headers=HEADERS):
    '''
    Return the lowering records contained within the cruise whose uid is
    cruise_uid. Records come back as json objects by default; pass
    export_format='csv' to receive csv text instead.
    '''
    try:
        response = requests.get(
            api_server_url + LOWERINGS_API_PATH + '/bycruise/' + cruise_uid + '?format=' + export_format,
            headers=headers)
        # 200 -> payload, 404 -> empty result of the matching type,
        # any other status (or unknown format) falls through to None.
        if response.status_code == 200:
            if export_format == 'json':
                return json.loads(response.text)
            if export_format == 'csv':
                return response.text
        elif response.status_code == 404:
            if export_format == 'json':
                return []
            if export_format == 'csv':
                return ""
    except Exception as error:
        logging.error(str(error))
        raise error
    return None
def get_lowering_by_event(event_uid, export_format='json', api_server_url=API_SERVER_URL, headers=HEADERS):
    '''
    Return the lowering record containing the event whose uid is event_uid.
    The record comes back as a json object by default; pass
    export_format='csv' for csv text.
    '''
    try:
        response = requests.get(
            api_server_url + LOWERINGS_API_PATH + '/byevent/' + event_uid + '?format=' + export_format,
            headers=headers)
        # Only a 200 reply yields a result; anything else returns None.
        if response.status_code == 200:
            if export_format == 'json':
                return json.loads(response.text)
            if export_format == 'csv':
                return response.text
    except Exception as error:
        # Log, then hand the original exception back to the caller.
        logging.error(str(error))
        raise error
    return None
| 28.171053
| 110
| 0.625564
| 825
| 6,423
| 4.68
| 0.126061
| 0.090132
| 0.0777
| 0.06216
| 0.838643
| 0.825693
| 0.825693
| 0.825693
| 0.816369
| 0.799793
| 0
| 0.011559
| 0.286159
| 6,423
| 227
| 111
| 28.295154
| 0.830534
| 0.223727
| 0
| 0.801724
| 0
| 0
| 0.039403
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068966
| false
| 0
| 0.034483
| 0
| 0.336207
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c01c591411f6d6e8a14feeb103177d6cfae6a875
| 259
|
py
|
Python
|
dist/assets/code/seance1/2.py
|
Mistergix/deficode
|
6460ec3e22d36b67cef6815d9977fba973ab139b
|
[
"MIT"
] | null | null | null |
dist/assets/code/seance1/2.py
|
Mistergix/deficode
|
6460ec3e22d36b67cef6815d9977fba973ab139b
|
[
"MIT"
] | 10
|
2018-07-11T22:40:57.000Z
|
2018-11-24T21:05:14.000Z
|
dist/assets/code/seance1/2.py
|
Mistergix/deficode
|
6460ec3e22d36b67cef6815d9977fba973ab139b
|
[
"MIT"
] | null | null | null |
import turtle as trt

# Single turtle that draws the whole figure.
tortue = trt.Turtle()

distance = 70  # side length, in pixels
angle = 90     # left turn between sides; 90 degrees closes a square

# Draw a square: the original repeated the forward/left pair four times
# verbatim; a loop expresses the same four strokes without duplication.
for _ in range(4):
    tortue.forward(distance)
    tortue.left(angle)

# Hand control to the turtle event loop so the window stays open.
trt.done()
| 14.388889
| 24
| 0.772201
| 37
| 259
| 5.405405
| 0.324324
| 0.26
| 0.42
| 0.54
| 0.72
| 0.72
| 0.72
| 0.72
| 0.72
| 0.72
| 0
| 0.017167
| 0.100386
| 259
| 17
| 25
| 15.235294
| 0.841202
| 0
| 0
| 0.615385
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.076923
| 0
| 0.076923
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
c0256236bb0d4ffc1f7a7cdfce622d5e62e96b83
| 28
|
py
|
Python
|
api/honeyhole/api/__init__.py
|
exleym/HoneyHole
|
135c7d0f8f3bb504b3196fcef9ecce1e19ecf8b6
|
[
"Unlicense"
] | null | null | null |
api/honeyhole/api/__init__.py
|
exleym/HoneyHole
|
135c7d0f8f3bb504b3196fcef9ecce1e19ecf8b6
|
[
"Unlicense"
] | 6
|
2021-03-11T04:40:22.000Z
|
2022-03-02T09:51:55.000Z
|
api/honeyhole/api/__init__.py
|
exleym/HoneyHole
|
135c7d0f8f3bb504b3196fcef9ecce1e19ecf8b6
|
[
"Unlicense"
] | null | null | null |
from . api import blueprint
| 14
| 27
| 0.785714
| 4
| 28
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.178571
| 28
| 1
| 28
| 28
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 7
|
c03c0fdcfbeeacf38a4e636955fa0eaa7ff584d6
| 4,438
|
py
|
Python
|
beam_search.py
|
fabriziocosta/GraphOptimizer
|
2f297ff17cb851529882cb342754c56d2aa222eb
|
[
"MIT"
] | null | null | null |
beam_search.py
|
fabriziocosta/GraphOptimizer
|
2f297ff17cb851529882cb342754c56d2aa222eb
|
[
"MIT"
] | null | null | null |
beam_search.py
|
fabriziocosta/GraphOptimizer
|
2f297ff17cb851529882cb342754c56d2aa222eb
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Provides scikit interface."""
import numpy as np
from GraphOptimizer.parallel_utils import parallel_map
class BeamSearch(object):
    """Beam search over graphs using gradient-directed neighbourhoods.

    Each round expands every graph in the beam via
    `neighborhood.make_gradient_neighbors` and keeps the `beam_size`
    highest-scoring distinct graphs according to `objective_func`.
    """

    def __init__(self,
                 neighborhood=None,
                 objective_func=None,
                 n_iter=100,
                 beam_size=10,
                 parallel=False):
        self.neighborhood = neighborhood
        self.objective_func = objective_func
        self.n_iter = n_iter
        self.beam_size = beam_size
        self.parallel = parallel

    def remove_duplicates(self, graphs):
        """Keep one graph per objective value, within a 1e-5 tolerance."""
        tolerance = 1e-5
        scores = [self.objective_func(g) for g in graphs]
        order = list(np.argsort(scores).reshape(-1))
        kept = []
        last_score = 1e9
        for score, idx in zip(sorted(scores), order):
            if abs(score - last_score) > tolerance:
                kept.append(graphs[idx])
                last_score = score
        return kept

    def select_beam(self, graphs):
        """Return the `beam_size` best distinct graphs, highest score first."""
        distinct = self.remove_duplicates(graphs)
        ranked = sorted(distinct, key=self.objective_func, reverse=True)
        return ranked[:self.beam_size]

    def optimize_single(self, graph):
        """Run `n_iter` expand-and-prune rounds starting from one graph."""
        beam = [graph]
        for _ in range(self.n_iter):
            candidates = []
            for g in beam:
                candidates += self.neighborhood.make_gradient_neighbors(g)
            beam = self.select_beam(candidates)
        return beam

    def _optimize(self, graph, i):
        """Adapter for parallel_map: pair the result with its input index."""
        return (i, self.optimize_single(graph))

    def parallel_optimize(self, graphs):
        """Optimise each graph via parallel_map, concatenating the results."""
        collected = []
        for partial in parallel_map(self._optimize, graphs):
            collected += partial
        return collected

    def serial_optimize(self, graphs):
        """Optimise each graph in turn, concatenating the resulting beams."""
        collected = []
        for graph in graphs:
            collected += self.optimize_single(graph)
        return collected

    def optimize(self, graphs):
        """Optimise all graphs (serially or in parallel), then prune to one beam."""
        runner = self.parallel_optimize if self.parallel else self.serial_optimize
        return self.select_beam(runner(graphs))
class BeamRandomSearch(object):
    """Beam search over graphs using undirected (random) neighbourhoods.

    Identical in structure to BeamSearch, but expands the beam with
    `neighborhood.make_neighbors` instead of gradient-directed moves.
    """

    def __init__(self,
                 neighborhood=None,
                 objective_func=None,
                 n_iter=100,
                 beam_size=10,
                 parallel=False):
        self.neighborhood = neighborhood
        self.objective_func = objective_func
        self.n_iter = n_iter
        self.beam_size = beam_size
        self.parallel = parallel

    def remove_duplicates(self, graphs):
        """Keep one graph per objective value, within a 1e-5 tolerance."""
        tolerance = 1e-5
        scores = [self.objective_func(g) for g in graphs]
        order = list(np.argsort(scores).reshape(-1))
        kept = []
        last_score = 1e9
        for score, idx in zip(sorted(scores), order):
            if abs(score - last_score) > tolerance:
                kept.append(graphs[idx])
                last_score = score
        return kept

    def select_beam(self, graphs):
        """Return the `beam_size` best distinct graphs, highest score first."""
        distinct = self.remove_duplicates(graphs)
        ranked = sorted(distinct, key=self.objective_func, reverse=True)
        return ranked[:self.beam_size]

    def optimize_single(self, graph):
        """Run `n_iter` expand-and-prune rounds starting from one graph."""
        beam = [graph]
        for _ in range(self.n_iter):
            candidates = []
            for g in beam:
                candidates += self.neighborhood.make_neighbors(g)
            beam = self.select_beam(candidates)
        return beam

    def _optimize(self, graph, i):
        """Adapter for parallel_map: pair the result with its input index."""
        return (i, self.optimize_single(graph))

    def parallel_optimize(self, graphs):
        """Optimise each graph via parallel_map, concatenating the results."""
        collected = []
        for partial in parallel_map(self._optimize, graphs):
            collected += partial
        return collected

    def serial_optimize(self, graphs):
        """Optimise each graph in turn, concatenating the resulting beams."""
        collected = []
        for graph in graphs:
            collected += self.optimize_single(graph)
        return collected

    def optimize(self, graphs):
        """Optimise all graphs (serially or in parallel), then prune to one beam."""
        runner = self.parallel_optimize if self.parallel else self.serial_optimize
        return self.select_beam(runner(graphs))
| 31.475177
| 75
| 0.596665
| 533
| 4,438
| 4.699812
| 0.148218
| 0.136527
| 0.071856
| 0.057485
| 0.942116
| 0.942116
| 0.942116
| 0.942116
| 0.942116
| 0.942116
| 0
| 0.006629
| 0.320189
| 4,438
| 140
| 76
| 31.7
| 0.823666
| 0.01059
| 0
| 0.949153
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.135593
| false
| 0
| 0.016949
| 0
| 0.288136
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c05230494834bae35eab671db2507036f9971dda
| 8,843
|
py
|
Python
|
scripts/slave/recipe_modules/chromium_tests/chromium_android_fyi.py
|
bopopescu/chromium-build
|
f8e42c70146c1b668421ee6358dc550a955770a3
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/slave/recipe_modules/chromium_tests/chromium_android_fyi.py
|
bopopescu/chromium-build
|
f8e42c70146c1b668421ee6358dc550a955770a3
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/slave/recipe_modules/chromium_tests/chromium_android_fyi.py
|
bopopescu/chromium-build
|
f8e42c70146c1b668421ee6358dc550a955770a3
|
[
"BSD-3-Clause"
] | 1
|
2020-07-22T09:16:32.000Z
|
2020-07-22T09:16:32.000Z
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from . import steps
# Builder configuration spec for the chromium.android.fyi waterfall, keyed by
# builder name; consumed by the chromium_tests recipe module. Duplicate
# 'test_results_config' keys (the later entry silently won under Python's
# last-key-wins dict-literal semantics) have been de-duplicated.
SPEC = {
    'settings': {
        'build_gs_bucket': 'chromium-android-archive',
    },
    'builders': {
        'Android Tests (trial)(dbg)': {
            'chromium_config': 'android',
            'gclient_config': 'chromium',
            'gclient_apply_config': ['android'],
            'chromium_config_kwargs': {
                'BUILD_CONFIG': 'Debug',
                'TARGET_BITS': 32,
                'TARGET_PLATFORM': 'android',
            },
            'bot_type': 'tester',
            'parent_mastername': 'chromium.android',
            'parent_buildername': 'Android arm Builder (dbg)',
            'android_config': 'non_device_wipe_provisioning',
            'remove_system_webview': True,
            'root_devices': True,
            'tests': [
                steps.GTestTest('gfx_unittests'),
                steps.AndroidInstrumentationTest('WebViewUiTest'),
            ],
            'test_results_config': 'staging_server',
            'testing': {
                'platform': 'linux',
            },
        },
        'Jelly Bean Tester': {
            'chromium_config': 'android',
            'gclient_config': 'chromium',
            'gclient_apply_config': ['android'],
            'chromium_config_kwargs': {
                'BUILD_CONFIG': 'Debug',
                'TARGET_PLATFORM': 'android',
            },
            'parent_buildername': 'Android arm Builder (dbg)',
            'parent_mastername': 'chromium.android',
            'bot_type': 'tester',
            'android_config': 'main_builder_mb',
            'android_apply_config': ['use_devil_provision'],
            'test_results_config': 'public_server',
            'testing': {
                'platform': 'linux',
            },
        },
        'Lollipop Consumer Tester': {
            'chromium_config': 'android',
            'gclient_config': 'chromium',
            'gclient_apply_config': ['android'],
            'chromium_config_kwargs': {
                'BUILD_CONFIG': 'Debug',
                'TARGET_PLATFORM': 'android',
            },
            'parent_buildername': 'Android arm64 Builder (dbg)',
            'parent_mastername': 'chromium.android',
            'bot_type': 'tester',
            'android_config': 'arm64_builder_mb',
            'android_apply_config': ['use_devil_provision'],
            'test_results_config': 'public_server',
            'testing': {
                'platform': 'linux',
            },
        },
        'Lollipop Low-end Tester': {
            'enable_swarming': True,
            'chromium_config': 'android',
            'gclient_config': 'chromium',
            'gclient_apply_config': ['android'],
            'chromium_config_kwargs': {
                'BUILD_CONFIG': 'Debug',
                'TARGET_PLATFORM': 'android',
            },
            'parent_buildername': 'Android arm Builder (dbg)',
            'parent_mastername': 'chromium.android',
            'bot_type': 'tester',
            'android_config': 'main_builder_mb',
            'android_apply_config': ['use_devil_provision'],
            'test_results_config': 'public_server',
            'testing': {
                'platform': 'linux',
            },
        },
        'Memory Infra Tester': {
            'enable_swarming': True,
            'chromium_config': 'android',
            'gclient_config': 'chromium',
            'gclient_apply_config': ['android'],
            'chromium_config_kwargs': {
                'BUILD_CONFIG': 'Release',
                'TARGET_BITS': 32,
                'TARGET_PLATFORM': 'android',
            },
            'bot_type': 'builder_tester',
            'android_config': 'main_builder_mb',
            'testing': {
                'platform': 'linux',
            },
        },
        'NDK Next arm Builder': {
            'chromium_config': 'android',
            'gclient_config': 'chromium',
            'gclient_apply_config': ['android', 'ndk_next'],
            'chromium_config_kwargs': {
                'BUILD_CONFIG': 'Debug',
                'TARGET_PLATFORM': 'android',
            },
            'bot_type': 'builder',
            'android_config': 'main_builder_mb',
            'testing': {
                'platform': 'linux',
            },
        },
        'NDK Next arm64 Builder': {
            'chromium_config': 'android',
            'gclient_config': 'chromium',
            'gclient_apply_config': ['android', 'ndk_next'],
            'chromium_config_kwargs': {
                'BUILD_CONFIG': 'Debug',
                'TARGET_PLATFORM': 'android',
            },
            'bot_type': 'builder',
            'android_config': 'arm64_builder_mb',
            'testing': {
                'platform': 'linux',
            },
        },
        'NDK Next MIPS Builder': {
            'chromium_config': 'android',
            'gclient_config': 'chromium',
            'gclient_apply_config': ['android', 'ndk_next'],
            'chromium_config_kwargs': {
                'BUILD_CONFIG': 'Debug',
                'TARGET_PLATFORM': 'android',
            },
            'bot_type': 'builder',
            'android_config': 'mipsel_builder_mb',
            'testing': {
                'platform': 'linux',
            },
        },
        'NDK Next x64 Builder': {
            'chromium_config': 'android',
            'gclient_config': 'chromium',
            'gclient_apply_config': ['android', 'ndk_next'],
            'chromium_config_kwargs': {
                'BUILD_CONFIG': 'Debug',
                'TARGET_PLATFORM': 'android',
            },
            'bot_type': 'builder',
            'android_config': 'x64_builder_mb',
            'testing': {
                'platform': 'linux',
            },
        },
        'NDK Next x86 Builder': {
            'chromium_config': 'android',
            'gclient_config': 'chromium',
            'gclient_apply_config': ['android', 'ndk_next'],
            'chromium_config_kwargs': {
                'BUILD_CONFIG': 'Debug',
                'TARGET_PLATFORM': 'android',
            },
            'bot_type': 'builder',
            'android_config': 'x86_builder_mb',
            'testing': {
                'platform': 'linux',
            },
        },
        # These don't actually run on the master. They're here to configure
        # stand-alone trybots on tryserver.chromium.android.
        'Unswarmed N5 Tests Dummy Builder': {
            'chromium_config': 'android',
            'chromium_apply_config': ['chrome_with_codecs'],
            'gclient_config': 'chromium',
            'gclient_apply_config': ['android'],
            'chromium_config_kwargs': {
                'BUILD_CONFIG': 'Release',
                'TARGET_BITS': 32,
                'TARGET_PLATFORM': 'android',
            },
            'android_config': 'main_builder_mb',
            'android_apply_config': ['use_devil_provision'],
            'test_results_config': 'public_server',
            'bot_type': 'builder_tester',
            'testing': {
                'platform': 'linux',
            },
        },
        'Unswarmed N5X Tests Dummy Builder': {
            'chromium_config': 'android',
            'chromium_apply_config': ['chrome_with_codecs'],
            'gclient_config': 'chromium',
            'gclient_apply_config': ['android'],
            'chromium_config_kwargs': {
                'BUILD_CONFIG': 'Release',
                'TARGET_BITS': 64,
                'TARGET_PLATFORM': 'android',
            },
            'android_config': 'arm64_builder_mb',
            'android_apply_config': ['use_devil_provision'],
            'test_results_config': 'public_server',
            'bot_type': 'builder_tester',
            'testing': {
                'platform': 'linux',
            },
        },
        'Nougat Phone Tester': {
            'enable_swarming': True,
            'chromium_config': 'android',
            'gclient_config': 'chromium',
            'gclient_apply_config': ['android'],
            'chromium_config_kwargs': {
                'BUILD_CONFIG': 'Debug',
                'TARGET_PLATFORM': 'android',
            },
            'parent_buildername': 'Android arm64 Builder (dbg)',
            'parent_mastername': 'chromium.android',
            'bot_type': 'tester',
            'android_config': 'arm64_builder_mb',
            'android_apply_config': ['use_devil_provision'],
            'test_results_config': 'public_server',
            'testing': {
                'platform': 'linux',
            },
        },
        'x64 Device Tester': {
            'chromium_config': 'android',
            'gclient_config': 'chromium',
            'gclient_apply_config': ['android'],
            'chromium_config_kwargs': {
                'BUILD_CONFIG': 'Release',
                'TARGET_BITS': 64,
                'TARGET_PLATFORM': 'android',
            },
            'bot_type': 'builder_tester',
            'android_config': 'x64_builder_mb',
            'test_results_config': 'staging_server',
            'testing': {
                'platform': 'linux',
            },
            'enable_swarming': True,
            # Workaround so that recipes doesn't add random build targets to our
            # compile line. We want to build everything.
            'add_tests_as_compile_targets': False,
            'compile_targets': [
                'all',
            ],
        },
        'x86 Cloud Tester': {
            'chromium_config': 'android',
            'gclient_config': 'chromium',
            'gclient_apply_config': ['android'],
            'chromium_config_kwargs': {
                'BUILD_CONFIG': 'Release',
                'TARGET_BITS': 32,
                'TARGET_PLATFORM': 'android',
            },
            'bot_type': 'builder_tester',
            'android_config': 'x86_builder_mb',
            'test_results_config': 'staging_server',
            'testing': {
                'platform': 'linux',
            },
            'enable_swarming': True,
        },
    },
}
| 29.875
| 74
| 0.583399
| 826
| 8,843
| 5.886199
| 0.173123
| 0.086384
| 0.064788
| 0.086384
| 0.848828
| 0.839367
| 0.839367
| 0.808104
| 0.790827
| 0.776635
| 0
| 0.007328
| 0.259301
| 8,843
| 295
| 75
| 29.976271
| 0.734962
| 0.043198
| 0
| 0.712177
| 0
| 0
| 0.569857
| 0.055956
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.00369
| 0
| 0.00369
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
221777ae46713fe5505ce84d896be864a6a829b3
| 37,323
|
py
|
Python
|
models/resnext101_32x4d.py
|
rwightman/pytorch-planet-amazon
|
20d4e9531c211b12ce2ebc1f3d6ee2ceaae5e266
|
[
"Apache-2.0"
] | 30
|
2017-07-23T23:23:28.000Z
|
2019-11-20T09:00:27.000Z
|
models/resnext101_32x4d.py
|
rwightman/pytorch-planet-amazon
|
20d4e9531c211b12ce2ebc1f3d6ee2ceaae5e266
|
[
"Apache-2.0"
] | 2
|
2017-08-29T18:28:52.000Z
|
2018-02-11T21:10:10.000Z
|
models/resnext101_32x4d.py
|
rwightman/pytorch-planet-amazon
|
20d4e9531c211b12ce2ebc1f3d6ee2ceaae5e266
|
[
"Apache-2.0"
] | 7
|
2017-09-02T12:15:22.000Z
|
2019-02-07T10:50:57.000Z
|
""" Pytorch Resnet-101-32x4d impl
Sourced by running https://github.com/clcarwin/convert_torch_to_pytorch (MIT) on
https://github.com/facebookresearch/ResNeXt (BSD-3-Clause)
Pretrained weights are not being used as they are CC BY-NC 4.0 license.
"""
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
from torch.autograd import Variable
from functools import reduce
class LambdaBase(nn.Sequential):
    """Base container mirroring Torch7 table layers.

    Holds child modules plus a combining function ``lambda_func``;
    subclasses decide how the per-child outputs are merged.
    """

    def __init__(self, fn, *args):
        super(LambdaBase, self).__init__(*args)
        self.lambda_func = fn

    def forward_prepare(self, input):
        """Apply every child module to *input*.

        Returns the list of child outputs, or the raw input unchanged
        when there are no children.
        """
        outputs = [module(input) for module in self._modules.values()]
        return outputs if outputs else input
class Lambda(LambdaBase):
    """Apply ``lambda_func`` to the prepared input (Torch7 ``nn.Lambda``)."""

    def forward(self, input):
        prepared = self.forward_prepare(input)
        return self.lambda_func(prepared)
class LambdaMap(LambdaBase):
    """Apply ``lambda_func`` element-wise over the prepared outputs.

    Returns a list, like Torch7's table-producing layers (used here to
    model ConcatTable branches).
    """

    def forward(self, input):
        return [self.lambda_func(item) for item in self.forward_prepare(input)]
class LambdaReduce(LambdaBase):
    """Fold the prepared outputs pairwise with ``lambda_func``.

    With ``lambda x, y: x + y`` this reproduces Torch7's ``nn.CAddTable``.
    """

    def forward(self, input):
        prepared = self.forward_prepare(input)
        return reduce(self.lambda_func, prepared)
def resnext_101_32x4d_features(activation_fn=nn.ReLU()):
    """Build the convolutional trunk of ResNeXt-101 (32x4d).

    The original auto-converted code hand-unrolled every residual block;
    this version builds the identical module tree (stem + stages of
    [3, 4, 23, 3] bottleneck blocks) from helpers. The nesting of
    ``nn.Sequential`` / ``LambdaMap`` / ``LambdaReduce`` containers is
    preserved exactly, so state-dict keys are unchanged.

    Args:
        activation_fn: activation module instance shared across the whole
            trunk (the single default ``nn.ReLU()`` instance is reused
            everywhere, as in the original).

    Returns:
        ``nn.Sequential`` mapping (N, 3, H, W) images to (N, 2048, H/32, W/32)
        feature maps.
    """

    def _branch(in_ch, mid_ch, out_ch, stride):
        # Grouped bottleneck: 1x1 reduce -> 3x3 grouped conv (cardinality 32,
        # carries the spatial stride) -> 1x1 expand. BN after every conv.
        return nn.Sequential(
            nn.Sequential(
                nn.Conv2d(in_ch, mid_ch, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
                nn.BatchNorm2d(mid_ch),
                activation_fn,
                nn.Conv2d(mid_ch, mid_ch, (3, 3), (stride, stride), (1, 1), 1, 32, bias=False),
                nn.BatchNorm2d(mid_ch),
                activation_fn,
            ),
            nn.Conv2d(mid_ch, out_ch, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
            nn.BatchNorm2d(out_ch),
        )

    def _block(in_ch, mid_ch, out_ch, stride):
        # Residual block: bottleneck branch + shortcut, summed, then activated.
        # A projection shortcut (1x1 conv + BN) is used whenever the shape
        # changes; otherwise the shortcut is the identity.
        if stride != 1 or in_ch != out_ch:
            shortcut = nn.Sequential(
                nn.Conv2d(in_ch, out_ch, (1, 1), (stride, stride), (0, 0), 1, 1, bias=False),
                nn.BatchNorm2d(out_ch),
            )
        else:
            shortcut = Lambda(lambda x: x)  # Identity
        return nn.Sequential(
            LambdaMap(lambda x: x,  # ConcatTable
                      _branch(in_ch, mid_ch, out_ch, stride),
                      shortcut),
            LambdaReduce(lambda x, y: x + y),  # CAddTable
            activation_fn,
        )

    def _stage(in_ch, mid_ch, out_ch, num_blocks, stride):
        # First block changes channels/resolution; the rest are identity blocks.
        blocks = [_block(in_ch, mid_ch, out_ch, stride)]
        blocks.extend(_block(out_ch, mid_ch, out_ch, 1) for _ in range(num_blocks - 1))
        return nn.Sequential(*blocks)

    features = nn.Sequential(
        # Stem: 7x7/2 conv + BN + activation + 3x3/2 max-pool.
        nn.Conv2d(3, 64, (7, 7), (2, 2), (3, 3), 1, 1, bias=False),
        nn.BatchNorm2d(64),
        activation_fn,
        nn.MaxPool2d((3, 3), (2, 2), (1, 1)),
        # Stages: (in, mid, out, blocks, stride of first block).
        _stage(64, 128, 256, 3, 1),
        _stage(256, 256, 512, 4, 2),
        _stage(512, 512, 1024, 23, 2),
        _stage(1024, 1024, 2048, 3, 2),
    )
    return features
class ResNeXt101_32x4d(nn.Module):
    """ResNeXt-101 (32x4d) image classifier.

    Args:
        num_classes: size of the final linear classifier.
        activation_fn: activation module shared across the trunk.
        drop_rate: dropout probability applied before the classifier
            (0 disables dropout).
        global_pool: pooling type; only 'avg' is supported.

    Raises:
        ValueError: if ``global_pool`` is not 'avg' (was a bare ``assert``,
            which disappears under ``python -O``).
    """

    def __init__(self, num_classes=1000, activation_fn=nn.ReLU(), drop_rate=0, global_pool='avg'):
        # Initialize nn.Module machinery before touching any attributes.
        super(ResNeXt101_32x4d, self).__init__()
        if global_pool != 'avg':
            raise ValueError("global_pool=%r is not supported; only 'avg'" % (global_pool,))
        self.drop_rate = drop_rate
        self.global_pool = global_pool
        self.features = resnext_101_32x4d_features(activation_fn=activation_fn)
        self.pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Linear(2048, num_classes)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # In-place initializer; the non-underscore kaiming_normal
                # is deprecated.
                init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def forward(self, input):
        x = self.features(input)
        x = self.pool(x)
        x = x.view(x.size(0), -1)  # flatten (N, 2048, 1, 1) -> (N, 2048)
        if self.drop_rate > 0:
            x = F.dropout(x, p=self.drop_rate, training=self.training)
        x = self.fc(x)
        return x
def resnext101_32x4d(pretrained=False, num_classes=1000, **kwargs):
    """Construct a ResNeXt101_32x4d model.

    ``pretrained`` is accepted for API symmetry with other model
    factories, but no weights are shipped (the upstream weights are
    CC BY-NC licensed), so requesting them only prints a warning.
    """
    if pretrained:
        print('Warning: No pretrained weights setup.')
    return ResNeXt101_32x4d(num_classes=num_classes, **kwargs)
| 51.057456
| 99
| 0.346864
| 3,173
| 37,323
| 4.02994
| 0.041286
| 0.062876
| 0.053257
| 0.039728
| 0.895832
| 0.893486
| 0.866114
| 0.86322
| 0.856651
| 0.855791
| 0
| 0.126225
| 0.538113
| 37,323
| 730
| 100
| 51.127397
| 0.615523
| 0.070948
| 0
| 0.906648
| 0
| 0
| 0.001248
| 0
| 0
| 0
| 0
| 0
| 0.001414
| 1
| 0.01273
| false
| 0
| 0.008487
| 0.004243
| 0.03819
| 0.001414
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
225522d46b908d31bc677acb8978277a39ee9ff7
| 65
|
py
|
Python
|
blog/secret_key.py
|
Miklosh1989/FinalProjectBlog
|
51de8df8003b0c4ac14a04392a505a911d409fb6
|
[
"CC-BY-3.0"
] | null | null | null |
blog/secret_key.py
|
Miklosh1989/FinalProjectBlog
|
51de8df8003b0c4ac14a04392a505a911d409fb6
|
[
"CC-BY-3.0"
] | null | null | null |
blog/secret_key.py
|
Miklosh1989/FinalProjectBlog
|
51de8df8003b0c4ac14a04392a505a911d409fb6
|
[
"CC-BY-3.0"
] | null | null | null |
# SECURITY(review): Django SECRET_KEY committed to version control — anyone
# with repo access can forge session cookies and signed data. Rotate this key
# and load it from the environment (e.g. os.environ['SECRET_KEY']) instead of
# hard-coding it here.
SECRET_KEY = '-1a@ts0&f84%g#hh5_7#$uyt5f3-ln%nu9%y=-4&&0gb9g+n$8'
| 65
| 65
| 0.676923
| 16
| 65
| 2.625
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.206349
| 0.030769
| 65
| 1
| 65
| 65
| 0.460317
| 0
| 0
| 0
| 0
| 1
| 0.757576
| 0.757576
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
226a75accd0b1a6c83394cad32d774708475e2a8
| 149
|
py
|
Python
|
astarte/device/__init__.py
|
bettio/astarte-device-sdk-python
|
835e0250e437067518f5c820f5e72507071c6295
|
[
"Apache-2.0"
] | null | null | null |
astarte/device/__init__.py
|
bettio/astarte-device-sdk-python
|
835e0250e437067518f5c820f5e72507071c6295
|
[
"Apache-2.0"
] | null | null | null |
astarte/device/__init__.py
|
bettio/astarte-device-sdk-python
|
835e0250e437067518f5c820f5e72507071c6295
|
[
"Apache-2.0"
] | null | null | null |
# Export what we care about
from .device import Device
from .pairing_handler import register_device_with_jwt_token, register_device_with_private_key
| 37.25
| 93
| 0.872483
| 23
| 149
| 5.26087
| 0.695652
| 0.231405
| 0.297521
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.100671
| 149
| 3
| 94
| 49.666667
| 0.902985
| 0.167785
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
226bb887b0574daac6f4af5d3adc83c7aa0c5708
| 83
|
py
|
Python
|
airflow_dbt/operators/__init__.py
|
ayobamshy/airflow-dbt
|
7e3e7e99e959a912721f6e2c4b45192bd0d769be
|
[
"MIT"
] | null | null | null |
airflow_dbt/operators/__init__.py
|
ayobamshy/airflow-dbt
|
7e3e7e99e959a912721f6e2c4b45192bd0d769be
|
[
"MIT"
] | null | null | null |
airflow_dbt/operators/__init__.py
|
ayobamshy/airflow-dbt
|
7e3e7e99e959a912721f6e2c4b45192bd0d769be
|
[
"MIT"
] | null | null | null |
# Re-export the operator classes at package level so DAG files can simply do
# `from airflow_dbt.operators import DbtRunOperator`, etc.
from .dbt_operator import DbtRunOperator, DbtTestOperator, DbtDocsGenerateOperator
| 41.5
| 82
| 0.891566
| 7
| 83
| 10.428571
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.072289
| 83
| 1
| 83
| 83
| 0.948052
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
3f1cc643538f3cf0bb46a4f7d304f6fcc6ccfb30
| 128
|
py
|
Python
|
tools/patch_codegen/dbg.py
|
fengjixuchui/src
|
0c5a6cd8057717f73b1373f8d85eb9b19e1934e1
|
[
"BSD-3-Clause"
] | 1,160
|
2015-05-02T15:13:20.000Z
|
2022-03-31T20:04:28.000Z
|
tools/patch_codegen/dbg.py
|
fengjixuchui/src
|
0c5a6cd8057717f73b1373f8d85eb9b19e1934e1
|
[
"BSD-3-Clause"
] | 19
|
2015-04-20T13:47:00.000Z
|
2021-07-07T13:00:42.000Z
|
tools/patch_codegen/dbg.py
|
fengjixuchui/src
|
0c5a6cd8057717f73b1373f8d85eb9b19e1934e1
|
[
"BSD-3-Clause"
] | 257
|
2015-04-01T21:42:33.000Z
|
2022-03-10T11:57:51.000Z
|
# Patch map consumed by the patch_codegen tool: keys are mangled C++ function
# names in the generated SWIG wrapper; values are lists of
# (patch_function_name, argument) tuples to apply to that function's code.
{
    "SwigDirector_DBG_Hooks::SwigDirector_DBG_Hooks" : [
        ("maybe_collect_director_fixed_method_set", None),
    ],
}
| 18.285714
| 58
| 0.679688
| 13
| 128
| 6
| 0.769231
| 0.384615
| 0.512821
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.195313
| 128
| 6
| 59
| 21.333333
| 0.757282
| 0
| 0
| 0
| 0
| 0
| 0.664063
| 0.664063
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3f5671b6b88bbbc96b414434311ec19cf165cda3
| 15,496
|
py
|
Python
|
python/orca/test/bigdl/orca/learn/ray/tf/test_tf_spark_estimator.py
|
ryandoren/BigDL
|
9f8f47414f78324d8d94c3802c54a7df8a764a11
|
[
"Apache-2.0"
] | null | null | null |
python/orca/test/bigdl/orca/learn/ray/tf/test_tf_spark_estimator.py
|
ryandoren/BigDL
|
9f8f47414f78324d8d94c3802c54a7df8a764a11
|
[
"Apache-2.0"
] | null | null | null |
python/orca/test/bigdl/orca/learn/ray/tf/test_tf_spark_estimator.py
|
ryandoren/BigDL
|
9f8f47414f78324d8d94c3802c54a7df8a764a11
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import shutil
import tempfile
from unittest import TestCase
import time
import time
import numpy as np
import pytest
import tensorflow as tf
from bigdl.orca.learn.tf2 import Estimator
from bigdl.orca import OrcaContext
import os
# Absolute path to the shared test resources directory, resolved relative to
# this file so the tests work regardless of the current working directory.
resource_path = os.path.join(
    os.path.realpath(os.path.dirname(__file__)), "../../../../resources")
def simple_model(config):
    """Build a minimal two-layer Keras regression model (``config`` is unused)."""
    layers = [
        tf.keras.layers.Dense(10, input_shape=(1,)),
        tf.keras.layers.Dense(1),
    ]
    return tf.keras.models.Sequential(layers)
def compile_args(config):
    """Return the keyword arguments for ``Model.compile``.

    Uses ``config["lr"]`` as the SGD learning rate when present, else 1e-3.
    """
    import tensorflow as tf

    learning_rate = config["lr"] if "lr" in config else 1e-3
    return {
        "optimizer": tf.keras.optimizers.SGD(learning_rate),
        "loss": "mean_squared_error",
        "metrics": ["mean_squared_error"],
    }
def model_creator(config):
    """Create and compile the simple model; handed to Estimator workers."""
    net = simple_model(config)
    net.compile(**compile_args(config))
    return net
class TestTFEstimator(TestCase):
    """Integration tests for bigdl.orca's TF2 Estimator on the "spark" backend.

    Every test builds a small DataFrame of random (feature, label) rows,
    trains the tiny Keras model defined above, and exercises one aspect of
    the Estimator API (fitting, checkpointing, TensorBoard logging, ...).
    """
    # todo need more test cases

    @staticmethod
    def _to_df(rdd):
        """Map an RDD of ints to a DataFrame with random "feature"/"label" columns."""
        from pyspark.ml.linalg import DenseVector
        # `np.float` was a deprecated alias of the builtin `float` and has been
        # removed from NumPy (>= 1.24); use `float` directly — identical dtype.
        return rdd.map(lambda x: (DenseVector(np.random.randn(1, ).astype(float)),
                                  int(np.random.randint(0, 2, size=())))) \
            .toDF(["feature", "label"])

    @staticmethod
    def _new_trainer(config, temp_dir):
        """Build an Estimator with the configuration shared by every test."""
        return Estimator.from_keras(
            model_creator=model_creator,
            verbose=True,
            config=config,
            workers_per_node=2,
            backend="spark",
            model_dir=temp_dir)

    def test_dataframe(self):
        """fit/evaluate/predict plus weight- and model-level save/load round trips."""
        sc = OrcaContext.get_spark_context()
        rdd = sc.range(0, 100)
        spark = OrcaContext.get_spark_session()  # ensure a session exists
        df = self._to_df(rdd)
        config = {
            "lr": 0.2
        }
        # Create the directory before entering `try` so the finally-clause
        # cleanup can never hit an unbound temp_dir if mkdtemp fails.
        temp_dir = tempfile.mkdtemp()
        try:
            trainer = self._new_trainer(config, temp_dir)
            res = trainer.fit(df, epochs=5, batch_size=4, steps_per_epoch=25,
                              feature_cols=["feature"],
                              label_cols=["label"],
                              validation_data=df,
                              validation_steps=1)
            print("start saving")
            trainer.save_weights(os.path.join(temp_dir, "cifar10_keras.h5"))
            trainer.load_weights(os.path.join(temp_dir, "cifar10_keras.h5"))
            trainer.save(os.path.join(temp_dir, "a.ckpt"))
            trainer.load(os.path.join(temp_dir, "a.ckpt"))
            res = trainer.evaluate(df, batch_size=4, num_steps=25, feature_cols=["feature"],
                                   label_cols=["label"])
            print("validation result: ", res)
            res = trainer.predict(df, feature_cols=["feature"]).collect()
            print("predict result: ", res)
        finally:
            shutil.rmtree(temp_dir)

    def test_dataframe_with_empty_partition(self):
        """Training must tolerate partitions that contain no rows."""
        sc = OrcaContext.get_spark_context()
        rdd = sc.range(0, 100)
        # Force one of the four partitions to be empty.
        rdd_with_empty = rdd.repartition(4). \
            mapPartitionsWithIndex(lambda idx, part: [] if idx == 0 else part)
        spark = OrcaContext.get_spark_session()
        df = self._to_df(rdd_with_empty)
        config = {
            "lr": 0.2
        }
        temp_dir = tempfile.mkdtemp()
        try:
            trainer = self._new_trainer(config, temp_dir)
            res = trainer.fit(df, epochs=5, batch_size=4, steps_per_epoch=25,
                              feature_cols=["feature"],
                              label_cols=["label"]
                              )
            print("start saving")
            trainer.save_weights(os.path.join(temp_dir, "cifar10_keras.h5"))
            trainer.load_weights(os.path.join(temp_dir, "cifar10_keras.h5"))
            trainer.save(os.path.join(temp_dir, "a.ckpt"))
            trainer.load(os.path.join(temp_dir, "a.ckpt"))
            res = trainer.evaluate(df, batch_size=4, num_steps=25, feature_cols=["feature"],
                                   label_cols=["label"])
            print("validation result: ", res)
            res = trainer.predict(df, feature_cols=["feature"]).collect()
            print("predict result: ", res)
        finally:
            shutil.rmtree(temp_dir)

    def test_checkpoint_weights(self):
        """ModelCheckpoint(save_weights_only=True) checkpoints can be reloaded."""
        sc = OrcaContext.get_spark_context()
        rdd = sc.range(0, 100)
        spark = OrcaContext.get_spark_session()
        df = self._to_df(rdd)
        config = {
            "lr": 0.2
        }
        temp_dir = tempfile.mkdtemp()
        try:
            trainer = self._new_trainer(config, temp_dir)
            callbacks = [
                tf.keras.callbacks.ModelCheckpoint(filepath=os.path.join(temp_dir, "ckpt_{epoch}"),
                                                   save_weights_only=True)
            ]
            res = trainer.fit(df, epochs=3, batch_size=4, steps_per_epoch=25,
                              callbacks=callbacks,
                              feature_cols=["feature"],
                              label_cols=["label"],
                              validation_data=df,
                              validation_steps=1)
            latest_checkpoint = Estimator.latest_checkpoint(temp_dir)
            trainer.load_weights(latest_checkpoint)
            res = trainer.evaluate(df, batch_size=4, num_steps=25, feature_cols=["feature"],
                                   label_cols=["label"])
            print("validation result: ", res)
            res = trainer.predict(df, feature_cols=["feature"]).collect()
            print("predict result: ", res)
        finally:
            shutil.rmtree(temp_dir)

    def test_dataframe_shard_size(self):
        """Training honours an explicit OrcaContext._shard_size override."""
        sc = OrcaContext.get_spark_context()
        OrcaContext._shard_size = 3
        rdd = sc.range(0, 100, numSlices=10)
        spark = OrcaContext.get_spark_session()
        df = self._to_df(rdd)
        val_rdd = sc.range(0, 20, numSlices=6)
        val_df = self._to_df(val_rdd)
        config = {
            "lr": 0.2
        }
        temp_dir = tempfile.mkdtemp()
        try:
            trainer = self._new_trainer(config, temp_dir)
            res = trainer.fit(df, epochs=5, batch_size=4, steps_per_epoch=25,
                              validation_data=val_df,
                              validation_steps=2,
                              feature_cols=["feature"],
                              label_cols=["label"])
            res = trainer.fit(df, epochs=5, batch_size=4, steps_per_epoch=25,
                              feature_cols=["feature"],
                              label_cols=["label"])
            res = trainer.evaluate(val_df, batch_size=4, num_steps=25, feature_cols=["feature"],
                                   label_cols=["label"])
            print("validation result: ", res)
            res = trainer.predict(df, feature_cols=["feature"]).collect()
            print("predict result: ", res)
        finally:
            shutil.rmtree(temp_dir)
            # Restore the global inside `finally` so later tests see the
            # default shard size even if this test fails midway.
            OrcaContext._shard_size = None

    def test_dataframe_different_train_val(self):
        """Fit with a validation DataFrame distinct from the training one."""
        sc = OrcaContext.get_spark_context()
        rdd = sc.range(0, 100, numSlices=10)
        spark = OrcaContext.get_spark_session()
        df = self._to_df(rdd)
        val_rdd = sc.range(0, 20, numSlices=6)
        val_df = self._to_df(val_rdd)
        config = {
            "lr": 0.2
        }
        temp_dir = tempfile.mkdtemp()
        try:
            trainer = self._new_trainer(config, temp_dir)
            res = trainer.fit(df, epochs=5, batch_size=4, steps_per_epoch=25,
                              validation_data=val_df,
                              validation_steps=2,
                              feature_cols=["feature"],
                              label_cols=["label"])
            res = trainer.evaluate(val_df, batch_size=4, num_steps=25, feature_cols=["feature"],
                                   label_cols=["label"])
            print("validation result: ", res)
            res = trainer.predict(df, feature_cols=["feature"]).collect()
            print("predict result: ", res)
        finally:
            shutil.rmtree(temp_dir)

    def test_tensorboard(self):
        """TensorBoard callbacks produce event files for both fit and evaluate."""
        sc = OrcaContext.get_spark_context()
        rdd = sc.range(0, 100)
        spark = OrcaContext.get_spark_session()
        df = self._to_df(rdd)
        config = {
            "lr": 0.2
        }
        temp_dir = tempfile.mkdtemp()
        try:
            trainer = self._new_trainer(config, temp_dir)
            callbacks = [
                tf.keras.callbacks.TensorBoard(log_dir=os.path.join(temp_dir, "train_log"),
                                               update_freq='epoch')
            ]
            res = trainer.fit(df, epochs=3, batch_size=4, steps_per_epoch=25,
                              callbacks=callbacks,
                              feature_cols=["feature"],
                              label_cols=["label"],
                              validation_data=df,
                              validation_steps=1)
            assert len(os.listdir(os.path.join(temp_dir, "train_log"))) > 0
            callbacks = [
                tf.keras.callbacks.TensorBoard(log_dir=os.path.join(temp_dir, "train_log_2"),
                                               update_freq='batch')
            ]
            res = trainer.fit(df, epochs=3, batch_size=4, steps_per_epoch=25,
                              callbacks=callbacks,
                              feature_cols=["feature"],
                              label_cols=["label"],
                              validation_data=df,
                              validation_steps=11)
            assert len(os.listdir(os.path.join(temp_dir, "train_log_2"))) > 0
            callbacks = [
                tf.keras.callbacks.TensorBoard(log_dir=os.path.join(temp_dir, "val_log"),
                                               update_freq='batch')
            ]
            res = trainer.evaluate(df, batch_size=4, num_steps=25,
                                   callbacks=callbacks,
                                   feature_cols=["feature"],
                                   label_cols=["label"])
            assert len(os.listdir(os.path.join(temp_dir, "val_log"))) > 0
        finally:
            shutil.rmtree(temp_dir)

    def test_checkpoint_model(self):
        """ModelCheckpoint(save_weights_only=False) writes full-model checkpoints."""
        sc = OrcaContext.get_spark_context()
        rdd = sc.range(0, 100)
        spark = OrcaContext.get_spark_session()
        df = self._to_df(rdd)
        config = {
            "lr": 0.2
        }
        temp_dir = tempfile.mkdtemp()
        try:
            trainer = self._new_trainer(config, temp_dir)
            callbacks = [
                tf.keras.callbacks.ModelCheckpoint(filepath=os.path.join(temp_dir, "ckpt_{epoch}"),
                                                   save_weights_only=False
                                                   )
            ]
            res = trainer.fit(df, epochs=3, batch_size=4, steps_per_epoch=25,
                              callbacks=callbacks,
                              feature_cols=["feature"],
                              label_cols=["label"],
                              validation_data=df,
                              validation_steps=1
                              )
            assert len(os.listdir(os.path.join(temp_dir, "ckpt_3"))) > 0
            callbacks = [
                tf.keras.callbacks.ModelCheckpoint(filepath=os.path.join(temp_dir, "best"),
                                                   save_weights_only=False,
                                                   save_best_only=True
                                                   )
            ]
            res = trainer.fit(df, epochs=3, batch_size=4, steps_per_epoch=25,
                              callbacks=callbacks,
                              feature_cols=["feature"],
                              label_cols=["label"],
                              validation_data=df,
                              validation_steps=1
                              )
            assert len(os.listdir(os.path.join(temp_dir, "best"))) > 0
        finally:
            shutil.rmtree(temp_dir)
# Allow running this test module directly: delegates to pytest's runner.
if __name__ == "__main__":
    pytest.main([__file__])
| 37.429952
| 99
| 0.510067
| 1,616
| 15,496
| 4.702351
| 0.137995
| 0.037768
| 0.049743
| 0.035005
| 0.80458
| 0.8005
| 0.791025
| 0.789446
| 0.782208
| 0.771023
| 0
| 0.020062
| 0.379195
| 15,496
| 413
| 100
| 37.520581
| 0.769854
| 0.037494
| 0
| 0.747664
| 0
| 0
| 0.058196
| 0.00141
| 0
| 0
| 0
| 0.002421
| 0.015576
| 1
| 0.031153
| false
| 0
| 0.05919
| 0
| 0.102804
| 0.037383
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
58ab0cba40f8475147466deb8a5044575bf9056d
| 32,831
|
py
|
Python
|
tests/test_pose3d.py
|
mfkenson/spatialmath-python
|
525e47b253e3c1a47f8f3a6cb62ba89053c02c91
|
[
"MIT"
] | null | null | null |
tests/test_pose3d.py
|
mfkenson/spatialmath-python
|
525e47b253e3c1a47f8f3a6cb62ba89053c02c91
|
[
"MIT"
] | null | null | null |
tests/test_pose3d.py
|
mfkenson/spatialmath-python
|
525e47b253e3c1a47f8f3a6cb62ba89053c02c91
|
[
"MIT"
] | 2
|
2021-03-06T18:50:14.000Z
|
2021-03-06T18:50:26.000Z
|
import numpy.testing as nt
import matplotlib.pyplot as plt
import unittest
"""
we will assume that the primitives rotx,trotx, etc. all work
"""
from math import pi
from spatialmath.pose3d import *
# from spatialmath import super_pose as sp
from spatialmath.base import *
from spatialmath.base import argcheck
import spatialmath as sm
from spatialmath.super_pose import SMPose
from spatialmath.twist import SMTwist
def array_compare(x, y):
    """Assert that *x* and *y* are numerically almost equal.

    SMPose operands are compared via their ``.A`` matrix and SMTwist operands
    via their ``.S`` vector; plain array-likes are compared as-is.
    """
    def unwrap(value):
        # Reduce wrapper objects to their underlying ndarray representation.
        if isinstance(value, SMPose):
            return value.A
        if isinstance(value, SMTwist):
            return value.S
        return value

    nt.assert_array_almost_equal(unwrap(x), unwrap(y))
class TestSO3(unittest.TestCase):
    """Unit tests for the SO3 rotation class: constructors, properties,
    list semantics, and arithmetic operator overloads."""

    @classmethod
    def tearDownClass(cls):
        # Close any figures opened by the plotting tests.
        plt.close('all')

    def test_constructor(self):
        """SO3 from identity, matrix, canonic rotations, OA, random, and copy."""
        # null constructor
        R = SO3()
        nt.assert_equal(len(R), 1)
        array_compare(R, np.eye(3))
        self.assertIsInstance(R, SO3)

        # empty constructor
        R = SO3.Empty()
        nt.assert_equal(len(R), 0)
        self.assertIsInstance(R, SO3)

        # construct from matrix
        R = SO3(rotx(0.2))
        nt.assert_equal(len(R), 1)
        array_compare(R, rotx(0.2))
        self.assertIsInstance(R, SO3)

        # construct from canonic rotation
        R = SO3.Rx(0.2)
        nt.assert_equal(len(R), 1)
        array_compare(R, rotx(0.2))
        self.assertIsInstance(R, SO3)

        R = SO3.Ry(0.2)
        nt.assert_equal(len(R), 1)
        array_compare(R, roty(0.2))
        self.assertIsInstance(R, SO3)

        R = SO3.Rz(0.2)
        nt.assert_equal(len(R), 1)
        array_compare(R, rotz(0.2))
        self.assertIsInstance(R, SO3)

        # OA
        R = SO3.OA([0, 1, 0], [0, 0, 1])
        nt.assert_equal(len(R), 1)
        array_compare(R, np.eye(3))
        self.assertIsInstance(R, SO3)

        # random
        R = SO3.Rand()
        nt.assert_equal(len(R), 1)
        self.assertIsInstance(R, SO3)

        # copy constructor
        R = SO3.Rx(pi / 2)
        R2 = SO3(R)
        R = SO3.Ry(pi / 2)
        # R2 must be an independent copy, unaffected by rebinding R
        array_compare(R2, rotx(pi / 2))

    def test_constructor_Eul(self):
        """SO3.Eul accepts list/scalar/ndarray input, degrees, and angle matrices."""
        R = SO3.Eul([0.1, 0.2, 0.3])
        nt.assert_equal(len(R), 1)
        array_compare(R, eul2r([0.1, 0.2, 0.3]))
        self.assertIsInstance(R, SO3)

        R = SO3.Eul(0.1, 0.2, 0.3)
        nt.assert_equal(len(R), 1)
        array_compare(R, eul2r([0.1, 0.2, 0.3]))
        self.assertIsInstance(R, SO3)

        R = SO3.Eul(np.r_[0.1, 0.2, 0.3])
        nt.assert_equal(len(R), 1)
        array_compare(R, eul2r([0.1, 0.2, 0.3]))
        self.assertIsInstance(R, SO3)

        R = SO3.Eul([10, 20, 30], unit='deg')
        nt.assert_equal(len(R), 1)
        array_compare(R, eul2r([10, 20, 30], unit='deg'))
        self.assertIsInstance(R, SO3)

        R = SO3.Eul(10, 20, 30, unit='deg')
        nt.assert_equal(len(R), 1)
        array_compare(R, eul2r([10, 20, 30], unit='deg'))
        self.assertIsInstance(R, SO3)

        # matrix input
        angles = np.array([
            [0.1, 0.2, 0.3],
            [0.2, 0.3, 0.4],
            [0.3, 0.4, 0.5],
            [0.4, 0.5, 0.6]
        ])
        R = SO3.Eul(angles)
        self.assertIsInstance(R, SO3)
        nt.assert_equal(len(R), 4)
        for i in range(4):
            array_compare(R[i], eul2r(angles[i,:]))

        angles *= 10
        R = SO3.Eul(angles, unit='deg')
        self.assertIsInstance(R, SO3)
        nt.assert_equal(len(R), 4)
        for i in range(4):
            array_compare(R[i], eul2r(angles[i,:], unit='deg'))

    def test_constructor_RPY(self):
        """SO3.RPY covers 'zyx' (default) and 'xyz' orders plus matrix input."""
        R = SO3.RPY(0.1, 0.2, 0.3, order='zyx')
        nt.assert_equal(len(R), 1)
        array_compare(R, rpy2r([0.1, 0.2, 0.3], order='zyx'))
        self.assertIsInstance(R, SO3)

        R = SO3.RPY(10, 20, 30, unit='deg', order='zyx')
        nt.assert_equal(len(R), 1)
        array_compare(R, rpy2r([10, 20, 30], order='zyx', unit='deg'))
        self.assertIsInstance(R, SO3)

        R = SO3.RPY([0.1, 0.2, 0.3], order='zyx')
        nt.assert_equal(len(R), 1)
        array_compare(R, rpy2r([0.1, 0.2, 0.3], order='zyx'))
        self.assertIsInstance(R, SO3)

        R = SO3.RPY(np.r_[0.1, 0.2, 0.3], order='zyx')
        nt.assert_equal(len(R), 1)
        array_compare(R, rpy2r([0.1, 0.2, 0.3], order='zyx'))
        self.assertIsInstance(R, SO3)

        # check default
        R = SO3.RPY([0.1, 0.2, 0.3])
        nt.assert_equal(len(R), 1)
        array_compare(R, rpy2r([0.1, 0.2, 0.3], order='zyx'))
        self.assertIsInstance(R, SO3)

        # XYZ order
        R = SO3.RPY(0.1, 0.2, 0.3, order='xyz')
        nt.assert_equal(len(R), 1)
        array_compare(R, rpy2r([0.1, 0.2, 0.3], order='xyz'))
        self.assertIsInstance(R, SO3)

        R = SO3.RPY(10, 20, 30, unit='deg', order='xyz')
        nt.assert_equal(len(R), 1)
        array_compare(R, rpy2r([10, 20, 30], order='xyz', unit='deg'))
        self.assertIsInstance(R, SO3)

        R = SO3.RPY([0.1, 0.2, 0.3], order='xyz')
        nt.assert_equal(len(R), 1)
        array_compare(R, rpy2r([0.1, 0.2, 0.3], order='xyz'))
        self.assertIsInstance(R, SO3)

        R = SO3.RPY(np.r_[0.1, 0.2, 0.3], order='xyz')
        nt.assert_equal(len(R), 1)
        array_compare(R, rpy2r([0.1, 0.2, 0.3], order='xyz'))
        self.assertIsInstance(R, SO3)

        # matrix input
        angles = np.array([
            [0.1, 0.2, 0.3],
            [0.2, 0.3, 0.4],
            [0.3, 0.4, 0.5],
            [0.4, 0.5, 0.6]
        ])
        R = SO3.RPY(angles, order='zyx')
        self.assertIsInstance(R, SO3)
        nt.assert_equal(len(R), 4)
        for i in range(4):
            array_compare(R[i], rpy2r(angles[i,:], order='zyx'))

        angles *= 10
        R = SO3.RPY(angles, unit='deg', order='zyx')
        self.assertIsInstance(R, SO3)
        nt.assert_equal(len(R), 4)
        for i in range(4):
            array_compare(R[i], rpy2r(angles[i,:], unit='deg', order='zyx'))

    def test_constructor_AngVec(self):
        """SO3.AngVec builds rotations about a given axis."""
        # angvec
        R = SO3.AngVec(0.2, [1, 0, 0])
        nt.assert_equal(len(R), 1)
        array_compare(R, rotx(0.2))
        self.assertIsInstance(R, SO3)

        R = SO3.AngVec(0.3, [0, 1, 0])
        nt.assert_equal(len(R), 1)
        array_compare(R, roty(0.3))
        self.assertIsInstance(R, SO3)

    def test_shape(self):
        """shape property matches the underlying matrix shape."""
        a = SO3()
        self.assertEqual(a._A.shape, a.shape)

    def test_about(self):
        """about property is accessible without raising."""
        R = SO3()
        R.about

    def test_str(self):
        """str() and repr() produce the expected number of lines."""
        R = SO3()

        s = str(R)
        self.assertIsInstance(s, str)
        self.assertEqual(s.count('\n'), 3)

        s = repr(R)
        self.assertIsInstance(s, str)
        self.assertEqual(s.count('\n'), 2)

    def test_printline(self):
        """printline() returns a string for scalar and list-valued SO3."""
        R = SO3.Rx(0.3)

        R.printline()
        s = R.printline(file=None)
        self.assertIsInstance(s, str)

        R = SO3.Rx([0.3, 0.4, 0.5])
        s = R.printline(file=None)
        self.assertIsInstance(s, str)
        self.assertEqual(s.count('\n'), 2)

    def test_plot(self):
        """plot() runs without blocking."""
        plt.close('all')

        R = SO3.Rx(0.3)
        R.plot(block=False)

        R2 = SO3.Rx(0.6)
        # R.animate()
        # R.animate(start=R.inv())

    def test_listpowers(self):
        """SO3 behaves like a list: append and list-of-values construction."""
        R = SO3()
        R1 = SO3.Rx(0.2)
        R2 = SO3.Ry(0.3)

        R.append(R1)
        R.append(R2)
        nt.assert_equal(len(R), 3)
        self.assertIsInstance(R, SO3)

        array_compare(R[0], np.eye(3))
        array_compare(R[1], R1)
        array_compare(R[2], R2)

        R = SO3([rotx(0.1), rotx(0.2), rotx(0.3)])
        nt.assert_equal(len(R), 3)
        self.assertIsInstance(R, SO3)
        array_compare(R[0], rotx(0.1))
        array_compare(R[1], rotx(0.2))
        array_compare(R[2], rotx(0.3))

        R = SO3([SO3.Rx(0.1), SO3.Rx(0.2), SO3.Rx(0.3)])
        nt.assert_equal(len(R), 3)
        self.assertIsInstance(R, SO3)
        array_compare(R[0], rotx(0.1))
        array_compare(R[1], rotx(0.2))
        array_compare(R[2], rotx(0.3))

    def test_tests(self):
        """Predicate methods isrot/isrot2/ishom/ishom2."""
        R = SO3()

        self.assertEqual(R.isrot(), True)
        self.assertEqual(R.isrot2(), False)
        self.assertEqual(R.ishom(), False)
        self.assertEqual(R.ishom2(), False)

    def test_properties(self):
        """isSO/isSE flags, axis vectors, N, shape, and inverse."""
        R = SO3()

        self.assertEqual(R.isSO, True)
        self.assertEqual(R.isSE, False)

        array_compare(R.n, np.r_[1, 0, 0])
        array_compare(R.n, np.r_[1, 0, 0])
        array_compare(R.n, np.r_[1, 0, 0])

        nt.assert_equal(R.N, 3)
        nt.assert_equal(R.shape, (3, 3))

        R = SO3.Rx(0.3)
        array_compare(R.inv() * R, np.eye(3, 3))

    def test_arith(self):
        """Operator overloads (+, -, *, /, **) on scalar SO3 values."""
        R = SO3()

        # sum
        a = R + R
        self.assertNotIsInstance(a, SO3)
        array_compare(a, np.array([[2, 0, 0], [0, 2, 0], [0, 0, 2]]))

        a = R + 1
        self.assertNotIsInstance(a, SO3)
        array_compare(a, np.array([[2, 1, 1], [1, 2, 1], [1, 1, 2]]))

        # a = 1 + R
        # self.assertNotIsInstance(a, SO3)
        # array_compare(a, np.array([ [2,1,1], [1,2,1], [1,1,2]]))

        a = R + np.eye(3)
        self.assertNotIsInstance(a, SO3)
        array_compare(a, np.array([[2, 0, 0], [0, 2, 0], [0, 0, 2]]))

        # a = np.eye(3) + R
        # self.assertNotIsInstance(a, SO3)
        # array_compare(a, np.array([ [2,0,0], [0,2,0], [0,0,2]]))
        # this invokes the __add__ method for numpy

        # difference
        R = SO3()

        a = R - R
        self.assertNotIsInstance(a, SO3)
        array_compare(a, np.zeros((3, 3)))

        a = R - 1
        self.assertNotIsInstance(a, SO3)
        array_compare(a, np.array([[0, -1, -1], [-1, 0, -1], [-1, -1, 0]]))

        # a = 1 - R
        # self.assertNotIsInstance(a, SO3)
        # array_compare(a, -np.array([ [0,-1,-1], [-1,0,-1], [-1,-1,0]]))

        a = R - np.eye(3)
        self.assertNotIsInstance(a, SO3)
        array_compare(a, np.zeros((3, 3)))

        # a = np.eye(3) - R
        # self.assertNotIsInstance(a, SO3)
        # array_compare(a, np.zeros((3,3)))

        # multiply
        R = SO3()

        a = R * R
        self.assertIsInstance(a, SO3)
        array_compare(a, R)

        a = R * 2
        self.assertNotIsInstance(a, SO3)
        array_compare(a, 2 * np.eye(3))

        a = 2 * R
        self.assertNotIsInstance(a, SO3)
        array_compare(a, 2 * np.eye(3))

        R = SO3()
        R *= SO3.Rx(pi / 2)
        self.assertIsInstance(R, SO3)
        array_compare(R, rotx(pi / 2))

        R = SO3()
        R *= 2
        self.assertNotIsInstance(R, SO3)
        array_compare(R, 2 * np.eye(3))

        array_compare(SO3.Rx(pi / 2) * SO3.Ry(pi / 2) * SO3.Rx(-pi / 2), SO3.Rz(pi / 2))
        array_compare(SO3.Ry(pi / 2) * [1, 0, 0], np.c_[0, 0, -1].T)

        # SO3 x vector
        vx = np.r_[1, 0, 0]
        vy = np.r_[0, 1, 0]
        vz = np.r_[0, 0, 1]

        def cv(v):
            # column-vector view of v
            return np.c_[v]

        nt.assert_equal(isinstance(SO3.Rx(pi / 2) * vx, np.ndarray), True)
        print(vx)
        print(SO3.Rx(pi / 2) * vx)
        print(cv(vx))
        array_compare(SO3.Rx(pi / 2) * vx, cv(vx))
        array_compare(SO3.Rx(pi / 2) * vy, cv(vz))
        array_compare(SO3.Rx(pi / 2) * vz, cv(-vy))
        array_compare(SO3.Ry(pi / 2) * vx, cv(-vz))
        array_compare(SO3.Ry(pi / 2) * vy, cv(vy))
        array_compare(SO3.Ry(pi / 2) * vz, cv(vx))
        array_compare(SO3.Rz(pi / 2) * vx, cv(vy))
        array_compare(SO3.Rz(pi / 2) * vy, cv(-vx))
        array_compare(SO3.Rz(pi / 2) * vz, cv(vz))

        # divide
        R = SO3.Ry(0.3)
        a = R / R
        self.assertIsInstance(a, SO3)
        array_compare(a, np.eye(3))

        a = R / 2
        self.assertNotIsInstance(a, SO3)
        array_compare(a, roty(0.3) / 2)

        # power
        R = SO3.Rx(pi/2)
        R = R**2
        array_compare(R, SO3.Rx(pi))

        R = SO3.Rx(pi/2)
        R **= 2
        array_compare(R, SO3.Rx(pi))

        R = SO3.Rx(pi/4)
        R = R**(-2)
        array_compare(R, SO3.Rx(-pi/2))

        R = SO3.Rx(pi/4)
        R **= -2
        array_compare(R, SO3.Rx(-pi/2))

    def test_arith_vect(self):
        """Operator overloads broadcast over list-valued SO3 operands."""
        rx = SO3.Rx(pi / 2)
        ry = SO3.Ry(pi / 2)
        rz = SO3.Rz(pi / 2)
        u = SO3()

        # multiply
        R = SO3([rx, ry, rz])
        a = R * rx
        self.assertIsInstance(a, SO3)
        nt.assert_equal(len(a), 3)
        array_compare(a[0], rx * rx)
        array_compare(a[1], ry * rx)
        array_compare(a[2], rz * rx)

        a = rx * R
        self.assertIsInstance(a, SO3)
        nt.assert_equal(len(a), 3)
        array_compare(a[0], rx * rx)
        array_compare(a[1], rx * ry)
        array_compare(a[2], rx * rz)

        a = R * R
        self.assertIsInstance(a, SO3)
        nt.assert_equal(len(a), 3)
        array_compare(a[0], rx * rx)
        array_compare(a[1], ry * ry)
        array_compare(a[2], rz * rz)

        a = R * 2
        self.assertNotIsInstance(a, SO3)
        nt.assert_equal(len(a), 3)
        array_compare(a[0], rx * 2)
        array_compare(a[1], ry * 2)
        array_compare(a[2], rz * 2)

        a = 2 * R
        self.assertNotIsInstance(a, SO3)
        nt.assert_equal(len(a), 3)
        array_compare(a[0], rx * 2)
        array_compare(a[1], ry * 2)
        array_compare(a[2], rz * 2)

        a = R
        a *= rx
        self.assertIsInstance(a, SO3)
        nt.assert_equal(len(a), 3)
        array_compare(a[0], rx * rx)
        array_compare(a[1], ry * rx)
        array_compare(a[2], rz * rx)

        a = rx
        a *= R
        self.assertIsInstance(a, SO3)
        nt.assert_equal(len(a), 3)
        array_compare(a[0], rx * rx)
        array_compare(a[1], rx * ry)
        array_compare(a[2], rx * rz)

        a = R
        a *= R
        self.assertIsInstance(a, SO3)
        nt.assert_equal(len(a), 3)
        array_compare(a[0], rx * rx)
        array_compare(a[1], ry * ry)
        array_compare(a[2], rz * rz)

        a = R
        a *= 2
        self.assertNotIsInstance(a, SO3)
        nt.assert_equal(len(a), 3)
        array_compare(a[0], rx * 2)
        array_compare(a[1], ry * 2)
        array_compare(a[2], rz * 2)

        # SO3 x vector
        vx = np.r_[1, 0, 0]
        vy = np.r_[0, 1, 0]
        vz = np.r_[0, 0, 1]

        a = R * vx
        array_compare(a[:, 0], (rx * vx).flatten())
        array_compare(a[:, 1], (ry * vx).flatten())
        array_compare(a[:, 2], (rz * vx).flatten())

        a = rx * np.vstack((vx, vy, vz)).T
        array_compare(a[:, 0], (rx * vx).flatten())
        array_compare(a[:, 1], (rx * vy).flatten())
        array_compare(a[:, 2], (rx * vz).flatten())

        # divide
        R = SO3([rx, ry, rz])
        a = R / rx
        self.assertIsInstance(a, SO3)
        nt.assert_equal(len(a), 3)
        array_compare(a[0], rx / rx)
        array_compare(a[1], ry / rx)
        array_compare(a[2], rz / rx)

        a = rx / R
        self.assertIsInstance(a, SO3)
        nt.assert_equal(len(a), 3)
        array_compare(a[0], rx / rx)
        array_compare(a[1], rx / ry)
        array_compare(a[2], rx / rz)

        a = R / R
        self.assertIsInstance(a, SO3)
        nt.assert_equal(len(a), 3)
        array_compare(a[0], np.eye(3))
        array_compare(a[1], np.eye(3))
        array_compare(a[2], np.eye(3))

        a = R / 2
        self.assertNotIsInstance(a, SO3)
        nt.assert_equal(len(a), 3)
        array_compare(a[0], rx / 2)
        array_compare(a[1], ry / 2)
        array_compare(a[2], rz / 2)

        a = R
        a /= rx
        self.assertIsInstance(a, SO3)
        nt.assert_equal(len(a), 3)
        array_compare(a[0], rx / rx)
        array_compare(a[1], ry / rx)
        array_compare(a[2], rz / rx)

        a = rx
        a /= R
        self.assertIsInstance(a, SO3)
        nt.assert_equal(len(a), 3)
        array_compare(a[0], rx / rx)
        array_compare(a[1], rx / ry)
        array_compare(a[2], rx / rz)

        a = R
        a /= R
        self.assertIsInstance(a, SO3)
        nt.assert_equal(len(a), 3)
        array_compare(a[0], np.eye(3))
        array_compare(a[1], np.eye(3))
        array_compare(a[2], np.eye(3))

        a = R
        a /= 2
        self.assertNotIsInstance(a, SO3)
        nt.assert_equal(len(a), 3)
        array_compare(a[0], rx / 2)
        array_compare(a[1], ry / 2)
        array_compare(a[2], rz / 2)

        # add
        R = SO3([rx, ry, rz])
        a = R + rx
        self.assertNotIsInstance(a, SO3)
        nt.assert_equal(len(a), 3)
        array_compare(a[0], rx + rx)
        array_compare(a[1], ry + rx)
        array_compare(a[2], rz + rx)

        a = rx + R
        self.assertNotIsInstance(a, SO3)
        nt.assert_equal(len(a), 3)
        array_compare(a[0], rx + rx)
        array_compare(a[1], rx + ry)
        array_compare(a[2], rx + rz)

        a = R + R
        self.assertNotIsInstance(a, SO3)
        nt.assert_equal(len(a), 3)
        array_compare(a[0], rx + rx)
        array_compare(a[1], ry + ry)
        array_compare(a[2], rz + rz)

        a = R + 1
        self.assertNotIsInstance(a, SO3)
        nt.assert_equal(len(a), 3)
        array_compare(a[0], rx + 1)
        array_compare(a[1], ry + 1)
        array_compare(a[2], rz + 1)

        # subtract
        R = SO3([rx, ry, rz])
        a = R - rx
        self.assertNotIsInstance(a, SO3)
        nt.assert_equal(len(a), 3)
        array_compare(a[0], rx - rx)
        array_compare(a[1], ry - rx)
        array_compare(a[2], rz - rx)

        a = rx - R
        self.assertNotIsInstance(a, SO3)
        nt.assert_equal(len(a), 3)
        array_compare(a[0], rx - rx)
        array_compare(a[1], rx - ry)
        array_compare(a[2], rx - rz)

        a = R - R
        self.assertNotIsInstance(a, SO3)
        nt.assert_equal(len(a), 3)
        array_compare(a[0], rx - rx)
        array_compare(a[1], ry - ry)
        array_compare(a[2], rz - rz)

    def test_functions(self):
        """Placeholder: inv and .T behaviour not yet tested."""
        # inv
        # .T
        pass

    def test_functions_vect(self):
        """Placeholder: vectorised inv and .T behaviour not yet tested."""
        # inv
        # .T
        pass
# ============================== SE3 =====================================#
class TestSE3(unittest.TestCase):
@classmethod
def tearDownClass(cls):
plt.close('all')
def test_constructor(self):
# null constructor
R = SE3()
nt.assert_equal(len(R), 1)
array_compare(R, np.eye(4))
self.assertIsInstance(R, SE3)
# construct from matrix
R = SE3(trotx(0.2))
nt.assert_equal(len(R), 1)
array_compare(R, trotx(0.2))
self.assertIsInstance(R, SE3)
# construct from canonic rotation
R = SE3.Rx(0.2)
nt.assert_equal(len(R), 1)
array_compare(R, trotx(0.2))
self.assertIsInstance(R, SE3)
R = SE3.Ry(0.2)
nt.assert_equal(len(R), 1)
array_compare(R, troty(0.2))
self.assertIsInstance(R, SE3)
R = SE3.Rz(0.2)
nt.assert_equal(len(R), 1)
array_compare(R, trotz(0.2))
self.assertIsInstance(R, SE3)
# construct from canonic translation
R = SE3.Tx(0.2)
nt.assert_equal(len(R), 1)
array_compare(R, transl(0.2, 0, 0))
self.assertIsInstance(R, SE3)
R = SE3.Ty(0.2)
nt.assert_equal(len(R), 1)
array_compare(R, transl(0, 0.2, 0))
self.assertIsInstance(R, SE3)
R = SE3.Tz(0.2)
nt.assert_equal(len(R), 1)
array_compare(R, transl(0, 0, 0.2))
self.assertIsInstance(R, SE3)
# triple angle
R = SE3.Eul([0.1, 0.2, 0.3])
nt.assert_equal(len(R), 1)
array_compare(R, eul2tr([0.1, 0.2, 0.3]))
self.assertIsInstance(R, SE3)
R = SE3.Eul(np.r_[0.1, 0.2, 0.3])
nt.assert_equal(len(R), 1)
array_compare(R, eul2tr([0.1, 0.2, 0.3]))
self.assertIsInstance(R, SE3)
R = SE3.Eul([10, 20, 30], unit='deg')
nt.assert_equal(len(R), 1)
array_compare(R, eul2tr([10, 20, 30], unit='deg'))
self.assertIsInstance(R, SE3)
R = SE3.RPY([0.1, 0.2, 0.3])
nt.assert_equal(len(R), 1)
array_compare(R, rpy2tr([0.1, 0.2, 0.3]))
self.assertIsInstance(R, SE3)
R = SE3.RPY(np.r_[0.1, 0.2, 0.3])
nt.assert_equal(len(R), 1)
array_compare(R, rpy2tr([0.1, 0.2, 0.3]))
self.assertIsInstance(R, SE3)
R = SE3.RPY([10, 20, 30], unit='deg')
nt.assert_equal(len(R), 1)
array_compare(R, rpy2tr([10, 20, 30], unit='deg'))
self.assertIsInstance(R, SE3)
R = SE3.RPY([0.1, 0.2, 0.3], order='xyz')
nt.assert_equal(len(R), 1)
array_compare(R, rpy2tr([0.1, 0.2, 0.3], order='xyz'))
self.assertIsInstance(R, SE3)
# angvec
R = SE3.AngVec(0.2, [1, 0, 0])
nt.assert_equal(len(R), 1)
array_compare(R, trotx(0.2))
self.assertIsInstance(R, SE3)
R = SE3.AngVec(0.3, [0, 1, 0])
nt.assert_equal(len(R), 1)
array_compare(R, troty(0.3))
self.assertIsInstance(R, SE3)
# OA
R = SE3.OA([0, 1, 0], [0, 0, 1])
nt.assert_equal(len(R), 1)
array_compare(R, np.eye(4))
self.assertIsInstance(R, SE3)
# random
R = SE3.Rand()
nt.assert_equal(len(R), 1)
self.assertIsInstance(R, SE3)
# copy constructor
R = SE3.Rx(pi / 2)
R2 = SE3(R)
R = SE3.Ry(pi / 2)
array_compare(R2, trotx(pi / 2))
def test_shape(self):
a = SE3()
self.assertEqual(a._A.shape, a.shape)
def test_listpowers(self):
R = SE3()
R1 = SE3.Rx(0.2)
R2 = SE3.Ry(0.3)
R.append(R1)
R.append(R2)
nt.assert_equal(len(R), 3)
self.assertIsInstance(R, SE3)
array_compare(R[0], np.eye(4))
array_compare(R[1], R1)
array_compare(R[2], R2)
R = SE3([trotx(0.1), trotx(0.2), trotx(0.3)])
nt.assert_equal(len(R), 3)
self.assertIsInstance(R, SE3)
array_compare(R[0], trotx(0.1))
array_compare(R[1], trotx(0.2))
array_compare(R[2], trotx(0.3))
R = SE3([SE3.Rx(0.1), SE3.Rx(0.2), SE3.Rx(0.3)])
nt.assert_equal(len(R), 3)
self.assertIsInstance(R, SE3)
array_compare(R[0], trotx(0.1))
array_compare(R[1], trotx(0.2))
array_compare(R[2], trotx(0.3))
def test_tests(self):
R = SE3()
self.assertEqual(R.isrot(), False)
self.assertEqual(R.isrot2(), False)
self.assertEqual(R.ishom(), True)
self.assertEqual(R.ishom2(), False)
def test_properties(self):
R = SE3()
self.assertEqual(R.isSO, False)
self.assertEqual(R.isSE, True)
array_compare(R.n, np.r_[1, 0, 0])
array_compare(R.n, np.r_[1, 0, 0])
array_compare(R.n, np.r_[1, 0, 0])
nt.assert_equal(R.N, 3)
nt.assert_equal(R.shape, (4, 4))
def test_arith(self):
    """Arithmetic operators on a single SE3 value.

    The asserted pattern throughout: the group operation ``*`` (and
    ``/``) between SE3 values returns an SE3, while ``+``, ``-`` and
    scalar scaling return plain ndarrays, since their result is no
    longer a rigid-body transform.
    """
    T = SE3(1, 2, 3)  # pure translation
    # sum
    a = T + T  # elementwise matrix sum -> ndarray, not SE3
    self.assertNotIsInstance(a, SE3)
    array_compare(a, np.array([[2, 0, 0, 2], [0, 2, 0, 4], [0, 0, 2, 6], [0, 0, 0, 2]]))
    a = T + 1  # scalar broadcast over every matrix element
    self.assertNotIsInstance(a, SE3)
    array_compare(a, np.array([[2, 1, 1, 2], [1, 2, 1, 3], [1, 1, 2, 4], [1, 1, 1, 2]]))
    # a = 1 + T
    # self.assertNotIsInstance(a, SE3)
    # array_compare(a, np.array([ [2,1,1], [1,2,1], [1,1,2]]))
    a = T + np.eye(4)
    self.assertNotIsInstance(a, SE3)
    array_compare(a, np.array([[2, 0, 0, 1], [0, 2, 0, 2], [0, 0, 2, 3], [0, 0, 0, 2]]))
    # a = np.eye(3) + T
    # self.assertNotIsInstance(a, SE3)
    # array_compare(a, np.array([ [2,0,0], [0,2,0], [0,0,2]]))
    # this invokes the __add__ method for numpy

    # difference
    T = SE3(1, 2, 3)
    a = T - T
    self.assertNotIsInstance(a, SE3)
    array_compare(a, np.zeros((4, 4)))
    a = T - 1
    self.assertNotIsInstance(a, SE3)
    array_compare(a, np.array([[0, -1, -1, 0], [-1, 0, -1, 1], [-1, -1, 0, 2], [-1, -1, -1, 0]]))
    # a = 1 - T
    # self.assertNotIsInstance(a, SE3)
    # array_compare(a, -np.array([ [0,-1,-1], [-1,0,-1], [-1,-1,0]]))
    a = T - np.eye(4)
    self.assertNotIsInstance(a, SE3)
    array_compare(a, np.array([[0, 0, 0, 1], [0, 0, 0, 2], [0, 0, 0, 3], [0, 0, 0, 0]]))
    # a = np.eye(3) - T
    # self.assertNotIsInstance(a, SE3)
    # array_compare(a, np.zeros((3,3)))
    a = T
    a -= T  # in-place subtraction also drops to an ndarray
    self.assertNotIsInstance(a, SE3)
    array_compare(a, np.zeros((4, 4)))

    # multiply: composition, stays inside the group
    T = SE3(1, 2, 3)
    a = T * T
    self.assertIsInstance(a, SE3)
    array_compare(a, transl(2, 4, 6))  # translations add under composition
    a = T * 2  # scalar scaling leaves the group -> ndarray
    self.assertNotIsInstance(a, SE3)
    array_compare(a, 2 * transl(1, 2, 3))
    a = 2 * T
    self.assertNotIsInstance(a, SE3)
    array_compare(a, 2 * transl(1, 2, 3))
    T = SE3(1, 2, 3)
    T *= SE3.Ry(pi / 2)  # in-place composition keeps the SE3 type
    self.assertIsInstance(T, SE3)
    array_compare(T, np.array([[0, 0, 1, 1], [0, 1, 0, 2], [-1, 0, 0, 3], [0, 0, 0, 1]]))
    T = SE3()
    T *= 2  # in-place scalar scaling drops to ndarray
    self.assertNotIsInstance(T, SE3)
    array_compare(T, 2 * np.eye(4))
    # composition identity: Rx(90) Ry(90) Rx(-90) == Rz(90)
    array_compare(SE3.Rx(pi / 2) * SE3.Ry(pi / 2) * SE3.Rx(-pi / 2), SE3.Rz(pi / 2))
    array_compare(SE3.Ry(pi / 2) * [1, 0, 0], np.c_[0, 0, -1].T)

    # SE3 x vector: transforms the vector, result is a column vector
    vx = np.r_[1, 0, 0]
    vy = np.r_[0, 1, 0]
    vz = np.r_[0, 0, 1]

    def cv(v):
        # helper: view a 3-vector as a column vector
        return np.c_[v]

    nt.assert_equal(isinstance(SE3.Tx(pi / 2) * vx, np.ndarray), True)
    array_compare(SE3.Rx(pi / 2) * vx, cv(vx))
    array_compare(SE3.Rx(pi / 2) * vy, cv(vz))
    array_compare(SE3.Rx(pi / 2) * vz, cv(-vy))
    array_compare(SE3.Ry(pi / 2) * vx, cv(-vz))
    array_compare(SE3.Ry(pi / 2) * vy, cv(vy))
    array_compare(SE3.Ry(pi / 2) * vz, cv(vx))
    array_compare(SE3.Rz(pi / 2) * vx, cv(vy))
    array_compare(SE3.Rz(pi / 2) * vy, cv(-vx))
    array_compare(SE3.Rz(pi / 2) * vz, cv(vz))

    # divide: SE3 / SE3 stays in the group; SE3 / scalar does not
    T = SE3.Ry(0.3)
    a = T / T
    self.assertIsInstance(a, SE3)
    array_compare(a, np.eye(4))
    a = T / 2
    self.assertNotIsInstance(a, SE3)
    array_compare(a, troty(0.3) / 2)
def test_arith_vect(self):
    """Arithmetic on multi-valued SE3 objects broadcasts elementwise.

    Group operations (``*``, ``/``) keep the SE3 type; ``+``, ``-``
    and scalar scaling drop to plain ndarrays.  Changes from the
    original: the unused local ``u = SE3()`` was removed, and the
    repeated per-element assertions were folded into loops — the
    same comparisons are performed in the same order.
    """
    rx = SE3.Rx(pi / 2)
    ry = SE3.Ry(pi / 2)
    rz = SE3.Rz(pi / 2)
    rots = (rx, ry, rz)

    # multiply
    T = SE3([rx, ry, rz])

    a = T * rx  # broadcast: each element composed with rx
    self.assertIsInstance(a, SE3)
    nt.assert_equal(len(a), 3)
    for i, r in enumerate(rots):
        array_compare(a[i], r * rx)

    a = rx * T  # broadcast from the left
    self.assertIsInstance(a, SE3)
    nt.assert_equal(len(a), 3)
    for i, r in enumerate(rots):
        array_compare(a[i], rx * r)

    a = T * T  # elementwise composition of equal-length sequences
    self.assertIsInstance(a, SE3)
    nt.assert_equal(len(a), 3)
    for i, r in enumerate(rots):
        array_compare(a[i], r * r)

    a = T * 2  # scalar scaling leaves the group
    self.assertNotIsInstance(a, SE3)
    nt.assert_equal(len(a), 3)
    for i, r in enumerate(rots):
        array_compare(a[i], r * 2)

    a = 2 * T
    self.assertNotIsInstance(a, SE3)
    nt.assert_equal(len(a), 3)
    for i, r in enumerate(rots):
        array_compare(a[i], r * 2)

    # in-place variants of the same cases
    a = T
    a *= rx
    self.assertIsInstance(a, SE3)
    nt.assert_equal(len(a), 3)
    for i, r in enumerate(rots):
        array_compare(a[i], r * rx)

    a = rx
    a *= T
    self.assertIsInstance(a, SE3)
    nt.assert_equal(len(a), 3)
    for i, r in enumerate(rots):
        array_compare(a[i], rx * r)

    a = T
    a *= T
    self.assertIsInstance(a, SE3)
    nt.assert_equal(len(a), 3)
    for i, r in enumerate(rots):
        array_compare(a[i], r * r)

    a = T
    a *= 2
    self.assertNotIsInstance(a, SE3)
    nt.assert_equal(len(a), 3)
    for i, r in enumerate(rots):
        array_compare(a[i], r * 2)

    # SE3 x vector
    vx = np.r_[1, 0, 0]
    vy = np.r_[0, 1, 0]
    vz = np.r_[0, 0, 1]

    # multi-valued pose times one vector: one result column per pose
    a = T * vx
    for i, r in enumerate(rots):
        array_compare(a[:, i], (r * vx).flatten())

    # one pose times a matrix whose columns are vectors
    a = rx * np.vstack((vx, vy, vz)).T
    for i, v in enumerate((vx, vy, vz)):
        array_compare(a[:, i], (rx * v).flatten())

    # divide
    T = SE3([rx, ry, rz])

    a = T / rx
    self.assertIsInstance(a, SE3)
    nt.assert_equal(len(a), 3)
    for i, r in enumerate(rots):
        array_compare(a[i], r / rx)

    a = rx / T
    self.assertIsInstance(a, SE3)
    nt.assert_equal(len(a), 3)
    for i, r in enumerate(rots):
        array_compare(a[i], rx / r)

    a = T / T  # each element divided by itself -> identity
    self.assertIsInstance(a, SE3)
    nt.assert_equal(len(a), 3)
    for i in range(3):
        array_compare(a[i], np.eye(4))

    a = T / 2
    self.assertNotIsInstance(a, SE3)
    nt.assert_equal(len(a), 3)
    for i, r in enumerate(rots):
        array_compare(a[i], r / 2)

    a = T
    a /= rx
    self.assertIsInstance(a, SE3)
    nt.assert_equal(len(a), 3)
    for i, r in enumerate(rots):
        array_compare(a[i], r / rx)

    a = rx
    a /= T
    self.assertIsInstance(a, SE3)
    nt.assert_equal(len(a), 3)
    for i, r in enumerate(rots):
        array_compare(a[i], rx / r)

    a = T
    a /= T
    self.assertIsInstance(a, SE3)
    nt.assert_equal(len(a), 3)
    for i in range(3):
        array_compare(a[i], np.eye(4))

    a = T
    a /= 2
    self.assertNotIsInstance(a, SE3)
    nt.assert_equal(len(a), 3)
    for i, r in enumerate(rots):
        array_compare(a[i], r / 2)

    # add: always leaves the group
    T = SE3([rx, ry, rz])

    a = T + rx
    self.assertNotIsInstance(a, SE3)
    nt.assert_equal(len(a), 3)
    for i, r in enumerate(rots):
        array_compare(a[i], r + rx)

    a = rx + T
    self.assertNotIsInstance(a, SE3)
    nt.assert_equal(len(a), 3)
    for i, r in enumerate(rots):
        array_compare(a[i], rx + r)

    a = T + T
    self.assertNotIsInstance(a, SE3)
    nt.assert_equal(len(a), 3)
    for i, r in enumerate(rots):
        array_compare(a[i], r + r)

    a = T + 1
    self.assertNotIsInstance(a, SE3)
    nt.assert_equal(len(a), 3)
    for i, r in enumerate(rots):
        array_compare(a[i], r + 1)

    # subtract: always leaves the group
    T = SE3([rx, ry, rz])

    a = T - rx
    self.assertNotIsInstance(a, SE3)
    nt.assert_equal(len(a), 3)
    for i, r in enumerate(rots):
        array_compare(a[i], r - rx)

    a = rx - T
    self.assertNotIsInstance(a, SE3)
    nt.assert_equal(len(a), 3)
    for i, r in enumerate(rots):
        array_compare(a[i], rx - r)

    a = T - T
    self.assertNotIsInstance(a, SE3)
    nt.assert_equal(len(a), 3)
    for i, r in enumerate(rots):
        array_compare(a[i], r - r)

    a = T - 1
    self.assertNotIsInstance(a, SE3)
    nt.assert_equal(len(a), 3)
    for i, r in enumerate(rots):
        array_compare(a[i], r - 1)
def test_functions(self):
    """Placeholder: single-value SE3 operations (inv, .T) not yet tested."""
    # inv
    # .T
    pass
def test_functions_vect(self):
    """Placeholder: vectorised SE3 operations (inv, .T) not yet tested."""
    # inv
    # .T
    pass
# ---------------------------------------------------------------------------------------#
if __name__ == '__main__':
    # run the whole test suite when executed as a script
    unittest.main()
| 27.775804
| 101
| 0.499041
| 5,028
| 32,831
| 3.162689
| 0.035601
| 0.22035
| 0.155326
| 0.102629
| 0.894227
| 0.874733
| 0.842095
| 0.828764
| 0.797824
| 0.775248
| 0
| 0.070696
| 0.329171
| 32,831
| 1,181
| 102
| 27.799323
| 0.651335
| 0.050562
| 0
| 0.736597
| 0
| 0
| 0.004513
| 0
| 0
| 0
| 0
| 0
| 0.301865
| 1
| 0.034965
| false
| 0.004662
| 0.011655
| 0.002331
| 0.051282
| 0.008159
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
58b7a954c76cda0f496c097df6085378a9fb9d83
| 65,831
|
py
|
Python
|
tests/wasp1/AllAnswerSets/stratcomp_bug_02.test.py
|
bernardocuteri/wasp
|
05c8f961776dbdbf7afbf905ee00fc262eba51ad
|
[
"Apache-2.0"
] | 19
|
2015-12-03T08:53:45.000Z
|
2022-03-31T02:09:43.000Z
|
tests/wasp1/AllAnswerSets/stratcomp_bug_02.test.py
|
bernardocuteri/wasp
|
05c8f961776dbdbf7afbf905ee00fc262eba51ad
|
[
"Apache-2.0"
] | 80
|
2017-11-25T07:57:32.000Z
|
2018-06-10T19:03:30.000Z
|
tests/wasp1/AllAnswerSets/stratcomp_bug_02.test.py
|
bernardocuteri/wasp
|
05c8f961776dbdbf7afbf905ee00fc262eba51ad
|
[
"Apache-2.0"
] | 6
|
2015-01-15T07:51:48.000Z
|
2020-06-18T14:47:48.000Z
|
input = """
% Instantiation generated by
% DLV [build BEN/Oct 14 2010 gcc 4.4.3]
% EDB facts:
controlled_by(1,2,23,21,20).
controlled_by(1,3,6,9,5).
controlled_by(1,5,15,4,28).
controlled_by(1,6,18,17,26).
controlled_by(1,13,20,22,10).
controlled_by(1,15,18,8,21).
controlled_by(1,15,27,11,11).
controlled_by(1,17,14,15,5).
controlled_by(1,19,15,11,17).
controlled_by(1,19,25,14,2).
controlled_by(1,23,18,27,2).
controlled_by(1,29,8,26,13).
controlled_by(1,29,12,25,13).
controlled_by(1,29,21,16,9).
controlled_by(1,30,11,30,6).
controlled_by(2,1,29,6,21).
controlled_by(2,5,21,6,17).
controlled_by(2,6,25,7,26).
controlled_by(2,7,10,11,15).
controlled_by(2,8,1,25,11).
controlled_by(2,8,7,8,6).
controlled_by(2,9,20,28,15).
controlled_by(2,9,21,30,30).
controlled_by(2,14,1,17,27).
controlled_by(2,16,22,4,13).
controlled_by(2,20,27,9,18).
controlled_by(2,21,1,4,1).
controlled_by(2,21,8,21,28).
controlled_by(2,28,1,4,14).
controlled_by(2,30,16,25,25).
controlled_by(3,5,12,4,4).
controlled_by(3,6,5,17,29).
controlled_by(3,6,13,24,21).
controlled_by(3,6,16,9,2).
controlled_by(3,7,24,1,27).
controlled_by(3,10,13,8,18).
controlled_by(3,11,4,2,4).
controlled_by(3,13,30,21,5).
controlled_by(3,15,2,24,12).
controlled_by(3,16,24,28,28).
controlled_by(3,17,17,29,26).
controlled_by(3,23,10,17,30).
controlled_by(3,26,11,20,26).
controlled_by(3,26,26,28,4).
controlled_by(3,29,20,30,28).
controlled_by(4,5,7,1,25).
controlled_by(4,6,29,21,26).
controlled_by(4,8,26,17,6).
controlled_by(4,9,20,26,9).
controlled_by(4,9,30,13,27).
controlled_by(4,13,21,6,18).
controlled_by(4,15,29,19,11).
controlled_by(4,16,6,15,11).
controlled_by(4,18,5,2,21).
controlled_by(4,19,6,3,11).
controlled_by(4,23,1,21,24).
controlled_by(4,25,2,8,19).
controlled_by(4,26,29,29,2).
controlled_by(4,27,27,28,25).
controlled_by(4,29,26,8,19).
controlled_by(5,1,1,22,17).
controlled_by(5,3,26,30,1).
controlled_by(5,4,21,24,21).
controlled_by(5,9,4,30,13).
controlled_by(5,9,25,18,26).
controlled_by(5,13,20,6,29).
controlled_by(5,14,8,11,25).
controlled_by(5,14,13,27,2).
controlled_by(5,15,12,11,10).
controlled_by(5,16,24,28,7).
controlled_by(5,21,20,22,8).
controlled_by(5,24,16,20,9).
controlled_by(5,25,29,25,16).
controlled_by(5,26,29,10,25).
controlled_by(5,29,29,25,6).
controlled_by(6,2,8,22,18).
controlled_by(6,3,11,14,19).
controlled_by(6,4,1,14,26).
controlled_by(6,5,10,8,5).
controlled_by(6,7,23,26,29).
controlled_by(6,10,9,23,22).
controlled_by(6,12,21,13,2).
controlled_by(6,19,1,17,29).
controlled_by(6,19,3,16,7).
controlled_by(6,22,24,20,29).
controlled_by(6,26,15,13,13).
controlled_by(6,28,22,17,26).
controlled_by(6,28,26,1,10).
controlled_by(6,30,7,10,26).
controlled_by(6,30,14,22,1).
controlled_by(7,1,3,1,14).
controlled_by(7,2,8,29,20).
controlled_by(7,3,17,16,21).
controlled_by(7,4,9,15,18).
controlled_by(7,4,25,17,10).
controlled_by(7,6,29,5,30).
controlled_by(7,10,15,5,4).
controlled_by(7,11,30,29,6).
controlled_by(7,12,19,22,1).
controlled_by(7,14,3,10,11).
controlled_by(7,14,13,20,8).
controlled_by(7,15,16,30,28).
controlled_by(7,15,25,18,8).
controlled_by(7,15,25,19,13).
controlled_by(7,30,16,2,30).
controlled_by(8,1,6,3,17).
controlled_by(8,3,7,26,20).
controlled_by(8,5,2,26,6).
controlled_by(8,6,4,20,17).
controlled_by(8,12,4,28,13).
controlled_by(8,12,12,4,15).
controlled_by(8,12,14,13,16).
controlled_by(8,16,21,1,28).
controlled_by(8,16,23,25,14).
controlled_by(8,18,9,2,3).
controlled_by(8,21,14,6,10).
controlled_by(8,21,21,12,13).
controlled_by(8,22,5,28,28).
controlled_by(8,27,6,12,27).
controlled_by(8,29,15,9,2).
controlled_by(9,4,5,16,21).
controlled_by(9,4,18,20,25).
controlled_by(9,5,26,28,19).
controlled_by(9,7,22,11,11).
controlled_by(9,8,17,30,14).
controlled_by(9,8,20,19,4).
controlled_by(9,12,11,24,13).
controlled_by(9,13,3,15,6).
controlled_by(9,14,18,14,21).
controlled_by(9,18,25,26,30).
controlled_by(9,26,6,27,7).
controlled_by(9,28,17,18,27).
controlled_by(9,28,17,19,20).
controlled_by(9,28,20,21,19).
controlled_by(9,29,22,12,27).
controlled_by(10,4,5,15,20).
controlled_by(10,6,2,16,1).
controlled_by(10,9,17,28,6).
controlled_by(10,9,24,17,9).
controlled_by(10,9,29,29,12).
controlled_by(10,11,5,7,8).
controlled_by(10,14,30,27,23).
controlled_by(10,18,9,15,29).
controlled_by(10,18,23,16,18).
controlled_by(10,20,1,18,19).
controlled_by(10,22,5,11,17).
controlled_by(10,23,8,13,20).
controlled_by(10,28,7,26,8).
controlled_by(10,28,19,23,7).
controlled_by(10,28,23,21,15).
controlled_by(11,1,18,28,12).
controlled_by(11,3,4,12,8).
controlled_by(11,7,7,24,17).
controlled_by(11,9,6,7,29).
controlled_by(11,12,15,20,30).
controlled_by(11,14,26,15,30).
controlled_by(11,15,16,25,13).
controlled_by(11,15,19,25,25).
controlled_by(11,17,14,19,1).
controlled_by(11,18,21,9,26).
controlled_by(11,21,18,25,26).
controlled_by(11,22,14,6,6).
controlled_by(11,27,29,5,28).
controlled_by(11,28,6,16,28).
controlled_by(11,30,25,30,10).
controlled_by(12,1,29,19,14).
controlled_by(12,4,9,27,23).
controlled_by(12,5,9,25,23).
controlled_by(12,6,27,8,18).
controlled_by(12,8,24,21,24).
controlled_by(12,9,11,2,29).
controlled_by(12,15,5,13,11).
controlled_by(12,16,3,5,13).
controlled_by(12,18,30,14,18).
controlled_by(12,19,8,30,7).
controlled_by(12,20,2,23,5).
controlled_by(12,25,24,18,28).
controlled_by(12,27,27,10,18).
controlled_by(12,28,27,27,4).
controlled_by(12,30,13,2,15).
controlled_by(13,4,10,29,27).
controlled_by(13,6,18,12,25).
controlled_by(13,7,21,24,14).
controlled_by(13,8,9,29,6).
controlled_by(13,9,29,24,2).
controlled_by(13,11,11,30,4).
controlled_by(13,11,23,10,20).
controlled_by(13,15,3,4,15).
controlled_by(13,20,6,3,22).
controlled_by(13,25,6,24,21).
controlled_by(13,28,5,11,30).
controlled_by(13,29,15,9,20).
controlled_by(13,29,21,17,12).
controlled_by(13,30,4,6,12).
controlled_by(13,30,6,5,22).
controlled_by(14,4,13,30,3).
controlled_by(14,4,29,29,18).
controlled_by(14,5,22,23,4).
controlled_by(14,7,6,6,1).
controlled_by(14,8,3,11,2).
controlled_by(14,8,28,8,21).
controlled_by(14,8,28,18,6).
controlled_by(14,11,5,17,27).
controlled_by(14,16,16,2,15).
controlled_by(14,19,10,30,28).
controlled_by(14,23,27,8,26).
controlled_by(14,25,1,6,27).
controlled_by(14,25,8,2,4).
controlled_by(14,30,20,29,3).
controlled_by(14,30,29,3,27).
controlled_by(15,1,7,30,29).
controlled_by(15,1,8,11,1).
controlled_by(15,5,2,23,23).
controlled_by(15,7,5,24,13).
controlled_by(15,9,26,17,27).
controlled_by(15,16,12,21,14).
controlled_by(15,18,30,16,28).
controlled_by(15,20,2,18,17).
controlled_by(15,21,12,4,9).
controlled_by(15,21,27,23,7).
controlled_by(15,22,21,29,25).
controlled_by(15,27,5,24,6).
controlled_by(15,27,21,7,23).
controlled_by(15,30,10,7,17).
controlled_by(15,30,17,26,29).
controlled_by(16,6,29,13,15).
controlled_by(16,7,21,1,21).
controlled_by(16,9,5,1,17).
controlled_by(16,11,2,15,10).
controlled_by(16,14,1,21,27).
controlled_by(16,14,6,18,1).
controlled_by(16,15,4,10,13).
controlled_by(16,18,25,22,3).
controlled_by(16,19,7,24,3).
controlled_by(16,20,27,30,6).
controlled_by(16,23,1,25,23).
controlled_by(16,23,9,19,8).
controlled_by(16,24,14,3,8).
controlled_by(16,25,29,25,10).
controlled_by(16,30,29,24,4).
controlled_by(17,1,19,24,16).
controlled_by(17,5,13,29,25).
controlled_by(17,7,23,26,11).
controlled_by(17,13,23,16,5).
controlled_by(17,14,13,6,27).
controlled_by(17,14,13,26,11).
controlled_by(17,15,10,21,30).
controlled_by(17,16,15,11,19).
controlled_by(17,16,24,5,9).
controlled_by(17,18,23,7,22).
controlled_by(17,19,13,11,30).
controlled_by(17,20,1,11,6).
controlled_by(17,25,2,4,2).
controlled_by(17,27,16,6,25).
controlled_by(17,29,30,13,5).
controlled_by(18,2,4,22,22).
controlled_by(18,3,8,14,13).
controlled_by(18,3,16,30,20).
controlled_by(18,5,11,17,23).
controlled_by(18,7,4,7,6).
controlled_by(18,9,17,29,26).
controlled_by(18,15,3,26,28).
controlled_by(18,15,16,27,12).
controlled_by(18,20,19,1,5).
controlled_by(18,23,22,28,25).
controlled_by(18,23,29,17,28).
controlled_by(18,24,22,21,15).
controlled_by(18,25,13,20,12).
controlled_by(18,26,20,16,27).
controlled_by(18,27,8,10,25).
controlled_by(19,1,26,17,27).
controlled_by(19,4,18,6,3).
controlled_by(19,6,11,15,13).
controlled_by(19,7,20,1,13).
controlled_by(19,8,17,6,21).
controlled_by(19,8,22,7,1).
controlled_by(19,11,15,15,27).
controlled_by(19,11,30,27,21).
controlled_by(19,13,18,24,18).
controlled_by(19,20,1,10,8).
controlled_by(19,20,9,7,6).
controlled_by(19,21,15,16,29).
controlled_by(19,22,12,22,28).
controlled_by(19,22,26,4,29).
controlled_by(19,23,4,24,9).
controlled_by(20,1,6,26,30).
controlled_by(20,1,25,19,23).
controlled_by(20,4,22,13,18).
controlled_by(20,5,19,5,14).
controlled_by(20,8,14,28,25).
controlled_by(20,12,3,4,16).
controlled_by(20,13,14,21,24).
controlled_by(20,16,17,9,27).
controlled_by(20,17,3,15,30).
controlled_by(20,17,18,17,3).
controlled_by(20,19,7,8,6).
controlled_by(20,23,25,22,4).
controlled_by(20,24,29,16,15).
controlled_by(20,27,2,19,14).
controlled_by(20,28,11,16,21).
controlled_by(21,2,1,16,30).
controlled_by(21,10,11,7,17).
controlled_by(21,11,24,19,1).
controlled_by(21,13,10,25,22).
controlled_by(21,14,10,25,15).
controlled_by(21,16,23,3,19).
controlled_by(21,17,22,5,9).
controlled_by(21,18,19,26,4).
controlled_by(21,19,22,24,26).
controlled_by(21,19,23,8,8).
controlled_by(21,23,29,30,3).
controlled_by(21,25,9,27,12).
controlled_by(21,26,27,16,16).
controlled_by(21,28,7,4,4).
controlled_by(21,29,17,24,10).
controlled_by(22,3,15,23,17).
controlled_by(22,5,10,14,23).
controlled_by(22,6,1,3,23).
controlled_by(22,11,2,3,13).
controlled_by(22,11,25,17,5).
controlled_by(22,12,21,14,18).
controlled_by(22,15,24,18,2).
controlled_by(22,17,27,20,29).
controlled_by(22,18,10,1,28).
controlled_by(22,18,19,28,7).
controlled_by(22,19,10,8,1).
controlled_by(22,19,20,14,3).
controlled_by(22,21,8,10,1).
controlled_by(22,28,28,14,12).
controlled_by(22,30,27,21,11).
controlled_by(23,2,30,16,30).
controlled_by(23,3,11,16,22).
controlled_by(23,4,22,29,13).
controlled_by(23,4,29,27,22).
controlled_by(23,10,7,3,4).
controlled_by(23,10,18,19,24).
controlled_by(23,11,8,27,21).
controlled_by(23,11,29,11,2).
controlled_by(23,13,15,2,14).
controlled_by(23,17,21,18,18).
controlled_by(23,19,30,9,15).
controlled_by(23,20,24,11,25).
controlled_by(23,22,11,25,6).
controlled_by(23,24,18,24,11).
controlled_by(23,27,20,15,28).
controlled_by(24,3,21,19,5).
controlled_by(24,9,15,23,20).
controlled_by(24,14,14,30,19).
controlled_by(24,17,7,12,2).
controlled_by(24,18,15,15,23).
controlled_by(24,20,12,13,18).
controlled_by(24,20,30,6,16).
controlled_by(24,23,29,17,4).
controlled_by(24,25,8,19,11).
controlled_by(24,27,26,14,6).
controlled_by(24,28,4,16,21).
controlled_by(24,28,7,26,13).
controlled_by(24,28,20,7,20).
controlled_by(24,29,20,20,10).
controlled_by(24,29,30,19,30).
controlled_by(25,2,27,17,13).
controlled_by(25,3,7,7,23).
controlled_by(25,5,13,29,2).
controlled_by(25,7,20,23,13).
controlled_by(25,8,6,15,21).
controlled_by(25,12,28,17,11).
controlled_by(25,17,16,20,23).
controlled_by(25,19,17,23,1).
controlled_by(25,20,2,15,4).
controlled_by(25,21,12,19,10).
controlled_by(25,22,12,4,3).
controlled_by(25,22,27,21,27).
controlled_by(25,24,15,2,2).
controlled_by(25,24,21,17,15).
controlled_by(25,26,16,23,7).
controlled_by(26,5,8,5,14).
controlled_by(26,7,28,23,18).
controlled_by(26,9,17,13,25).
controlled_by(26,9,19,13,25).
controlled_by(26,11,14,20,9).
controlled_by(26,12,4,30,23).
controlled_by(26,12,16,24,18).
controlled_by(26,12,30,5,25).
controlled_by(26,15,29,5,24).
controlled_by(26,17,9,9,21).
controlled_by(26,18,8,7,1).
controlled_by(26,22,8,30,30).
controlled_by(26,22,16,24,11).
controlled_by(26,25,9,30,3).
controlled_by(26,28,12,20,30).
controlled_by(27,8,25,29,30).
controlled_by(27,8,29,8,18).
controlled_by(27,13,4,23,14).
controlled_by(27,13,24,23,30).
controlled_by(27,16,11,2,10).
controlled_by(27,17,30,22,17).
controlled_by(27,19,23,5,8).
controlled_by(27,21,13,28,17).
controlled_by(27,21,24,8,25).
controlled_by(27,23,9,14,19).
controlled_by(27,24,8,13,24).
controlled_by(27,26,7,24,1).
controlled_by(27,28,3,25,5).
controlled_by(27,29,17,12,15).
controlled_by(27,30,23,25,18).
controlled_by(28,5,29,12,30).
controlled_by(28,12,11,30,15).
controlled_by(28,12,21,15,8).
controlled_by(28,13,21,24,11).
controlled_by(28,18,12,30,27).
controlled_by(28,18,13,29,17).
controlled_by(28,20,8,8,13).
controlled_by(28,20,21,8,29).
controlled_by(28,20,22,6,2).
controlled_by(28,21,4,14,11).
controlled_by(28,21,30,14,29).
controlled_by(28,22,2,5,12).
controlled_by(28,25,15,24,16).
controlled_by(28,27,8,8,18).
controlled_by(28,30,22,3,12).
controlled_by(29,1,10,19,19).
controlled_by(29,2,19,9,28).
controlled_by(29,3,8,6,14).
controlled_by(29,3,16,11,6).
controlled_by(29,4,18,15,28).
controlled_by(29,11,23,22,18).
controlled_by(29,12,15,2,26).
controlled_by(29,16,16,1,1).
controlled_by(29,18,28,13,30).
controlled_by(29,19,2,3,26).
controlled_by(29,21,5,12,9).
controlled_by(29,21,13,15,3).
controlled_by(29,22,26,21,24).
controlled_by(29,26,27,23,9).
controlled_by(29,28,11,17,24).
controlled_by(30,2,4,12,3).
controlled_by(30,2,13,27,19).
controlled_by(30,7,23,17,7).
controlled_by(30,10,8,18,18).
controlled_by(30,10,10,18,13).
controlled_by(30,13,3,11,11).
controlled_by(30,15,3,4,20).
controlled_by(30,15,27,27,8).
controlled_by(30,16,3,29,2).
controlled_by(30,16,7,21,17).
controlled_by(30,16,11,14,21).
controlled_by(30,17,1,11,27).
controlled_by(30,19,25,13,5).
controlled_by(30,21,22,29,22).
controlled_by(30,22,17,8,23).
produced_by(p1,10,26,14,4).
produced_by(p2,24,15,19,18).
produced_by(p3,10,8,1,2).
produced_by(p4,28,22,25,23).
produced_by(p5,13,28,18,25).
produced_by(p6,22,28,5,29).
produced_by(p7,16,23,6,15).
produced_by(p8,7,14,21,9).
produced_by(p9,9,4,12,3).
produced_by(p10,10,22,20,20).
produced_by(p11,21,20,13,19).
produced_by(p12,4,29,3,16).
produced_by(p13,26,20,3,10).
produced_by(p14,17,29,8,3).
produced_by(p15,22,5,17,20).
produced_by(p16,18,29,28,26).
produced_by(p17,24,2,20,3).
produced_by(p18,23,10,14,6).
produced_by(p19,21,26,24,24).
produced_by(p20,24,26,2,12).
produced_by(p21,8,4,13,24).
produced_by(p22,24,20,18,7).
produced_by(p23,24,26,27,3).
produced_by(p24,16,16,20,9).
produced_by(p25,17,10,4,2).
produced_by(p26,11,17,7,1).
produced_by(p27,5,22,17,20).
produced_by(p28,9,10,1,16).
produced_by(p29,5,5,2,28).
produced_by(p30,24,11,5,9).
produced_by(p31,7,1,3,22).
produced_by(p32,8,23,23,17).
produced_by(p33,2,26,18,12).
produced_by(p34,12,16,4,16).
produced_by(p35,29,20,28,29).
produced_by(p36,29,20,15,3).
produced_by(p37,25,8,1,10).
produced_by(p38,18,27,19,16).
produced_by(p39,27,13,8,26).
produced_by(p40,5,22,12,6).
produced_by(p41,17,21,17,20).
produced_by(p42,6,21,28,26).
produced_by(p43,2,25,25,23).
produced_by(p44,14,1,25,8).
produced_by(p45,8,17,18,17).
produced_by(p46,13,28,3,1).
produced_by(p47,10,2,27,15).
produced_by(p48,23,8,12,9).
produced_by(p49,21,21,28,26).
produced_by(p50,3,25,14,4).
produced_by(p51,11,8,26,25).
produced_by(p52,30,13,24,29).
produced_by(p53,21,3,15,26).
produced_by(p54,30,9,18,2).
produced_by(p55,10,6,8,2).
produced_by(p56,14,19,2,26).
produced_by(p57,1,22,21,3).
produced_by(p58,8,26,29,19).
produced_by(p59,25,16,5,24).
produced_by(p60,20,28,22,11).
produced_by(p61,1,29,28,22).
produced_by(p62,7,15,15,17).
produced_by(p63,21,22,10,26).
produced_by(p64,3,4,21,3).
produced_by(p65,25,3,28,24).
produced_by(p66,29,26,4,23).
produced_by(p67,3,8,17,23).
produced_by(p68,28,30,25,28).
produced_by(p69,28,22,11,5).
produced_by(p70,6,26,13,18).
produced_by(p71,17,22,13,11).
produced_by(p72,17,25,6,3).
produced_by(p73,28,3,27,26).
produced_by(p74,20,30,10,22).
produced_by(p75,30,18,14,27).
produced_by(p76,18,8,16,7).
produced_by(p77,29,26,3,27).
produced_by(p78,13,15,14,30).
produced_by(p79,29,19,2,15).
produced_by(p80,13,7,18,10).
produced_by(p81,9,14,27,28).
produced_by(p82,5,29,12,4).
produced_by(p83,16,25,22,25).
produced_by(p84,25,7,24,15).
produced_by(p85,25,26,3,7).
produced_by(p86,11,9,28,9).
produced_by(p87,27,30,23,1).
produced_by(p88,28,2,11,7).
produced_by(p89,15,29,26,12).
produced_by(p90,27,29,7,5).
produced_by(p91,24,29,21,10).
produced_by(p92,27,14,24,21).
produced_by(p93,2,27,28,12).
produced_by(p94,5,17,12,23).
produced_by(p95,16,26,23,14).
produced_by(p96,28,25,12,4).
produced_by(p97,16,29,15,4).
produced_by(p98,28,22,8,13).
produced_by(p99,20,21,14,16).
produced_by(p100,26,7,29,27).
produced_by(p101,25,18,8,21).
produced_by(p102,4,11,5,12).
produced_by(p103,7,28,17,26).
produced_by(p104,14,20,29,29).
produced_by(p105,10,14,3,29).
produced_by(p106,5,2,11,16).
produced_by(p107,22,16,23,18).
produced_by(p108,23,21,6,9).
produced_by(p109,30,6,22,4).
produced_by(p110,16,26,7,14).
produced_by(p111,15,15,9,29).
produced_by(p112,4,8,19,5).
produced_by(p113,13,21,4,17).
produced_by(p114,15,6,24,28).
produced_by(p115,22,16,7,6).
produced_by(p116,29,13,14,28).
produced_by(p117,18,27,23,3).
produced_by(p118,23,29,9,7).
produced_by(p119,13,17,27,8).
produced_by(p120,16,16,13,28).
produced_by(p121,6,8,6,12).
produced_by(p122,13,29,10,4).
produced_by(p123,7,16,1,5).
produced_by(p124,28,15,2,15).
produced_by(p125,3,25,10,25).
produced_by(p126,15,18,24,20).
produced_by(p127,26,20,27,4).
produced_by(p128,5,1,23,3).
produced_by(p129,8,29,14,13).
produced_by(p130,19,23,8,25).
produced_by(p131,9,9,21,28).
produced_by(p132,15,23,13,17).
produced_by(p133,9,14,12,23).
produced_by(p134,23,27,12,10).
produced_by(p135,16,1,13,13).
produced_by(p136,1,6,15,1).
produced_by(p137,26,20,13,6).
produced_by(p138,5,20,23,5).
produced_by(p139,28,13,2,4).
produced_by(p140,5,6,21,5).
produced_by(p141,19,24,28,3).
produced_by(p142,20,1,13,5).
produced_by(p143,1,25,9,24).
produced_by(p144,22,15,24,9).
produced_by(p145,5,6,15,1).
produced_by(p146,17,7,5,7).
produced_by(p147,11,28,10,8).
produced_by(p148,4,30,12,14).
produced_by(p149,15,1,17,4).
produced_by(p150,2,29,1,24).
produced_by(p151,15,9,17,29).
produced_by(p152,24,10,29,20).
produced_by(p153,7,5,20,16).
produced_by(p154,3,16,22,14).
produced_by(p155,13,1,21,8).
produced_by(p156,23,2,22,7).
produced_by(p157,25,8,3,26).
produced_by(p158,28,3,11,4).
produced_by(p159,11,28,2,26).
produced_by(p160,29,23,15,6).
produced_by(p161,27,26,13,30).
produced_by(p162,11,4,13,16).
produced_by(p163,26,25,23,10).
produced_by(p164,18,6,17,12).
produced_by(p165,5,19,29,2).
produced_by(p166,13,10,28,15).
produced_by(p167,7,21,11,5).
produced_by(p168,13,17,2,10).
produced_by(p169,13,14,9,15).
produced_by(p170,9,13,22,5).
produced_by(p171,7,7,14,24).
produced_by(p172,12,22,6,9).
produced_by(p173,2,4,2,14).
produced_by(p174,5,29,29,11).
produced_by(p175,20,9,8,2).
produced_by(p176,17,1,3,21).
produced_by(p177,15,3,6,15).
produced_by(p178,15,19,19,13).
produced_by(p179,25,25,29,29).
produced_by(p180,8,4,7,10).
produced_by(p181,29,8,23,4).
produced_by(p182,29,13,6,18).
produced_by(p183,13,5,11,30).
produced_by(p184,6,6,20,12).
produced_by(p185,8,17,26,15).
produced_by(p186,28,7,19,22).
produced_by(p187,1,17,12,8).
produced_by(p188,12,10,9,11).
produced_by(p189,10,24,6,8).
produced_by(p190,6,3,25,19).
produced_by(p191,8,27,10,13).
produced_by(p192,24,29,16,2).
produced_by(p193,8,11,8,5).
produced_by(p194,17,26,18,9).
produced_by(p195,13,30,9,24).
produced_by(p196,1,9,26,10).
produced_by(p197,2,23,17,8).
produced_by(p198,26,3,18,25).
produced_by(p199,22,27,29,15).
produced_by(p200,25,14,16,24).
produced_by(p201,24,15,28,3).
produced_by(p202,11,16,3,15).
produced_by(p203,7,3,8,29).
produced_by(p204,4,26,9,5).
produced_by(p205,18,25,12,5).
produced_by(p206,20,29,29,11).
produced_by(p207,17,27,25,12).
produced_by(p208,2,11,27,26).
produced_by(p209,25,25,20,27).
produced_by(p210,2,14,11,30).
produced_by(p211,9,11,28,12).
produced_by(p212,6,6,16,23).
produced_by(p213,23,20,20,12).
produced_by(p214,10,18,22,27).
produced_by(p215,7,16,30,8).
produced_by(p216,18,26,25,5).
produced_by(p217,12,6,23,5).
produced_by(p218,20,4,4,20).
produced_by(p219,6,24,1,11).
produced_by(p220,29,8,25,13).
produced_by(p221,19,14,16,29).
produced_by(p222,24,7,17,30).
produced_by(p223,15,8,7,24).
produced_by(p224,25,24,28,7).
produced_by(p225,29,21,11,18).
produced_by(p226,16,7,7,21).
produced_by(p227,30,29,23,20).
produced_by(p228,7,17,3,25).
produced_by(p229,23,18,15,16).
produced_by(p230,17,23,15,1).
produced_by(p231,30,13,16,25).
produced_by(p232,6,14,23,5).
produced_by(p233,26,25,14,3).
produced_by(p234,1,21,15,22).
produced_by(p235,11,7,12,17).
produced_by(p236,15,6,4,7).
produced_by(p237,15,18,22,1).
produced_by(p238,3,28,23,2).
produced_by(p239,11,9,18,16).
produced_by(p240,14,2,12,1).
produced_by(p241,19,26,25,19).
produced_by(p242,16,1,3,26).
produced_by(p243,7,14,5,13).
produced_by(p244,11,30,12,25).
produced_by(p245,17,3,18,11).
produced_by(p246,1,2,13,3).
produced_by(p247,2,22,10,7).
produced_by(p248,16,22,29,4).
produced_by(p249,17,23,22,24).
produced_by(p250,23,16,11,21).
produced_by(p251,21,15,26,1).
produced_by(p252,14,7,18,23).
produced_by(p253,9,5,3,1).
produced_by(p254,28,7,3,30).
produced_by(p255,21,13,28,6).
produced_by(p256,26,27,9,12).
produced_by(p257,19,22,5,4).
produced_by(p258,8,15,16,28).
produced_by(p259,22,11,21,27).
produced_by(p260,17,8,19,18).
produced_by(p261,4,14,10,1).
produced_by(p262,12,5,22,2).
produced_by(p263,9,20,7,4).
produced_by(p264,8,7,15,18).
produced_by(p265,29,11,21,6).
produced_by(p266,25,29,25,8).
produced_by(p267,9,15,5,18).
produced_by(p268,14,15,5,17).
produced_by(p269,20,14,10,2).
produced_by(p270,18,23,25,26).
produced_by(p271,4,2,29,11).
produced_by(p272,8,5,29,28).
produced_by(p273,15,11,25,10).
produced_by(p274,9,20,9,18).
produced_by(p275,26,13,5,10).
produced_by(p276,20,9,18,9).
produced_by(p277,14,27,10,2).
produced_by(p278,20,5,19,23).
produced_by(p279,28,18,4,27).
produced_by(p280,22,24,25,7).
produced_by(p281,4,19,8,13).
produced_by(p282,30,16,30,26).
produced_by(p283,21,26,5,10).
produced_by(p284,26,22,18,9).
produced_by(p285,11,20,2,30).
produced_by(p286,16,21,14,5).
produced_by(p287,30,9,1,13).
produced_by(p288,2,17,11,6).
produced_by(p289,28,10,10,27).
produced_by(p290,18,1,22,8).
produced_by(p291,26,18,9,13).
produced_by(p292,2,18,21,12).
produced_by(p293,29,15,3,14).
produced_by(p294,27,8,10,18).
produced_by(p295,17,3,30,10).
produced_by(p296,19,3,7,16).
produced_by(p297,12,16,5,21).
produced_by(p298,16,18,20,3).
produced_by(p299,6,20,15,7).
produced_by(p300,8,28,10,6).
produced_by(p301,4,12,12,30).
produced_by(p302,19,13,9,27).
produced_by(p303,15,30,7,4).
produced_by(p304,2,5,11,6).
produced_by(p305,21,15,26,28).
produced_by(p306,3,8,23,30).
produced_by(p307,27,29,28,4).
produced_by(p308,26,7,2,21).
produced_by(p309,18,5,20,28).
produced_by(p310,17,20,25,24).
produced_by(p311,20,23,27,21).
produced_by(p312,27,29,18,17).
produced_by(p313,6,14,7,8).
produced_by(p314,13,29,29,9).
produced_by(p315,27,26,5,15).
produced_by(p316,2,6,5,11).
produced_by(p317,2,17,8,10).
produced_by(p318,6,24,3,25).
produced_by(p319,16,21,8,13).
produced_by(p320,20,25,21,25).
produced_by(p321,30,27,24,12).
produced_by(p322,17,14,13,6).
produced_by(p323,9,17,12,2).
produced_by(p324,14,8,12,15).
produced_by(p325,16,11,16,22).
produced_by(p326,5,11,8,20).
produced_by(p327,1,7,2,12).
produced_by(p328,24,23,28,15).
produced_by(p329,11,13,27,20).
produced_by(p330,26,1,17,26).
produced_by(p331,17,28,27,30).
produced_by(p332,5,30,6,21).
produced_by(p333,11,13,4,15).
produced_by(p334,23,3,26,16).
produced_by(p335,2,20,27,25).
produced_by(p336,4,17,9,14).
produced_by(p337,29,27,25,17).
produced_by(p338,27,11,12,5).
produced_by(p339,30,1,26,27).
produced_by(p340,30,23,17,2).
produced_by(p341,6,12,8,20).
produced_by(p342,6,4,5,7).
produced_by(p343,23,24,1,26).
produced_by(p344,10,2,1,30).
produced_by(p345,28,26,16,17).
produced_by(p346,28,28,21,28).
produced_by(p347,28,9,24,19).
produced_by(p348,1,2,21,6).
produced_by(p349,13,28,26,10).
produced_by(p350,23,22,17,7).
produced_by(p351,15,9,2,16).
produced_by(p352,2,25,16,22).
produced_by(p353,20,1,8,17).
produced_by(p354,20,20,6,9).
produced_by(p355,28,29,20,29).
produced_by(p356,30,10,26,4).
produced_by(p357,29,13,14,14).
produced_by(p358,27,22,20,11).
produced_by(p359,30,22,19,2).
produced_by(p360,8,4,23,19).
produced_by(p361,26,22,5,16).
produced_by(p362,3,11,16,1).
produced_by(p363,1,5,21,1).
produced_by(p364,6,8,4,5).
produced_by(p365,21,17,18,17).
produced_by(p366,8,29,19,30).
produced_by(p367,12,29,23,19).
produced_by(p368,2,7,7,20).
produced_by(p369,28,4,27,30).
produced_by(p370,6,12,22,6).
produced_by(p371,9,4,6,14).
produced_by(p372,12,2,10,24).
produced_by(p373,18,19,2,18).
produced_by(p374,18,12,9,29).
produced_by(p375,11,1,18,12).
produced_by(p376,29,16,23,26).
produced_by(p377,19,19,17,24).
produced_by(p378,1,1,30,9).
produced_by(p379,4,27,14,7).
produced_by(p380,28,24,30,8).
produced_by(p381,12,23,17,21).
produced_by(p382,5,25,12,15).
produced_by(p383,17,29,18,15).
produced_by(p384,6,11,2,25).
produced_by(p385,29,10,18,21).
produced_by(p386,10,9,21,6).
produced_by(p387,28,5,12,25).
produced_by(p388,20,4,24,23).
produced_by(p389,26,10,14,22).
produced_by(p390,26,25,28,12).
produced_by(p391,15,16,26,20).
produced_by(p392,18,19,6,8).
produced_by(p393,29,16,21,30).
produced_by(p394,24,11,27,13).
produced_by(p395,15,9,8,4).
produced_by(p396,4,1,27,21).
produced_by(p397,3,2,13,28).
produced_by(p398,18,10,10,2).
produced_by(p399,17,27,13,26).
produced_by(p400,8,11,26,6).
produced_by(p401,26,16,27,11).
produced_by(p402,26,24,24,3).
produced_by(p403,24,1,28,27).
produced_by(p404,23,16,9,25).
produced_by(p405,17,13,15,26).
produced_by(p406,15,16,27,1).
produced_by(p407,12,2,19,19).
produced_by(p408,12,14,16,7).
produced_by(p409,21,13,9,16).
produced_by(p410,28,2,10,21).
produced_by(p411,24,8,9,17).
produced_by(p412,23,17,3,2).
produced_by(p413,30,17,27,14).
produced_by(p414,24,16,6,6).
produced_by(p415,9,16,16,20).
produced_by(p416,29,24,18,19).
produced_by(p417,28,26,27,25).
produced_by(p418,28,6,15,13).
produced_by(p419,5,15,29,28).
produced_by(p420,1,24,21,30).
produced_by(p421,2,9,5,26).
produced_by(p422,24,11,23,2).
produced_by(p423,26,30,13,17).
produced_by(p424,23,30,27,20).
produced_by(p425,18,15,6,7).
produced_by(p426,21,12,19,25).
produced_by(p427,26,10,14,19).
produced_by(p428,3,26,10,26).
produced_by(p429,5,15,21,28).
produced_by(p430,17,13,22,4).
produced_by(p431,5,4,12,27).
produced_by(p432,26,9,9,13).
produced_by(p433,23,14,19,5).
produced_by(p434,18,29,22,5).
produced_by(p435,8,27,23,10).
produced_by(p436,23,3,6,19).
produced_by(p437,17,18,16,25).
produced_by(p438,23,29,28,27).
produced_by(p439,3,10,23,20).
produced_by(p440,10,23,2,2).
produced_by(p441,29,12,29,16).
produced_by(p442,10,20,12,10).
produced_by(p443,8,5,19,30).
produced_by(p444,7,16,10,15).
produced_by(p445,26,18,9,18).
produced_by(p446,16,6,6,10).
produced_by(p447,7,20,29,16).
produced_by(p448,13,22,10,3).
produced_by(p449,25,8,18,5).
produced_by(p450,19,29,6,26).
produced_by(p451,25,24,18,1).
produced_by(p452,2,27,15,19).
produced_by(p453,14,15,6,22).
produced_by(p454,13,3,1,19).
produced_by(p455,22,30,5,4).
produced_by(p456,21,6,28,8).
produced_by(p457,13,15,12,1).
produced_by(p458,6,17,18,22).
produced_by(p459,2,27,23,25).
produced_by(p460,24,29,13,29).
produced_by(p461,14,10,20,26).
produced_by(p462,12,21,6,26).
produced_by(p463,12,2,21,24).
produced_by(p464,7,19,1,19).
produced_by(p465,3,4,11,30).
produced_by(p466,12,21,22,6).
produced_by(p467,17,6,22,10).
produced_by(p468,26,5,1,1).
produced_by(p469,6,20,18,10).
produced_by(p470,2,24,5,13).
produced_by(p471,25,25,29,2).
produced_by(p472,13,29,12,8).
produced_by(p473,3,15,29,6).
produced_by(p474,5,12,11,21).
produced_by(p475,17,3,23,5).
produced_by(p476,29,23,5,26).
produced_by(p477,4,23,5,6).
produced_by(p478,8,9,10,24).
produced_by(p479,26,8,17,8).
produced_by(p480,7,29,7,1).
produced_by(p481,5,6,28,1).
produced_by(p482,17,1,21,26).
produced_by(p483,3,5,30,23).
produced_by(p484,19,26,10,23).
produced_by(p485,10,15,20,17).
produced_by(p486,15,29,11,10).
produced_by(p487,29,19,10,27).
produced_by(p488,9,16,19,13).
produced_by(p489,21,16,13,30).
produced_by(p490,16,26,25,10).
produced_by(p491,22,16,24,11).
produced_by(p492,3,4,25,13).
produced_by(p493,18,14,21,24).
produced_by(p494,4,23,26,2).
produced_by(p495,12,27,28,20).
produced_by(p496,12,16,3,25).
produced_by(p497,24,7,16,9).
produced_by(p498,2,10,11,24).
produced_by(p499,17,4,26,19).
produced_by(p500,7,20,1,16).
produced_by(p501,25,14,2,28).
produced_by(p502,6,27,22,17).
produced_by(p503,15,19,7,26).
produced_by(p504,27,1,12,20).
produced_by(p505,7,27,20,1).
produced_by(p506,28,30,24,6).
produced_by(p507,26,19,25,2).
produced_by(p508,30,17,10,24).
produced_by(p509,30,3,21,6).
produced_by(p510,29,12,14,5).
produced_by(p511,23,20,30,19).
produced_by(p512,12,4,30,19).
produced_by(p513,22,19,19,20).
produced_by(p514,11,4,25,6).
produced_by(p515,14,11,29,13).
produced_by(p516,20,30,6,11).
produced_by(p517,2,18,8,22).
produced_by(p518,22,22,26,14).
produced_by(p519,3,18,24,15).
produced_by(p520,21,15,3,12).
produced_by(p521,25,13,23,5).
produced_by(p522,16,10,2,29).
produced_by(p523,12,1,3,1).
produced_by(p524,30,30,4,24).
produced_by(p525,17,11,15,8).
produced_by(p526,24,11,13,27).
produced_by(p527,28,28,11,10).
produced_by(p528,12,5,13,7).
produced_by(p529,9,6,3,24).
produced_by(p530,7,5,14,18).
produced_by(p531,27,8,11,26).
produced_by(p532,7,14,19,23).
produced_by(p533,24,26,23,18).
produced_by(p534,6,27,6,25).
produced_by(p535,25,8,4,28).
produced_by(p536,12,16,26,20).
produced_by(p537,13,29,5,19).
produced_by(p538,25,10,7,21).
produced_by(p539,9,17,16,15).
produced_by(p540,30,27,29,15).
produced_by(p541,22,21,24,19).
produced_by(p542,10,29,13,26).
produced_by(p543,6,8,23,9).
produced_by(p544,23,19,20,28).
produced_by(p545,9,16,16,3).
produced_by(p546,25,14,23,25).
produced_by(p547,22,30,9,13).
produced_by(p548,18,8,28,9).
produced_by(p549,20,21,19,29).
produced_by(p550,12,1,24,17).
produced_by(p551,30,9,18,15).
produced_by(p552,27,7,12,5).
produced_by(p553,23,27,29,9).
produced_by(p554,3,21,4,24).
produced_by(p555,20,12,7,8).
produced_by(p556,11,26,8,1).
produced_by(p557,8,19,21,19).
produced_by(p558,19,15,28,11).
produced_by(p559,23,15,25,11).
produced_by(p560,13,6,7,5).
produced_by(p561,24,5,6,26).
produced_by(p562,25,9,12,6).
produced_by(p563,12,10,5,15).
produced_by(p564,5,5,15,4).
produced_by(p565,23,27,15,3).
produced_by(p566,11,12,13,25).
produced_by(p567,18,7,27,30).
produced_by(p568,12,3,27,6).
produced_by(p569,29,2,23,15).
produced_by(p570,2,26,13,5).
produced_by(p571,5,17,19,1).
produced_by(p572,21,25,27,5).
produced_by(p573,22,11,8,24).
produced_by(p574,14,20,11,1).
produced_by(p575,27,7,30,30).
produced_by(p576,2,26,27,30).
produced_by(p577,19,12,15,20).
produced_by(p578,7,27,25,4).
produced_by(p579,5,5,4,26).
produced_by(p580,30,30,22,21).
produced_by(p581,2,29,14,15).
produced_by(p582,19,16,15,7).
produced_by(p583,23,7,28,24).
produced_by(p584,24,25,15,13).
produced_by(p585,6,29,24,4).
produced_by(p586,17,10,7,22).
produced_by(p587,15,3,9,14).
produced_by(p588,24,30,26,26).
produced_by(p589,29,1,2,9).
produced_by(p590,17,17,7,1).
produced_by(p591,15,4,24,30).
produced_by(p592,20,8,4,25).
produced_by(p593,29,28,21,15).
produced_by(p594,29,19,28,13).
produced_by(p595,21,28,18,7).
produced_by(p596,28,13,2,18).
produced_by(p597,6,25,18,22).
produced_by(p598,3,24,22,17).
produced_by(p599,27,7,17,17).
produced_by(p600,6,12,3,4).
produced_by(p601,9,15,11,8).
produced_by(p602,4,8,12,24).
produced_by(p603,6,30,22,25).
produced_by(p604,4,15,4,1).
produced_by(p605,10,21,22,12).
produced_by(p606,6,5,29,24).
produced_by(p607,3,7,2,9).
produced_by(p608,18,5,12,19).
produced_by(p609,11,14,18,14).
produced_by(p610,14,21,30,19).
produced_by(p611,20,21,5,16).
produced_by(p612,6,30,16,15).
produced_by(p613,12,30,18,17).
produced_by(p614,26,8,10,29).
produced_by(p615,6,12,7,24).
produced_by(p616,8,10,4,18).
produced_by(p617,24,13,2,7).
produced_by(p618,3,23,17,15).
produced_by(p619,13,13,30,10).
produced_by(p620,12,7,16,23).
produced_by(p621,6,4,1,24).
produced_by(p622,11,10,22,17).
produced_by(p623,13,20,2,12).
produced_by(p624,21,27,30,14).
produced_by(p625,9,23,12,11).
produced_by(p626,7,20,25,19).
produced_by(p627,2,16,21,5).
produced_by(p628,23,6,19,20).
produced_by(p629,1,19,13,12).
produced_by(p630,21,26,20,25).
produced_by(p631,7,13,7,28).
produced_by(p632,9,28,11,17).
produced_by(p633,20,15,19,26).
produced_by(p634,4,14,6,28).
produced_by(p635,21,26,24,13).
produced_by(p636,24,13,25,24).
produced_by(p637,23,29,27,5).
produced_by(p638,17,8,30,23).
produced_by(p639,20,28,20,28).
produced_by(p640,25,23,6,6).
produced_by(p641,7,25,1,2).
produced_by(p642,30,28,29,20).
produced_by(p643,16,15,25,1).
produced_by(p644,19,11,24,11).
produced_by(p645,9,13,8,17).
produced_by(p646,20,7,10,2).
produced_by(p647,26,21,29,20).
produced_by(p648,13,27,25,11).
produced_by(p649,13,17,13,12).
produced_by(p650,6,3,23,13).
produced_by(p651,17,17,13,5).
produced_by(p652,19,29,8,20).
produced_by(p653,11,15,6,22).
produced_by(p654,13,7,23,8).
produced_by(p655,28,14,19,10).
produced_by(p656,10,5,21,22).
produced_by(p657,21,25,25,18).
produced_by(p658,27,17,1,6).
produced_by(p659,26,13,2,6).
produced_by(p660,3,9,25,5).
"""
output = """
{controlled_by(1,13,20,22,10), controlled_by(1,15,18,8,21), controlled_by(1,15,27,11,11), controlled_by(1,17,14,15,5), controlled_by(1,19,15,11,17), controlled_by(1,19,25,14,2), controlled_by(1,2,23,21,20), controlled_by(1,23,18,27,2), controlled_by(1,29,12,25,13), controlled_by(1,29,21,16,9), controlled_by(1,29,8,26,13), controlled_by(1,3,6,9,5), controlled_by(1,30,11,30,6), controlled_by(1,5,15,4,28), controlled_by(1,6,18,17,26), controlled_by(10,11,5,7,8), controlled_by(10,14,30,27,23), controlled_by(10,18,23,16,18), controlled_by(10,18,9,15,29), controlled_by(10,20,1,18,19), controlled_by(10,22,5,11,17), controlled_by(10,23,8,13,20), controlled_by(10,28,19,23,7), controlled_by(10,28,23,21,15), controlled_by(10,28,7,26,8), controlled_by(10,4,5,15,20), controlled_by(10,6,2,16,1), controlled_by(10,9,17,28,6), controlled_by(10,9,24,17,9), controlled_by(10,9,29,29,12), controlled_by(11,1,18,28,12), controlled_by(11,12,15,20,30), controlled_by(11,14,26,15,30), controlled_by(11,15,16,25,13), controlled_by(11,15,19,25,25), controlled_by(11,17,14,19,1), controlled_by(11,18,21,9,26), controlled_by(11,21,18,25,26), controlled_by(11,22,14,6,6), controlled_by(11,27,29,5,28), controlled_by(11,28,6,16,28), controlled_by(11,3,4,12,8), controlled_by(11,30,25,30,10), controlled_by(11,7,7,24,17), controlled_by(11,9,6,7,29), controlled_by(12,1,29,19,14), controlled_by(12,15,5,13,11), controlled_by(12,16,3,5,13), controlled_by(12,18,30,14,18), controlled_by(12,19,8,30,7), controlled_by(12,20,2,23,5), controlled_by(12,25,24,18,28), controlled_by(12,27,27,10,18), controlled_by(12,28,27,27,4), controlled_by(12,30,13,2,15), controlled_by(12,4,9,27,23), controlled_by(12,5,9,25,23), controlled_by(12,6,27,8,18), controlled_by(12,8,24,21,24), controlled_by(12,9,11,2,29), controlled_by(13,11,11,30,4), controlled_by(13,11,23,10,20), controlled_by(13,15,3,4,15), controlled_by(13,20,6,3,22), controlled_by(13,25,6,24,21), controlled_by(13,28,5,11,30), controlled_by(13,29,15,9,20), 
controlled_by(13,29,21,17,12), controlled_by(13,30,4,6,12), controlled_by(13,30,6,5,22), controlled_by(13,4,10,29,27), controlled_by(13,6,18,12,25), controlled_by(13,7,21,24,14), controlled_by(13,8,9,29,6), controlled_by(13,9,29,24,2), controlled_by(14,11,5,17,27), controlled_by(14,16,16,2,15), controlled_by(14,19,10,30,28), controlled_by(14,23,27,8,26), controlled_by(14,25,1,6,27), controlled_by(14,25,8,2,4), controlled_by(14,30,20,29,3), controlled_by(14,30,29,3,27), controlled_by(14,4,13,30,3), controlled_by(14,4,29,29,18), controlled_by(14,5,22,23,4), controlled_by(14,7,6,6,1), controlled_by(14,8,28,18,6), controlled_by(14,8,28,8,21), controlled_by(14,8,3,11,2), controlled_by(15,1,7,30,29), controlled_by(15,1,8,11,1), controlled_by(15,16,12,21,14), controlled_by(15,18,30,16,28), controlled_by(15,20,2,18,17), controlled_by(15,21,12,4,9), controlled_by(15,21,27,23,7), controlled_by(15,22,21,29,25), controlled_by(15,27,21,7,23), controlled_by(15,27,5,24,6), controlled_by(15,30,10,7,17), controlled_by(15,30,17,26,29), controlled_by(15,5,2,23,23), controlled_by(15,7,5,24,13), controlled_by(15,9,26,17,27), controlled_by(16,11,2,15,10), controlled_by(16,14,1,21,27), controlled_by(16,14,6,18,1), controlled_by(16,15,4,10,13), controlled_by(16,18,25,22,3), controlled_by(16,19,7,24,3), controlled_by(16,20,27,30,6), controlled_by(16,23,1,25,23), controlled_by(16,23,9,19,8), controlled_by(16,24,14,3,8), controlled_by(16,25,29,25,10), controlled_by(16,30,29,24,4), controlled_by(16,6,29,13,15), controlled_by(16,7,21,1,21), controlled_by(16,9,5,1,17), controlled_by(17,1,19,24,16), controlled_by(17,13,23,16,5), controlled_by(17,14,13,26,11), controlled_by(17,14,13,6,27), controlled_by(17,15,10,21,30), controlled_by(17,16,15,11,19), controlled_by(17,16,24,5,9), controlled_by(17,18,23,7,22), controlled_by(17,19,13,11,30), controlled_by(17,20,1,11,6), controlled_by(17,25,2,4,2), controlled_by(17,27,16,6,25), controlled_by(17,29,30,13,5), controlled_by(17,5,13,29,25), 
controlled_by(17,7,23,26,11), controlled_by(18,15,16,27,12), controlled_by(18,15,3,26,28), controlled_by(18,2,4,22,22), controlled_by(18,20,19,1,5), controlled_by(18,23,22,28,25), controlled_by(18,23,29,17,28), controlled_by(18,24,22,21,15), controlled_by(18,25,13,20,12), controlled_by(18,26,20,16,27), controlled_by(18,27,8,10,25), controlled_by(18,3,16,30,20), controlled_by(18,3,8,14,13), controlled_by(18,5,11,17,23), controlled_by(18,7,4,7,6), controlled_by(18,9,17,29,26), controlled_by(19,1,26,17,27), controlled_by(19,11,15,15,27), controlled_by(19,11,30,27,21), controlled_by(19,13,18,24,18), controlled_by(19,20,1,10,8), controlled_by(19,20,9,7,6), controlled_by(19,21,15,16,29), controlled_by(19,22,12,22,28), controlled_by(19,22,26,4,29), controlled_by(19,23,4,24,9), controlled_by(19,4,18,6,3), controlled_by(19,6,11,15,13), controlled_by(19,7,20,1,13), controlled_by(19,8,17,6,21), controlled_by(19,8,22,7,1), controlled_by(2,1,29,6,21), controlled_by(2,14,1,17,27), controlled_by(2,16,22,4,13), controlled_by(2,20,27,9,18), controlled_by(2,21,1,4,1), controlled_by(2,21,8,21,28), controlled_by(2,28,1,4,14), controlled_by(2,30,16,25,25), controlled_by(2,5,21,6,17), controlled_by(2,6,25,7,26), controlled_by(2,7,10,11,15), controlled_by(2,8,1,25,11), controlled_by(2,8,7,8,6), controlled_by(2,9,20,28,15), controlled_by(2,9,21,30,30), controlled_by(20,1,25,19,23), controlled_by(20,1,6,26,30), controlled_by(20,12,3,4,16), controlled_by(20,13,14,21,24), controlled_by(20,16,17,9,27), controlled_by(20,17,18,17,3), controlled_by(20,17,3,15,30), controlled_by(20,19,7,8,6), controlled_by(20,23,25,22,4), controlled_by(20,24,29,16,15), controlled_by(20,27,2,19,14), controlled_by(20,28,11,16,21), controlled_by(20,4,22,13,18), controlled_by(20,5,19,5,14), controlled_by(20,8,14,28,25), controlled_by(21,10,11,7,17), controlled_by(21,11,24,19,1), controlled_by(21,13,10,25,22), controlled_by(21,14,10,25,15), controlled_by(21,16,23,3,19), controlled_by(21,17,22,5,9), 
controlled_by(21,18,19,26,4), controlled_by(21,19,22,24,26), controlled_by(21,19,23,8,8), controlled_by(21,2,1,16,30), controlled_by(21,23,29,30,3), controlled_by(21,25,9,27,12), controlled_by(21,26,27,16,16), controlled_by(21,28,7,4,4), controlled_by(21,29,17,24,10), controlled_by(22,11,2,3,13), controlled_by(22,11,25,17,5), controlled_by(22,12,21,14,18), controlled_by(22,15,24,18,2), controlled_by(22,17,27,20,29), controlled_by(22,18,10,1,28), controlled_by(22,18,19,28,7), controlled_by(22,19,10,8,1), controlled_by(22,19,20,14,3), controlled_by(22,21,8,10,1), controlled_by(22,28,28,14,12), controlled_by(22,3,15,23,17), controlled_by(22,30,27,21,11), controlled_by(22,5,10,14,23), controlled_by(22,6,1,3,23), controlled_by(23,10,18,19,24), controlled_by(23,10,7,3,4), controlled_by(23,11,29,11,2), controlled_by(23,11,8,27,21), controlled_by(23,13,15,2,14), controlled_by(23,17,21,18,18), controlled_by(23,19,30,9,15), controlled_by(23,2,30,16,30), controlled_by(23,20,24,11,25), controlled_by(23,22,11,25,6), controlled_by(23,24,18,24,11), controlled_by(23,27,20,15,28), controlled_by(23,3,11,16,22), controlled_by(23,4,22,29,13), controlled_by(23,4,29,27,22), controlled_by(24,14,14,30,19), controlled_by(24,17,7,12,2), controlled_by(24,18,15,15,23), controlled_by(24,20,12,13,18), controlled_by(24,20,30,6,16), controlled_by(24,23,29,17,4), controlled_by(24,25,8,19,11), controlled_by(24,27,26,14,6), controlled_by(24,28,20,7,20), controlled_by(24,28,4,16,21), controlled_by(24,28,7,26,13), controlled_by(24,29,20,20,10), controlled_by(24,29,30,19,30), controlled_by(24,3,21,19,5), controlled_by(24,9,15,23,20), controlled_by(25,12,28,17,11), controlled_by(25,17,16,20,23), controlled_by(25,19,17,23,1), controlled_by(25,2,27,17,13), controlled_by(25,20,2,15,4), controlled_by(25,21,12,19,10), controlled_by(25,22,12,4,3), controlled_by(25,22,27,21,27), controlled_by(25,24,15,2,2), controlled_by(25,24,21,17,15), controlled_by(25,26,16,23,7), controlled_by(25,3,7,7,23), 
controlled_by(25,5,13,29,2), controlled_by(25,7,20,23,13), controlled_by(25,8,6,15,21), controlled_by(26,11,14,20,9), controlled_by(26,12,16,24,18), controlled_by(26,12,30,5,25), controlled_by(26,12,4,30,23), controlled_by(26,15,29,5,24), controlled_by(26,17,9,9,21), controlled_by(26,18,8,7,1), controlled_by(26,22,16,24,11), controlled_by(26,22,8,30,30), controlled_by(26,25,9,30,3), controlled_by(26,28,12,20,30), controlled_by(26,5,8,5,14), controlled_by(26,7,28,23,18), controlled_by(26,9,17,13,25), controlled_by(26,9,19,13,25), controlled_by(27,13,24,23,30), controlled_by(27,13,4,23,14), controlled_by(27,16,11,2,10), controlled_by(27,17,30,22,17), controlled_by(27,19,23,5,8), controlled_by(27,21,13,28,17), controlled_by(27,21,24,8,25), controlled_by(27,23,9,14,19), controlled_by(27,24,8,13,24), controlled_by(27,26,7,24,1), controlled_by(27,28,3,25,5), controlled_by(27,29,17,12,15), controlled_by(27,30,23,25,18), controlled_by(27,8,25,29,30), controlled_by(27,8,29,8,18), controlled_by(28,12,11,30,15), controlled_by(28,12,21,15,8), controlled_by(28,13,21,24,11), controlled_by(28,18,12,30,27), controlled_by(28,18,13,29,17), controlled_by(28,20,21,8,29), controlled_by(28,20,22,6,2), controlled_by(28,20,8,8,13), controlled_by(28,21,30,14,29), controlled_by(28,21,4,14,11), controlled_by(28,22,2,5,12), controlled_by(28,25,15,24,16), controlled_by(28,27,8,8,18), controlled_by(28,30,22,3,12), controlled_by(28,5,29,12,30), controlled_by(29,1,10,19,19), controlled_by(29,11,23,22,18), controlled_by(29,12,15,2,26), controlled_by(29,16,16,1,1), controlled_by(29,18,28,13,30), controlled_by(29,19,2,3,26), controlled_by(29,2,19,9,28), controlled_by(29,21,13,15,3), controlled_by(29,21,5,12,9), controlled_by(29,22,26,21,24), controlled_by(29,26,27,23,9), controlled_by(29,28,11,17,24), controlled_by(29,3,16,11,6), controlled_by(29,3,8,6,14), controlled_by(29,4,18,15,28), controlled_by(3,10,13,8,18), controlled_by(3,11,4,2,4), controlled_by(3,13,30,21,5), controlled_by(3,15,2,24,12), 
controlled_by(3,16,24,28,28), controlled_by(3,17,17,29,26), controlled_by(3,23,10,17,30), controlled_by(3,26,11,20,26), controlled_by(3,26,26,28,4), controlled_by(3,29,20,30,28), controlled_by(3,5,12,4,4), controlled_by(3,6,13,24,21), controlled_by(3,6,16,9,2), controlled_by(3,6,5,17,29), controlled_by(3,7,24,1,27), controlled_by(30,10,10,18,13), controlled_by(30,10,8,18,18), controlled_by(30,13,3,11,11), controlled_by(30,15,27,27,8), controlled_by(30,15,3,4,20), controlled_by(30,16,11,14,21), controlled_by(30,16,3,29,2), controlled_by(30,16,7,21,17), controlled_by(30,17,1,11,27), controlled_by(30,19,25,13,5), controlled_by(30,2,13,27,19), controlled_by(30,2,4,12,3), controlled_by(30,21,22,29,22), controlled_by(30,22,17,8,23), controlled_by(30,7,23,17,7), controlled_by(4,13,21,6,18), controlled_by(4,15,29,19,11), controlled_by(4,16,6,15,11), controlled_by(4,18,5,2,21), controlled_by(4,19,6,3,11), controlled_by(4,23,1,21,24), controlled_by(4,25,2,8,19), controlled_by(4,26,29,29,2), controlled_by(4,27,27,28,25), controlled_by(4,29,26,8,19), controlled_by(4,5,7,1,25), controlled_by(4,6,29,21,26), controlled_by(4,8,26,17,6), controlled_by(4,9,20,26,9), controlled_by(4,9,30,13,27), controlled_by(5,1,1,22,17), controlled_by(5,13,20,6,29), controlled_by(5,14,13,27,2), controlled_by(5,14,8,11,25), controlled_by(5,15,12,11,10), controlled_by(5,16,24,28,7), controlled_by(5,21,20,22,8), controlled_by(5,24,16,20,9), controlled_by(5,25,29,25,16), controlled_by(5,26,29,10,25), controlled_by(5,29,29,25,6), controlled_by(5,3,26,30,1), controlled_by(5,4,21,24,21), controlled_by(5,9,25,18,26), controlled_by(5,9,4,30,13), controlled_by(6,10,9,23,22), controlled_by(6,12,21,13,2), controlled_by(6,19,1,17,29), controlled_by(6,19,3,16,7), controlled_by(6,2,8,22,18), controlled_by(6,22,24,20,29), controlled_by(6,26,15,13,13), controlled_by(6,28,22,17,26), controlled_by(6,28,26,1,10), controlled_by(6,3,11,14,19), controlled_by(6,30,14,22,1), controlled_by(6,30,7,10,26), 
controlled_by(6,4,1,14,26), controlled_by(6,5,10,8,5), controlled_by(6,7,23,26,29), controlled_by(7,1,3,1,14), controlled_by(7,10,15,5,4), controlled_by(7,11,30,29,6), controlled_by(7,12,19,22,1), controlled_by(7,14,13,20,8), controlled_by(7,14,3,10,11), controlled_by(7,15,16,30,28), controlled_by(7,15,25,18,8), controlled_by(7,15,25,19,13), controlled_by(7,2,8,29,20), controlled_by(7,3,17,16,21), controlled_by(7,30,16,2,30), controlled_by(7,4,25,17,10), controlled_by(7,4,9,15,18), controlled_by(7,6,29,5,30), controlled_by(8,1,6,3,17), controlled_by(8,12,12,4,15), controlled_by(8,12,14,13,16), controlled_by(8,12,4,28,13), controlled_by(8,16,21,1,28), controlled_by(8,16,23,25,14), controlled_by(8,18,9,2,3), controlled_by(8,21,14,6,10), controlled_by(8,21,21,12,13), controlled_by(8,22,5,28,28), controlled_by(8,27,6,12,27), controlled_by(8,29,15,9,2), controlled_by(8,3,7,26,20), controlled_by(8,5,2,26,6), controlled_by(8,6,4,20,17), controlled_by(9,12,11,24,13), controlled_by(9,13,3,15,6), controlled_by(9,14,18,14,21), controlled_by(9,18,25,26,30), controlled_by(9,26,6,27,7), controlled_by(9,28,17,18,27), controlled_by(9,28,17,19,20), controlled_by(9,28,20,21,19), controlled_by(9,29,22,12,27), controlled_by(9,4,18,20,25), controlled_by(9,4,5,16,21), controlled_by(9,5,26,28,19), controlled_by(9,7,22,11,11), controlled_by(9,8,17,30,14), controlled_by(9,8,20,19,4), produced_by(p1,10,26,14,4), produced_by(p10,10,22,20,20), produced_by(p100,26,7,29,27), produced_by(p101,25,18,8,21), produced_by(p102,4,11,5,12), produced_by(p103,7,28,17,26), produced_by(p104,14,20,29,29), produced_by(p105,10,14,3,29), produced_by(p106,5,2,11,16), produced_by(p107,22,16,23,18), produced_by(p108,23,21,6,9), produced_by(p109,30,6,22,4), produced_by(p11,21,20,13,19), produced_by(p110,16,26,7,14), produced_by(p111,15,15,9,29), produced_by(p112,4,8,19,5), produced_by(p113,13,21,4,17), produced_by(p114,15,6,24,28), produced_by(p115,22,16,7,6), produced_by(p116,29,13,14,28), 
produced_by(p117,18,27,23,3), produced_by(p118,23,29,9,7), produced_by(p119,13,17,27,8), produced_by(p12,4,29,3,16), produced_by(p120,16,16,13,28), produced_by(p121,6,8,6,12), produced_by(p122,13,29,10,4), produced_by(p123,7,16,1,5), produced_by(p124,28,15,2,15), produced_by(p125,3,25,10,25), produced_by(p126,15,18,24,20), produced_by(p127,26,20,27,4), produced_by(p128,5,1,23,3), produced_by(p129,8,29,14,13), produced_by(p13,26,20,3,10), produced_by(p130,19,23,8,25), produced_by(p131,9,9,21,28), produced_by(p132,15,23,13,17), produced_by(p133,9,14,12,23), produced_by(p134,23,27,12,10), produced_by(p135,16,1,13,13), produced_by(p136,1,6,15,1), produced_by(p137,26,20,13,6), produced_by(p138,5,20,23,5), produced_by(p139,28,13,2,4), produced_by(p14,17,29,8,3), produced_by(p140,5,6,21,5), produced_by(p141,19,24,28,3), produced_by(p142,20,1,13,5), produced_by(p143,1,25,9,24), produced_by(p144,22,15,24,9), produced_by(p145,5,6,15,1), produced_by(p146,17,7,5,7), produced_by(p147,11,28,10,8), produced_by(p148,4,30,12,14), produced_by(p149,15,1,17,4), produced_by(p15,22,5,17,20), produced_by(p150,2,29,1,24), produced_by(p151,15,9,17,29), produced_by(p152,24,10,29,20), produced_by(p153,7,5,20,16), produced_by(p154,3,16,22,14), produced_by(p155,13,1,21,8), produced_by(p156,23,2,22,7), produced_by(p157,25,8,3,26), produced_by(p158,28,3,11,4), produced_by(p159,11,28,2,26), produced_by(p16,18,29,28,26), produced_by(p160,29,23,15,6), produced_by(p161,27,26,13,30), produced_by(p162,11,4,13,16), produced_by(p163,26,25,23,10), produced_by(p164,18,6,17,12), produced_by(p165,5,19,29,2), produced_by(p166,13,10,28,15), produced_by(p167,7,21,11,5), produced_by(p168,13,17,2,10), produced_by(p169,13,14,9,15), produced_by(p17,24,2,20,3), produced_by(p170,9,13,22,5), produced_by(p171,7,7,14,24), produced_by(p172,12,22,6,9), produced_by(p173,2,4,2,14), produced_by(p174,5,29,29,11), produced_by(p175,20,9,8,2), produced_by(p176,17,1,3,21), produced_by(p177,15,3,6,15), 
produced_by(p178,15,19,19,13), produced_by(p179,25,25,29,29), produced_by(p18,23,10,14,6), produced_by(p180,8,4,7,10), produced_by(p181,29,8,23,4), produced_by(p182,29,13,6,18), produced_by(p183,13,5,11,30), produced_by(p184,6,6,20,12), produced_by(p185,8,17,26,15), produced_by(p186,28,7,19,22), produced_by(p187,1,17,12,8), produced_by(p188,12,10,9,11), produced_by(p189,10,24,6,8), produced_by(p19,21,26,24,24), produced_by(p190,6,3,25,19), produced_by(p191,8,27,10,13), produced_by(p192,24,29,16,2), produced_by(p193,8,11,8,5), produced_by(p194,17,26,18,9), produced_by(p195,13,30,9,24), produced_by(p196,1,9,26,10), produced_by(p197,2,23,17,8), produced_by(p198,26,3,18,25), produced_by(p199,22,27,29,15), produced_by(p2,24,15,19,18), produced_by(p20,24,26,2,12), produced_by(p200,25,14,16,24), produced_by(p201,24,15,28,3), produced_by(p202,11,16,3,15), produced_by(p203,7,3,8,29), produced_by(p204,4,26,9,5), produced_by(p205,18,25,12,5), produced_by(p206,20,29,29,11), produced_by(p207,17,27,25,12), produced_by(p208,2,11,27,26), produced_by(p209,25,25,20,27), produced_by(p21,8,4,13,24), produced_by(p210,2,14,11,30), produced_by(p211,9,11,28,12), produced_by(p212,6,6,16,23), produced_by(p213,23,20,20,12), produced_by(p214,10,18,22,27), produced_by(p215,7,16,30,8), produced_by(p216,18,26,25,5), produced_by(p217,12,6,23,5), produced_by(p218,20,4,4,20), produced_by(p219,6,24,1,11), produced_by(p22,24,20,18,7), produced_by(p220,29,8,25,13), produced_by(p221,19,14,16,29), produced_by(p222,24,7,17,30), produced_by(p223,15,8,7,24), produced_by(p224,25,24,28,7), produced_by(p225,29,21,11,18), produced_by(p226,16,7,7,21), produced_by(p227,30,29,23,20), produced_by(p228,7,17,3,25), produced_by(p229,23,18,15,16), produced_by(p23,24,26,27,3), produced_by(p230,17,23,15,1), produced_by(p231,30,13,16,25), produced_by(p232,6,14,23,5), produced_by(p233,26,25,14,3), produced_by(p234,1,21,15,22), produced_by(p235,11,7,12,17), produced_by(p236,15,6,4,7), produced_by(p237,15,18,22,1), 
produced_by(p238,3,28,23,2), produced_by(p239,11,9,18,16), produced_by(p24,16,16,20,9), produced_by(p240,14,2,12,1), produced_by(p241,19,26,25,19), produced_by(p242,16,1,3,26), produced_by(p243,7,14,5,13), produced_by(p244,11,30,12,25), produced_by(p245,17,3,18,11), produced_by(p246,1,2,13,3), produced_by(p247,2,22,10,7), produced_by(p248,16,22,29,4), produced_by(p249,17,23,22,24), produced_by(p25,17,10,4,2), produced_by(p250,23,16,11,21), produced_by(p251,21,15,26,1), produced_by(p252,14,7,18,23), produced_by(p253,9,5,3,1), produced_by(p254,28,7,3,30), produced_by(p255,21,13,28,6), produced_by(p256,26,27,9,12), produced_by(p257,19,22,5,4), produced_by(p258,8,15,16,28), produced_by(p259,22,11,21,27), produced_by(p26,11,17,7,1), produced_by(p260,17,8,19,18), produced_by(p261,4,14,10,1), produced_by(p262,12,5,22,2), produced_by(p263,9,20,7,4), produced_by(p264,8,7,15,18), produced_by(p265,29,11,21,6), produced_by(p266,25,29,25,8), produced_by(p267,9,15,5,18), produced_by(p268,14,15,5,17), produced_by(p269,20,14,10,2), produced_by(p27,5,22,17,20), produced_by(p270,18,23,25,26), produced_by(p271,4,2,29,11), produced_by(p272,8,5,29,28), produced_by(p273,15,11,25,10), produced_by(p274,9,20,9,18), produced_by(p275,26,13,5,10), produced_by(p276,20,9,18,9), produced_by(p277,14,27,10,2), produced_by(p278,20,5,19,23), produced_by(p279,28,18,4,27), produced_by(p28,9,10,1,16), produced_by(p280,22,24,25,7), produced_by(p281,4,19,8,13), produced_by(p282,30,16,30,26), produced_by(p283,21,26,5,10), produced_by(p284,26,22,18,9), produced_by(p285,11,20,2,30), produced_by(p286,16,21,14,5), produced_by(p287,30,9,1,13), produced_by(p288,2,17,11,6), produced_by(p289,28,10,10,27), produced_by(p29,5,5,2,28), produced_by(p290,18,1,22,8), produced_by(p291,26,18,9,13), produced_by(p292,2,18,21,12), produced_by(p293,29,15,3,14), produced_by(p294,27,8,10,18), produced_by(p295,17,3,30,10), produced_by(p296,19,3,7,16), produced_by(p297,12,16,5,21), produced_by(p298,16,18,20,3), 
produced_by(p299,6,20,15,7), produced_by(p3,10,8,1,2), produced_by(p30,24,11,5,9), produced_by(p300,8,28,10,6), produced_by(p301,4,12,12,30), produced_by(p302,19,13,9,27), produced_by(p303,15,30,7,4), produced_by(p304,2,5,11,6), produced_by(p305,21,15,26,28), produced_by(p306,3,8,23,30), produced_by(p307,27,29,28,4), produced_by(p308,26,7,2,21), produced_by(p309,18,5,20,28), produced_by(p31,7,1,3,22), produced_by(p310,17,20,25,24), produced_by(p311,20,23,27,21), produced_by(p312,27,29,18,17), produced_by(p313,6,14,7,8), produced_by(p314,13,29,29,9), produced_by(p315,27,26,5,15), produced_by(p316,2,6,5,11), produced_by(p317,2,17,8,10), produced_by(p318,6,24,3,25), produced_by(p319,16,21,8,13), produced_by(p32,8,23,23,17), produced_by(p320,20,25,21,25), produced_by(p321,30,27,24,12), produced_by(p322,17,14,13,6), produced_by(p323,9,17,12,2), produced_by(p324,14,8,12,15), produced_by(p325,16,11,16,22), produced_by(p326,5,11,8,20), produced_by(p327,1,7,2,12), produced_by(p328,24,23,28,15), produced_by(p329,11,13,27,20), produced_by(p33,2,26,18,12), produced_by(p330,26,1,17,26), produced_by(p331,17,28,27,30), produced_by(p332,5,30,6,21), produced_by(p333,11,13,4,15), produced_by(p334,23,3,26,16), produced_by(p335,2,20,27,25), produced_by(p336,4,17,9,14), produced_by(p337,29,27,25,17), produced_by(p338,27,11,12,5), produced_by(p339,30,1,26,27), produced_by(p34,12,16,4,16), produced_by(p340,30,23,17,2), produced_by(p341,6,12,8,20), produced_by(p342,6,4,5,7), produced_by(p343,23,24,1,26), produced_by(p344,10,2,1,30), produced_by(p345,28,26,16,17), produced_by(p346,28,28,21,28), produced_by(p347,28,9,24,19), produced_by(p348,1,2,21,6), produced_by(p349,13,28,26,10), produced_by(p35,29,20,28,29), produced_by(p350,23,22,17,7), produced_by(p351,15,9,2,16), produced_by(p352,2,25,16,22), produced_by(p353,20,1,8,17), produced_by(p354,20,20,6,9), produced_by(p355,28,29,20,29), produced_by(p356,30,10,26,4), produced_by(p357,29,13,14,14), produced_by(p358,27,22,20,11), 
produced_by(p359,30,22,19,2), produced_by(p36,29,20,15,3), produced_by(p360,8,4,23,19), produced_by(p361,26,22,5,16), produced_by(p362,3,11,16,1), produced_by(p363,1,5,21,1), produced_by(p364,6,8,4,5), produced_by(p365,21,17,18,17), produced_by(p366,8,29,19,30), produced_by(p367,12,29,23,19), produced_by(p368,2,7,7,20), produced_by(p369,28,4,27,30), produced_by(p37,25,8,1,10), produced_by(p370,6,12,22,6), produced_by(p371,9,4,6,14), produced_by(p372,12,2,10,24), produced_by(p373,18,19,2,18), produced_by(p374,18,12,9,29), produced_by(p375,11,1,18,12), produced_by(p376,29,16,23,26), produced_by(p377,19,19,17,24), produced_by(p378,1,1,30,9), produced_by(p379,4,27,14,7), produced_by(p38,18,27,19,16), produced_by(p380,28,24,30,8), produced_by(p381,12,23,17,21), produced_by(p382,5,25,12,15), produced_by(p383,17,29,18,15), produced_by(p384,6,11,2,25), produced_by(p385,29,10,18,21), produced_by(p386,10,9,21,6), produced_by(p387,28,5,12,25), produced_by(p388,20,4,24,23), produced_by(p389,26,10,14,22), produced_by(p39,27,13,8,26), produced_by(p390,26,25,28,12), produced_by(p391,15,16,26,20), produced_by(p392,18,19,6,8), produced_by(p393,29,16,21,30), produced_by(p394,24,11,27,13), produced_by(p395,15,9,8,4), produced_by(p396,4,1,27,21), produced_by(p397,3,2,13,28), produced_by(p398,18,10,10,2), produced_by(p399,17,27,13,26), produced_by(p4,28,22,25,23), produced_by(p40,5,22,12,6), produced_by(p400,8,11,26,6), produced_by(p401,26,16,27,11), produced_by(p402,26,24,24,3), produced_by(p403,24,1,28,27), produced_by(p404,23,16,9,25), produced_by(p405,17,13,15,26), produced_by(p406,15,16,27,1), produced_by(p407,12,2,19,19), produced_by(p408,12,14,16,7), produced_by(p409,21,13,9,16), produced_by(p41,17,21,17,20), produced_by(p410,28,2,10,21), produced_by(p411,24,8,9,17), produced_by(p412,23,17,3,2), produced_by(p413,30,17,27,14), produced_by(p414,24,16,6,6), produced_by(p415,9,16,16,20), produced_by(p416,29,24,18,19), produced_by(p417,28,26,27,25), produced_by(p418,28,6,15,13), 
produced_by(p419,5,15,29,28), produced_by(p42,6,21,28,26), produced_by(p420,1,24,21,30), produced_by(p421,2,9,5,26), produced_by(p422,24,11,23,2), produced_by(p423,26,30,13,17), produced_by(p424,23,30,27,20), produced_by(p425,18,15,6,7), produced_by(p426,21,12,19,25), produced_by(p427,26,10,14,19), produced_by(p428,3,26,10,26), produced_by(p429,5,15,21,28), produced_by(p43,2,25,25,23), produced_by(p430,17,13,22,4), produced_by(p431,5,4,12,27), produced_by(p432,26,9,9,13), produced_by(p433,23,14,19,5), produced_by(p434,18,29,22,5), produced_by(p435,8,27,23,10), produced_by(p436,23,3,6,19), produced_by(p437,17,18,16,25), produced_by(p438,23,29,28,27), produced_by(p439,3,10,23,20), produced_by(p44,14,1,25,8), produced_by(p440,10,23,2,2), produced_by(p441,29,12,29,16), produced_by(p442,10,20,12,10), produced_by(p443,8,5,19,30), produced_by(p444,7,16,10,15), produced_by(p445,26,18,9,18), produced_by(p446,16,6,6,10), produced_by(p447,7,20,29,16), produced_by(p448,13,22,10,3), produced_by(p449,25,8,18,5), produced_by(p45,8,17,18,17), produced_by(p450,19,29,6,26), produced_by(p451,25,24,18,1), produced_by(p452,2,27,15,19), produced_by(p453,14,15,6,22), produced_by(p454,13,3,1,19), produced_by(p455,22,30,5,4), produced_by(p456,21,6,28,8), produced_by(p457,13,15,12,1), produced_by(p458,6,17,18,22), produced_by(p459,2,27,23,25), produced_by(p46,13,28,3,1), produced_by(p460,24,29,13,29), produced_by(p461,14,10,20,26), produced_by(p462,12,21,6,26), produced_by(p463,12,2,21,24), produced_by(p464,7,19,1,19), produced_by(p465,3,4,11,30), produced_by(p466,12,21,22,6), produced_by(p467,17,6,22,10), produced_by(p468,26,5,1,1), produced_by(p469,6,20,18,10), produced_by(p47,10,2,27,15), produced_by(p470,2,24,5,13), produced_by(p471,25,25,29,2), produced_by(p472,13,29,12,8), produced_by(p473,3,15,29,6), produced_by(p474,5,12,11,21), produced_by(p475,17,3,23,5), produced_by(p476,29,23,5,26), produced_by(p477,4,23,5,6), produced_by(p478,8,9,10,24), produced_by(p479,26,8,17,8), 
produced_by(p48,23,8,12,9), produced_by(p480,7,29,7,1), produced_by(p481,5,6,28,1), produced_by(p482,17,1,21,26), produced_by(p483,3,5,30,23), produced_by(p484,19,26,10,23), produced_by(p485,10,15,20,17), produced_by(p486,15,29,11,10), produced_by(p487,29,19,10,27), produced_by(p488,9,16,19,13), produced_by(p489,21,16,13,30), produced_by(p49,21,21,28,26), produced_by(p490,16,26,25,10), produced_by(p491,22,16,24,11), produced_by(p492,3,4,25,13), produced_by(p493,18,14,21,24), produced_by(p494,4,23,26,2), produced_by(p495,12,27,28,20), produced_by(p496,12,16,3,25), produced_by(p497,24,7,16,9), produced_by(p498,2,10,11,24), produced_by(p499,17,4,26,19), produced_by(p5,13,28,18,25), produced_by(p50,3,25,14,4), produced_by(p500,7,20,1,16), produced_by(p501,25,14,2,28), produced_by(p502,6,27,22,17), produced_by(p503,15,19,7,26), produced_by(p504,27,1,12,20), produced_by(p505,7,27,20,1), produced_by(p506,28,30,24,6), produced_by(p507,26,19,25,2), produced_by(p508,30,17,10,24), produced_by(p509,30,3,21,6), produced_by(p51,11,8,26,25), produced_by(p510,29,12,14,5), produced_by(p511,23,20,30,19), produced_by(p512,12,4,30,19), produced_by(p513,22,19,19,20), produced_by(p514,11,4,25,6), produced_by(p515,14,11,29,13), produced_by(p516,20,30,6,11), produced_by(p517,2,18,8,22), produced_by(p518,22,22,26,14), produced_by(p519,3,18,24,15), produced_by(p52,30,13,24,29), produced_by(p520,21,15,3,12), produced_by(p521,25,13,23,5), produced_by(p522,16,10,2,29), produced_by(p523,12,1,3,1), produced_by(p524,30,30,4,24), produced_by(p525,17,11,15,8), produced_by(p526,24,11,13,27), produced_by(p527,28,28,11,10), produced_by(p528,12,5,13,7), produced_by(p529,9,6,3,24), produced_by(p53,21,3,15,26), produced_by(p530,7,5,14,18), produced_by(p531,27,8,11,26), produced_by(p532,7,14,19,23), produced_by(p533,24,26,23,18), produced_by(p534,6,27,6,25), produced_by(p535,25,8,4,28), produced_by(p536,12,16,26,20), produced_by(p537,13,29,5,19), produced_by(p538,25,10,7,21), produced_by(p539,9,17,16,15), 
produced_by(p54,30,9,18,2), produced_by(p540,30,27,29,15), produced_by(p541,22,21,24,19), produced_by(p542,10,29,13,26), produced_by(p543,6,8,23,9), produced_by(p544,23,19,20,28), produced_by(p545,9,16,16,3), produced_by(p546,25,14,23,25), produced_by(p547,22,30,9,13), produced_by(p548,18,8,28,9), produced_by(p549,20,21,19,29), produced_by(p55,10,6,8,2), produced_by(p550,12,1,24,17), produced_by(p551,30,9,18,15), produced_by(p552,27,7,12,5), produced_by(p553,23,27,29,9), produced_by(p554,3,21,4,24), produced_by(p555,20,12,7,8), produced_by(p556,11,26,8,1), produced_by(p557,8,19,21,19), produced_by(p558,19,15,28,11), produced_by(p559,23,15,25,11), produced_by(p56,14,19,2,26), produced_by(p560,13,6,7,5), produced_by(p561,24,5,6,26), produced_by(p562,25,9,12,6), produced_by(p563,12,10,5,15), produced_by(p564,5,5,15,4), produced_by(p565,23,27,15,3), produced_by(p566,11,12,13,25), produced_by(p567,18,7,27,30), produced_by(p568,12,3,27,6), produced_by(p569,29,2,23,15), produced_by(p57,1,22,21,3), produced_by(p570,2,26,13,5), produced_by(p571,5,17,19,1), produced_by(p572,21,25,27,5), produced_by(p573,22,11,8,24), produced_by(p574,14,20,11,1), produced_by(p575,27,7,30,30), produced_by(p576,2,26,27,30), produced_by(p577,19,12,15,20), produced_by(p578,7,27,25,4), produced_by(p579,5,5,4,26), produced_by(p58,8,26,29,19), produced_by(p580,30,30,22,21), produced_by(p581,2,29,14,15), produced_by(p582,19,16,15,7), produced_by(p583,23,7,28,24), produced_by(p584,24,25,15,13), produced_by(p585,6,29,24,4), produced_by(p586,17,10,7,22), produced_by(p587,15,3,9,14), produced_by(p588,24,30,26,26), produced_by(p589,29,1,2,9), produced_by(p59,25,16,5,24), produced_by(p590,17,17,7,1), produced_by(p591,15,4,24,30), produced_by(p592,20,8,4,25), produced_by(p593,29,28,21,15), produced_by(p594,29,19,28,13), produced_by(p595,21,28,18,7), produced_by(p596,28,13,2,18), produced_by(p597,6,25,18,22), produced_by(p598,3,24,22,17), produced_by(p599,27,7,17,17), produced_by(p6,22,28,5,29), 
produced_by(p60,20,28,22,11), produced_by(p600,6,12,3,4), produced_by(p601,9,15,11,8), produced_by(p602,4,8,12,24), produced_by(p603,6,30,22,25), produced_by(p604,4,15,4,1), produced_by(p605,10,21,22,12), produced_by(p606,6,5,29,24), produced_by(p607,3,7,2,9), produced_by(p608,18,5,12,19), produced_by(p609,11,14,18,14), produced_by(p61,1,29,28,22), produced_by(p610,14,21,30,19), produced_by(p611,20,21,5,16), produced_by(p612,6,30,16,15), produced_by(p613,12,30,18,17), produced_by(p614,26,8,10,29), produced_by(p615,6,12,7,24), produced_by(p616,8,10,4,18), produced_by(p617,24,13,2,7), produced_by(p618,3,23,17,15), produced_by(p619,13,13,30,10), produced_by(p62,7,15,15,17), produced_by(p620,12,7,16,23), produced_by(p621,6,4,1,24), produced_by(p622,11,10,22,17), produced_by(p623,13,20,2,12), produced_by(p624,21,27,30,14), produced_by(p625,9,23,12,11), produced_by(p626,7,20,25,19), produced_by(p627,2,16,21,5), produced_by(p628,23,6,19,20), produced_by(p629,1,19,13,12), produced_by(p63,21,22,10,26), produced_by(p630,21,26,20,25), produced_by(p631,7,13,7,28), produced_by(p632,9,28,11,17), produced_by(p633,20,15,19,26), produced_by(p634,4,14,6,28), produced_by(p635,21,26,24,13), produced_by(p636,24,13,25,24), produced_by(p637,23,29,27,5), produced_by(p638,17,8,30,23), produced_by(p639,20,28,20,28), produced_by(p64,3,4,21,3), produced_by(p640,25,23,6,6), produced_by(p641,7,25,1,2), produced_by(p642,30,28,29,20), produced_by(p643,16,15,25,1), produced_by(p644,19,11,24,11), produced_by(p645,9,13,8,17), produced_by(p646,20,7,10,2), produced_by(p647,26,21,29,20), produced_by(p648,13,27,25,11), produced_by(p649,13,17,13,12), produced_by(p65,25,3,28,24), produced_by(p650,6,3,23,13), produced_by(p651,17,17,13,5), produced_by(p652,19,29,8,20), produced_by(p653,11,15,6,22), produced_by(p654,13,7,23,8), produced_by(p655,28,14,19,10), produced_by(p656,10,5,21,22), produced_by(p657,21,25,25,18), produced_by(p658,27,17,1,6), produced_by(p659,26,13,2,6), produced_by(p66,29,26,4,23), 
produced_by(p660,3,9,25,5), produced_by(p67,3,8,17,23), produced_by(p68,28,30,25,28), produced_by(p69,28,22,11,5), produced_by(p7,16,23,6,15), produced_by(p70,6,26,13,18), produced_by(p71,17,22,13,11), produced_by(p72,17,25,6,3), produced_by(p73,28,3,27,26), produced_by(p74,20,30,10,22), produced_by(p75,30,18,14,27), produced_by(p76,18,8,16,7), produced_by(p77,29,26,3,27), produced_by(p78,13,15,14,30), produced_by(p79,29,19,2,15), produced_by(p8,7,14,21,9), produced_by(p80,13,7,18,10), produced_by(p81,9,14,27,28), produced_by(p82,5,29,12,4), produced_by(p83,16,25,22,25), produced_by(p84,25,7,24,15), produced_by(p85,25,26,3,7), produced_by(p86,11,9,28,9), produced_by(p87,27,30,23,1), produced_by(p88,28,2,11,7), produced_by(p89,15,29,26,12), produced_by(p9,9,4,12,3), produced_by(p90,27,29,7,5), produced_by(p91,24,29,21,10), produced_by(p92,27,14,24,21), produced_by(p93,2,27,28,12), produced_by(p94,5,17,12,23), produced_by(p95,16,26,23,14), produced_by(p96,28,25,12,4), produced_by(p97,16,29,15,4), produced_by(p98,28,22,8,13), produced_by(p99,20,21,14,16)}
"""
| 58.568505
| 32,854
| 0.729398
| 15,557
| 65,831
| 2.94382
| 0.045317
| 0.288228
| 0.017818
| 0.002839
| 0.998493
| 0.998493
| 0.998493
| 0.9981
| 0.9981
| 0.951307
| 0
| 0.321118
| 0.034178
| 65,831
| 1,123
| 32,855
| 58.620659
| 0.399176
| 0
| 0
| 0.001789
| 1
| 0.000894
| 0.999514
| 0.964424
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 13
|
58bc969978b3778ae19f1ef9a77db94aad6509f3
| 6,516
|
py
|
Python
|
tests/plugins/ibm_cloud_iam_test.py
|
shamilpatel25/detect-secrets
|
b0f15a1cef89efbb9e521215e507b20cd759912d
|
[
"Apache-2.0"
] | null | null | null |
tests/plugins/ibm_cloud_iam_test.py
|
shamilpatel25/detect-secrets
|
b0f15a1cef89efbb9e521215e507b20cd759912d
|
[
"Apache-2.0"
] | null | null | null |
tests/plugins/ibm_cloud_iam_test.py
|
shamilpatel25/detect-secrets
|
b0f15a1cef89efbb9e521215e507b20cd759912d
|
[
"Apache-2.0"
] | null | null | null |
import pytest
import responses
from detect_secrets.core.constants import VerifiedResult
from detect_secrets.plugins.ibm_cloud_iam import IbmCloudIamDetector
CLOUD_IAM_KEY = 'abcd1234abcd1234abcd1234ABCD1234ABCD1234--__'
CLOUD_IAM_KEY_BYTES = b'abcd1234abcd1234abcd1234ABCD1234ABCD1234--__'
class TestIbmCloudIamDetector(object):
    """Tests for IbmCloudIamDetector.

    Covers regex-based flagging of IBM Cloud IAM keys in source lines
    (test_analyze_line) and verification of candidate secrets against the
    IAM token-introspection endpoint (test_verify_*).
    """

    # Single source of truth for the endpoint mocked by every verify test.
    IAM_INTROSPECT_URL = 'https://iam.cloud.ibm.com/identity/introspect'

    @classmethod
    def _mock_introspect_response(cls, **response_kwargs):
        # Register a canned POST response for the IAM introspection endpoint.
        # Must be called inside a @responses.activate context.
        responses.add(responses.POST, cls.IAM_INTROSPECT_URL, **response_kwargs)

    @pytest.mark.parametrize(
        'payload, should_flag',
        [
            ('ibm-cloud_api_key: {cloud_iam_key}'.format(cloud_iam_key=CLOUD_IAM_KEY), True),
            ('apikeyid: {cloud_iam_key}'.format(cloud_iam_key=CLOUD_IAM_KEY), True),
            ('ibm_cloud_iam-key : {cloud_iam_key}'.format(cloud_iam_key=CLOUD_IAM_KEY), True),
            ('IBM-API-KEY : "{cloud_iam_key}"'.format(cloud_iam_key=CLOUD_IAM_KEY), True),
            ('"iam_api_key" : "{cloud_iam_key}"'.format(cloud_iam_key=CLOUD_IAM_KEY), True),
            ('cloud-api-key: "{cloud_iam_key}"'.format(cloud_iam_key=CLOUD_IAM_KEY), True),
            ('"iam-password": "{cloud_iam_key}"'.format(cloud_iam_key=CLOUD_IAM_KEY), True),
            ('CLOUD_IAM_API_KEY:"{cloud_iam_key}"'.format(cloud_iam_key=CLOUD_IAM_KEY), True),
            ('ibm-cloud-key:{cloud_iam_key}'.format(cloud_iam_key=CLOUD_IAM_KEY), True),
            ('ibm_key:"{cloud_iam_key}"'.format(cloud_iam_key=CLOUD_IAM_KEY), True),
            ('auth:"{cloud_iam_key}"'.format(cloud_iam_key=CLOUD_IAM_KEY), True),
            (
                '"ibm_cloud_iam_api_key":"{cloud_iam_key}"'.format(
                    cloud_iam_key=CLOUD_IAM_KEY,
                ), True,
            ),
            ('ibm_cloud_iamapikey= {cloud_iam_key}'.format(cloud_iam_key=CLOUD_IAM_KEY), True),
            ('ibm_cloud_api_key= "{cloud_iam_key}"'.format(cloud_iam_key=CLOUD_IAM_KEY), True),
            ('IBMCLOUDIAMAPIKEY={cloud_iam_key}'.format(cloud_iam_key=CLOUD_IAM_KEY), True),
            ('cloud_iam_api_key="{cloud_iam_key}"'.format(cloud_iam_key=CLOUD_IAM_KEY), True),
            ('ibm_api_key := {cloud_iam_key}'.format(cloud_iam_key=CLOUD_IAM_KEY), True),
            ('"ibm-iam_key" := "{cloud_iam_key}"'.format(cloud_iam_key=CLOUD_IAM_KEY), True),
            (
                '"X-Require-Whisk-Auth" = "{cloud_iam_key}"'.format(
                    cloud_iam_key=CLOUD_IAM_KEY,
                ), True,
            ),
            (
                '"ibm_cloud_iam_api_key":= "{cloud_iam_key}"'.format(
                    cloud_iam_key=CLOUD_IAM_KEY,
                ), True,
            ),
            ('ibm-cloud_api_key:={cloud_iam_key}'.format(cloud_iam_key=CLOUD_IAM_KEY), True),
            ('"cloud_iam_api_key":="{cloud_iam_key}"'.format(cloud_iam_key=CLOUD_IAM_KEY), True),
            ('ibm_iam_key:= "{cloud_iam_key}"'.format(cloud_iam_key=CLOUD_IAM_KEY), True),
            ('ibm_api_key:="{cloud_iam_key}"'.format(cloud_iam_key=CLOUD_IAM_KEY), True),
            ('ibm_password = "{cloud_iam_key}"'.format(cloud_iam_key=CLOUD_IAM_KEY), True),
            ('test_apikey = "{cloud_iam_key}"'.format(cloud_iam_key=CLOUD_IAM_KEY), True),
            ('ibm-cloud-pwd = {cloud_iam_key}'.format(cloud_iam_key=CLOUD_IAM_KEY), True),
            ('ibm-cloud-creds = {cloud_iam_key}'.format(cloud_iam_key=CLOUD_IAM_KEY), True),
            ('CREDENTIALS = {cloud_iam_key}'.format(cloud_iam_key=CLOUD_IAM_KEY), True),
            ('apikey:{cloud_iam_key}'.format(cloud_iam_key=CLOUD_IAM_KEY), True),
            ('IAMAuthenticator("{cloud_iam_key}")'.format(cloud_iam_key=CLOUD_IAM_KEY), True),
            ('.set("apikey", "{cloud_iam_key}")'.format(cloud_iam_key=CLOUD_IAM_KEY), True),
            ('iam_api_key="%s" % IBM_IAM_API_KEY_ENV', False),
            ('CLOUD_APIKEY: "insert_key_here"', False),
            ('cloud-iam-key:=afakekey', False),
            ('fake-cloud-iam-key= "not_long_enough"', False),
        ],
    )
    def test_analyze_line(self, payload, should_flag):
        """A flagged payload yields exactly one secret; otherwise none."""
        logic = IbmCloudIamDetector()
        output = logic.analyze_line(payload, 1, 'mock_filename')
        assert len(output) == (1 if should_flag else 0)

    @responses.activate
    def test_verify_invalid_secret(self):
        self._mock_introspect_response(
            status=200, json={'active': False},
            headers={'content-type': 'application/json'},
        )
        assert IbmCloudIamDetector().verify(CLOUD_IAM_KEY) == VerifiedResult.VERIFIED_FALSE

    @responses.activate
    def test_verify_valid_secret(self):
        self._mock_introspect_response(
            status=200, json={'active': True},
            headers={'content-type': 'application/json'},
        )
        assert IbmCloudIamDetector().verify(CLOUD_IAM_KEY) == VerifiedResult.VERIFIED_TRUE

    @responses.activate
    def test_verify_invalid_secret_bytes(self):
        # Same as test_verify_invalid_secret, but the candidate is bytes.
        self._mock_introspect_response(
            status=200, json={'active': False},
            headers={'content-type': 'application/json'},
        )
        assert IbmCloudIamDetector().verify(CLOUD_IAM_KEY_BYTES) == VerifiedResult.VERIFIED_FALSE

    @responses.activate
    def test_verify_valid_secret_bytes(self):
        self._mock_introspect_response(
            status=200, json={'active': True},
            headers={'content-type': 'application/json'},
        )
        assert IbmCloudIamDetector().verify(CLOUD_IAM_KEY_BYTES) == VerifiedResult.VERIFIED_TRUE

    @responses.activate
    def test_verify_bad_response(self):
        # An HTTP error from the endpoint must not be treated as a verdict.
        self._mock_introspect_response(status=404)
        assert IbmCloudIamDetector().verify(CLOUD_IAM_KEY_BYTES) == VerifiedResult.UNVERIFIED

    @responses.activate
    def test_verify_invalid_payload(self):
        # A well-formed JSON body missing the 'active' field is inconclusive.
        self._mock_introspect_response(
            status=200, json={'not-the-field': 'we expect'},
            headers={'content-type': 'application/json'},
        )
        assert IbmCloudIamDetector().verify(CLOUD_IAM_KEY) == VerifiedResult.UNVERIFIED

    @responses.activate
    def test_verify_payload_not_json(self):
        # A non-JSON body is inconclusive rather than an error.
        self._mock_introspect_response(
            status=200, body='not json', headers={'content-type': 'not/json'},
        )
        assert IbmCloudIamDetector().verify(CLOUD_IAM_KEY) == VerifiedResult.UNVERIFIED
| 48.626866
| 97
| 0.649325
| 802
| 6,516
| 4.891521
| 0.120948
| 0.232475
| 0.302829
| 0.178435
| 0.812134
| 0.812134
| 0.810349
| 0.78129
| 0.748407
| 0.728014
| 0
| 0.01241
| 0.208564
| 6,516
| 133
| 98
| 48.992481
| 0.748303
| 0
| 0
| 0.3125
| 0
| 0
| 0.279619
| 0.082413
| 0
| 0
| 0
| 0
| 0.071429
| 1
| 0.071429
| false
| 0.017857
| 0.035714
| 0
| 0.116071
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
4515e6acb5957713588ee1a388cc65aa1b083808
| 832
|
py
|
Python
|
easydata/parsers/__init__.py
|
easydatapy/easydata
|
5e76bf7fc9f368065a82ccc99fca54b17f7e91bd
|
[
"BSD-3-Clause"
] | 6
|
2020-09-06T19:06:01.000Z
|
2020-09-09T23:19:21.000Z
|
easydata/parsers/__init__.py
|
sitegroove/easydata
|
0e347990027b9f6cc06a1072511197f1adb50e5c
|
[
"BSD-3-Clause"
] | null | null | null |
easydata/parsers/__init__.py
|
sitegroove/easydata
|
0e347990027b9f6cc06a1072511197f1adb50e5c
|
[
"BSD-3-Clause"
] | 1
|
2021-07-22T17:59:20.000Z
|
2021-07-22T17:59:20.000Z
|
from easydata.parsers.base import * # noqa: F401 F403
from easydata.parsers.bool import * # noqa: F401 F403
from easydata.parsers.choice import * # noqa: F401 F403
from easydata.parsers.clause import * # noqa: F401 F403
from easydata.parsers.data import * # noqa: F401 F403
from easydata.parsers.desc import * # noqa: F401 F403
from easydata.parsers.dict import * # noqa: F401 F403
from easydata.parsers.drop import * # noqa: F401 F403
from easydata.parsers.email import * # noqa: F401 F403
from easydata.parsers.list import * # noqa: F401 F403
from easydata.parsers.number import * # noqa: F401 F403
from easydata.parsers.price import * # noqa: F401 F403
from easydata.parsers.text import * # noqa: F401 F403
from easydata.parsers.time import * # noqa: F401 F403
from easydata.parsers.url import * # noqa: F401 F403
| 52
| 56
| 0.747596
| 120
| 832
| 5.183333
| 0.183333
| 0.289389
| 0.458199
| 0.434084
| 0.832797
| 0.832797
| 0.832797
| 0
| 0
| 0
| 0
| 0.129125
| 0.16226
| 832
| 15
| 57
| 55.466667
| 0.763271
| 0.28726
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 8
|
18b055283b1d7b40cd3708ac48c135fde5a857fe
| 57,296
|
py
|
Python
|
ModelEditor/ConvertOsim2Biorbd.py
|
pariterre/ViolinOptimalControl
|
b7037d051a73f2c6cf5815e9d2269ea8c2e11993
|
[
"MIT"
] | null | null | null |
ModelEditor/ConvertOsim2Biorbd.py
|
pariterre/ViolinOptimalControl
|
b7037d051a73f2c6cf5815e9d2269ea8c2e11993
|
[
"MIT"
] | 1
|
2020-04-16T02:21:49.000Z
|
2020-04-16T02:21:49.000Z
|
ModelEditor/ConvertOsim2Biorbd.py
|
pariterre/ViolinOptimalControl
|
b7037d051a73f2c6cf5815e9d2269ea8c2e11993
|
[
"MIT"
] | 1
|
2019-11-18T16:31:16.000Z
|
2019-11-18T16:31:16.000Z
|
# coding: utf-8
from lxml import etree
import inspect
import numpy as np
from numpy.linalg import inv
def index_go_to(_root, _tag, _attrib="False", _attribvalue="", index=""):
    """Return the bracketed child-index path (e.g. ``"[2][0]"``) from ``_root``
    to the first descendant whose tag is ``_tag``.

    When ``_attrib`` is given (anything but the sentinel ``"False"``), the
    element must additionally carry ``_attrib == _attribvalue``.

    :param _root: element whose subtree is searched (iterated breadth-first:
        direct children first, then each child's subtree).
    :param _tag: tag name to look for.
    :param _attrib: attribute name to match, or the sentinel ``"False"``.
    :param _attribvalue: required attribute value when ``_attrib`` is set.
    :param index: path prefix accumulated across recursive calls.
    :return: the index path string, ``""`` if a plain string is encountered
        among the children, or ``None`` when no match exists.
    """
    i = 0
    for _child in _root:
        if type(_child) == str:
            return ""
        if _attrib != "False":
            if _child.tag == _tag and _child.get(_attrib) == _attribvalue:
                return index + "[{}]".format(i)
            i += 1
        else:
            if _child.tag == _tag:
                return index + "[{}]".format(i)
            i += 1
    # Not found among the direct children: recurse into each child's subtree.
    j = 0
    for _child in _root:
        # BUGFIX: the original recursed TWICE per child (once to test, once to
        # return), doubling the work at every tree level -- exponential in the
        # depth of the document. Compute the result once and reuse it.
        found = index_go_to(_child, _tag, _attrib, _attribvalue, index + "[{}]".format(j))
        if found:
            return found
        j += 1
    return None
def retrieve_name(var):
    """
    Get the name under which *var* is bound, scanning call frames from the
    outermost frame inwards.

    :param var: object whose bound name is wanted (matched by identity).
    :return: the first matching variable name, or None when none is found.
    """
    for frame_info in reversed(inspect.stack()):
        frame_locals = frame_info.frame.f_locals
        match = next(
            (bound_name for bound_name, bound_value in frame_locals.items() if bound_value is var),
            None,
        )
        if match is not None:
            return match
def go_to(_root, _tag, _attrib="False", _attribvalue=""):
    """Return the element under ``_root`` matching ``_tag`` (optionally
    filtered on ``_attrib == _attribvalue``), or the string ``"None"`` when
    no such element exists.

    NOTE(review): resolution works by rebuilding an indexing expression
    (e.g. ``_root[0][2]``) via retrieve_name() and eval(); fragile, but kept
    for compatibility with the rest of this converter.
    """
    _index = index_go_to(_root, _tag, _attrib, _attribvalue)
    if _index is None:
        return "None"
    # Reuse the index computed above -- the original recomputed the whole
    # search a second time here for no benefit.
    return eval(retrieve_name(_root) + _index)
def coord_sys(axis):
    """
    Build an orthonormal coordinate system whose z-axis points along *axis*.

    :param axis: 3-component direction ``[a, b, c]``.
    :return: ``([x, y, z], label)`` -- when *axis* is the zero vector or lies
        along a coordinate axis, the identity frame is returned with *label*
        ``""``, ``"x"``, ``"y"`` or ``"z"``; otherwise a frame built around
        *axis* is returned with an empty label.
    """
    identity = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
    a, b, c = axis

    # Degenerate / axis-aligned inputs: fall back to the identity frame.
    if a == 0 and b == 0:
        return identity, ("" if c == 0 else "z")
    if a == 0 and c == 0:
        return identity, "y"
    if b == 0 and c == 0:
        return identity, "x"

    # Seed a vector orthogonal to the requested z-axis.
    if a == 0:
        y_seed = [0, -c / b, 1]
    elif b == 0:
        y_seed = [-c / a, 0, 1]
    else:
        y_seed = [-b / a, 1, 0]

    z_seed = [a, b, c]
    x_seed = np.cross(y_seed, z_seed)
    x_scale = 1 / np.linalg.norm(x_seed)
    z_scale = 1 / np.linalg.norm(z_seed)
    x = [x_scale * component for component in x_seed]
    z = [z_scale * component for component in z_seed]
    y = [component for component in np.cross(z, x)]
    return [x, y, z], ""
class OrthoMatrix:
    """4x4 homogeneous transformation built from a translation vector and up
    to three rotations, each defined by its rotation axis.

    Each rotation matrix is the (transposed) orthonormal frame returned by
    coord_sys() whose z-axis is the requested rotation axis; the combined
    rotation is rot_3 . rot_2 . rot_1.
    """

    def __init__(self, translation=(0, 0, 0), rotation_1=(0, 0, 0), rotation_2=(0, 0, 0), rotation_3=(0, 0, 0)):
        # Defaults are tuples: the original used mutable list defaults, which
        # are shared between calls -- a classic aliasing hazard.
        self.trans = np.transpose(np.array([translation]))  # 3x1 column vector
        self.axe_1 = list(rotation_1)  # axis of rotation for theta_1
        self.axe_2 = list(rotation_2)  # axis of rotation for theta_2
        self.axe_3 = list(rotation_3)  # axis of rotation for theta_3
        self.rot_1 = np.transpose(np.array(coord_sys(self.axe_1)[0]))  # rotation matrix for theta_1
        self.rot_2 = np.transpose(np.array(coord_sys(self.axe_2)[0]))  # rotation matrix for theta_2
        self.rot_3 = np.transpose(np.array(coord_sys(self.axe_3)[0]))  # rotation matrix for theta_3
        self.rotation_matrix = self.rot_3.dot(self.rot_2.dot(self.rot_1))  # combined rotation
        self._assemble()

    def _assemble(self):
        # Rebuild the cached homogeneous matrix [R | t ; 0 0 0 1] from the
        # current rotation and translation (deduplicates the expression the
        # original repeated in four places).
        self.matrix = np.append(
            np.append(self.rotation_matrix, self.trans, axis=1), np.array([[0, 0, 0, 1]]), axis=0,
        )
        return self.matrix

    def get_rotation_matrix(self):
        return self.rotation_matrix

    def set_rotation_matrix(self, rotation_matrix):
        self.rotation_matrix = rotation_matrix

    def get_translation(self):
        return self.trans

    def set_translation(self, trans):
        self.trans = trans

    def get_matrix(self):
        # Reassemble before returning: rotation/translation may have been
        # replaced through the setters since construction.
        return self._assemble()

    def transpose(self):
        """Invert the transformation in place (R -> R^T, t -> -R^T t) and
        return the resulting homogeneous matrix."""
        self.rotation_matrix = np.transpose(self.rotation_matrix)
        self.trans = -self.rotation_matrix.dot(self.trans)
        return self._assemble()

    def product(self, other):
        """Compose this transformation with *other* in place.

        NOTE(review): translations are simply added (t1 + t2) rather than the
        rigid-body composition R1.t2 + t1 -- kept as-is because downstream
        code may depend on this behaviour; confirm before changing.
        """
        self.rotation_matrix = self.rotation_matrix.dot(other.get_rotation_matrix())
        self.trans = self.trans + other.get_translation()
        self._assemble()

    def get_axis(self):
        # Concatenation of the axis labels ("x"/"y"/"z" or "") reported by
        # coord_sys for the three rotation axes.
        return coord_sys(self.axe_1)[1] + coord_sys(self.axe_2)[1] + coord_sys(self.axe_3)[1]
def out_product(rotomatrix_1, rotomatrix_2):
    """Combine two OrthoMatrix objects into a new one: translations are added
    and rotation matrices multiplied (left operand first)."""
    combined = OrthoMatrix()
    summed_translation = rotomatrix_1.get_translation() + rotomatrix_2.get_translation()
    combined.set_translation(summed_translation)
    rotation_product = rotomatrix_1.get_rotation_matrix().dot(rotomatrix_2.get_rotation_matrix())
    combined.set_rotation_matrix(rotation_product)
    combined.get_matrix()  # refresh the cached homogeneous matrix
    return combined
class ConvertedFromOsim2Biorbd3:
def __init__(self, path, originfile, version=3):
self.path = path
self.originfile = originfile
self.version = str(version)
self.data_origin = etree.parse(self.originfile)
self.root = self.data_origin.getroot()
self.file = open(self.path, "w")
self.file.write("version " + self.version + "\n")
self.file.write("\n// File extracted from " + self.originfile)
self.file.write("\n")
def new_text(element):
if type(element) == str:
return element
else:
return element.text
def body_list(_self):
L = []
for _body in _self.data_origin.xpath("/OpenSimDocument/Model/BodySet/objects/Body"):
L.append(_body.get("name"))
return L
def parent_body(_body, _late_body):
ref = new_text(go_to(go_to(self.root, "Body", "name", _body), "parent_body"))
if ref == "None":
return _late_body
else:
return ref
def matrix_inertia(_body):
ref = new_text(go_to(go_to(self.root, "Body", "name", _body), "inertia_xx"))
if ref == "None":
_inertia_str = new_text(go_to(go_to(self.root, "Body", "name", _body), "inertia"))
_inertia = [float(s) for s in _inertia_str.split(" ")]
return _inertia
else:
return [
ref,
new_text(go_to(go_to(self.root, "Body", "name", _body), "inertia_yy")),
new_text(go_to(go_to(self.root, "Body", "name", _body), "inertia_zz")),
new_text(go_to(go_to(self.root, "Body", "name", _body), "inertia_xy")),
new_text(go_to(go_to(self.root, "Body", "name", _body), "inertia_xz")),
new_text(go_to(go_to(self.root, "Body", "name", _body), "inertia_yz")),
]
def muscle_list(_self):
_list = []
for _muscle in _self.data_origin.xpath("/OpenSimDocument/Model/ForceSet/objects/Thelen2003Muscle"):
_list.append(_muscle.get("name"))
return _list
def list_pathpoint_muscle(_muscle):
# return list of viapoint for each muscle
_viapoint = []
# TODO warning for other type of pathpoint
index_pathpoint = index_go_to(go_to(self.root, "Thelen2003Muscle", "name", _muscle), "PathPoint")
list_index = list(index_pathpoint)
tronc_list_index = list_index[: len(list_index) - 2]
tronc_index = "".join(tronc_list_index)
index_root = index_go_to(self.root, "Thelen2003Muscle", "name", _muscle)
index_tronc_total = index_root + tronc_index
i = 0
while True:
try:
child = eval("self.root" + index_tronc_total + str(i) + "]")
_viapoint.append(child.get("name"))
i += 1
except: # Exception as e: print('Error', e)
break
return _viapoint
def list_transform_body(_body):
# return list of transformation for each body
_translation = []
_rotation = []
index_transformation = index_go_to(go_to(self.root, "Body", "name", _body), "TransformAxis")
print(index_transformation, _body)
if index_transformation is None:
return [[], []]
else:
list_index = list(index_transformation)
tronc_list_index = list_index[: len(list_index) - 2]
tronc_index = "".join(tronc_list_index)
index_root = index_go_to(self.root, "Body", "name", _body)
index_tronc_total = index_root + tronc_index
i = 0
while True:
try:
child = eval("self.root" + index_tronc_total + str(i) + "]")
if child.get("name") is not None:
_translation.append(child.get("name")) if child.get("name").find(
"translation"
) == 0 else True
_rotation.append(child.get("name")) if child.get("name").find("rotation") == 0 else True
i += 1
except: # Exception as e: print('Error', e)
break
return [_translation, _rotation]
def list_markers_body(_body):
# return list of transformation for each body
markers = []
index_markers = index_go_to(self.root, "Marker")
if index_markers is None:
return []
else:
list_index = list(index_markers)
tronc_list_index = list_index[: len(list_index) - 2]
tronc_index = "".join(tronc_list_index)
i = 0
while True:
try:
child = eval("self.root" + tronc_index + str(i) + "]").get("name")
which_body = new_text(go_to(go_to(self.root, "Marker", "name", child), "body"))
if which_body == _body:
markers.append(child) if child is not None else True
i += 1
except: # Exception as e: print('Error', e)
break
return markers
def list_dof_body(_body):
# return list of generalizes coordinates for given body
dof = []
index_markers = index_go_to(go_to(self.root, "Body", "name", _body), "Coordinate")
if index_markers is None:
return []
else:
list_index = list(index_markers)
tronc_list_index = list_index[: len(list_index) - 2]
tronc_index = "".join(tronc_list_index)
index_root = index_go_to(self.root, "Body", "name", _body)
index_tronc_total = index_root + tronc_index
i = 0
while True:
try:
new_dof = eval("self.root" + index_tronc_total + str(i) + "]").get("name")
dof.append(new_dof)
i += 1
except: # Exception as e: print('Error', e)
break
return dof
def get_body_pathpoint(_pathpoint):
while True:
try:
if index_go_to(self.root, "PathPoint", "name", _pathpoint) is not None or "":
if index_go_to(go_to(self.root, "PathPoint", "name", _pathpoint), "body") is not None or "":
return new_text(go_to(go_to(self.root, "PathPoint", "name", _pathpoint), "body"))
# opensim version 4.0
if (
index_go_to(go_to(self.root, "PathPoint", "name", _pathpoint), "socket_parent_frame")
is not None
or ""
):
_ref = new_text(
go_to(go_to(self.root, "PathPoint", "name", _pathpoint), "socket_parent_frame")
)
return _ref[9:]
elif index_go_to(self.root, "ConditionalPathPoint", "name", _pathpoint) != "":
if index_go_to(go_to(self.root, "ConditionalPathPoint", "name", _pathpoint), "body") != "":
return new_text(go_to(go_to(self.root, "ConditionalPathPoint", "name", _pathpoint), "body"))
# opensim version 4.0
if (
index_go_to(
go_to(self.root, "ConditionalPathPoint", "name", _pathpoint), "socket_parent_frame"
)
is not None
or ""
):
_ref = new_text(
go_to(
go_to(self.root, "ConditionalPathPoint", "name", _pathpoint), "socket_parent_frame"
)
)
return _ref[9:]
elif index_go_to(self.root, "MovingPathPoint", "name", _pathpoint) != "":
if index_go_to(go_to(self.root, "MovingPathPoint", "name", _pathpoint), "body") != "":
return new_text(go_to(go_to(self.root, "MovingPathPoint", "name", _pathpoint), "body"))
# opensim version 4.0
if (
index_go_to(go_to(self.root, "MovingPathPoint", "name", _pathpoint), "socket_parent_frame")
is not None
or ""
):
_ref = new_text(
go_to(go_to(self.root, "MovingPathPoint", "name", _pathpoint), "socket_parent_frame")
)
return _ref[9:]
else:
return "None"
except Exception as e:
break
def get_pos(_pathpoint):
while True:
try:
if index_go_to(go_to(self.root, "PathPoint", "name", _pathpoint), "location") != "":
return new_text(go_to(go_to(self.root, "PathPoint", "name", _pathpoint), "location"))
elif index_go_to(go_to(self.root, "ConditionalPathPoint", "name", _pathpoint), "location") != "":
return new_text(go_to(go_to(self.root, "ConditionalPathPoint", "name", _pathpoint), "location"))
elif index_go_to(go_to(self.root, "MovingPathPoint", "name", _pathpoint), "location") != "":
return new_text(go_to(go_to(self.root, "MovingPathPoint", "name", _pathpoint), "location"))
else:
return "None"
except Exception as e:
break
def muscle_group_reference(_muscle, ref_group):
for el in ref_group:
if _muscle == el[0]:
return el[1]
else:
return "None"
# Segment definition
body_list_actuated = []
self.write("\n// SEGMENT DEFINITION\n")
def printing_segment(
_body,
_name,
parent_name,
_rotomatrix,
transformation_type="",
_is_dof="None",
true_segment=False,
dof_total_trans="",
):
rt_in_matrix = 1
[
[r11, r12, r13, r14],
[r21, r22, r23, r24],
[r31, r32, r33, r34],
[r41, r42, r43, r44],
] = _rotomatrix.get_matrix().tolist()
[i11, i22, i33, i12, i13, i23] = matrix_inertia(_body)
mass = new_text(go_to(go_to(self.root, "Body", "name", _body), "mass"))
com = new_text(go_to(go_to(self.root, "Body", "name", _body), "mass_center"))
path_mesh_file = new_text(go_to(go_to(self.root, "Body", "name", _body), "mesh_file"))
# TODO add mesh files
# writing data
self.write(" // Segment\n")
self.write(" segment {}\n".format(_name)) if _name != "None" else self.write("")
self.write(" parent {} \n".format(parent_name)) if parent_name != "None" else self.write("")
self.write(" RTinMatrix {}\n".format(rt_in_matrix)) if rt_in_matrix != "None" else self.write("")
self.write(" RT\n")
self.write(
" {} {} {} {}\n"
" {} {} {} {}\n"
" {} {} {} {}\n"
" {} {} {} {}\n".format(
r11, r12, r13, r14, r21, r22, r23, r24, r31, r32, r33, r34, r41, r42, r43, r44
)
)
self.write(
" translations {}\n".format(dof_total_trans)
) if transformation_type == "translation" and dof_total_trans != "" else True
self.write(" rotations {}\n".format("z")) if _is_dof == "True" else True
self.write(" mass {}\n".format(mass)) if true_segment is True else True
self.write(
" inertia\n"
" {} {} {}\n"
" {} {} {}\n"
" {} {} {}\n".format(i11, i12, i13, i12, i22, i23, i13, i23, i33)
) if true_segment is True else True
self.write(" com {}\n".format(com)) if true_segment is True else True
self.write(" //meshfile {}\n".format(path_mesh_file)) if path_mesh_file != "None" else True
self.write(" endsegment\n")
# Division of body in segment depending of transformation
late_body = "None"
for body in body_list(self):
rotomatrix = OrthoMatrix([0, 0, 0])
self.write("\n// Information about {} segment\n".format(body))
parent = parent_body(body, late_body)
list_transform = list_transform_body(body)
rotation_for_markers = rotomatrix.get_rotation_matrix()
# segment data
if list_transform[0] == []:
if list_transform[1] == []:
printing_segment(body, body, parent, rotomatrix, true_segment=True)
body_list_actuated.append(body)
parent = body
else:
body_trans = body + "_translation"
dof_total_trans = ""
j = 0
list_trans_dof = ["x", "y", "z"]
for translation in list_transform[0]:
if translation.find("translation") == 0:
axis_str = new_text(
go_to(
go_to(go_to(self.root, "Body", "name", body), "TransformAxis", "name", translation),
"axis",
)
)
axis = [float(s) for s in axis_str.split(" ")]
rotomatrix.product(OrthoMatrix([0, 0, 0], axis))
is_dof = new_text(
go_to(
go_to(go_to(self.root, "Body", "name", body), "TransformAxis", "name", translation),
"coordinates",
)
)
if is_dof in list_dof_body(body):
dof_total_trans += list_trans_dof[j]
j += 1
trans_str = new_text(go_to(go_to(self.root, "Body", "name", body), "location_in_parent"))
trans_value = []
for s in trans_str.split(" "):
if s != "":
trans_value.append(float(s))
rotomatrix.product(OrthoMatrix(trans_value))
rotation_for_markers = rotomatrix.get_rotation_matrix()
if list_transform[1] == []:
is_true_segment = True
else:
is_true_segment = False
printing_segment(
body, body_trans, parent, rotomatrix, "translation", dof_total_trans, true_segment=is_true_segment
)
parent = body_trans
if list_transform[1] != []:
rotomatrix = OrthoMatrix([0, 0, 0])
for rotation in list_transform[1]:
if rotation.find("rotation") == 0:
axis_str = new_text(
go_to(
go_to(go_to(self.root, "Body", "name", body), "TransformAxis", "name", rotation), "axis"
)
)
axis = [float(s) for s in axis_str.split(" ")]
rotation_axis = rotomatrix.get_axis()
if rotation_axis == "":
rotation_axis = "z"
rotomatrix = OrthoMatrix([0, 0, 0], axis)
is_dof = new_text(
go_to(
go_to(go_to(self.root, "Body", "name", body), "TransformAxis", "name", rotation),
"coordinates",
)
)
if is_dof in list_dof_body(body):
is_dof = "True"
else:
is_dof = "None"
printing_segment(body, body + "_" + rotation, parent, rotomatrix, "rotation", is_dof)
rotation_for_markers = rotation_for_markers.dot(rotomatrix.get_rotation_matrix())
parent = body + "_" + rotation
# segment to cancel axis effects
rotomatrix.set_rotation_matrix(inv(rotation_for_markers))
printing_segment(body, body, parent, rotomatrix, true_segment=True)
parent = body
# Markers
_list_markers = list_markers_body(body)
if _list_markers is not []:
self.write("\n // Markers")
for marker in _list_markers:
position = new_text(go_to(go_to(self.root, "Marker", "name", marker), "location"))
self.write("\n marker {}".format(marker))
self.write("\n parent {}".format(parent))
self.write("\n position {}".format(position))
self.write("\n endmarker\n")
late_body = body
# Muscle definition
self.write("\n// MUSCLE DEFINIION\n")
sort_muscle = []
muscle_ref_group = []
for muscle in muscle_list(self):
viapoint = list_pathpoint_muscle(muscle)
bodies_viapoint = []
for pathpoint in viapoint:
bodies_viapoint.append(get_body_pathpoint(pathpoint))
# it is supposed that viapoints are organized in order
# from the parent body to the child body
body_start = bodies_viapoint[0]
body_end = bodies_viapoint[len(bodies_viapoint) - 1]
sort_muscle.append([body_start, body_end])
muscle_ref_group.append([muscle, body_start + "_to_" + body_end])
# selecting muscle group
group_muscle = []
for ext_muscle in sort_muscle:
if ext_muscle not in group_muscle:
group_muscle.append(ext_muscle)
# print muscle group
for muscle_group in group_muscle:
self.write("\n// {} > {}\n".format(muscle_group[0], muscle_group[1]))
self.write("musclegroup {}\n".format(muscle_group[0] + "_to_" + muscle_group[1]))
self.write(" OriginParent {}\n".format(muscle_group[0]))
self.write(" InsertionParent {}\n".format(muscle_group[1]))
self.write("endmusclegroup\n")
# muscle
for muscle in muscle_list(self):
# muscle data
m_ref = muscle_group_reference(muscle, muscle_ref_group)
if m_ref == muscle_group[0] + "_to_" + muscle_group[1]:
muscle_type = "hillthelen"
state_type = "buchanan"
start_point = list_pathpoint_muscle(muscle)[0]
end_point = list_pathpoint_muscle(muscle)[len(list_pathpoint_muscle(muscle)) - 1]
start_pos = get_pos(start_point)
insert_pos = get_pos(end_point)
opt_length = new_text(
go_to(go_to(self.root, "Thelen2003Muscle", "name", muscle), "optimal_fiber_length")
)
max_force = new_text(
go_to(go_to(self.root, "Thelen2003Muscle", "name", muscle), "max_isometric_force")
)
tendon_slack_length = new_text(
go_to(go_to(self.root, "Thelen2003Muscle", "name", muscle), "tendon_slack_length")
)
pennation_angle = new_text(
go_to(go_to(self.root, "Thelen2003Muscle", "name", muscle), "pennation_angle_at_optimal")
)
pcsa = new_text(go_to(go_to(self.root, "Thelen2003Muscle", "name", muscle), "pcsa"))
max_velocity = new_text(
go_to(go_to(self.root, "Thelen2003Muscle", "name", muscle), "max_contraction_velocity")
)
# print muscle data
self.write("\n muscle {}".format(muscle))
self.write("\n Type {}".format(muscle_type)) if muscle_type != "None" else self.write("")
self.write("\n statetype {}".format(state_type)) if state_type != "None" else self.write(
""
)
self.write("\n musclegroup {}".format(m_ref)) if m_ref != "None" else self.write("")
self.write(
"\n OriginPosition {}".format(start_pos)
) if start_pos != "None" else self.write("")
self.write(
"\n InsertionPosition {}".format(insert_pos)
) if insert_pos != "None" else self.write("")
self.write(
"\n optimalLength {}".format(opt_length)
) if opt_length != "None" else self.write("")
self.write("\n maximalForce {}".format(max_force)) if max_force != "None" else self.write(
""
)
self.write(
"\n tendonSlackLength {}".format(tendon_slack_length)
) if tendon_slack_length != "None" else self.write("")
self.write(
"\n pennationAngle {}".format(pennation_angle)
) if pennation_angle != "None" else self.write("")
self.write("\n PCSA {}".format(pcsa)) if pcsa != "None" else self.write("")
self.write(
"\n maxVelocity {}".format(max_velocity)
) if max_velocity != "None" else self.write("")
self.write("\n endmuscle\n")
# viapoint
for viapoint in list_pathpoint_muscle(muscle):
# viapoint data
parent_viapoint = get_body_pathpoint(viapoint)
viapoint_pos = get_pos(viapoint)
# print viapoint data
self.write("\n viapoint {}".format(viapoint))
self.write(
"\n parent {}".format(parent_viapoint)
) if parent_viapoint != "None" else self.write("")
self.write("\n muscle {}".format(muscle))
self.write("\n musclegroup {}".format(m_ref)) if m_ref != "None" else self.write(
""
)
self.write(
"\n position {}".format(viapoint_pos)
) if viapoint_pos != "None" else self.write("")
self.write("\n endviapoint")
self.write("\n")
self.file.close()
    def __getattr__(self, attr):
        # Fallback for ANY missing attribute lookup.
        # NOTE(review): printing and implicitly returning None (instead of
        # raising AttributeError) hides typos and confuses hasattr()/copy/
        # pickle — consider raising AttributeError(attr) here.
        print("Error : {} is not an attribute of this class".format(attr))
def get_path(self):
return self.path
def write(self, string):
self.file = open(self.path, "a")
self.file.write(string)
self.file.close()
def get_origin_file(self):
return self.originfile
def credits(self):
return self.data_origin.xpath("/OpenSimDocument/Model/credits")[0].text
def publications(self):
return self.data_origin.xpath("/OpenSimDocument/Model/publications")[0].text
def body_list(self):
_list = []
for body in self.data_origin.xpath("/OpenSimDocument/Model/BodySet/objects/Body"):
_list.append(body.get("name"))
return _list
class ConvertedFromOsim2Biorbd4:
def __init__(self, path, origin_file, version=3):
self.path = path
self.origin_file = origin_file
self.version = str(version)
self.data_origin = etree.parse(self.origin_file)
self.root = self.data_origin.getroot()
self.file = open(self.path, "w")
self.file.write("version " + self.version + "\n")
self.file.write("\n// File extracted from " + self.origin_file)
self.file.write("\n")
def new_text(element):
if type(element) == str:
return element
else:
return element.text
def body_list(_self):
list_of_bodies = []
for _body in _self.data_origin.xpath("/OpenSimDocument/Model/BodySet/objects/Body"):
list_of_bodies.append(_body.get("name"))
return list_of_bodies
        def matrix_inertia(_body):
            # Return [ixx, iyy, izz, ixy, ixz, iyz] parsed from the body's
            # <inertia> element text, or the string "None" when new_text
            # reported the element as missing.
            _ref = new_text(go_to(go_to(self.root, "Body", "name", _body), "inertia"))
            if _ref != "None":
                _inertia_str = _ref
                _inertia = [float(s) for s in _inertia_str.split(" ")]
                return _inertia
            else:
                return "None"
def muscle_list(_self):
_list = []
for _muscle in _self.data_origin.xpath("/OpenSimDocument/Model/ForceSet/objects/Thelen2003Muscle"):
_list.append(_muscle.get("name"))
return _list
        def list_pathpoint_muscle(_muscle):
            # Return the names of all PathPoint children of `_muscle`, in
            # document order (callers assume parent-to-child order).
            _viapoint = []
            # TODO warning for other type of pathpoint
            # index_go_to returns an eval-able index string like "[0][2][0]";
            # dropping the last two characters ("0]") leaves the prefix shared
            # by all sibling PathPoints.  NOTE(review): this assumes the final
            # child index is a single digit.
            index_pathpoint = index_go_to(go_to(self.root, "Thelen2003Muscle", "name", _muscle), "PathPoint")
            _list_index = list(index_pathpoint)
            _tronc_list_index = _list_index[: len(_list_index) - 2]
            _tronc_index = "".join(_tronc_list_index)
            index_root = index_go_to(self.root, "Thelen2003Muscle", "name", _muscle)
            index_tronc_total = index_root + _tronc_index
            i = 0
            while True:
                try:
                    # HACK: the index string is eval'ed against self.root; safe
                    # only because it is built internally, never from input.
                    # The bare except doubles as the loop terminator when `i`
                    # runs past the last child.
                    child = eval("self.root" + index_tronc_total + str(i) + "]")
                    _viapoint.append(child.get("name"))
                    i += 1
                except: # Exception as e: print('Error', e)
                    break
            return _viapoint
        def list_markers_body(_body):
            # Return the names of all Marker elements attached to `_body`
            # (original comment said "transformation" — copy-paste slip).
            # Returns [] when the model defines no markers.
            markers = []
            index_markers = index_go_to(self.root, "Marker")
            if index_markers is None:
                return []
            else:
                _list_index = list(index_markers)
                _tronc_list_index = _list_index[: len(_list_index) - 2]
                _tronc_index = "".join(_tronc_list_index)
                i = 0
                while True:
                    try:
                        child = eval("self.root" + _tronc_index + str(i) + "]").get("name")
                        # [9:] strips a fixed 9-character prefix from the
                        # socket_parent_frame text — presumably "/bodyset/";
                        # confirm against the .osim schema.
                        which_body = new_text(go_to(go_to(self.root, "Marker", "name", child), "socket_parent_frame"))[
                            9:
                        ]
                        if which_body == _body:
                            markers.append(child) if child is not None else True
                        i += 1
                    except:
                        break
                return markers
        # Build list_joint: one [name, parent, child, joint_type] record per
        # WeldJoint and CustomJoint in the model.  Parent/child come from
        # socket_parent_frame / socket_child_frame with the trailing 7
        # characters stripped — presumably an "_offset" suffix; confirm
        # against the .osim file.
        list_joint = []
        index_joints = index_go_to(self.root, "WeldJoint")
        if index_joints is not None:
            list_index = list(index_joints)
            tronc_list_index = list_index[: len(list_index) - 2]
            tronc_index = "".join(tronc_list_index)
            i = 0
            while True:
                try:
                    # HACK: eval of an internally-built index string; the bare
                    # except below doubles as the loop terminator when `i`
                    # runs past the last child.
                    new_joint = eval("self.root" + tronc_index + str(i) + "]").get("name")
                    if new_text(go_to(self.root, "WeldJoint", "name", new_joint)) != "None":
                        _parent_joint = new_text(
                            go_to(go_to(self.root, "WeldJoint", "name", new_joint), "socket_parent_frame")
                        )[:-7]
                        _child_joint = new_text(
                            go_to(go_to(self.root, "WeldJoint", "name", new_joint), "socket_child_frame")
                        )[:-7]
                        list_joint.append([new_joint, _parent_joint, _child_joint, "WeldJoint"])
                    i += 1
                except: # Exception as error:
                    # print('Error', error)
                    break
        index_joints = index_go_to(self.root, "CustomJoint")
        if index_joints is not None:
            list_index = list(index_joints)
            tronc_list_index = list_index[: len(list_index) - 2]
            tronc_index = "".join(tronc_list_index)
            # Start from the first CustomJoint's own child index.
            # NOTE(review): taking a single character assumes that index < 10.
            i = int(list_index[len(list_index) - 2])
            while True:
                try:
                    new_joint = eval("self.root" + tronc_index + str(i) + "]").get("name")
                    if new_text(go_to(self.root, "CustomJoint", "name", new_joint)) != "None":
                        _parent_joint = new_text(
                            go_to(go_to(self.root, "CustomJoint", "name", new_joint), "socket_parent_frame")
                        )[:-7]
                        _child_joint = new_text(
                            go_to(go_to(self.root, "CustomJoint", "name", new_joint), "socket_child_frame")
                        )[:-7]
                        list_joint.append([new_joint, _parent_joint, _child_joint, "CustomJoint"])
                    i += 1
                except: # Exception as e:print('Error', e)
                    break
        def dof_of_joint(_joint, _joint_type):
            # Return the names of the joint's Coordinate children — its
            # degrees of freedom — or [] when the joint defines none.
            dof = []
            _index_dof = index_go_to(go_to(self.root, _joint_type, "name", _joint), "Coordinate")
            if _index_dof is None:
                return []
            else:
                _list_index = list(_index_dof)
                _tronc_list_index = _list_index[: len(_list_index) - 2]
                _tronc_index = "".join(_tronc_list_index)
                _index_root = index_go_to(self.root, _joint_type, "name", _joint)
                _index_tronc_total = _index_root + _tronc_index
                i = 0
                while True:
                    try:
                        # eval of an internally-built index string; the bare
                        # except terminates the sibling scan.
                        child = eval("self.root" + _index_tronc_total + str(i) + "]")
                        if child.get("name") is not None:
                            dof.append(child.get("name"))
                        i += 1
                    except:
                        break
                return dof
def parent_child(_child):
# return parent of a child
# suppose that a parent can only have one child
for _joint in list_joint:
if _joint[2] == _child:
return _joint[1]
else:
return "None"
def joint_body(_body):
# return the joint to which the body is child
for _joint in list_joint:
if _joint[2] == _body:
return _joint[0], _joint[3]
else:
return "None", "None"
def transform_of_joint(_joint, _joint_type):
_translation = []
_rotation = []
if _joint is "None":
return [[], []]
_index_transform = index_go_to(go_to(self.root, _joint_type, "name", _joint), "TransformAxis")
if _index_transform is None:
return [[], []]
else:
_list_index = list(_index_transform)
_tronc_list_index = _list_index[: len(_list_index) - 2]
_tronc_index = "".join(_tronc_list_index)
_index_root = index_go_to(self.root, _joint_type, "name", _joint)
if not _index_root:
pass
_index_tronc_total = _index_root + _tronc_index
i = 0
while True:
try:
child = eval("self.root" + _index_tronc_total + str(i) + "]")
if child.get("name") is not None:
_translation.append(child.get("name")) if child.get("name").find(
"translation"
) == 0 else True
_rotation.append(child.get("name")) if child.get("name").find("rotation") == 0 else True
i += 1
except: # Exception as e: print('Error', e)
break
return [_translation, _rotation]
        def get_body_pathpoint(_pathpoint):
            # Return the body a path point is attached to, read from its
            # socket_parent_frame text with a fixed 9-character prefix
            # stripped (presumably "/bodyset/" — confirm against the schema).
            # Tries PathPoint, ConditionalPathPoint, then MovingPathPoint.
            # NOTE(review): in `index_go_to(...) is not None or ""` the
            # `or ""` is dead ("" is falsy), so each test reduces to
            # `is not None`.
            # NOTE(review): the final else binds to the *last* if only, and on
            # an exception the bare break makes the function return None (the
            # object) rather than the string "None".
            while True:
                try:
                    if (
                        index_go_to(go_to(self.root, "PathPoint", "name", _pathpoint), "socket_parent_frame")
                        is not None
                        or ""
                    ):
                        _ref = new_text(go_to(go_to(self.root, "PathPoint", "name", _pathpoint), "socket_parent_frame"))
                        return _ref[9:]
                    if (
                        index_go_to(go_to(self.root, "ConditionalPathPoint", "name", _pathpoint), "socket_parent_frame")
                        is not None
                        or ""
                    ):
                        _ref = new_text(
                            go_to(go_to(self.root, "ConditionalPathPoint", "name", _pathpoint), "socket_parent_frame")
                        )
                        return _ref[9:]
                    if (
                        index_go_to(go_to(self.root, "MovingPathPoint", "name", _pathpoint), "socket_parent_frame")
                        is not None
                        or ""
                    ):
                        _ref = new_text(
                            go_to(go_to(self.root, "MovingPathPoint", "name", _pathpoint), "socket_parent_frame")
                        )
                        return _ref[9:]
                    else:
                        return "None"
                except Exception as e:
                    break
        def get_pos(_pathpoint):
            # Return the <location> text of the path point named `_pathpoint`,
            # trying PathPoint, then ConditionalPathPoint, then MovingPathPoint
            # elements.  Returns the string "None" when none of them matches.
            # NOTE(review): on any exception from go_to/index_go_to the bare
            # `break` makes this implicitly return None (the object, not the
            # string "None") — confirm callers tolerate both.
            while True:
                try:
                    if index_go_to(go_to(self.root, "PathPoint", "name", _pathpoint), "location") != "":
                        return new_text(go_to(go_to(self.root, "PathPoint", "name", _pathpoint), "location"))
                    elif index_go_to(go_to(self.root, "ConditionalPathPoint", "name", _pathpoint), "location") != "":
                        return new_text(go_to(go_to(self.root, "ConditionalPathPoint", "name", _pathpoint), "location"))
                    elif index_go_to(go_to(self.root, "MovingPathPoint", "name", _pathpoint), "location") != "":
                        return new_text(go_to(go_to(self.root, "MovingPathPoint", "name", _pathpoint), "location"))
                    else:
                        return "None"
                except Exception as e:
                    break
def muscle_group_reference(_muscle, ref_group):
for el in ref_group:
if _muscle == el[0]:
return el[1]
else:
return "None"
# # Credits
# self.write('\n// CREDITS')
# _credits = print_credits()
# self.write('\n'+_credits+'\n')
#
# # Publications
# self.write('\n// PUBLICATIONS\n')
# _publications = print_publications()
# self.write('\n'+_publications+'\n')
# Segment definition
self.write("\n// SEGMENT DEFINITION\n")
# TODO change spaces into \t
def printing_segment(
_body,
_name,
parent_name,
_rotomatrix,
transformation_type="",
_is_dof="None",
true_segment=False,
_dof_total_trans="",
):
rt_in_matrix = 1
[
[r11, r12, r13, r14],
[r21, r22, r23, r24],
[r31, r32, r33, r34],
[r41, r42, r43, r44],
] = _rotomatrix.get_matrix().tolist()
for i in range(4):
for j in range(4):
round(eval("r" + str(i + 1) + str(j + 1)), 9)
[i11, i22, i33, i12, i13, i23] = matrix_inertia(_body)
mass = new_text(go_to(go_to(self.root, "Body", "name", _body), "mass"))
com = new_text(go_to(go_to(self.root, "Body", "name", _body), "mass_center"))
path_mesh_file = new_text(go_to(go_to(self.root, "Body", "name", _body), "mesh_file"))
# TODO add mesh files
# writing data
self.write(" // Segment\n")
self.write(" segment {}\n".format(_name)) if _name != "None" else self.write("")
self.write(" parent {} \n".format(parent_name)) if parent_name != "None" else self.write("")
self.write(" RTinMatrix {}\n".format(rt_in_matrix)) if rt_in_matrix != "None" else self.write("")
self.write(" RT\n")
self.write(
" {} {} {} {}\n"
" {} {} {} {}\n"
" {} {} {} {}\n"
" {} {} {} {}\n".format(
r11, r12, r13, r14, r21, r22, r23, r24, r31, r32, r33, r34, r41, r42, r43, r44
)
)
self.write(
" translations {}\n".format(_dof_total_trans)
) if transformation_type == "translation" and _dof_total_trans != "" else True
self.write(" rotations {}\n".format("z")) if _is_dof == "True" else True
self.write(" mass {}\n".format(mass)) if true_segment is True else True
self.write(
" inertia\n"
" {} {} {}\n"
" {} {} {}\n"
" {} {} {}\n".format(i11, i12, i13, i12, i22, i23, i13, i23, i33)
) if true_segment is True else True
self.write(" com {}\n".format(com)) if true_segment is True else True
self.write(" //meshfile {}\n".format(path_mesh_file)) if path_mesh_file != "None" else True
self.write(" endsegment\n")
# Division of body in segment depending of transformation
for body in body_list(self):
rotomatrix = OrthoMatrix([0, 0, 0])
self.write("\n// Information about {} segment\n".format(body))
parent = parent_child(body)
if parent == "ground":
parent = "None"
joint, joint_type = joint_body(body)
list_transform = transform_of_joint(joint, joint_type)
rotation_for_markers = rotomatrix.get_rotation_matrix()
# segment data
if list_transform[0] == []:
if list_transform[1] == []:
printing_segment(body, body, parent, rotomatrix, true_segment=True)
parent = body
else:
body_trans = body + "_translation"
dof_total_trans = ""
j = 0
list_trans_dof = ["x", "y", "z"]
for translation in list_transform[0]:
if translation.find("translation") == 0:
axis_str = new_text(
go_to(
go_to(
go_to(self.root, joint_type, "name", joint), "TransformAxis", "name", translation
),
"axis",
)
)
axis = [float(s) for s in axis_str.split(" ")]
rotomatrix.product(OrthoMatrix([0, 0, 0], axis))
is_dof = new_text(
go_to(
go_to(
go_to(self.root, joint_type, "name", joint), "TransformAxis", "name", translation
),
"coordinates",
)
)
if is_dof in dof_of_joint(joint, joint_type):
dof_total_trans += list_trans_dof[j]
j += 1
trans_str = new_text(
go_to(
go_to(
go_to(self.root, joint_type, "name", joint),
"PhysicalOffsetFrame",
"name",
parent + "_offset",
),
"translation",
)
)
trans_value = []
for s in trans_str.split(" "):
if s != "" and s is not "None":
trans_value.append(float(s))
rotomatrix.product(OrthoMatrix(trans_value))
rotation_for_markers = rotomatrix.get_rotation_matrix()
if list_transform[1] == []:
is_true_segment = True
else:
is_true_segment = False
printing_segment(
body, body_trans, parent, rotomatrix, "translation", dof_total_trans, true_segment=is_true_segment
)
parent = body_trans
if list_transform[1] != []:
rotomatrix = OrthoMatrix([0, 0, 0])
for rotation in list_transform[1]:
if rotation.find("rotation") == 0:
axis_str = new_text(
go_to(
go_to(go_to(self.root, joint_type, "name", joint), "TransformAxis", "name", rotation),
"axis",
)
)
axis = [float(s) for s in axis_str.split(" ")]
rotomatrix = OrthoMatrix([0, 0, 0], axis)
is_dof = new_text(
go_to(
go_to(go_to(self.root, joint_type, "name", joint), "TransformAxis", "name", rotation),
"coordinates",
)
)
if is_dof in dof_of_joint(joint, joint_type):
is_dof = "True"
else:
is_dof = "None"
printing_segment(body, body + "_" + rotation, parent, rotomatrix, "rotation", is_dof)
rotation_for_markers = rotation_for_markers.dot(rotomatrix.get_rotation_matrix())
parent = body + "_" + rotation
# segment to cancel axis effects
rotomatrix.set_rotation_matrix(inv(rotation_for_markers))
printing_segment(body, body, parent, rotomatrix, true_segment=True)
parent = body
# Markers
_list_markers = list_markers_body(body)
if _list_markers is not []:
self.write("\n // Markers")
for marker in _list_markers:
position = new_text(go_to(go_to(self.root, "Marker", "name", marker), "location"))
self.write("\n marker {}".format(marker))
self.write("\n parent {}".format(parent))
self.write("\n position {}".format(position))
self.write("\n endmarker\n")
late_body = body
# Muscle definition
self.write("\n// MUSCLE DEFINIION\n")
sort_muscle = []
muscle_ref_group = []
for muscle in muscle_list(self):
viapoint = list_pathpoint_muscle(muscle)
bodies_viapoint = []
for pathpoint in viapoint:
bodies_viapoint.append(get_body_pathpoint(pathpoint))
# it is supposed that viapoints are organized in order
# from the parent body to the child body
body_start = bodies_viapoint[0]
body_end = bodies_viapoint[len(bodies_viapoint) - 1]
sort_muscle.append([body_start, body_end])
muscle_ref_group.append([muscle, body_start + "_to_" + body_end])
# selecting muscle group
group_muscle = []
for ext_muscle in sort_muscle:
if ext_muscle not in group_muscle:
group_muscle.append(ext_muscle)
# print muscle group
for muscle_group in group_muscle:
self.write("\n// {} > {}\n".format(muscle_group[0], muscle_group[1]))
self.write("musclegroup {}\n".format(muscle_group[0] + "_to_" + muscle_group[1]))
self.write(" OriginParent {}\n".format(muscle_group[0]))
self.write(" InsertionParent {}\n".format(muscle_group[1]))
self.write("endmusclegroup\n")
# muscle
for muscle in muscle_list(self):
# muscle data
m_ref = muscle_group_reference(muscle, muscle_ref_group)
if m_ref == muscle_group[0] + "_to_" + muscle_group[1]:
muscle_type = "hillthelen"
state_type = "buchanan"
list_pathpoint = list_pathpoint_muscle(muscle)
start_point = list_pathpoint.pop(0)
end_point = list_pathpoint.pop()
start_pos = get_pos(start_point)
insert_pos = get_pos(end_point)
opt_length = new_text(
go_to(go_to(self.root, "Thelen2003Muscle", "name", muscle), "optimal_fiber_length")
)
max_force = new_text(
go_to(go_to(self.root, "Thelen2003Muscle", "name", muscle), "max_isometric_force")
)
tendon_slack_length = new_text(
go_to(go_to(self.root, "Thelen2003Muscle", "name", muscle), "tendon_slack_length")
)
pennation_angle = new_text(
go_to(go_to(self.root, "Thelen2003Muscle", "name", muscle), "pennation_angle_at_optimal")
)
pcsa = new_text(go_to(go_to(self.root, "Thelen2003Muscle", "name", muscle), "pcsa"))
max_velocity = new_text(
go_to(go_to(self.root, "Thelen2003Muscle", "name", muscle), "max_contraction_velocity")
)
# print muscle data
self.write("\n muscle {}".format(muscle))
self.write("\n Type {}".format(muscle_type)) if muscle_type != "None" else self.write("")
self.write("\n statetype {}".format(state_type)) if state_type != "None" else self.write(
""
)
self.write("\n musclegroup {}".format(m_ref)) if m_ref != "None" else self.write("")
self.write(
"\n OriginPosition {}".format(start_pos)
) if start_pos != "None" else self.write("")
self.write(
"\n InsertionPosition {}".format(insert_pos)
) if insert_pos != "None" else self.write("")
self.write(
"\n optimalLength {}".format(opt_length)
) if opt_length != "None" else self.write("")
self.write("\n maximalForce {}".format(max_force)) if max_force != "None" else self.write(
""
)
self.write(
"\n tendonSlackLength {}".format(tendon_slack_length)
) if tendon_slack_length != "None" else self.write("")
self.write(
"\n pennationAngle {}".format(pennation_angle)
) if pennation_angle != "None" else self.write("")
self.write("\n PCSA {}".format(pcsa)) if pcsa != "None" else self.write("")
self.write(
"\n maxVelocity {}".format(max_velocity)
) if max_velocity != "None" else self.write("")
self.write("\n endmuscle\n")
# viapoint
for viapoint in list_pathpoint:
# viapoint data
parent_viapoint = get_body_pathpoint(viapoint)
viapoint_pos = get_pos(viapoint)
# print viapoint data
self.write("\n viapoint {}".format(viapoint))
self.write(
"\n parent {}".format(parent_viapoint)
) if parent_viapoint != "None" else self.write("")
self.write("\n muscle {}".format(muscle))
self.write("\n musclegroup {}".format(m_ref)) if m_ref != "None" else self.write(
""
)
self.write(
"\n position {}".format(viapoint_pos)
) if viapoint_pos != "None" else self.write("")
self.write("\n endviapoint")
self.write("\n")
self.file.close()
    def __getattr__(self, attr):
        # Fallback for ANY missing attribute lookup.
        # NOTE(review): printing and implicitly returning None (instead of
        # raising AttributeError) hides typos and confuses hasattr()/copy/
        # pickle — consider raising AttributeError(attr) here.
        print("Error : {} is not an attribute of this class".format(attr))
def get_path(self):
return self.path
def write(self, string):
self.file = open(self.path, "a")
self.file.write(string)
self.file.close()
def get_origin_file(self):
return self.originfile
def credits(self):
return self.data_origin.xpath("/OpenSimDocument/Model/credits")[0].text
def publications(self):
return self.data_origin.xpath("/OpenSimDocument/Model/publications")[0].text
def body_list(self):
_list = []
for body in self.data_origin.xpath("/OpenSimDocument/Model/BodySet/objects/Body"):
_list.append(body.get("name"))
return _list
| 47.002461
| 120
| 0.478358
| 5,890
| 57,296
| 4.39236
| 0.054839
| 0.029995
| 0.029686
| 0.044529
| 0.876966
| 0.853195
| 0.839125
| 0.827877
| 0.815198
| 0.805187
| 0
| 0.015933
| 0.411774
| 57,296
| 1,218
| 121
| 47.041051
| 0.751684
| 0.043703
| 0
| 0.753555
| 0
| 0
| 0.114348
| 0.009399
| 0
| 0
| 0
| 0.000821
| 0
| 1
| 0.054028
| false
| 0.000948
| 0.003791
| 0.010427
| 0.141232
| 0.012322
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e135cf03c56a065cdf0083840c823ca360e9b769
| 2,195
|
py
|
Python
|
vscode/extensions/magicstack.magicpython-1.0.12/test/strings/format4.py
|
nlimpid/dotfiles
|
b78d08707992f742f984f556fa58349c2ccd095d
|
[
"MIT"
] | null | null | null |
vscode/extensions/magicstack.magicpython-1.0.12/test/strings/format4.py
|
nlimpid/dotfiles
|
b78d08707992f742f984f556fa58349c2ccd095d
|
[
"MIT"
] | 4
|
2019-06-16T09:52:03.000Z
|
2019-08-18T02:11:35.000Z
|
vscode/extensions/magicstack.magicpython-1.0.12/test/strings/format4.py
|
nlimpid/dotfiles
|
b78d08707992f742f984f556fa58349c2ccd095d
|
[
"MIT"
] | null | null | null |
a = 'qqq{:%Y-%m-%d %H:%M:%S}www'
a = 'qqq{0:{fill}{align}16}www'
a : source.python
: source.python
= : keyword.operator.assignment.python, source.python
: source.python
' : punctuation.definition.string.begin.python, source.python, string.quoted.single.python
qqq : source.python, string.quoted.single.python
{ : constant.character.format.placeholder.other.python, source.python, string.quoted.single.python
: : constant.character.format.placeholder.other.python, source.python, storage.type.format.python, string.quoted.single.python
%Y-%m-%d %H:%M:%S : constant.character.format.placeholder.other.python, source.python, string.quoted.single.python
} : constant.character.format.placeholder.other.python, source.python, string.quoted.single.python
www : source.python, string.quoted.single.python
' : punctuation.definition.string.end.python, source.python, string.quoted.single.python
a : source.python
: source.python
= : keyword.operator.assignment.python, source.python
: source.python
' : punctuation.definition.string.begin.python, source.python, string.quoted.single.python
qqq : source.python, string.quoted.single.python
{0 : constant.character.format.placeholder.other.python, source.python, string.quoted.single.python
: : constant.character.format.placeholder.other.python, source.python, storage.type.format.python, string.quoted.single.python
{fill} : constant.character.format.placeholder.other.python, source.python, string.quoted.single.python
{align} : constant.character.format.placeholder.other.python, source.python, string.quoted.single.python
16 : constant.character.format.placeholder.other.python, source.python, string.quoted.single.python
} : constant.character.format.placeholder.other.python, source.python, string.quoted.single.python
www : source.python, string.quoted.single.python
' : punctuation.definition.string.end.python, source.python, string.quoted.single.python
| 66.515152
| 138
| 0.689749
| 252
| 2,195
| 6.007937
| 0.123016
| 0.206077
| 0.237781
| 0.285337
| 0.974901
| 0.974901
| 0.966975
| 0.966975
| 0.966975
| 0.966975
| 0
| 0.003359
| 0.186333
| 2,195
| 32
| 139
| 68.59375
| 0.844345
| 0
| 0
| 0.714286
| 0
| 0.428571
| 0.023235
| 0.01139
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 12
|
e15f51bc0f1560b1c76ade59b9d19be24500e88e
| 35
|
py
|
Python
|
geek/university/geek3.py
|
franciscoRic/Aula-Python
|
bc767b2ff22526ad0c3cc4ab51ee5227be9ce399
|
[
"MIT"
] | null | null | null |
geek/university/geek3.py
|
franciscoRic/Aula-Python
|
bc767b2ff22526ad0c3cc4ab51ee5227be9ce399
|
[
"MIT"
] | null | null | null |
geek/university/geek3.py
|
franciscoRic/Aula-Python
|
bc767b2ff22526ad0c3cc4ab51ee5227be9ce399
|
[
"MIT"
] | null | null | null |
def funcao3():
    """Return the fixed string 'Geek'."""
    return 'Geek'
| 7
| 17
| 0.571429
| 4
| 35
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.04
| 0.285714
| 35
| 4
| 18
| 8.75
| 0.76
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
e16550dc555ccd5ca059a6734d328ad954c4598e
| 7,716
|
py
|
Python
|
networks/resnet.py
|
rickgroen/cov-weighting
|
64c296679cd37e724a03c6dc107606f7048aec96
|
[
"MIT"
] | 26
|
2021-01-05T07:10:31.000Z
|
2022-03-23T06:31:00.000Z
|
networks/resnet.py
|
rickgroen/cov-weighting
|
64c296679cd37e724a03c6dc107606f7048aec96
|
[
"MIT"
] | 6
|
2021-04-12T16:27:11.000Z
|
2022-02-09T07:00:15.000Z
|
networks/resnet.py
|
rickgroen/cov-weighting
|
64c296679cd37e724a03c6dc107606f7048aec96
|
[
"MIT"
] | 7
|
2021-03-08T09:28:05.000Z
|
2022-02-23T07:39:29.000Z
|
from networks.blocks import *
class ResNet50MD(nn.Module):
    """
    Original MonoDepth implementation of ResNet50.

    Encoder-decoder disparity network.  The encoder is a ResNet-50-style
    stack of bottleneck ``ResBlock``s; the decoder upsamples back toward
    input resolution with ``Upconv`` layers, fusing encoder activations
    via skip connections.  Disparity is predicted at four scales
    (``disp4`` coarsest ... ``disp1`` finest), each a 2-channel map
    (presumably left/right disparity -- TODO confirm against the loss code).

    :param normalize: normalization spec forwarded to every
        Conv/ResBlock/Upconv block (semantics defined in
        ``networks.blocks``, not visible here -- confirm there).
    :param do_multi_objective: when True, ``forward`` additionally
        returns shared intermediate features for multi-objective
        optimization.
    """
    def __init__(self, normalize=None, do_multi_objective=False):
        super(ResNet50MD, self).__init__()
        # encoder -- trailing comments give spatial scale and channel
        # depth of each stage's output ("H/2 - 64D" = half resolution,
        # 64 channels).
        self.conv1 = Conv(3, 64, 7, 2, normalize=normalize) # H/2 - 64D
        self.pool1 = MaxPool(3) # H/4 - 64D
        self.conv2 = ResBlock(64, 64, 3, 2, normalize=normalize) # H/8 - 256D
        self.conv3 = ResBlock(256, 128, 4, 2, normalize=normalize) # H/16 - 512D
        self.conv4 = ResBlock(512, 256, 6, 2, normalize=normalize) # H/32 - 1024D
        self.conv5 = ResBlock(1024, 512, 3, 2, normalize=normalize) # H/64 - 2048D
        # decoder -- each iconv input width = upconv output channels
        # + skip channels (+ 2 for the upsampled coarser disparity map
        # from scale 3 downwards).
        self.upconv6 = Upconv(2048, 512, 3, 2, normalize=normalize)
        self.iconv6 = Conv(1024 + 512, 512, 3, 1, normalize=normalize)
        self.upconv5 = Upconv(512, 256, 3, 2, normalize=normalize)
        self.iconv5 = Conv(512+256, 256, 3, 1, normalize=normalize)
        self.upconv4 = Upconv(256, 128, 3, 2, normalize=normalize)
        self.iconv4 = Conv(256 + 128, 128, 3, 1, normalize=normalize)
        self.disp4_layer = GetDisp(128, num_out_layers=2)
        self.upconv3 = Upconv(128, 64, 3, 2, normalize=normalize)
        self.iconv3 = Conv(64 + 64 + 2, 64, 3, 1, normalize=normalize)
        self.disp3_layer = GetDisp(64, num_out_layers=2)
        self.upconv2 = Upconv(64, 32, 3, 2, normalize=normalize)
        self.iconv2 = Conv(64 + 32 + 2, 32, 3, 1, normalize=normalize)
        self.disp2_layer = GetDisp(32, num_out_layers=2)
        self.upconv1 = Upconv(32, 16, 3, 2, normalize=normalize)
        self.iconv1 = Conv(16 + 2, 16, 3, 1, normalize=normalize)
        self.disp1_layer = GetDisp(16, num_out_layers=2)
        self.return_shared_output = do_multi_objective
    def forward(self, x):
        """Run the network on image batch ``x``.

        Returns the four disparity maps (finest first); when
        ``return_shared_output`` is set, a tuple of shared encoder/decoder
        features is returned in front of them.
        """
        # encoder
        x1 = self.conv1(x)
        x_pool1 = self.pool1(x1)
        x2 = self.conv2(x_pool1)
        x3 = self.conv3(x2)
        x4 = self.conv4(x3)
        x5 = self.conv5(x4)
        # skips -- encoder activations reused by the decoder
        skip1 = x1
        skip2 = x_pool1
        skip3 = x2
        skip4 = x3
        skip5 = x4
        # decoder
        upconv6 = self.upconv6(x5)
        concat6 = torch.cat((upconv6, skip5), 1)
        iconv6 = self.iconv6(concat6)
        upconv5 = self.upconv5(iconv6)
        concat5 = torch.cat((upconv5, skip4), 1)
        iconv5 = self.iconv5(concat5)
        upconv4 = self.upconv4(iconv5)
        concat4 = torch.cat((upconv4, skip3), 1)
        iconv4 = self.iconv4(concat4)
        # NOTE(review): disparity maps are stored on self as well as
        # returned -- per-forward state kept on the module; confirm no
        # external code reads self.disp*/self.udisp* before refactoring.
        self.disp4 = self.disp4_layer(iconv4)
        # x2 bilinear upsampling so the coarse disparity can be fed into
        # the next-finer decoder stage (the "+ 2" iconv input channels).
        self.udisp4 = nn.functional.interpolate(self.disp4, scale_factor=2, mode='bilinear', align_corners=True)
        upconv3 = self.upconv3(iconv4)
        concat3 = torch.cat((upconv3, skip2, self.udisp4), 1)
        iconv3 = self.iconv3(concat3)
        self.disp3 = self.disp3_layer(iconv3)
        self.udisp3 = nn.functional.interpolate(self.disp3, scale_factor=2, mode='bilinear', align_corners=True)
        upconv2 = self.upconv2(iconv3)
        concat2 = torch.cat((upconv2, skip1, self.udisp3), 1)
        iconv2 = self.iconv2(concat2)
        self.disp2 = self.disp2_layer(iconv2)
        self.udisp2 = nn.functional.interpolate(self.disp2, scale_factor=2, mode='bilinear', align_corners=True)
        upconv1 = self.upconv1(iconv2)
        concat1 = torch.cat((upconv1, self.udisp2), 1)
        iconv1 = self.iconv1(concat1)
        self.disp1 = self.disp1_layer(iconv1)
        if self.return_shared_output:
            # Specifically for Multi-objective optimization.
            return (skip2, skip1, iconv4), (self.disp1, self.disp2, self.disp3, self.disp4)
        return self.disp1, self.disp2, self.disp3, self.disp4
class ResNet18MD(nn.Module):
    """
    MonoDepth disparity network with a ResNet-18-style encoder.

    Same encoder-decoder layout as ``ResNet50MD`` but with basic
    (non-bottleneck) ``ResBlockBasic`` encoder stages, so the encoder
    channel widths are 64/128/256/512 instead of 256/512/1024/2048.
    Predicts 2-channel disparity maps at four scales.

    :param normalize: normalization spec forwarded to every
        Conv/ResBlockBasic/Upconv block (defined in ``networks.blocks``).
    :param do_multi_objective: when True, ``forward`` additionally
        returns shared intermediate features for multi-objective
        optimization.
    """
    def __init__(self, normalize=None, do_multi_objective=False):
        super(ResNet18MD, self).__init__()
        # encoder -- trailing comments give output scale / channel depth
        self.conv1 = Conv(3, 64, 7, 2, normalize=normalize) # H/2 - 64D
        self.pool1 = MaxPool(3) # H/4 - 64D
        self.conv2 = ResBlockBasic(64, 64, 2, 2, normalize=normalize) # H/8 - 64D
        self.conv3 = ResBlockBasic(64, 128, 2, 2, normalize=normalize) # H/16 - 128D
        self.conv4 = ResBlockBasic(128, 256, 2, 2, normalize=normalize) # H/32 - 256D
        self.conv5 = ResBlockBasic(256, 512, 2, 2, normalize=normalize) # H/64 - 512D
        # decoder -- iconv input width = upconv output + skip channels
        # (+ 2 for the upsampled coarser disparity from scale 3 down).
        self.upconv6 = Upconv(512, 512, 3, 2, normalize=normalize)
        self.iconv6 = Conv(256+512, 512, 3, 1, normalize=normalize)
        self.upconv5 = Upconv(512, 256, 3, 2, normalize=normalize)
        self.iconv5 = Conv(128 + 256, 256, 3, 1, normalize=normalize)
        self.upconv4 = Upconv(256, 128, 3, 2, normalize=normalize)
        self.iconv4 = Conv(64 + 128, 128, 3, 1, normalize=normalize)
        self.disp4_layer = GetDisp(128, num_out_layers=2)
        self.upconv3 = Upconv(128, 64, 3, 2, normalize=normalize)
        self.iconv3 = Conv(64 + 64 + 2, 64, 3, 1, normalize=normalize)
        self.disp3_layer = GetDisp(64, num_out_layers=2)
        self.upconv2 = Upconv(64, 32, 3, 2, normalize=normalize)
        self.iconv2 = Conv(64 + 32 + 2, 32, 3, 1, normalize=normalize)
        self.disp2_layer = GetDisp(32, num_out_layers=2)
        self.upconv1 = Upconv(32, 16, 3, 2, normalize=normalize)
        self.iconv1 = Conv(16 + 2, 16, 3, 1, normalize=normalize)
        self.disp1_layer = GetDisp(16, num_out_layers=2)
        # Xavier-initialize every Conv2d weight.  NOTE(review):
        # ResNet50MD does not do this -- confirm the asymmetry is
        # intentional before unifying.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.xavier_uniform_(m.weight)
        self.return_shared_output = do_multi_objective
    def forward(self, x):
        """Run the network on image batch ``x``.

        Returns the four disparity maps (finest first); when
        ``return_shared_output`` is set, a tuple of shared features is
        returned in front of them.
        """
        # encoder
        x1 = self.conv1(x)
        x_pool1 = self.pool1(x1)
        x2 = self.conv2(x_pool1)
        x3 = self.conv3(x2)
        x4 = self.conv4(x3)
        x5 = self.conv5(x4)
        # skips -- encoder activations reused by the decoder
        skip1 = x1
        skip2 = x_pool1
        skip3 = x2
        skip4 = x3
        skip5 = x4
        # decoder
        upconv6 = self.upconv6(x5)
        concat6 = torch.cat((upconv6, skip5), 1)
        iconv6 = self.iconv6(concat6)
        upconv5 = self.upconv5(iconv6)
        concat5 = torch.cat((upconv5, skip4), 1)
        iconv5 = self.iconv5(concat5)
        upconv4 = self.upconv4(iconv5)
        concat4 = torch.cat((upconv4, skip3), 1)
        iconv4 = self.iconv4(concat4)
        # NOTE(review): disparity maps are stored on self as well as
        # returned -- per-forward state kept on the module.
        self.disp4 = self.disp4_layer(iconv4)
        # x2 bilinear upsampling so the coarse disparity feeds the
        # next-finer decoder stage (the "+ 2" iconv input channels).
        self.udisp4 = nn.functional.interpolate(self.disp4, scale_factor=2, mode='bilinear', align_corners=True)
        upconv3 = self.upconv3(iconv4)
        concat3 = torch.cat((upconv3, skip2, self.udisp4), 1)
        iconv3 = self.iconv3(concat3)
        self.disp3 = self.disp3_layer(iconv3)
        self.udisp3 = nn.functional.interpolate(self.disp3, scale_factor=2, mode='bilinear', align_corners=True)
        upconv2 = self.upconv2(iconv3)
        concat2 = torch.cat((upconv2, skip1, self.udisp3), 1)
        iconv2 = self.iconv2(concat2)
        self.disp2 = self.disp2_layer(iconv2)
        self.udisp2 = nn.functional.interpolate(self.disp2, scale_factor=2, mode='bilinear', align_corners=True)
        upconv1 = self.upconv1(iconv2)
        concat1 = torch.cat((upconv1, self.udisp2), 1)
        iconv1 = self.iconv1(concat1)
        self.disp1 = self.disp1_layer(iconv1)
        if self.return_shared_output:
            # Specifically for Multi-objective optimization.
            return (skip2, skip1, iconv4), (self.disp1, self.disp2, self.disp3, self.disp4)
        return self.disp1, self.disp2, self.disp3, self.disp4
| 40.610526
| 112
| 0.602514
| 981
| 7,716
| 4.653415
| 0.130479
| 0.134064
| 0.115663
| 0.061336
| 0.894852
| 0.854326
| 0.854326
| 0.854326
| 0.838116
| 0.838116
| 0
| 0.116225
| 0.276309
| 7,716
| 189
| 113
| 40.825397
| 0.701289
| 0.048341
| 0
| 0.823529
| 0
| 0
| 0.006575
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029412
| false
| 0
| 0.007353
| 0
| 0.080882
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e1846aff634f9ba03db27c11cefe6fe7282965b9
| 285,325
|
py
|
Python
|
cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_dwdm_ui_oper.py
|
Maikor/ydk-py
|
b86c4a7c570ae3b2c5557d098420446df5de4929
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_dwdm_ui_oper.py
|
Maikor/ydk-py
|
b86c4a7c570ae3b2c5557d098420446df5de4929
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_dwdm_ui_oper.py
|
Maikor/ydk-py
|
b86c4a7c570ae3b2c5557d098420446df5de4929
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
""" Cisco_IOS_XR_dwdm_ui_oper
This module contains a collection of YANG definitions
for Cisco IOS\-XR dwdm\-ui package operational data.
This module contains definitions
for the following management objects\:
dwdm\: DWDM operational data
vtxp\: vtxp
Copyright (c) 2013\-2018 by Cisco Systems, Inc.
All rights reserved.
"""
from collections import OrderedDict
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class DwdmControllerState(Enum):
    """
    DwdmControllerState (Enum Class)

    Dwdm controller state.

    .. data:: dwdm_ui_state_up = 0
        Up
    .. data:: dwdm_ui_state_down = 1
        Down
    .. data:: dwdm_ui_state_admin_down = 2
        Administratively Down
    """

    # Python member names use underscores; the YLeaf string is the
    # hyphenated YANG enum identifier.
    dwdm_ui_state_up = Enum.YLeaf(0, "dwdm-ui-state-up")
    dwdm_ui_state_down = Enum.YLeaf(1, "dwdm-ui-state-down")
    dwdm_ui_state_admin_down = Enum.YLeaf(2, "dwdm-ui-state-admin-down")
class DwdmWaveChannelOwner(Enum):
    """
    DwdmWaveChannelOwner (Enum Class)

    Dwdm wave channel owner -- which entity selected the wavelength.

    .. data:: default = 0
        Hardware Default
    .. data:: configuration = 1
        Configuration
    .. data:: gmpls = 2
        GMPLS Signaled
    """

    # YLeaf string is the hyphen-free YANG enum identifier.
    default = Enum.YLeaf(0, "default")
    configuration = Enum.YLeaf(1, "configuration")
    gmpls = Enum.YLeaf(2, "gmpls")
class DwdmtasState(Enum):
    """
    DwdmtasState (Enum Class)

    Dwdmtas state (transponder administrative service state).

    .. data:: tas_oos = 0
        Out of Service
    .. data:: tas_is = 1
        In Service
    .. data:: tas_oos_mt = 2
        Out of Service Maintenance
    .. data:: tas_is_cfg = 3
        In Service Config allowed
    """

    tas_oos = Enum.YLeaf(0, "tas-oos")
    tas_is = Enum.YLeaf(1, "tas-is")
    tas_oos_mt = Enum.YLeaf(2, "tas-oos-mt")
    tas_is_cfg = Enum.YLeaf(3, "tas-is-cfg")
class G709apsByte(Enum):
    """
    G709apsByte (Enum Class)

    G709aps byte.  Values are sparse (0/15/63/175/239/255) -- presumably
    the raw G.709 APS/PCC byte codes rather than an ordinal sequence;
    confirm against the G.709 specification.

    .. data:: pp_no_protect = 0
        No Protection
    .. data:: pp_no_request = 15
        No Request
    .. data:: pp_regen_degrade = 63
        Regenerator Degrade
    .. data:: pp_sig_degrade = 175
        Signal Degrade
    .. data:: pp_remote_main = 239
        Maintenance Request
    .. data:: pp_aps_unknown = 255
        Unknown
    """

    pp_no_protect = Enum.YLeaf(0, "pp-no-protect")
    pp_no_request = Enum.YLeaf(15, "pp-no-request")
    pp_regen_degrade = Enum.YLeaf(63, "pp-regen-degrade")
    pp_sig_degrade = Enum.YLeaf(175, "pp-sig-degrade")
    pp_remote_main = Enum.YLeaf(239, "pp-remote-main")
    pp_aps_unknown = Enum.YLeaf(255, "pp-aps-unknown")
class G709efecMode(Enum):
    """
    G709efecMode (Enum Class)

    G709efec mode (enhanced FEC scheme per ITU-T G.975.1).

    .. data:: g975_none = 0
        None
    .. data:: g975_1_i4 = 1
        G975.1 I.4
    .. data:: g975_1_i7 = 2
        G975.1 I.7
    """

    g975_none = Enum.YLeaf(0, "g975-none")
    g975_1_i4 = Enum.YLeaf(1, "g975-1-i4")
    g975_1_i7 = Enum.YLeaf(2, "g975-1-i7")
class G709ppfsmMode(Enum):
    """
    G709ppfsmMode (Enum Class)

    G709ppfsm mode (proactive-protection FSM operating mode).

    .. data:: pp_disable = 0
        OFF
    .. data:: pp_default_mode = 1
        ON (Default Mode)
    .. data:: pp_graceful_mode = 2
        ON (Graceful Mode)
    """

    pp_disable = Enum.YLeaf(0, "pp-disable")
    pp_default_mode = Enum.YLeaf(1, "pp-default-mode")
    pp_graceful_mode = Enum.YLeaf(2, "pp-graceful-mode")
class G709ppfsmState(Enum):
    """
    G709ppfsmState (Enum Class)

    G709ppfsm state (proactive-protection FSM state).

    .. data:: in_active = 0
        In Active
    .. data:: disabled = 1
        Disabled
    .. data:: normal_state = 2
        Normal
    .. data:: local_failing = 3
        Local Failing
    .. data:: remote_failing = 4
        Remote Failing
    .. data:: main_t_failing = 5
        Maintenance Failing
    .. data:: regen_failing = 6
        Regenerator Failing
    .. data:: local_failed = 7
        Local Failed
    .. data:: remote_failed = 8
        Remote Failed
    .. data:: main_t_failed = 9
        Maintenance Failed
    .. data:: regen_failed = 10
        Regenerator Failed
    """

    in_active = Enum.YLeaf(0, "in-active")
    disabled = Enum.YLeaf(1, "disabled")
    normal_state = Enum.YLeaf(2, "normal-state")
    local_failing = Enum.YLeaf(3, "local-failing")
    remote_failing = Enum.YLeaf(4, "remote-failing")
    main_t_failing = Enum.YLeaf(5, "main-t-failing")
    regen_failing = Enum.YLeaf(6, "regen-failing")
    local_failed = Enum.YLeaf(7, "local-failed")
    remote_failed = Enum.YLeaf(8, "remote-failed")
    main_t_failed = Enum.YLeaf(9, "main-t-failed")
    regen_failed = Enum.YLeaf(10, "regen-failed")
class G709ppintfState(Enum):
    """
    G709ppintfState (Enum Class)

    G709ppintf state (proactive-protection interface state).

    .. data:: pp_intf_up = 0
        Interface is Up
    .. data:: pp_intf_failing = 1
        Interface is Going Down
    .. data:: pp_intf_down = 2
        Interface Down
    """

    pp_intf_up = Enum.YLeaf(0, "pp-intf-up")
    pp_intf_failing = Enum.YLeaf(1, "pp-intf-failing")
    pp_intf_down = Enum.YLeaf(2, "pp-intf-down")
class G709prbsInterval(Enum):
    """
    G709prbsInterval (Enum Class)

    PRBS test interval information: selects the current statistics
    bucket or one of up to 32 previous ones.  ``previous_intervalN``
    maps to value N for N = 2..32; ``previous_interval`` (no suffix)
    is the immediately preceding bucket.

    .. data:: current_interval = 0
        Current interval
    .. data:: previous_interval = 1
        Previous interval
    .. data:: previous_interval2 = 2  ... through
    .. data:: previous_interval32 = 32
        Previous interval N
    """

    # YLeaf strings are the hyphenated YANG enum identifiers.
    current_interval = Enum.YLeaf(0, "current-interval")
    previous_interval = Enum.YLeaf(1, "previous-interval")
    previous_interval2 = Enum.YLeaf(2, "previous-interval2")
    previous_interval3 = Enum.YLeaf(3, "previous-interval3")
    previous_interval4 = Enum.YLeaf(4, "previous-interval4")
    previous_interval5 = Enum.YLeaf(5, "previous-interval5")
    previous_interval6 = Enum.YLeaf(6, "previous-interval6")
    previous_interval7 = Enum.YLeaf(7, "previous-interval7")
    previous_interval8 = Enum.YLeaf(8, "previous-interval8")
    previous_interval9 = Enum.YLeaf(9, "previous-interval9")
    previous_interval10 = Enum.YLeaf(10, "previous-interval10")
    previous_interval11 = Enum.YLeaf(11, "previous-interval11")
    previous_interval12 = Enum.YLeaf(12, "previous-interval12")
    previous_interval13 = Enum.YLeaf(13, "previous-interval13")
    previous_interval14 = Enum.YLeaf(14, "previous-interval14")
    previous_interval15 = Enum.YLeaf(15, "previous-interval15")
    previous_interval16 = Enum.YLeaf(16, "previous-interval16")
    previous_interval17 = Enum.YLeaf(17, "previous-interval17")
    previous_interval18 = Enum.YLeaf(18, "previous-interval18")
    previous_interval19 = Enum.YLeaf(19, "previous-interval19")
    previous_interval20 = Enum.YLeaf(20, "previous-interval20")
    previous_interval21 = Enum.YLeaf(21, "previous-interval21")
    previous_interval22 = Enum.YLeaf(22, "previous-interval22")
    previous_interval23 = Enum.YLeaf(23, "previous-interval23")
    previous_interval24 = Enum.YLeaf(24, "previous-interval24")
    previous_interval25 = Enum.YLeaf(25, "previous-interval25")
    previous_interval26 = Enum.YLeaf(26, "previous-interval26")
    previous_interval27 = Enum.YLeaf(27, "previous-interval27")
    previous_interval28 = Enum.YLeaf(28, "previous-interval28")
    previous_interval29 = Enum.YLeaf(29, "previous-interval29")
    previous_interval30 = Enum.YLeaf(30, "previous-interval30")
    previous_interval31 = Enum.YLeaf(31, "previous-interval31")
    previous_interval32 = Enum.YLeaf(32, "previous-interval32")
class G709prbsMode(Enum):
    """
    G709prbsMode (Enum Class)

    G709prbs mode -- direction of the PRBS test on the port.

    .. data:: mode_source = 0
        mode source
    .. data:: mode_sink = 1
        mode sink
    .. data:: mode_source_sink = 2
        mode source sink
    .. data:: mode_invalid = 3
        mode invalid
    """

    mode_source = Enum.YLeaf(0, "mode-source")
    mode_sink = Enum.YLeaf(1, "mode-sink")
    mode_source_sink = Enum.YLeaf(2, "mode-source-sink")
    mode_invalid = Enum.YLeaf(3, "mode-invalid")
class G709prbsPattern(Enum):
    """
    G709prbsPattern (Enum Class)

    G709prbs pattern -- pseudo-random bit sequence used for the test
    (pn11/pn23/pn31 presumably denote PN polynomial lengths 2^11-1,
    2^23-1, 2^31-1 -- confirm against the PRBS feature docs).

    .. data:: pattern_none = 0
        pattern none
    .. data:: pattern_null = 1
        pattern null
    .. data:: pattern_pn11 = 2
        pattern pn11
    .. data:: pattern_pn23 = 3
        pattern pn23
    .. data:: pattern_pn31 = 4
        pattern pn31
    """

    pattern_none = Enum.YLeaf(0, "pattern-none")
    pattern_null = Enum.YLeaf(1, "pattern-null")
    pattern_pn11 = Enum.YLeaf(2, "pattern-pn11")
    pattern_pn23 = Enum.YLeaf(3, "pattern-pn23")
    pattern_pn31 = Enum.YLeaf(4, "pattern-pn31")
class Dwdm(Entity):
"""
DWDM operational data
.. attribute:: ports
All DWDM Port operational data
**type**\: :py:class:`Ports <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports>`
"""
_prefix = 'dwdm-ui-oper'
_revision = '2015-11-09'
def __init__(self):
super(Dwdm, self).__init__()
self._top_entity = None
self.yang_name = "dwdm"
self.yang_parent_name = "Cisco-IOS-XR-dwdm-ui-oper"
self.is_top_level_class = True
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("ports", ("ports", Dwdm.Ports))])
self._leafs = OrderedDict()
self.ports = Dwdm.Ports()
self.ports.parent = self
self._children_name_map["ports"] = "ports"
self._segment_path = lambda: "Cisco-IOS-XR-dwdm-ui-oper:dwdm"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Dwdm, [], name, value)
class Ports(Entity):
"""
All DWDM Port operational data
.. attribute:: port
DWDM Port operational data
**type**\: list of :py:class:`Port <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port>`
"""
_prefix = 'dwdm-ui-oper'
_revision = '2015-11-09'
def __init__(self):
super(Dwdm.Ports, self).__init__()
self.yang_name = "ports"
self.yang_parent_name = "dwdm"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("port", ("port", Dwdm.Ports.Port))])
self._leafs = OrderedDict()
self.port = YList(self)
self._segment_path = lambda: "ports"
self._absolute_path = lambda: "Cisco-IOS-XR-dwdm-ui-oper:dwdm/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Dwdm.Ports, [], name, value)
class Port(Entity):
"""
DWDM Port operational data
.. attribute:: name (key)
Port name
**type**\: str
**pattern:** [a\-zA\-Z0\-9.\_/\-]+
.. attribute:: prbs
DWDM Port PRBS related data
**type**\: :py:class:`Prbs <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Prbs>`
.. attribute:: optics
DWDM Port optics operational data
**type**\: :py:class:`Optics <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Optics>`
.. attribute:: info
DWDM port operational data
**type**\: :py:class:`Info <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Info>`
"""
_prefix = 'dwdm-ui-oper'
_revision = '2015-11-09'
def __init__(self):
super(Dwdm.Ports.Port, self).__init__()
self.yang_name = "port"
self.yang_parent_name = "ports"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['name']
self._child_classes = OrderedDict([("prbs", ("prbs", Dwdm.Ports.Port.Prbs)), ("optics", ("optics", Dwdm.Ports.Port.Optics)), ("info", ("info", Dwdm.Ports.Port.Info))])
self._leafs = OrderedDict([
('name', (YLeaf(YType.str, 'name'), ['str'])),
])
self.name = None
self.prbs = Dwdm.Ports.Port.Prbs()
self.prbs.parent = self
self._children_name_map["prbs"] = "prbs"
self.optics = Dwdm.Ports.Port.Optics()
self.optics.parent = self
self._children_name_map["optics"] = "optics"
self.info = Dwdm.Ports.Port.Info()
self.info.parent = self
self._children_name_map["info"] = "info"
self._segment_path = lambda: "port" + "[name='" + str(self.name) + "']"
self._absolute_path = lambda: "Cisco-IOS-XR-dwdm-ui-oper:dwdm/ports/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Dwdm.Ports.Port, ['name'], name, value)
class Prbs(Entity):
"""
DWDM Port PRBS related data
.. attribute:: twenty_four_hours_bucket
Port 24\-hour PRBS statistics table
**type**\: :py:class:`TwentyFourHoursBucket <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Prbs.TwentyFourHoursBucket>`
.. attribute:: fifteen_minutes_bucket
Port 15\-minute PRBS statistics table
**type**\: :py:class:`FifteenMinutesBucket <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Prbs.FifteenMinutesBucket>`
"""
_prefix = 'dwdm-ui-oper'
_revision = '2015-11-09'
def __init__(self):
super(Dwdm.Ports.Port.Prbs, self).__init__()
self.yang_name = "prbs"
self.yang_parent_name = "port"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("twenty-four-hours-bucket", ("twenty_four_hours_bucket", Dwdm.Ports.Port.Prbs.TwentyFourHoursBucket)), ("fifteen-minutes-bucket", ("fifteen_minutes_bucket", Dwdm.Ports.Port.Prbs.FifteenMinutesBucket))])
self._leafs = OrderedDict()
self.twenty_four_hours_bucket = Dwdm.Ports.Port.Prbs.TwentyFourHoursBucket()
self.twenty_four_hours_bucket.parent = self
self._children_name_map["twenty_four_hours_bucket"] = "twenty-four-hours-bucket"
self.fifteen_minutes_bucket = Dwdm.Ports.Port.Prbs.FifteenMinutesBucket()
self.fifteen_minutes_bucket.parent = self
self._children_name_map["fifteen_minutes_bucket"] = "fifteen-minutes-bucket"
self._segment_path = lambda: "prbs"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Dwdm.Ports.Port.Prbs, [], name, value)
class TwentyFourHoursBucket(Entity):
"""
Port 24\-hour PRBS statistics table
.. attribute:: twenty_four_hours_statistics
Port 24\-hour PRBS statistics data
**type**\: :py:class:`TwentyFourHoursStatistics <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Prbs.TwentyFourHoursBucket.TwentyFourHoursStatistics>`
"""
_prefix = 'dwdm-ui-oper'
_revision = '2015-11-09'
def __init__(self):
super(Dwdm.Ports.Port.Prbs.TwentyFourHoursBucket, self).__init__()
self.yang_name = "twenty-four-hours-bucket"
self.yang_parent_name = "prbs"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("twenty-four-hours-statistics", ("twenty_four_hours_statistics", Dwdm.Ports.Port.Prbs.TwentyFourHoursBucket.TwentyFourHoursStatistics))])
self._leafs = OrderedDict()
self.twenty_four_hours_statistics = Dwdm.Ports.Port.Prbs.TwentyFourHoursBucket.TwentyFourHoursStatistics()
self.twenty_four_hours_statistics.parent = self
self._children_name_map["twenty_four_hours_statistics"] = "twenty-four-hours-statistics"
self._segment_path = lambda: "twenty-four-hours-bucket"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Dwdm.Ports.Port.Prbs.TwentyFourHoursBucket, [], name, value)
class TwentyFourHoursStatistics(Entity):
"""
Port 24\-hour PRBS statistics data
.. attribute:: is_prbs_enabled
'True' if PRBS is enabled 'False' otherwise
**type**\: bool
.. attribute:: prbs_config_mode
Configured mode of PRBS test
**type**\: :py:class:`G709prbsMode <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.G709prbsMode>`
.. attribute:: prbs_entry
History consists of 15\-minute/24\-hour intervals
**type**\: list of :py:class:`PrbsEntry <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Prbs.TwentyFourHoursBucket.TwentyFourHoursStatistics.PrbsEntry>`
"""
_prefix = 'dwdm-ui-oper'
_revision = '2015-11-09'
def __init__(self):
super(Dwdm.Ports.Port.Prbs.TwentyFourHoursBucket.TwentyFourHoursStatistics, self).__init__()
self.yang_name = "twenty-four-hours-statistics"
self.yang_parent_name = "twenty-four-hours-bucket"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("prbs-entry", ("prbs_entry", Dwdm.Ports.Port.Prbs.TwentyFourHoursBucket.TwentyFourHoursStatistics.PrbsEntry))])
self._leafs = OrderedDict([
('is_prbs_enabled', (YLeaf(YType.boolean, 'is-prbs-enabled'), ['bool'])),
('prbs_config_mode', (YLeaf(YType.enumeration, 'prbs-config-mode'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper', 'G709prbsMode', '')])),
])
self.is_prbs_enabled = None
self.prbs_config_mode = None
self.prbs_entry = YList(self)
self._segment_path = lambda: "twenty-four-hours-statistics"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Dwdm.Ports.Port.Prbs.TwentyFourHoursBucket.TwentyFourHoursStatistics, ['is_prbs_enabled', 'prbs_config_mode'], name, value)
class PrbsEntry(Entity):
"""
History consists of 15\-minute/24\-hour intervals
.. attribute:: interval_index
Index of bucket, current and previous
**type**\: :py:class:`G709prbsInterval <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.G709prbsInterval>`
.. attribute:: configured_pattern
Configured pattern of PRBS test
**type**\: :py:class:`G709prbsPattern <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.G709prbsPattern>`
.. attribute:: start_at
Interval start timestamp
**type**\: str
**length:** 0..64
.. attribute:: stop_at
Interval stop timestamp
**type**\: str
**length:** 0..64
.. attribute:: received_pattern
Received Pattern of PRBS Test
**type**\: :py:class:`G709prbsPattern <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.G709prbsPattern>`
.. attribute:: bit_error_count
Bit Error Count
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: found_count
Count of pattern found in interval
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: lost_count
Count of pattern lost in interval
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: found_at
Pattern first found at timestamp
**type**\: str
**length:** 0..64
.. attribute:: lost_at
Pattern first lost at timestamp
**type**\: str
**length:** 0..64
"""
_prefix = 'dwdm-ui-oper'
_revision = '2015-11-09'
def __init__(self):
super(Dwdm.Ports.Port.Prbs.TwentyFourHoursBucket.TwentyFourHoursStatistics.PrbsEntry, self).__init__()
self.yang_name = "prbs-entry"
self.yang_parent_name = "twenty-four-hours-statistics"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('interval_index', (YLeaf(YType.enumeration, 'interval-index'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper', 'G709prbsInterval', '')])),
('configured_pattern', (YLeaf(YType.enumeration, 'configured-pattern'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper', 'G709prbsPattern', '')])),
('start_at', (YLeaf(YType.str, 'start-at'), ['str'])),
('stop_at', (YLeaf(YType.str, 'stop-at'), ['str'])),
('received_pattern', (YLeaf(YType.enumeration, 'received-pattern'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper', 'G709prbsPattern', '')])),
('bit_error_count', (YLeaf(YType.uint64, 'bit-error-count'), ['int'])),
('found_count', (YLeaf(YType.uint64, 'found-count'), ['int'])),
('lost_count', (YLeaf(YType.uint64, 'lost-count'), ['int'])),
('found_at', (YLeaf(YType.str, 'found-at'), ['str'])),
('lost_at', (YLeaf(YType.str, 'lost-at'), ['str'])),
])
self.interval_index = None
self.configured_pattern = None
self.start_at = None
self.stop_at = None
self.received_pattern = None
self.bit_error_count = None
self.found_count = None
self.lost_count = None
self.found_at = None
self.lost_at = None
self._segment_path = lambda: "prbs-entry"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Dwdm.Ports.Port.Prbs.TwentyFourHoursBucket.TwentyFourHoursStatistics.PrbsEntry, ['interval_index', 'configured_pattern', 'start_at', 'stop_at', 'received_pattern', 'bit_error_count', 'found_count', 'lost_count', 'found_at', 'lost_at'], name, value)
class FifteenMinutesBucket(Entity):
"""
Port 15\-minute PRBS statistics table
.. attribute:: fifteen_minutes_statistics
Port 15\-minute PRBS statistics data
**type**\: :py:class:`FifteenMinutesStatistics <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Prbs.FifteenMinutesBucket.FifteenMinutesStatistics>`
"""
_prefix = 'dwdm-ui-oper'
_revision = '2015-11-09'
def __init__(self):
super(Dwdm.Ports.Port.Prbs.FifteenMinutesBucket, self).__init__()
self.yang_name = "fifteen-minutes-bucket"
self.yang_parent_name = "prbs"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("fifteen-minutes-statistics", ("fifteen_minutes_statistics", Dwdm.Ports.Port.Prbs.FifteenMinutesBucket.FifteenMinutesStatistics))])
self._leafs = OrderedDict()
self.fifteen_minutes_statistics = Dwdm.Ports.Port.Prbs.FifteenMinutesBucket.FifteenMinutesStatistics()
self.fifteen_minutes_statistics.parent = self
self._children_name_map["fifteen_minutes_statistics"] = "fifteen-minutes-statistics"
self._segment_path = lambda: "fifteen-minutes-bucket"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Dwdm.Ports.Port.Prbs.FifteenMinutesBucket, [], name, value)
class FifteenMinutesStatistics(Entity):
"""
Port 15\-minute PRBS statistics data
.. attribute:: is_prbs_enabled
'True' if PRBS is enabled 'False' otherwise
**type**\: bool
.. attribute:: prbs_config_mode
Configured mode of PRBS test
**type**\: :py:class:`G709prbsMode <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.G709prbsMode>`
.. attribute:: prbs_entry
History consists of 15\-minute/24\-hour intervals
**type**\: list of :py:class:`PrbsEntry <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Prbs.FifteenMinutesBucket.FifteenMinutesStatistics.PrbsEntry>`
"""
_prefix = 'dwdm-ui-oper'
_revision = '2015-11-09'
def __init__(self):
super(Dwdm.Ports.Port.Prbs.FifteenMinutesBucket.FifteenMinutesStatistics, self).__init__()
self.yang_name = "fifteen-minutes-statistics"
self.yang_parent_name = "fifteen-minutes-bucket"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("prbs-entry", ("prbs_entry", Dwdm.Ports.Port.Prbs.FifteenMinutesBucket.FifteenMinutesStatistics.PrbsEntry))])
self._leafs = OrderedDict([
('is_prbs_enabled', (YLeaf(YType.boolean, 'is-prbs-enabled'), ['bool'])),
('prbs_config_mode', (YLeaf(YType.enumeration, 'prbs-config-mode'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper', 'G709prbsMode', '')])),
])
self.is_prbs_enabled = None
self.prbs_config_mode = None
self.prbs_entry = YList(self)
self._segment_path = lambda: "fifteen-minutes-statistics"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Dwdm.Ports.Port.Prbs.FifteenMinutesBucket.FifteenMinutesStatistics, ['is_prbs_enabled', 'prbs_config_mode'], name, value)
class PrbsEntry(Entity):
    """
    History consists of 15\-minute/24\-hour intervals

    .. attribute:: interval_index

        Index of bucket, current and previous

        **type**\: :py:class:`G709prbsInterval <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.G709prbsInterval>`

    .. attribute:: configured_pattern

        Configured pattern of PRBS test

        **type**\: :py:class:`G709prbsPattern <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.G709prbsPattern>`

    .. attribute:: start_at

        Interval start timestamp

        **type**\: str

        **length:** 0..64

    .. attribute:: stop_at

        Interval stop timestamp

        **type**\: str

        **length:** 0..64

    .. attribute:: received_pattern

        Received Pattern of PRBS Test

        **type**\: :py:class:`G709prbsPattern <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.G709prbsPattern>`

    .. attribute:: bit_error_count

        Bit Error Count

        **type**\: int

        **range:** 0..18446744073709551615

    .. attribute:: found_count

        Count of pattern found in interval

        **type**\: int

        **range:** 0..18446744073709551615

    .. attribute:: lost_count

        Count of pattern lost in interval

        **type**\: int

        **range:** 0..18446744073709551615

    .. attribute:: found_at

        Pattern first found at timestamp

        **type**\: str

        **length:** 0..64

    .. attribute:: lost_at

        Pattern first lost at timestamp

        **type**\: str

        **length:** 0..64

    """

    _prefix = 'dwdm-ui-oper'
    _revision = '2015-11-09'

    def __init__(self):
        super(Dwdm.Ports.Port.Prbs.FifteenMinutesBucket.FifteenMinutesStatistics.PrbsEntry, self).__init__()

        self.yang_name = "prbs-entry"
        self.yang_parent_name = "fifteen-minutes-statistics"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        # Leaf-only list entry: no nested containers.
        self._child_classes = OrderedDict([])
        # Leaf table: python attribute -> (YLeaf schema node, accepted python types).
        self._leafs = OrderedDict([
            ('interval_index', (YLeaf(YType.enumeration, 'interval-index'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper', 'G709prbsInterval', '')])),
            ('configured_pattern', (YLeaf(YType.enumeration, 'configured-pattern'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper', 'G709prbsPattern', '')])),
            ('start_at', (YLeaf(YType.str, 'start-at'), ['str'])),
            ('stop_at', (YLeaf(YType.str, 'stop-at'), ['str'])),
            ('received_pattern', (YLeaf(YType.enumeration, 'received-pattern'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper', 'G709prbsPattern', '')])),
            ('bit_error_count', (YLeaf(YType.uint64, 'bit-error-count'), ['int'])),
            ('found_count', (YLeaf(YType.uint64, 'found-count'), ['int'])),
            ('lost_count', (YLeaf(YType.uint64, 'lost-count'), ['int'])),
            ('found_at', (YLeaf(YType.str, 'found-at'), ['str'])),
            ('lost_at', (YLeaf(YType.str, 'lost-at'), ['str'])),
        ])
        # All leaf values start unset; they are populated from the device.
        self.interval_index = None
        self.configured_pattern = None
        self.start_at = None
        self.stop_at = None
        self.received_pattern = None
        self.bit_error_count = None
        self.found_count = None
        self.lost_count = None
        self.found_at = None
        self.lost_at = None
        self._segment_path = lambda: "prbs-entry"
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Route writes through YDK validation with this class's leaf names.
        self._perform_setattr(Dwdm.Ports.Port.Prbs.FifteenMinutesBucket.FifteenMinutesStatistics.PrbsEntry, ['interval_index', 'configured_pattern', 'start_at', 'stop_at', 'received_pattern', 'bit_error_count', 'found_count', 'lost_count', 'found_at', 'lost_at'], name, value)
class Optics(Entity):
    """
    DWDM Port optics operational data.

    .. attribute:: wave_info

        DWDM port wavelength information data

        **type**\: :py:class:`WaveInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Optics.WaveInfo>`

    """

    _prefix = 'dwdm-ui-oper'
    _revision = '2015-11-09'

    def __init__(self):
        super(Dwdm.Ports.Port.Optics, self).__init__()

        self.yang_name = "optics"
        self.yang_parent_name = "port"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        # Single child container: "wave-info" -> attribute wave_info.
        self._child_classes = OrderedDict(
            [("wave-info", ("wave_info", Dwdm.Ports.Port.Optics.WaveInfo))]
        )
        # This container carries no leaves of its own.
        self._leafs = OrderedDict()

        self.wave_info = Dwdm.Ports.Port.Optics.WaveInfo()
        self.wave_info.parent = self
        self._children_name_map["wave_info"] = "wave-info"
        self._segment_path = lambda: "optics"
        self._is_frozen = True

    def __setattr__(self, name, value):
        # No leaf attributes on this container; pass an empty leaf-name list.
        self._perform_setattr(Dwdm.Ports.Port.Optics, [], name, value)


    class WaveInfo(Entity):
        """
        DWDM port wavelength information data.

        .. attribute:: wave_band

            Wavelength band

            **type**\: int

            **range:** 0..4294967295

        .. attribute:: wave_channel_min

            Lowest ITU wavelength channel number supported

            **type**\: int

            **range:** 0..4294967295

        .. attribute:: wave_channel_max

            Highest ITU wavelength channel number supported

            **type**\: int

            **range:** 0..4294967295

        """

        _prefix = 'dwdm-ui-oper'
        _revision = '2015-11-09'

        def __init__(self):
            super(Dwdm.Ports.Port.Optics.WaveInfo, self).__init__()

            self.yang_name = "wave-info"
            self.yang_parent_name = "optics"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_classes = OrderedDict()
            # Leaf table: python attribute -> (YLeaf schema node, python types).
            self._leafs = OrderedDict()
            self._leafs['wave_band'] = (YLeaf(YType.uint32, 'wave-band'), ['int'])
            self._leafs['wave_channel_min'] = (YLeaf(YType.uint32, 'wave-channel-min'), ['int'])
            self._leafs['wave_channel_max'] = (YLeaf(YType.uint32, 'wave-channel-max'), ['int'])

            self.wave_band = None
            self.wave_channel_min = None
            self.wave_channel_max = None
            self._segment_path = lambda: "wave-info"
            self._is_frozen = True

        def __setattr__(self, name, value):
            self._perform_setattr(
                Dwdm.Ports.Port.Optics.WaveInfo,
                ['wave_band', 'wave_channel_min', 'wave_channel_max'],
                name, value)
class Info(Entity):
"""
DWDM port operational data
.. attribute:: g709_info
G709 operational information
**type**\: :py:class:`G709Info <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Info.G709Info>`
.. attribute:: optics_info
Optics operational information
**type**\: :py:class:`OpticsInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Info.OpticsInfo>`
.. attribute:: tdc_info
TDC operational information
**type**\: :py:class:`TdcInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Info.TdcInfo>`
.. attribute:: network_srlg_info
Network SRLG information
**type**\: :py:class:`NetworkSrlgInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Info.NetworkSrlgInfo>`
.. attribute:: proactive
Proactive protection information
**type**\: :py:class:`Proactive <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Info.Proactive>`
.. attribute:: signal_log
Signal log information
**type**\: :py:class:`SignalLog <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Info.SignalLog>`
.. attribute:: controller_state
DWDM controller state\: Up, Down or Administratively Down
**type**\: :py:class:`DwdmControllerState <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.DwdmControllerState>`
.. attribute:: transport_admin_state
DWDM controller TAS state\: IS, OOS, OOS\-MT or IS\-CFG
**type**\: :py:class:`DwdmtasState <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.DwdmtasState>`
.. attribute:: slice_state
DWDM port slice state Up/Down
**type**\: bool
"""
_prefix = 'dwdm-ui-oper'
_revision = '2015-11-09'
def __init__(self):
    """Build the 'info' container: register six child containers, three
    state leaves, and instantiate each child with this entity as parent."""
    super(Dwdm.Ports.Port.Info, self).__init__()

    self.yang_name = "info"
    self.yang_parent_name = "port"
    self.is_top_level_class = False
    self.has_list_ancestor = True
    self.ylist_key_names = []
    # Child containers: YANG name -> (python attribute, implementing class).
    self._child_classes = OrderedDict([("g709-info", ("g709_info", Dwdm.Ports.Port.Info.G709Info)), ("optics-info", ("optics_info", Dwdm.Ports.Port.Info.OpticsInfo)), ("tdc-info", ("tdc_info", Dwdm.Ports.Port.Info.TdcInfo)), ("network-srlg-info", ("network_srlg_info", Dwdm.Ports.Port.Info.NetworkSrlgInfo)), ("proactive", ("proactive", Dwdm.Ports.Port.Info.Proactive)), ("signal-log", ("signal_log", Dwdm.Ports.Port.Info.SignalLog))])
    # Leaf table: python attribute -> (YLeaf schema node, accepted python types).
    self._leafs = OrderedDict([
        ('controller_state', (YLeaf(YType.enumeration, 'controller-state'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper', 'DwdmControllerState', '')])),
        ('transport_admin_state', (YLeaf(YType.enumeration, 'transport-admin-state'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper', 'DwdmtasState', '')])),
        ('slice_state', (YLeaf(YType.boolean, 'slice-state'), ['bool'])),
    ])
    # Leaves start unset; populated when data is read from the device.
    self.controller_state = None
    self.transport_admin_state = None
    self.slice_state = None

    # Instantiate each child container and link it back to this parent.
    self.g709_info = Dwdm.Ports.Port.Info.G709Info()
    self.g709_info.parent = self
    self._children_name_map["g709_info"] = "g709-info"

    self.optics_info = Dwdm.Ports.Port.Info.OpticsInfo()
    self.optics_info.parent = self
    self._children_name_map["optics_info"] = "optics-info"

    self.tdc_info = Dwdm.Ports.Port.Info.TdcInfo()
    self.tdc_info.parent = self
    self._children_name_map["tdc_info"] = "tdc-info"

    self.network_srlg_info = Dwdm.Ports.Port.Info.NetworkSrlgInfo()
    self.network_srlg_info.parent = self
    self._children_name_map["network_srlg_info"] = "network-srlg-info"

    self.proactive = Dwdm.Ports.Port.Info.Proactive()
    self.proactive.parent = self
    self._children_name_map["proactive"] = "proactive"

    self.signal_log = Dwdm.Ports.Port.Info.SignalLog()
    self.signal_log.parent = self
    self._children_name_map["signal_log"] = "signal-log"
    self._segment_path = lambda: "info"
    self._is_frozen = True
def __setattr__(self, name, value):
    """Delegate attribute assignment to YDK's ``Entity._perform_setattr``
    with this class's leaf attribute names."""
    self._perform_setattr(Dwdm.Ports.Port.Info, ['controller_state', 'transport_admin_state', 'slice_state'], name, value)
class G709Info(Entity):
"""
G709 operational information
.. attribute:: fec_mismatch
FEC mismatch alarm
**type**\: :py:class:`FecMismatch <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Info.G709Info.FecMismatch>`
.. attribute:: ec_tca
FEC Corrected bits TCA information
**type**\: :py:class:`EcTca <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Info.G709Info.EcTca>`
.. attribute:: uc_tca
FEC uncorrected words TCA information
**type**\: :py:class:`UcTca <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Info.G709Info.UcTca>`
.. attribute:: otu_info
OTU layer information
**type**\: :py:class:`OtuInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Info.G709Info.OtuInfo>`
.. attribute:: odu_info
ODU layer Information
**type**\: :py:class:`OduInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Info.G709Info.OduInfo>`
.. attribute:: is_g709_enabled
Is G709 framing enabled
**type**\: bool
.. attribute:: is_fec_mode_default
Is Operating FEC Mode Default
**type**\: bool
.. attribute:: fec_mode
FEC information
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: remote_fec_mode
Remote FEC information
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: efec_mode
EFEC information
**type**\: :py:class:`G709efecMode <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.G709efecMode>`
.. attribute:: loopback_mode
Loopback information
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: ec
Corrected bit error counter
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: ec_accum
FEC Corrected bit error accumulated counter
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: uc
FEC Uncorrected words counter
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: fec_ber
pre fec ber calculated
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: fec_ber_man
pre fec ber calculated
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: q
q value calculated
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: q_margin
q margin calculated
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: fe_cstr
FEC BER String
**type**\: str
**length:** 0..64
.. attribute:: qstr
Q String
**type**\: str
**length:** 0..64
.. attribute:: qmargin_str
QMargin String
**type**\: str
**length:** 0..64
.. attribute:: network_port_id
Network port ID
**type**\: str
**length:** 0..65
.. attribute:: network_conn_id
Network connection ID
**type**\: str
**length:** 0..65
.. attribute:: is_prbs_enabled
'true' if Prbs is enabled 'false' otherwise
**type**\: bool
.. attribute:: g709_prbs_mode
Configured mode of PRBS Test
**type**\: :py:class:`G709prbsMode <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.G709prbsMode>`
.. attribute:: g709_prbs_pattern
Pattern of PRBS Test
**type**\: :py:class:`G709prbsPattern <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.G709prbsPattern>`
.. attribute:: prbs_time_stamp
Time stamp for prbs configuration
**type**\: int
**range:** 0..18446744073709551615
"""
_prefix = 'dwdm-ui-oper'
_revision = '2015-11-09'
def __init__(self):
    """Build the 'g709-info' container: five alarm/TCA child containers
    plus the G.709 framing, FEC-counter and PRBS state leaves."""
    super(Dwdm.Ports.Port.Info.G709Info, self).__init__()

    self.yang_name = "g709-info"
    self.yang_parent_name = "info"
    self.is_top_level_class = False
    self.has_list_ancestor = True
    self.ylist_key_names = []
    # Child containers: YANG name -> (python attribute, implementing class).
    self._child_classes = OrderedDict([("fec-mismatch", ("fec_mismatch", Dwdm.Ports.Port.Info.G709Info.FecMismatch)), ("ec-tca", ("ec_tca", Dwdm.Ports.Port.Info.G709Info.EcTca)), ("uc-tca", ("uc_tca", Dwdm.Ports.Port.Info.G709Info.UcTca)), ("otu-info", ("otu_info", Dwdm.Ports.Port.Info.G709Info.OtuInfo)), ("odu-info", ("odu_info", Dwdm.Ports.Port.Info.G709Info.OduInfo))])
    # Leaf table: python attribute -> (YLeaf schema node, accepted python types).
    self._leafs = OrderedDict([
        ('is_g709_enabled', (YLeaf(YType.boolean, 'is-g709-enabled'), ['bool'])),
        ('is_fec_mode_default', (YLeaf(YType.boolean, 'is-fec-mode-default'), ['bool'])),
        ('fec_mode', (YLeaf(YType.int32, 'fec-mode'), ['int'])),
        ('remote_fec_mode', (YLeaf(YType.int32, 'remote-fec-mode'), ['int'])),
        ('efec_mode', (YLeaf(YType.enumeration, 'efec-mode'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper', 'G709efecMode', '')])),
        ('loopback_mode', (YLeaf(YType.int32, 'loopback-mode'), ['int'])),
        ('ec', (YLeaf(YType.uint64, 'ec'), ['int'])),
        ('ec_accum', (YLeaf(YType.uint64, 'ec-accum'), ['int'])),
        ('uc', (YLeaf(YType.uint64, 'uc'), ['int'])),
        ('fec_ber', (YLeaf(YType.uint64, 'fec-ber'), ['int'])),
        ('fec_ber_man', (YLeaf(YType.int32, 'fec-ber-man'), ['int'])),
        ('q', (YLeaf(YType.uint64, 'q'), ['int'])),
        ('q_margin', (YLeaf(YType.uint64, 'q-margin'), ['int'])),
        ('fe_cstr', (YLeaf(YType.str, 'fe-cstr'), ['str'])),
        ('qstr', (YLeaf(YType.str, 'qstr'), ['str'])),
        ('qmargin_str', (YLeaf(YType.str, 'qmargin-str'), ['str'])),
        ('network_port_id', (YLeaf(YType.str, 'network-port-id'), ['str'])),
        ('network_conn_id', (YLeaf(YType.str, 'network-conn-id'), ['str'])),
        ('is_prbs_enabled', (YLeaf(YType.boolean, 'is-prbs-enabled'), ['bool'])),
        ('g709_prbs_mode', (YLeaf(YType.enumeration, 'g709-prbs-mode'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper', 'G709prbsMode', '')])),
        ('g709_prbs_pattern', (YLeaf(YType.enumeration, 'g709-prbs-pattern'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper', 'G709prbsPattern', '')])),
        ('prbs_time_stamp', (YLeaf(YType.uint64, 'prbs-time-stamp'), ['int'])),
    ])
    # Leaves start unset; populated when data is read from the device.
    self.is_g709_enabled = None
    self.is_fec_mode_default = None
    self.fec_mode = None
    self.remote_fec_mode = None
    self.efec_mode = None
    self.loopback_mode = None
    self.ec = None
    self.ec_accum = None
    self.uc = None
    self.fec_ber = None
    self.fec_ber_man = None
    self.q = None
    self.q_margin = None
    self.fe_cstr = None
    self.qstr = None
    self.qmargin_str = None
    self.network_port_id = None
    self.network_conn_id = None
    self.is_prbs_enabled = None
    self.g709_prbs_mode = None
    self.g709_prbs_pattern = None
    self.prbs_time_stamp = None

    # Instantiate each child container and link it back to this parent.
    self.fec_mismatch = Dwdm.Ports.Port.Info.G709Info.FecMismatch()
    self.fec_mismatch.parent = self
    self._children_name_map["fec_mismatch"] = "fec-mismatch"

    self.ec_tca = Dwdm.Ports.Port.Info.G709Info.EcTca()
    self.ec_tca.parent = self
    self._children_name_map["ec_tca"] = "ec-tca"

    self.uc_tca = Dwdm.Ports.Port.Info.G709Info.UcTca()
    self.uc_tca.parent = self
    self._children_name_map["uc_tca"] = "uc-tca"

    self.otu_info = Dwdm.Ports.Port.Info.G709Info.OtuInfo()
    self.otu_info.parent = self
    self._children_name_map["otu_info"] = "otu-info"

    self.odu_info = Dwdm.Ports.Port.Info.G709Info.OduInfo()
    self.odu_info.parent = self
    self._children_name_map["odu_info"] = "odu-info"
    self._segment_path = lambda: "g709-info"
    self._is_frozen = True
def __setattr__(self, name, value):
    """Delegate attribute assignment to YDK's ``Entity._perform_setattr``
    with this class's leaf attribute names."""
    self._perform_setattr(Dwdm.Ports.Port.Info.G709Info, ['is_g709_enabled', 'is_fec_mode_default', 'fec_mode', 'remote_fec_mode', 'efec_mode', 'loopback_mode', 'ec', 'ec_accum', 'uc', 'fec_ber', 'fec_ber_man', 'q', 'q_margin', 'fe_cstr', 'qstr', 'qmargin_str', 'network_port_id', 'network_conn_id', 'is_prbs_enabled', 'g709_prbs_mode', 'g709_prbs_pattern', 'prbs_time_stamp'], name, value)
class FecMismatch(Entity):
    """
    FEC mismatch alarm.

    .. attribute:: reporting_enabled

        Is reporting enabled?

        **type**\: bool

    .. attribute:: is_detected

        Is defect detected?

        **type**\: bool

    .. attribute:: is_asserted

        Is defect declared?

        **type**\: bool

    .. attribute:: counter

        Alarm counter

        **type**\: int

        **range:** 0..18446744073709551615

    """

    _prefix = 'dwdm-ui-oper'
    _revision = '2015-11-09'

    def __init__(self):
        super(Dwdm.Ports.Port.Info.G709Info.FecMismatch, self).__init__()

        self.yang_name = "fec-mismatch"
        self.yang_parent_name = "g709-info"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict()
        # Leaf table: python attribute -> (YLeaf schema node, python types).
        self._leafs = OrderedDict()
        self._leafs['reporting_enabled'] = (YLeaf(YType.boolean, 'reporting-enabled'), ['bool'])
        self._leafs['is_detected'] = (YLeaf(YType.boolean, 'is-detected'), ['bool'])
        self._leafs['is_asserted'] = (YLeaf(YType.boolean, 'is-asserted'), ['bool'])
        self._leafs['counter'] = (YLeaf(YType.uint64, 'counter'), ['int'])

        self.reporting_enabled = None
        self.is_detected = None
        self.is_asserted = None
        self.counter = None
        self._segment_path = lambda: "fec-mismatch"
        self._is_frozen = True

    def __setattr__(self, name, value):
        self._perform_setattr(
            Dwdm.Ports.Port.Info.G709Info.FecMismatch,
            ['reporting_enabled', 'is_detected', 'is_asserted', 'counter'],
            name, value)
class EcTca(Entity):
    """
    FEC Corrected bits TCA information.

    .. attribute:: reporting_enabled

        Is reporting enabled?

        **type**\: bool

    .. attribute:: is_detected

        Is defect detected?

        **type**\: bool

    .. attribute:: is_asserted

        Is defect declared?

        **type**\: bool

    .. attribute:: threshold

        Error threshold power

        **type**\: int

        **range:** \-2147483648..2147483647

    .. attribute:: counter

        Error counter

        **type**\: int

        **range:** 0..18446744073709551615

    """

    _prefix = 'dwdm-ui-oper'
    _revision = '2015-11-09'

    def __init__(self):
        super(Dwdm.Ports.Port.Info.G709Info.EcTca, self).__init__()

        self.yang_name = "ec-tca"
        self.yang_parent_name = "g709-info"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict()
        # (python attribute, yang leaf name, YANG type, python type) per leaf;
        # order matters for on-the-wire encoding, so keep it stable.
        leaf_specs = (
            ('reporting_enabled', 'reporting-enabled', YType.boolean, 'bool'),
            ('is_detected', 'is-detected', YType.boolean, 'bool'),
            ('is_asserted', 'is-asserted', YType.boolean, 'bool'),
            ('threshold', 'threshold', YType.int32, 'int'),
            ('counter', 'counter', YType.uint64, 'int'),
        )
        self._leafs = OrderedDict(
            (attr, (YLeaf(ytype, yname), [ptype]))
            for attr, yname, ytype, ptype in leaf_specs
        )

        self.reporting_enabled = None
        self.is_detected = None
        self.is_asserted = None
        self.threshold = None
        self.counter = None
        self._segment_path = lambda: "ec-tca"
        self._is_frozen = True

    def __setattr__(self, name, value):
        self._perform_setattr(
            Dwdm.Ports.Port.Info.G709Info.EcTca,
            ['reporting_enabled', 'is_detected', 'is_asserted', 'threshold', 'counter'],
            name, value)
class UcTca(Entity):
    """
    FEC uncorrected words TCA information.

    .. attribute:: reporting_enabled

        Is reporting enabled?

        **type**\: bool

    .. attribute:: is_detected

        Is defect detected?

        **type**\: bool

    .. attribute:: is_asserted

        Is defect declared?

        **type**\: bool

    .. attribute:: threshold

        Error threshold power

        **type**\: int

        **range:** \-2147483648..2147483647

    .. attribute:: counter

        Error counter

        **type**\: int

        **range:** 0..18446744073709551615

    """

    _prefix = 'dwdm-ui-oper'
    _revision = '2015-11-09'

    def __init__(self):
        super(Dwdm.Ports.Port.Info.G709Info.UcTca, self).__init__()

        self.yang_name = "uc-tca"
        self.yang_parent_name = "g709-info"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict()
        # Leaf table: python attribute -> (YLeaf schema node, python types).
        self._leafs = OrderedDict()
        self._leafs['reporting_enabled'] = (YLeaf(YType.boolean, 'reporting-enabled'), ['bool'])
        self._leafs['is_detected'] = (YLeaf(YType.boolean, 'is-detected'), ['bool'])
        self._leafs['is_asserted'] = (YLeaf(YType.boolean, 'is-asserted'), ['bool'])
        self._leafs['threshold'] = (YLeaf(YType.int32, 'threshold'), ['int'])
        self._leafs['counter'] = (YLeaf(YType.uint64, 'counter'), ['int'])

        self.reporting_enabled = None
        self.is_detected = None
        self.is_asserted = None
        self.threshold = None
        self.counter = None
        self._segment_path = lambda: "uc-tca"
        self._is_frozen = True

    def __setattr__(self, name, value):
        self._perform_setattr(
            Dwdm.Ports.Port.Info.G709Info.UcTca,
            ['reporting_enabled', 'is_detected', 'is_asserted', 'threshold', 'counter'],
            name, value)
class OtuInfo(Entity):
"""
OTU layer information
.. attribute:: los
Loss of Signal information
**type**\: :py:class:`Los <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Info.G709Info.OtuInfo.Los>`
.. attribute:: lof
Loss of Frame information
**type**\: :py:class:`Lof <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Info.G709Info.OtuInfo.Lof>`
.. attribute:: lom
Loss of MultiFrame information
**type**\: :py:class:`Lom <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Info.G709Info.OtuInfo.Lom>`
.. attribute:: oof
Out of Frame information
**type**\: :py:class:`Oof <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Info.G709Info.OtuInfo.Oof>`
.. attribute:: oom
Out of MultiFrame information
**type**\: :py:class:`Oom <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Info.G709Info.OtuInfo.Oom>`
.. attribute:: ais
Alarm Indication Signal information
**type**\: :py:class:`Ais <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Info.G709Info.OtuInfo.Ais>`
.. attribute:: iae
Incoming Alignment Error information
**type**\: :py:class:`Iae <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Info.G709Info.OtuInfo.Iae>`
.. attribute:: bdi
Backward Defect Indication information
**type**\: :py:class:`Bdi <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Info.G709Info.OtuInfo.Bdi>`
.. attribute:: tim
Trace Identifier Mismatch information
**type**\: :py:class:`Tim <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Info.G709Info.OtuInfo.Tim>`
.. attribute:: eoc
GCC End of Channel information
**type**\: :py:class:`Eoc <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Info.G709Info.OtuInfo.Eoc>`
.. attribute:: sf_ber
Signal Fail BER information
**type**\: :py:class:`SfBer <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Info.G709Info.OtuInfo.SfBer>`
.. attribute:: sd_ber
Signal Degrade BER information
**type**\: :py:class:`SdBer <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Info.G709Info.OtuInfo.SdBer>`
.. attribute:: prefec_sf_ber
Prefec Signal Fail BER information
**type**\: :py:class:`PrefecSfBer <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Info.G709Info.OtuInfo.PrefecSfBer>`
.. attribute:: prefec_sd_ber
Prefec Signal Degrade BER information
**type**\: :py:class:`PrefecSdBer <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Info.G709Info.OtuInfo.PrefecSdBer>`
.. attribute:: bbe_tca
Backgound Block Error TCA information
**type**\: :py:class:`BbeTca <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Info.G709Info.OtuInfo.BbeTca>`
.. attribute:: es_tca
Errored Seconds TCA information
**type**\: :py:class:`EsTca <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Info.G709Info.OtuInfo.EsTca>`
.. attribute:: bbe
Backgound Block Error information
**type**\: :py:class:`Bbe <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Info.G709Info.OtuInfo.Bbe>`
.. attribute:: es
Errored Seconds information
**type**\: :py:class:`Es <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Info.G709Info.OtuInfo.Es>`
.. attribute:: ses
Severly Errored Seconds information
**type**\: :py:class:`Ses <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Info.G709Info.OtuInfo.Ses>`
.. attribute:: uas
Unavailability Seconds information
**type**\: :py:class:`Uas <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Info.G709Info.OtuInfo.Uas>`
.. attribute:: fc
Failure Count information
**type**\: :py:class:`Fc <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Info.G709Info.OtuInfo.Fc>`
.. attribute:: bber
Backgound Block Error Rate information
**type**\: :py:class:`Bber <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Info.G709Info.OtuInfo.Bber>`
.. attribute:: esr
Errored Seconds Rate information
**type**\: :py:class:`Esr <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Info.G709Info.OtuInfo.Esr>`
.. attribute:: sesr
Severly Errored Seconds Rate information
**type**\: :py:class:`Sesr <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Info.G709Info.OtuInfo.Sesr>`
.. attribute:: tti
Trail Trace Identifier information
**type**\: :py:class:`Tti <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Info.G709Info.OtuInfo.Tti>`
.. attribute:: bei
Backward Error Indication counter
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: bip
Bit Interleave Parity(BIP) counter
**type**\: int
**range:** 0..18446744073709551615
"""
_prefix = 'dwdm-ui-oper'
_revision = '2015-11-09'
def __init__(self):
    """Build the 'otu-info' container: 25 alarm/BER/PM child containers
    plus the BEI and BIP counter leaves."""
    super(Dwdm.Ports.Port.Info.G709Info.OtuInfo, self).__init__()

    self.yang_name = "otu-info"
    self.yang_parent_name = "g709-info"
    self.is_top_level_class = False
    self.has_list_ancestor = True
    self.ylist_key_names = []
    # Child containers: YANG name -> (python attribute, implementing class).
    self._child_classes = OrderedDict([("los", ("los", Dwdm.Ports.Port.Info.G709Info.OtuInfo.Los)), ("lof", ("lof", Dwdm.Ports.Port.Info.G709Info.OtuInfo.Lof)), ("lom", ("lom", Dwdm.Ports.Port.Info.G709Info.OtuInfo.Lom)), ("oof", ("oof", Dwdm.Ports.Port.Info.G709Info.OtuInfo.Oof)), ("oom", ("oom", Dwdm.Ports.Port.Info.G709Info.OtuInfo.Oom)), ("ais", ("ais", Dwdm.Ports.Port.Info.G709Info.OtuInfo.Ais)), ("iae", ("iae", Dwdm.Ports.Port.Info.G709Info.OtuInfo.Iae)), ("bdi", ("bdi", Dwdm.Ports.Port.Info.G709Info.OtuInfo.Bdi)), ("tim", ("tim", Dwdm.Ports.Port.Info.G709Info.OtuInfo.Tim)), ("eoc", ("eoc", Dwdm.Ports.Port.Info.G709Info.OtuInfo.Eoc)), ("sf-ber", ("sf_ber", Dwdm.Ports.Port.Info.G709Info.OtuInfo.SfBer)), ("sd-ber", ("sd_ber", Dwdm.Ports.Port.Info.G709Info.OtuInfo.SdBer)), ("prefec-sf-ber", ("prefec_sf_ber", Dwdm.Ports.Port.Info.G709Info.OtuInfo.PrefecSfBer)), ("prefec-sd-ber", ("prefec_sd_ber", Dwdm.Ports.Port.Info.G709Info.OtuInfo.PrefecSdBer)), ("bbe-tca", ("bbe_tca", Dwdm.Ports.Port.Info.G709Info.OtuInfo.BbeTca)), ("es-tca", ("es_tca", Dwdm.Ports.Port.Info.G709Info.OtuInfo.EsTca)), ("bbe", ("bbe", Dwdm.Ports.Port.Info.G709Info.OtuInfo.Bbe)), ("es", ("es", Dwdm.Ports.Port.Info.G709Info.OtuInfo.Es)), ("ses", ("ses", Dwdm.Ports.Port.Info.G709Info.OtuInfo.Ses)), ("uas", ("uas", Dwdm.Ports.Port.Info.G709Info.OtuInfo.Uas)), ("fc", ("fc", Dwdm.Ports.Port.Info.G709Info.OtuInfo.Fc)), ("bber", ("bber", Dwdm.Ports.Port.Info.G709Info.OtuInfo.Bber)), ("esr", ("esr", Dwdm.Ports.Port.Info.G709Info.OtuInfo.Esr)), ("sesr", ("sesr", Dwdm.Ports.Port.Info.G709Info.OtuInfo.Sesr)), ("tti", ("tti", Dwdm.Ports.Port.Info.G709Info.OtuInfo.Tti))])
    # Leaf table: python attribute -> (YLeaf schema node, accepted python types).
    self._leafs = OrderedDict([
        ('bei', (YLeaf(YType.uint64, 'bei'), ['int'])),
        ('bip', (YLeaf(YType.uint64, 'bip'), ['int'])),
    ])
    # Counter leaves start unset; populated from the device.
    self.bei = None
    self.bip = None

    # Instantiate each child container and link it back to this parent.
    self.los = Dwdm.Ports.Port.Info.G709Info.OtuInfo.Los()
    self.los.parent = self
    self._children_name_map["los"] = "los"

    self.lof = Dwdm.Ports.Port.Info.G709Info.OtuInfo.Lof()
    self.lof.parent = self
    self._children_name_map["lof"] = "lof"

    self.lom = Dwdm.Ports.Port.Info.G709Info.OtuInfo.Lom()
    self.lom.parent = self
    self._children_name_map["lom"] = "lom"

    self.oof = Dwdm.Ports.Port.Info.G709Info.OtuInfo.Oof()
    self.oof.parent = self
    self._children_name_map["oof"] = "oof"

    self.oom = Dwdm.Ports.Port.Info.G709Info.OtuInfo.Oom()
    self.oom.parent = self
    self._children_name_map["oom"] = "oom"

    self.ais = Dwdm.Ports.Port.Info.G709Info.OtuInfo.Ais()
    self.ais.parent = self
    self._children_name_map["ais"] = "ais"

    self.iae = Dwdm.Ports.Port.Info.G709Info.OtuInfo.Iae()
    self.iae.parent = self
    self._children_name_map["iae"] = "iae"

    self.bdi = Dwdm.Ports.Port.Info.G709Info.OtuInfo.Bdi()
    self.bdi.parent = self
    self._children_name_map["bdi"] = "bdi"

    self.tim = Dwdm.Ports.Port.Info.G709Info.OtuInfo.Tim()
    self.tim.parent = self
    self._children_name_map["tim"] = "tim"

    self.eoc = Dwdm.Ports.Port.Info.G709Info.OtuInfo.Eoc()
    self.eoc.parent = self
    self._children_name_map["eoc"] = "eoc"

    self.sf_ber = Dwdm.Ports.Port.Info.G709Info.OtuInfo.SfBer()
    self.sf_ber.parent = self
    self._children_name_map["sf_ber"] = "sf-ber"

    self.sd_ber = Dwdm.Ports.Port.Info.G709Info.OtuInfo.SdBer()
    self.sd_ber.parent = self
    self._children_name_map["sd_ber"] = "sd-ber"

    self.prefec_sf_ber = Dwdm.Ports.Port.Info.G709Info.OtuInfo.PrefecSfBer()
    self.prefec_sf_ber.parent = self
    self._children_name_map["prefec_sf_ber"] = "prefec-sf-ber"

    self.prefec_sd_ber = Dwdm.Ports.Port.Info.G709Info.OtuInfo.PrefecSdBer()
    self.prefec_sd_ber.parent = self
    self._children_name_map["prefec_sd_ber"] = "prefec-sd-ber"

    self.bbe_tca = Dwdm.Ports.Port.Info.G709Info.OtuInfo.BbeTca()
    self.bbe_tca.parent = self
    self._children_name_map["bbe_tca"] = "bbe-tca"

    self.es_tca = Dwdm.Ports.Port.Info.G709Info.OtuInfo.EsTca()
    self.es_tca.parent = self
    self._children_name_map["es_tca"] = "es-tca"

    self.bbe = Dwdm.Ports.Port.Info.G709Info.OtuInfo.Bbe()
    self.bbe.parent = self
    self._children_name_map["bbe"] = "bbe"

    self.es = Dwdm.Ports.Port.Info.G709Info.OtuInfo.Es()
    self.es.parent = self
    self._children_name_map["es"] = "es"

    self.ses = Dwdm.Ports.Port.Info.G709Info.OtuInfo.Ses()
    self.ses.parent = self
    self._children_name_map["ses"] = "ses"

    self.uas = Dwdm.Ports.Port.Info.G709Info.OtuInfo.Uas()
    self.uas.parent = self
    self._children_name_map["uas"] = "uas"

    self.fc = Dwdm.Ports.Port.Info.G709Info.OtuInfo.Fc()
    self.fc.parent = self
    self._children_name_map["fc"] = "fc"

    self.bber = Dwdm.Ports.Port.Info.G709Info.OtuInfo.Bber()
    self.bber.parent = self
    self._children_name_map["bber"] = "bber"

    self.esr = Dwdm.Ports.Port.Info.G709Info.OtuInfo.Esr()
    self.esr.parent = self
    self._children_name_map["esr"] = "esr"

    self.sesr = Dwdm.Ports.Port.Info.G709Info.OtuInfo.Sesr()
    self.sesr.parent = self
    self._children_name_map["sesr"] = "sesr"

    self.tti = Dwdm.Ports.Port.Info.G709Info.OtuInfo.Tti()
    self.tti.parent = self
    self._children_name_map["tti"] = "tti"
    self._segment_path = lambda: "otu-info"
    self._is_frozen = True
def __setattr__(self, name, value):
    """Delegate attribute assignment to YDK's ``Entity._perform_setattr``
    with this class's leaf attribute names."""
    self._perform_setattr(Dwdm.Ports.Port.Info.G709Info.OtuInfo, ['bei', 'bip'], name, value)
class Los(Entity):
    """
    Loss of Signal information.

    .. attribute:: reporting_enabled

        Is reporting enabled?

        **type**\: bool

    .. attribute:: is_detected

        Is defect detected?

        **type**\: bool

    .. attribute:: is_asserted

        Is defect declared?

        **type**\: bool

    .. attribute:: counter

        Alarm counter

        **type**\: int

        **range:** 0..18446744073709551615

    """

    _prefix = 'dwdm-ui-oper'
    _revision = '2015-11-09'

    def __init__(self):
        super(Dwdm.Ports.Port.Info.G709Info.OtuInfo.Los, self).__init__()

        self.yang_name = "los"
        self.yang_parent_name = "otu-info"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict()
        # (python attribute, yang leaf name, YANG type, python type) per leaf;
        # order matters for on-the-wire encoding, so keep it stable.
        leaf_specs = (
            ('reporting_enabled', 'reporting-enabled', YType.boolean, 'bool'),
            ('is_detected', 'is-detected', YType.boolean, 'bool'),
            ('is_asserted', 'is-asserted', YType.boolean, 'bool'),
            ('counter', 'counter', YType.uint64, 'int'),
        )
        self._leafs = OrderedDict(
            (attr, (YLeaf(ytype, yname), [ptype]))
            for attr, yname, ytype, ptype in leaf_specs
        )

        self.reporting_enabled = None
        self.is_detected = None
        self.is_asserted = None
        self.counter = None
        self._segment_path = lambda: "los"
        self._is_frozen = True

    def __setattr__(self, name, value):
        self._perform_setattr(
            Dwdm.Ports.Port.Info.G709Info.OtuInfo.Los,
            ['reporting_enabled', 'is_detected', 'is_asserted', 'counter'],
            name, value)
class Lof(Entity):
    """
    Loss of Frame information.

    .. attribute:: reporting_enabled

        Is reporting enabled?

        **type**\: bool

    .. attribute:: is_detected

        Is defect detected?

        **type**\: bool

    .. attribute:: is_asserted

        Is defect declared?

        **type**\: bool

    .. attribute:: counter

        Alarm counter

        **type**\: int

        **range:** 0..18446744073709551615

    """

    _prefix = 'dwdm-ui-oper'
    _revision = '2015-11-09'

    def __init__(self):
        super(Dwdm.Ports.Port.Info.G709Info.OtuInfo.Lof, self).__init__()

        self.yang_name = "lof"
        self.yang_parent_name = "otu-info"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict()
        # Leaf table: python attribute -> (YLeaf schema node, python types).
        self._leafs = OrderedDict()
        self._leafs['reporting_enabled'] = (YLeaf(YType.boolean, 'reporting-enabled'), ['bool'])
        self._leafs['is_detected'] = (YLeaf(YType.boolean, 'is-detected'), ['bool'])
        self._leafs['is_asserted'] = (YLeaf(YType.boolean, 'is-asserted'), ['bool'])
        self._leafs['counter'] = (YLeaf(YType.uint64, 'counter'), ['int'])

        self.reporting_enabled = None
        self.is_detected = None
        self.is_asserted = None
        self.counter = None
        self._segment_path = lambda: "lof"
        self._is_frozen = True

    def __setattr__(self, name, value):
        self._perform_setattr(
            Dwdm.Ports.Port.Info.G709Info.OtuInfo.Lof,
            ['reporting_enabled', 'is_detected', 'is_asserted', 'counter'],
            name, value)
class Lom(Entity):
    """
    Loss of MultiFrame information.

    .. attribute:: reporting_enabled

        Is reporting enabled?

        **type**\: bool

    .. attribute:: is_detected

        Is defect detected?

        **type**\: bool

    .. attribute:: is_asserted

        Is defect declared?

        **type**\: bool

    .. attribute:: counter

        Alarm counter

        **type**\: int

        **range:** 0..18446744073709551615

    """

    _prefix = 'dwdm-ui-oper'
    _revision = '2015-11-09'

    def __init__(self):
        super(Dwdm.Ports.Port.Info.G709Info.OtuInfo.Lom, self).__init__()

        self.yang_name = "lom"
        self.yang_parent_name = "otu-info"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict()
        # (python attribute, yang leaf name, YANG type, python type) per leaf;
        # order matters for on-the-wire encoding, so keep it stable.
        leaf_specs = (
            ('reporting_enabled', 'reporting-enabled', YType.boolean, 'bool'),
            ('is_detected', 'is-detected', YType.boolean, 'bool'),
            ('is_asserted', 'is-asserted', YType.boolean, 'bool'),
            ('counter', 'counter', YType.uint64, 'int'),
        )
        self._leafs = OrderedDict(
            (attr, (YLeaf(ytype, yname), [ptype]))
            for attr, yname, ytype, ptype in leaf_specs
        )

        self.reporting_enabled = None
        self.is_detected = None
        self.is_asserted = None
        self.counter = None
        self._segment_path = lambda: "lom"
        self._is_frozen = True

    def __setattr__(self, name, value):
        self._perform_setattr(
            Dwdm.Ports.Port.Info.G709Info.OtuInfo.Lom,
            ['reporting_enabled', 'is_detected', 'is_asserted', 'counter'],
            name, value)
class Oof(Entity):
    """
    Out of Frame information.

    .. attribute:: reporting_enabled

        Is reporting enabled?

        **type**\: bool

    .. attribute:: is_detected

        Is defect detected?

        **type**\: bool

    .. attribute:: is_asserted

        Is defect declared?

        **type**\: bool

    .. attribute:: counter

        Alarm counter

        **type**\: int

        **range:** 0..18446744073709551615

    """

    # YANG module metadata of the generating model.
    _prefix = 'dwdm-ui-oper'
    _revision = '2015-11-09'

    def __init__(self):
        super(Dwdm.Ports.Port.Info.G709Info.OtuInfo.Oof, self).__init__()

        self.yang_name = "oof"
        self.yang_parent_name = "otu-info"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        # No child containers; all data lives in the leaves below.
        self._child_classes = OrderedDict([])
        # Leaf name -> (YLeaf(YANG type, YANG leaf name), accepted python types).
        self._leafs = OrderedDict([
            ('reporting_enabled', (YLeaf(YType.boolean, 'reporting-enabled'), ['bool'])),
            ('is_detected', (YLeaf(YType.boolean, 'is-detected'), ['bool'])),
            ('is_asserted', (YLeaf(YType.boolean, 'is-asserted'), ['bool'])),
            ('counter', (YLeaf(YType.uint64, 'counter'), ['int'])),
        ])
        self.reporting_enabled = None
        self.is_detected = None
        self.is_asserted = None
        self.counter = None
        self._segment_path = lambda: "oof"
        # Set last — presumably marks init complete for _perform_setattr (confirm in Entity).
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Delegate every assignment to Entity._perform_setattr with this class's leaf names.
        self._perform_setattr(Dwdm.Ports.Port.Info.G709Info.OtuInfo.Oof, ['reporting_enabled', 'is_detected', 'is_asserted', 'counter'], name, value)
class Oom(Entity):
    """
    Out of MultiFrame information.

    .. attribute:: reporting_enabled

        Is reporting enabled?

        **type**\: bool

    .. attribute:: is_detected

        Is defect detected?

        **type**\: bool

    .. attribute:: is_asserted

        Is defect declared?

        **type**\: bool

    .. attribute:: counter

        Alarm counter

        **type**\: int

        **range:** 0..18446744073709551615

    """

    # YANG module metadata of the generating model.
    _prefix = 'dwdm-ui-oper'
    _revision = '2015-11-09'

    def __init__(self):
        super(Dwdm.Ports.Port.Info.G709Info.OtuInfo.Oom, self).__init__()

        self.yang_name = "oom"
        self.yang_parent_name = "otu-info"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        # No child containers; all data lives in the leaves below.
        self._child_classes = OrderedDict([])
        # Leaf name -> (YLeaf(YANG type, YANG leaf name), accepted python types).
        self._leafs = OrderedDict([
            ('reporting_enabled', (YLeaf(YType.boolean, 'reporting-enabled'), ['bool'])),
            ('is_detected', (YLeaf(YType.boolean, 'is-detected'), ['bool'])),
            ('is_asserted', (YLeaf(YType.boolean, 'is-asserted'), ['bool'])),
            ('counter', (YLeaf(YType.uint64, 'counter'), ['int'])),
        ])
        self.reporting_enabled = None
        self.is_detected = None
        self.is_asserted = None
        self.counter = None
        self._segment_path = lambda: "oom"
        # Set last — presumably marks init complete for _perform_setattr (confirm in Entity).
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Delegate every assignment to Entity._perform_setattr with this class's leaf names.
        self._perform_setattr(Dwdm.Ports.Port.Info.G709Info.OtuInfo.Oom, ['reporting_enabled', 'is_detected', 'is_asserted', 'counter'], name, value)
class Ais(Entity):
    """
    Alarm Indication Signal information.

    .. attribute:: reporting_enabled

        Is reporting enabled?

        **type**\: bool

    .. attribute:: is_detected

        Is defect detected?

        **type**\: bool

    .. attribute:: is_asserted

        Is defect declared?

        **type**\: bool

    .. attribute:: counter

        Alarm counter

        **type**\: int

        **range:** 0..18446744073709551615

    """

    # YANG module metadata of the generating model.
    _prefix = 'dwdm-ui-oper'
    _revision = '2015-11-09'

    def __init__(self):
        super(Dwdm.Ports.Port.Info.G709Info.OtuInfo.Ais, self).__init__()

        self.yang_name = "ais"
        self.yang_parent_name = "otu-info"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        # No child containers; all data lives in the leaves below.
        self._child_classes = OrderedDict([])
        # Leaf name -> (YLeaf(YANG type, YANG leaf name), accepted python types).
        self._leafs = OrderedDict([
            ('reporting_enabled', (YLeaf(YType.boolean, 'reporting-enabled'), ['bool'])),
            ('is_detected', (YLeaf(YType.boolean, 'is-detected'), ['bool'])),
            ('is_asserted', (YLeaf(YType.boolean, 'is-asserted'), ['bool'])),
            ('counter', (YLeaf(YType.uint64, 'counter'), ['int'])),
        ])
        self.reporting_enabled = None
        self.is_detected = None
        self.is_asserted = None
        self.counter = None
        self._segment_path = lambda: "ais"
        # Set last — presumably marks init complete for _perform_setattr (confirm in Entity).
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Delegate every assignment to Entity._perform_setattr with this class's leaf names.
        self._perform_setattr(Dwdm.Ports.Port.Info.G709Info.OtuInfo.Ais, ['reporting_enabled', 'is_detected', 'is_asserted', 'counter'], name, value)
class Iae(Entity):
    """
    Incoming Alignment Error information.

    .. attribute:: reporting_enabled

        Is reporting enabled?

        **type**\: bool

    .. attribute:: is_detected

        Is defect detected?

        **type**\: bool

    .. attribute:: is_asserted

        Is defect declared?

        **type**\: bool

    .. attribute:: counter

        Alarm counter

        **type**\: int

        **range:** 0..18446744073709551615

    """

    # YANG module metadata of the generating model.
    _prefix = 'dwdm-ui-oper'
    _revision = '2015-11-09'

    def __init__(self):
        super(Dwdm.Ports.Port.Info.G709Info.OtuInfo.Iae, self).__init__()

        self.yang_name = "iae"
        self.yang_parent_name = "otu-info"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        # No child containers; all data lives in the leaves below.
        self._child_classes = OrderedDict([])
        # Leaf name -> (YLeaf(YANG type, YANG leaf name), accepted python types).
        self._leafs = OrderedDict([
            ('reporting_enabled', (YLeaf(YType.boolean, 'reporting-enabled'), ['bool'])),
            ('is_detected', (YLeaf(YType.boolean, 'is-detected'), ['bool'])),
            ('is_asserted', (YLeaf(YType.boolean, 'is-asserted'), ['bool'])),
            ('counter', (YLeaf(YType.uint64, 'counter'), ['int'])),
        ])
        self.reporting_enabled = None
        self.is_detected = None
        self.is_asserted = None
        self.counter = None
        self._segment_path = lambda: "iae"
        # Set last — presumably marks init complete for _perform_setattr (confirm in Entity).
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Delegate every assignment to Entity._perform_setattr with this class's leaf names.
        self._perform_setattr(Dwdm.Ports.Port.Info.G709Info.OtuInfo.Iae, ['reporting_enabled', 'is_detected', 'is_asserted', 'counter'], name, value)
class Bdi(Entity):
    """
    Backward Defect Indication information.

    .. attribute:: reporting_enabled

        Is reporting enabled?

        **type**\: bool

    .. attribute:: is_detected

        Is defect detected?

        **type**\: bool

    .. attribute:: is_asserted

        Is defect declared?

        **type**\: bool

    .. attribute:: counter

        Alarm counter

        **type**\: int

        **range:** 0..18446744073709551615

    """

    # YANG module metadata of the generating model.
    _prefix = 'dwdm-ui-oper'
    _revision = '2015-11-09'

    def __init__(self):
        super(Dwdm.Ports.Port.Info.G709Info.OtuInfo.Bdi, self).__init__()

        self.yang_name = "bdi"
        self.yang_parent_name = "otu-info"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        # No child containers; all data lives in the leaves below.
        self._child_classes = OrderedDict([])
        # Leaf name -> (YLeaf(YANG type, YANG leaf name), accepted python types).
        self._leafs = OrderedDict([
            ('reporting_enabled', (YLeaf(YType.boolean, 'reporting-enabled'), ['bool'])),
            ('is_detected', (YLeaf(YType.boolean, 'is-detected'), ['bool'])),
            ('is_asserted', (YLeaf(YType.boolean, 'is-asserted'), ['bool'])),
            ('counter', (YLeaf(YType.uint64, 'counter'), ['int'])),
        ])
        self.reporting_enabled = None
        self.is_detected = None
        self.is_asserted = None
        self.counter = None
        self._segment_path = lambda: "bdi"
        # Set last — presumably marks init complete for _perform_setattr (confirm in Entity).
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Delegate every assignment to Entity._perform_setattr with this class's leaf names.
        self._perform_setattr(Dwdm.Ports.Port.Info.G709Info.OtuInfo.Bdi, ['reporting_enabled', 'is_detected', 'is_asserted', 'counter'], name, value)
class Tim(Entity):
    """
    Trace Identifier Mismatch information.

    .. attribute:: reporting_enabled

        Is reporting enabled?

        **type**\: bool

    .. attribute:: is_detected

        Is defect detected?

        **type**\: bool

    .. attribute:: is_asserted

        Is defect declared?

        **type**\: bool

    .. attribute:: counter

        Alarm counter

        **type**\: int

        **range:** 0..18446744073709551615

    """

    # YANG module metadata of the generating model.
    _prefix = 'dwdm-ui-oper'
    _revision = '2015-11-09'

    def __init__(self):
        super(Dwdm.Ports.Port.Info.G709Info.OtuInfo.Tim, self).__init__()

        self.yang_name = "tim"
        self.yang_parent_name = "otu-info"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        # No child containers; all data lives in the leaves below.
        self._child_classes = OrderedDict([])
        # Leaf name -> (YLeaf(YANG type, YANG leaf name), accepted python types).
        self._leafs = OrderedDict([
            ('reporting_enabled', (YLeaf(YType.boolean, 'reporting-enabled'), ['bool'])),
            ('is_detected', (YLeaf(YType.boolean, 'is-detected'), ['bool'])),
            ('is_asserted', (YLeaf(YType.boolean, 'is-asserted'), ['bool'])),
            ('counter', (YLeaf(YType.uint64, 'counter'), ['int'])),
        ])
        self.reporting_enabled = None
        self.is_detected = None
        self.is_asserted = None
        self.counter = None
        self._segment_path = lambda: "tim"
        # Set last — presumably marks init complete for _perform_setattr (confirm in Entity).
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Delegate every assignment to Entity._perform_setattr with this class's leaf names.
        self._perform_setattr(Dwdm.Ports.Port.Info.G709Info.OtuInfo.Tim, ['reporting_enabled', 'is_detected', 'is_asserted', 'counter'], name, value)
class Eoc(Entity):
    """
    GCC End of Channel information.

    .. attribute:: reporting_enabled

        Is reporting enabled?

        **type**\: bool

    .. attribute:: is_detected

        Is defect detected?

        **type**\: bool

    .. attribute:: is_asserted

        Is defect declared?

        **type**\: bool

    .. attribute:: counter

        Alarm counter

        **type**\: int

        **range:** 0..18446744073709551615

    """

    # YANG module metadata of the generating model.
    _prefix = 'dwdm-ui-oper'
    _revision = '2015-11-09'

    def __init__(self):
        super(Dwdm.Ports.Port.Info.G709Info.OtuInfo.Eoc, self).__init__()

        self.yang_name = "eoc"
        self.yang_parent_name = "otu-info"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        # No child containers; all data lives in the leaves below.
        self._child_classes = OrderedDict([])
        # Leaf name -> (YLeaf(YANG type, YANG leaf name), accepted python types).
        self._leafs = OrderedDict([
            ('reporting_enabled', (YLeaf(YType.boolean, 'reporting-enabled'), ['bool'])),
            ('is_detected', (YLeaf(YType.boolean, 'is-detected'), ['bool'])),
            ('is_asserted', (YLeaf(YType.boolean, 'is-asserted'), ['bool'])),
            ('counter', (YLeaf(YType.uint64, 'counter'), ['int'])),
        ])
        self.reporting_enabled = None
        self.is_detected = None
        self.is_asserted = None
        self.counter = None
        self._segment_path = lambda: "eoc"
        # Set last — presumably marks init complete for _perform_setattr (confirm in Entity).
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Delegate every assignment to Entity._perform_setattr with this class's leaf names.
        self._perform_setattr(Dwdm.Ports.Port.Info.G709Info.OtuInfo.Eoc, ['reporting_enabled', 'is_detected', 'is_asserted', 'counter'], name, value)
class SfBer(Entity):
    """
    Signal Fail BER information.

    .. attribute:: reporting_enabled

        Is reporting enabled?

        **type**\: bool

    .. attribute:: is_detected

        Is defect detected?

        **type**\: bool

    .. attribute:: is_asserted

        Is defect declared?

        **type**\: bool

    .. attribute:: threshold

        Error threshold power

        **type**\: int

        **range:** \-2147483648..2147483647

    .. attribute:: counter

        Error counter

        **type**\: int

        **range:** 0..18446744073709551615

    """

    # YANG module metadata of the generating model.
    _prefix = 'dwdm-ui-oper'
    _revision = '2015-11-09'

    def __init__(self):
        super(Dwdm.Ports.Port.Info.G709Info.OtuInfo.SfBer, self).__init__()

        self.yang_name = "sf-ber"
        self.yang_parent_name = "otu-info"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        # No child containers; all data lives in the leaves below.
        self._child_classes = OrderedDict([])
        # Leaf name -> (YLeaf(YANG type, YANG leaf name), accepted python types).
        self._leafs = OrderedDict([
            ('reporting_enabled', (YLeaf(YType.boolean, 'reporting-enabled'), ['bool'])),
            ('is_detected', (YLeaf(YType.boolean, 'is-detected'), ['bool'])),
            ('is_asserted', (YLeaf(YType.boolean, 'is-asserted'), ['bool'])),
            ('threshold', (YLeaf(YType.int32, 'threshold'), ['int'])),
            ('counter', (YLeaf(YType.uint64, 'counter'), ['int'])),
        ])
        self.reporting_enabled = None
        self.is_detected = None
        self.is_asserted = None
        self.threshold = None
        self.counter = None
        self._segment_path = lambda: "sf-ber"
        # Set last — presumably marks init complete for _perform_setattr (confirm in Entity).
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Delegate every assignment to Entity._perform_setattr with this class's leaf names.
        self._perform_setattr(Dwdm.Ports.Port.Info.G709Info.OtuInfo.SfBer, ['reporting_enabled', 'is_detected', 'is_asserted', 'threshold', 'counter'], name, value)
class SdBer(Entity):
    """
    Signal Degrade BER information.

    .. attribute:: reporting_enabled

        Is reporting enabled?

        **type**\: bool

    .. attribute:: is_detected

        Is defect detected?

        **type**\: bool

    .. attribute:: is_asserted

        Is defect declared?

        **type**\: bool

    .. attribute:: threshold

        Error threshold power

        **type**\: int

        **range:** \-2147483648..2147483647

    .. attribute:: counter

        Error counter

        **type**\: int

        **range:** 0..18446744073709551615

    """

    # YANG module metadata of the generating model.
    _prefix = 'dwdm-ui-oper'
    _revision = '2015-11-09'

    def __init__(self):
        super(Dwdm.Ports.Port.Info.G709Info.OtuInfo.SdBer, self).__init__()

        self.yang_name = "sd-ber"
        self.yang_parent_name = "otu-info"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        # No child containers; all data lives in the leaves below.
        self._child_classes = OrderedDict([])
        # Leaf name -> (YLeaf(YANG type, YANG leaf name), accepted python types).
        self._leafs = OrderedDict([
            ('reporting_enabled', (YLeaf(YType.boolean, 'reporting-enabled'), ['bool'])),
            ('is_detected', (YLeaf(YType.boolean, 'is-detected'), ['bool'])),
            ('is_asserted', (YLeaf(YType.boolean, 'is-asserted'), ['bool'])),
            ('threshold', (YLeaf(YType.int32, 'threshold'), ['int'])),
            ('counter', (YLeaf(YType.uint64, 'counter'), ['int'])),
        ])
        self.reporting_enabled = None
        self.is_detected = None
        self.is_asserted = None
        self.threshold = None
        self.counter = None
        self._segment_path = lambda: "sd-ber"
        # Set last — presumably marks init complete for _perform_setattr (confirm in Entity).
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Delegate every assignment to Entity._perform_setattr with this class's leaf names.
        self._perform_setattr(Dwdm.Ports.Port.Info.G709Info.OtuInfo.SdBer, ['reporting_enabled', 'is_detected', 'is_asserted', 'threshold', 'counter'], name, value)
class PrefecSfBer(Entity):
    """
    Prefec Signal Fail BER information.

    .. attribute:: reporting_enabled

        Is reporting enabled?

        **type**\: bool

    .. attribute:: is_detected

        Is defect detected?

        **type**\: bool

    .. attribute:: is_asserted

        Is defect declared?

        **type**\: bool

    .. attribute:: threshold

        Error threshold power

        **type**\: int

        **range:** \-2147483648..2147483647

    .. attribute:: counter

        Error counter

        **type**\: int

        **range:** 0..18446744073709551615

    """

    # YANG module metadata of the generating model.
    _prefix = 'dwdm-ui-oper'
    _revision = '2015-11-09'

    def __init__(self):
        super(Dwdm.Ports.Port.Info.G709Info.OtuInfo.PrefecSfBer, self).__init__()

        self.yang_name = "prefec-sf-ber"
        self.yang_parent_name = "otu-info"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        # No child containers; all data lives in the leaves below.
        self._child_classes = OrderedDict([])
        # Leaf name -> (YLeaf(YANG type, YANG leaf name), accepted python types).
        self._leafs = OrderedDict([
            ('reporting_enabled', (YLeaf(YType.boolean, 'reporting-enabled'), ['bool'])),
            ('is_detected', (YLeaf(YType.boolean, 'is-detected'), ['bool'])),
            ('is_asserted', (YLeaf(YType.boolean, 'is-asserted'), ['bool'])),
            ('threshold', (YLeaf(YType.int32, 'threshold'), ['int'])),
            ('counter', (YLeaf(YType.uint64, 'counter'), ['int'])),
        ])
        self.reporting_enabled = None
        self.is_detected = None
        self.is_asserted = None
        self.threshold = None
        self.counter = None
        self._segment_path = lambda: "prefec-sf-ber"
        # Set last — presumably marks init complete for _perform_setattr (confirm in Entity).
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Delegate every assignment to Entity._perform_setattr with this class's leaf names.
        self._perform_setattr(Dwdm.Ports.Port.Info.G709Info.OtuInfo.PrefecSfBer, ['reporting_enabled', 'is_detected', 'is_asserted', 'threshold', 'counter'], name, value)
class PrefecSdBer(Entity):
    """
    Prefec Signal Degrade BER information.

    .. attribute:: reporting_enabled

        Is reporting enabled?

        **type**\: bool

    .. attribute:: is_detected

        Is defect detected?

        **type**\: bool

    .. attribute:: is_asserted

        Is defect declared?

        **type**\: bool

    .. attribute:: threshold

        Error threshold power

        **type**\: int

        **range:** \-2147483648..2147483647

    .. attribute:: counter

        Error counter

        **type**\: int

        **range:** 0..18446744073709551615

    """

    # YANG module metadata of the generating model.
    _prefix = 'dwdm-ui-oper'
    _revision = '2015-11-09'

    def __init__(self):
        super(Dwdm.Ports.Port.Info.G709Info.OtuInfo.PrefecSdBer, self).__init__()

        self.yang_name = "prefec-sd-ber"
        self.yang_parent_name = "otu-info"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        # No child containers; all data lives in the leaves below.
        self._child_classes = OrderedDict([])
        # Leaf name -> (YLeaf(YANG type, YANG leaf name), accepted python types).
        self._leafs = OrderedDict([
            ('reporting_enabled', (YLeaf(YType.boolean, 'reporting-enabled'), ['bool'])),
            ('is_detected', (YLeaf(YType.boolean, 'is-detected'), ['bool'])),
            ('is_asserted', (YLeaf(YType.boolean, 'is-asserted'), ['bool'])),
            ('threshold', (YLeaf(YType.int32, 'threshold'), ['int'])),
            ('counter', (YLeaf(YType.uint64, 'counter'), ['int'])),
        ])
        self.reporting_enabled = None
        self.is_detected = None
        self.is_asserted = None
        self.threshold = None
        self.counter = None
        self._segment_path = lambda: "prefec-sd-ber"
        # Set last — presumably marks init complete for _perform_setattr (confirm in Entity).
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Delegate every assignment to Entity._perform_setattr with this class's leaf names.
        self._perform_setattr(Dwdm.Ports.Port.Info.G709Info.OtuInfo.PrefecSdBer, ['reporting_enabled', 'is_detected', 'is_asserted', 'threshold', 'counter'], name, value)
class BbeTca(Entity):
    """
    Background Block Error TCA information.

    .. attribute:: reporting_enabled

        Is reporting enabled?

        **type**\: bool

    .. attribute:: is_detected

        Is defect detected?

        **type**\: bool

    .. attribute:: is_asserted

        Is defect declared?

        **type**\: bool

    .. attribute:: threshold

        Error threshold power

        **type**\: int

        **range:** \-2147483648..2147483647

    .. attribute:: counter

        Error counter

        **type**\: int

        **range:** 0..18446744073709551615

    """

    # YANG module metadata of the generating model.
    _prefix = 'dwdm-ui-oper'
    _revision = '2015-11-09'

    def __init__(self):
        super(Dwdm.Ports.Port.Info.G709Info.OtuInfo.BbeTca, self).__init__()

        self.yang_name = "bbe-tca"
        self.yang_parent_name = "otu-info"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        # No child containers; all data lives in the leaves below.
        self._child_classes = OrderedDict([])
        # Leaf name -> (YLeaf(YANG type, YANG leaf name), accepted python types).
        self._leafs = OrderedDict([
            ('reporting_enabled', (YLeaf(YType.boolean, 'reporting-enabled'), ['bool'])),
            ('is_detected', (YLeaf(YType.boolean, 'is-detected'), ['bool'])),
            ('is_asserted', (YLeaf(YType.boolean, 'is-asserted'), ['bool'])),
            ('threshold', (YLeaf(YType.int32, 'threshold'), ['int'])),
            ('counter', (YLeaf(YType.uint64, 'counter'), ['int'])),
        ])
        self.reporting_enabled = None
        self.is_detected = None
        self.is_asserted = None
        self.threshold = None
        self.counter = None
        self._segment_path = lambda: "bbe-tca"
        # Set last — presumably marks init complete for _perform_setattr (confirm in Entity).
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Delegate every assignment to Entity._perform_setattr with this class's leaf names.
        self._perform_setattr(Dwdm.Ports.Port.Info.G709Info.OtuInfo.BbeTca, ['reporting_enabled', 'is_detected', 'is_asserted', 'threshold', 'counter'], name, value)
class EsTca(Entity):
    """
    Errored Seconds TCA information.

    .. attribute:: reporting_enabled

        Is reporting enabled?

        **type**\: bool

    .. attribute:: is_detected

        Is defect detected?

        **type**\: bool

    .. attribute:: is_asserted

        Is defect declared?

        **type**\: bool

    .. attribute:: threshold

        Error threshold power

        **type**\: int

        **range:** \-2147483648..2147483647

    .. attribute:: counter

        Error counter

        **type**\: int

        **range:** 0..18446744073709551615

    """

    # YANG module metadata of the generating model.
    _prefix = 'dwdm-ui-oper'
    _revision = '2015-11-09'

    def __init__(self):
        super(Dwdm.Ports.Port.Info.G709Info.OtuInfo.EsTca, self).__init__()

        self.yang_name = "es-tca"
        self.yang_parent_name = "otu-info"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        # No child containers; all data lives in the leaves below.
        self._child_classes = OrderedDict([])
        # Leaf name -> (YLeaf(YANG type, YANG leaf name), accepted python types).
        self._leafs = OrderedDict([
            ('reporting_enabled', (YLeaf(YType.boolean, 'reporting-enabled'), ['bool'])),
            ('is_detected', (YLeaf(YType.boolean, 'is-detected'), ['bool'])),
            ('is_asserted', (YLeaf(YType.boolean, 'is-asserted'), ['bool'])),
            ('threshold', (YLeaf(YType.int32, 'threshold'), ['int'])),
            ('counter', (YLeaf(YType.uint64, 'counter'), ['int'])),
        ])
        self.reporting_enabled = None
        self.is_detected = None
        self.is_asserted = None
        self.threshold = None
        self.counter = None
        self._segment_path = lambda: "es-tca"
        # Set last — presumably marks init complete for _perform_setattr (confirm in Entity).
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Delegate every assignment to Entity._perform_setattr with this class's leaf names.
        self._perform_setattr(Dwdm.Ports.Port.Info.G709Info.OtuInfo.EsTca, ['reporting_enabled', 'is_detected', 'is_asserted', 'threshold', 'counter'], name, value)
class Bbe(Entity):
    """
    Background Block Error information.

    .. attribute:: counter

        Performance Monitoring counter

        **type**\: int

        **range:** 0..18446744073709551615

    """

    # YANG module metadata of the generating model.
    _prefix = 'dwdm-ui-oper'
    _revision = '2015-11-09'

    def __init__(self):
        super(Dwdm.Ports.Port.Info.G709Info.OtuInfo.Bbe, self).__init__()

        self.yang_name = "bbe"
        self.yang_parent_name = "otu-info"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])
        # Single leaf: python name -> (YLeaf(YANG type, YANG leaf name), accepted python types).
        self._leafs = OrderedDict([
            ('counter', (YLeaf(YType.uint64, 'counter'), ['int'])),
        ])
        self.counter = None
        self._segment_path = lambda: "bbe"
        # Set last — presumably marks init complete for _perform_setattr (confirm in Entity).
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Delegate every assignment to Entity._perform_setattr with this class's leaf names.
        self._perform_setattr(Dwdm.Ports.Port.Info.G709Info.OtuInfo.Bbe, ['counter'], name, value)
class Es(Entity):
    """
    Errored Seconds information.

    .. attribute:: counter

        Performance Monitoring counter

        **type**\: int

        **range:** 0..18446744073709551615

    """

    # YANG module metadata of the generating model.
    _prefix = 'dwdm-ui-oper'
    _revision = '2015-11-09'

    def __init__(self):
        super(Dwdm.Ports.Port.Info.G709Info.OtuInfo.Es, self).__init__()

        self.yang_name = "es"
        self.yang_parent_name = "otu-info"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])
        # Single leaf: python name -> (YLeaf(YANG type, YANG leaf name), accepted python types).
        self._leafs = OrderedDict([
            ('counter', (YLeaf(YType.uint64, 'counter'), ['int'])),
        ])
        self.counter = None
        self._segment_path = lambda: "es"
        # Set last — presumably marks init complete for _perform_setattr (confirm in Entity).
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Delegate every assignment to Entity._perform_setattr with this class's leaf names.
        self._perform_setattr(Dwdm.Ports.Port.Info.G709Info.OtuInfo.Es, ['counter'], name, value)
class Ses(Entity):
    """
    Severely Errored Seconds information.

    .. attribute:: counter

        Performance Monitoring counter

        **type**\: int

        **range:** 0..18446744073709551615

    """

    # YANG module metadata of the generating model.
    _prefix = 'dwdm-ui-oper'
    _revision = '2015-11-09'

    def __init__(self):
        super(Dwdm.Ports.Port.Info.G709Info.OtuInfo.Ses, self).__init__()

        self.yang_name = "ses"
        self.yang_parent_name = "otu-info"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])
        # Single leaf: python name -> (YLeaf(YANG type, YANG leaf name), accepted python types).
        self._leafs = OrderedDict([
            ('counter', (YLeaf(YType.uint64, 'counter'), ['int'])),
        ])
        self.counter = None
        self._segment_path = lambda: "ses"
        # Set last — presumably marks init complete for _perform_setattr (confirm in Entity).
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Delegate every assignment to Entity._perform_setattr with this class's leaf names.
        self._perform_setattr(Dwdm.Ports.Port.Info.G709Info.OtuInfo.Ses, ['counter'], name, value)
class Uas(Entity):
    """
    Unavailability Seconds information.

    .. attribute:: counter

        Performance Monitoring counter

        **type**\: int

        **range:** 0..18446744073709551615

    """

    # YANG module metadata of the generating model.
    _prefix = 'dwdm-ui-oper'
    _revision = '2015-11-09'

    def __init__(self):
        super(Dwdm.Ports.Port.Info.G709Info.OtuInfo.Uas, self).__init__()

        self.yang_name = "uas"
        self.yang_parent_name = "otu-info"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])
        # Single leaf: python name -> (YLeaf(YANG type, YANG leaf name), accepted python types).
        self._leafs = OrderedDict([
            ('counter', (YLeaf(YType.uint64, 'counter'), ['int'])),
        ])
        self.counter = None
        self._segment_path = lambda: "uas"
        # Set last — presumably marks init complete for _perform_setattr (confirm in Entity).
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Delegate every assignment to Entity._perform_setattr with this class's leaf names.
        self._perform_setattr(Dwdm.Ports.Port.Info.G709Info.OtuInfo.Uas, ['counter'], name, value)
class Fc(Entity):
    """
    Failure Count information.

    .. attribute:: counter

        Performance Monitoring counter

        **type**\: int

        **range:** 0..18446744073709551615

    """

    # YANG module metadata of the generating model.
    _prefix = 'dwdm-ui-oper'
    _revision = '2015-11-09'

    def __init__(self):
        super(Dwdm.Ports.Port.Info.G709Info.OtuInfo.Fc, self).__init__()

        self.yang_name = "fc"
        self.yang_parent_name = "otu-info"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])
        # Single leaf: python name -> (YLeaf(YANG type, YANG leaf name), accepted python types).
        self._leafs = OrderedDict([
            ('counter', (YLeaf(YType.uint64, 'counter'), ['int'])),
        ])
        self.counter = None
        self._segment_path = lambda: "fc"
        # Set last — presumably marks init complete for _perform_setattr (confirm in Entity).
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Delegate every assignment to Entity._perform_setattr with this class's leaf names.
        self._perform_setattr(Dwdm.Ports.Port.Info.G709Info.OtuInfo.Fc, ['counter'], name, value)
class Bber(Entity):
    """
    Background Block Error Rate information.

    .. attribute:: counter

        Performance Monitoring counter

        **type**\: int

        **range:** 0..18446744073709551615

    """

    # YANG module metadata of the generating model.
    _prefix = 'dwdm-ui-oper'
    _revision = '2015-11-09'

    def __init__(self):
        super(Dwdm.Ports.Port.Info.G709Info.OtuInfo.Bber, self).__init__()

        self.yang_name = "bber"
        self.yang_parent_name = "otu-info"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])
        # Single leaf: python name -> (YLeaf(YANG type, YANG leaf name), accepted python types).
        self._leafs = OrderedDict([
            ('counter', (YLeaf(YType.uint64, 'counter'), ['int'])),
        ])
        self.counter = None
        self._segment_path = lambda: "bber"
        # Set last — presumably marks init complete for _perform_setattr (confirm in Entity).
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Delegate every assignment to Entity._perform_setattr with this class's leaf names.
        self._perform_setattr(Dwdm.Ports.Port.Info.G709Info.OtuInfo.Bber, ['counter'], name, value)
class Esr(Entity):
    """
    Errored Seconds Rate information.

    .. attribute:: counter

        Performance Monitoring counter

        **type**\: int

        **range:** 0..18446744073709551615

    """

    # YANG module metadata of the generating model.
    _prefix = 'dwdm-ui-oper'
    _revision = '2015-11-09'

    def __init__(self):
        super(Dwdm.Ports.Port.Info.G709Info.OtuInfo.Esr, self).__init__()

        self.yang_name = "esr"
        self.yang_parent_name = "otu-info"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])
        # Single leaf: python name -> (YLeaf(YANG type, YANG leaf name), accepted python types).
        self._leafs = OrderedDict([
            ('counter', (YLeaf(YType.uint64, 'counter'), ['int'])),
        ])
        self.counter = None
        self._segment_path = lambda: "esr"
        # Set last — presumably marks init complete for _perform_setattr (confirm in Entity).
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Delegate every assignment to Entity._perform_setattr with this class's leaf names.
        self._perform_setattr(Dwdm.Ports.Port.Info.G709Info.OtuInfo.Esr, ['counter'], name, value)
class Sesr(Entity):
    """
    Severely Errored Seconds Rate information.

    .. attribute:: counter

        Performance Monitoring counter

        **type**\: int

        **range:** 0..18446744073709551615

    """

    # YANG module metadata of the generating model.
    _prefix = 'dwdm-ui-oper'
    _revision = '2015-11-09'

    def __init__(self):
        super(Dwdm.Ports.Port.Info.G709Info.OtuInfo.Sesr, self).__init__()

        self.yang_name = "sesr"
        self.yang_parent_name = "otu-info"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])
        # Single leaf: python name -> (YLeaf(YANG type, YANG leaf name), accepted python types).
        self._leafs = OrderedDict([
            ('counter', (YLeaf(YType.uint64, 'counter'), ['int'])),
        ])
        self.counter = None
        self._segment_path = lambda: "sesr"
        # Set last — presumably marks init complete for _perform_setattr (confirm in Entity).
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Delegate every assignment to Entity._perform_setattr with this class's leaf names.
        self._perform_setattr(Dwdm.Ports.Port.Info.G709Info.OtuInfo.Sesr, ['counter'], name, value)
class Tti(Entity):
    """
    Trail Trace Identifier (TTI) information.

    Attributes, grouped by shape (all optional, default ``None``):

    String-type discriminators (**type**\: int, **range:** 0..4294967295)\:
    ``tx_string_type``, ``expected_string_type``, ``rx_string_type`` — Type of String.

    Full TTI strings (**type**\: str, **length:** 0..129)\:
    ``tx_tti``, ``rx_tti``, ``expected_tti``.

    SAPI[0] / DAPI[0] fields (**type**\: str, **length:** 0..5)\:
    ``tx_sapi0``, ``tx_dapi0``, ``rx_sapi0``, ``rx_dapi0``,
    ``expected_sapi0``, ``expected_dapi0``.

    SAPI[1\-15] / DAPI[1\-15] fields (**type**\: str, **length:** 0..16)\:
    ``tx_sapi``, ``tx_dapi``, ``rx_sapi``, ``rx_dapi``,
    ``expected_sapi``, ``expected_dapi``.

    Range strings (**type**\: str, **length:** 0..6)\:
    ``tx_sapi_range``, ``tx_dapi_range``, ``tx_oper_spec_range``,
    ``rx_sapi_range``, ``rx_dapi_range``, ``rx_oper_spec_range``,
    ``exp_sapi_range``, ``exp_dapi_range``, ``exp_oper_spec_range``.

    Operator Specific fields (**type**\: str, **length:** 0..33)\:
    ``tx_oper_spec``, ``rx_oper_spec``, ``expected_oper_spec``.
    """

    # YANG module metadata of the generating model.
    _prefix = 'dwdm-ui-oper'
    _revision = '2015-11-09'

    def __init__(self):
        super(Dwdm.Ports.Port.Info.G709Info.OtuInfo.Tti, self).__init__()

        self.yang_name = "tti"
        self.yang_parent_name = "otu-info"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        # No child containers; all data lives in the leaves below.
        self._child_classes = OrderedDict([])
        # Leaf name -> (YLeaf(YANG type, YANG leaf name), accepted python types).
        self._leafs = OrderedDict([
            ('tx_string_type', (YLeaf(YType.uint32, 'tx-string-type'), ['int'])),
            ('expected_string_type', (YLeaf(YType.uint32, 'expected-string-type'), ['int'])),
            ('rx_string_type', (YLeaf(YType.uint32, 'rx-string-type'), ['int'])),
            ('tx_tti', (YLeaf(YType.str, 'tx-tti'), ['str'])),
            ('tx_sapi0', (YLeaf(YType.str, 'tx-sapi0'), ['str'])),
            ('tx_sapi', (YLeaf(YType.str, 'tx-sapi'), ['str'])),
            ('tx_sapi_range', (YLeaf(YType.str, 'tx-sapi-range'), ['str'])),
            ('tx_dapi0', (YLeaf(YType.str, 'tx-dapi0'), ['str'])),
            ('tx_dapi', (YLeaf(YType.str, 'tx-dapi'), ['str'])),
            ('tx_dapi_range', (YLeaf(YType.str, 'tx-dapi-range'), ['str'])),
            ('tx_oper_spec', (YLeaf(YType.str, 'tx-oper-spec'), ['str'])),
            ('tx_oper_spec_range', (YLeaf(YType.str, 'tx-oper-spec-range'), ['str'])),
            ('rx_tti', (YLeaf(YType.str, 'rx-tti'), ['str'])),
            ('rx_sapi0', (YLeaf(YType.str, 'rx-sapi0'), ['str'])),
            ('rx_sapi', (YLeaf(YType.str, 'rx-sapi'), ['str'])),
            ('rx_sapi_range', (YLeaf(YType.str, 'rx-sapi-range'), ['str'])),
            ('rx_dapi0', (YLeaf(YType.str, 'rx-dapi0'), ['str'])),
            ('rx_dapi', (YLeaf(YType.str, 'rx-dapi'), ['str'])),
            ('rx_dapi_range', (YLeaf(YType.str, 'rx-dapi-range'), ['str'])),
            ('rx_oper_spec_range', (YLeaf(YType.str, 'rx-oper-spec-range'), ['str'])),
            ('rx_oper_spec', (YLeaf(YType.str, 'rx-oper-spec'), ['str'])),
            ('expected_tti', (YLeaf(YType.str, 'expected-tti'), ['str'])),
            ('expected_sapi0', (YLeaf(YType.str, 'expected-sapi0'), ['str'])),
            ('expected_sapi', (YLeaf(YType.str, 'expected-sapi'), ['str'])),
            ('exp_sapi_range', (YLeaf(YType.str, 'exp-sapi-range'), ['str'])),
            ('expected_dapi0', (YLeaf(YType.str, 'expected-dapi0'), ['str'])),
            ('expected_dapi', (YLeaf(YType.str, 'expected-dapi'), ['str'])),
            ('exp_dapi_range', (YLeaf(YType.str, 'exp-dapi-range'), ['str'])),
            ('expected_oper_spec', (YLeaf(YType.str, 'expected-oper-spec'), ['str'])),
            ('exp_oper_spec_range', (YLeaf(YType.str, 'exp-oper-spec-range'), ['str'])),
        ])
        self.tx_string_type = None
        self.expected_string_type = None
        self.rx_string_type = None
        self.tx_tti = None
        self.tx_sapi0 = None
        self.tx_sapi = None
        self.tx_sapi_range = None
        self.tx_dapi0 = None
        self.tx_dapi = None
        self.tx_dapi_range = None
        self.tx_oper_spec = None
        self.tx_oper_spec_range = None
        self.rx_tti = None
        self.rx_sapi0 = None
        self.rx_sapi = None
        self.rx_sapi_range = None
        self.rx_dapi0 = None
        self.rx_dapi = None
        self.rx_dapi_range = None
        self.rx_oper_spec_range = None
        self.rx_oper_spec = None
        self.expected_tti = None
        self.expected_sapi0 = None
        self.expected_sapi = None
        self.exp_sapi_range = None
        self.expected_dapi0 = None
        self.expected_dapi = None
        self.exp_dapi_range = None
        self.expected_oper_spec = None
        self.exp_oper_spec_range = None
        self._segment_path = lambda: "tti"
        # Set last — presumably marks init complete for _perform_setattr (confirm in Entity).
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Delegate every assignment to Entity._perform_setattr with this class's leaf names.
        self._perform_setattr(Dwdm.Ports.Port.Info.G709Info.OtuInfo.Tti, ['tx_string_type', 'expected_string_type', 'rx_string_type', 'tx_tti', 'tx_sapi0', 'tx_sapi', 'tx_sapi_range', 'tx_dapi0', 'tx_dapi', 'tx_dapi_range', 'tx_oper_spec', 'tx_oper_spec_range', 'rx_tti', 'rx_sapi0', 'rx_sapi', 'rx_sapi_range', 'rx_dapi0', 'rx_dapi', 'rx_dapi_range', 'rx_oper_spec_range', 'rx_oper_spec', 'expected_tti', 'expected_sapi0', 'expected_sapi', 'exp_sapi_range', 'expected_dapi0', 'expected_dapi', 'exp_dapi_range', 'expected_oper_spec', 'exp_oper_spec_range'], name, value)
class OduInfo(Entity):
"""
ODU layer Information
.. attribute:: oci
Open Connection Indiction information
**type**\: :py:class:`Oci <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Info.G709Info.OduInfo.Oci>`
.. attribute:: ais
Alarm Indication Signal information
**type**\: :py:class:`Ais <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Info.G709Info.OduInfo.Ais>`
.. attribute:: lck
Upstream Connection Locked information
**type**\: :py:class:`Lck <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Info.G709Info.OduInfo.Lck>`
.. attribute:: bdi
Backward Defect Indication information
**type**\: :py:class:`Bdi <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Info.G709Info.OduInfo.Bdi>`
.. attribute:: eoc
GCC End of Channel information
**type**\: :py:class:`Eoc <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Info.G709Info.OduInfo.Eoc>`
.. attribute:: ptim
Payload Type Identifier Mismatch information
**type**\: :py:class:`Ptim <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Info.G709Info.OduInfo.Ptim>`
.. attribute:: tim
Trace Identifier Mismatch information
**type**\: :py:class:`Tim <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Info.G709Info.OduInfo.Tim>`
.. attribute:: sf_ber
Signal Fail BER information
**type**\: :py:class:`SfBer <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Info.G709Info.OduInfo.SfBer>`
.. attribute:: sd_ber
Signal Degrade BER information
**type**\: :py:class:`SdBer <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Info.G709Info.OduInfo.SdBer>`
.. attribute:: bbe_tca
Background Block Error TCA information
**type**\: :py:class:`BbeTca <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Info.G709Info.OduInfo.BbeTca>`
.. attribute:: es_tca
Errored Seconds TCA information
**type**\: :py:class:`EsTca <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Info.G709Info.OduInfo.EsTca>`
.. attribute:: bbe
Background Block Error information
**type**\: :py:class:`Bbe <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Info.G709Info.OduInfo.Bbe>`
.. attribute:: es
Errored Seconds information
**type**\: :py:class:`Es <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Info.G709Info.OduInfo.Es>`
.. attribute:: ses
Severly Errored Seconds information
**type**\: :py:class:`Ses <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Info.G709Info.OduInfo.Ses>`
.. attribute:: uas
Unavailability Seconds information
**type**\: :py:class:`Uas <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Info.G709Info.OduInfo.Uas>`
.. attribute:: fc
Failure count information
**type**\: :py:class:`Fc <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Info.G709Info.OduInfo.Fc>`
.. attribute:: bber
Background Block Error Rate count information
**type**\: :py:class:`Bber <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Info.G709Info.OduInfo.Bber>`
.. attribute:: esr
Errored Seconds Rate information
**type**\: :py:class:`Esr <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Info.G709Info.OduInfo.Esr>`
.. attribute:: sesr
Severly Errored Seconds Rate information
**type**\: :py:class:`Sesr <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Info.G709Info.OduInfo.Sesr>`
.. attribute:: tti
Trail Trace Identifier information
**type**\: :py:class:`Tti <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Dwdm.Ports.Port.Info.G709Info.OduInfo.Tti>`
.. attribute:: bip
Bit Interleave Parity(BIP) counter
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: bei
Backward Error Indication counter
**type**\: int
**range:** 0..18446744073709551615
"""
_prefix = 'dwdm-ui-oper'
_revision = '2015-11-09'
def __init__(self):
    # Build the "odu-info" container node: register its YANG identity,
    # its child containers, its two leafs, then freeze the instance.
    super(Dwdm.Ports.Port.Info.G709Info.OduInfo, self).__init__()
    self.yang_name = "odu-info"
    self.yang_parent_name = "g709-info"
    self.is_top_level_class = False
    self.has_list_ancestor = True
    self.ylist_key_names = []
    # YANG element name -> (Python attribute name, binding class) for every
    # child container of odu-info.
    self._child_classes = OrderedDict([("oci", ("oci", Dwdm.Ports.Port.Info.G709Info.OduInfo.Oci)), ("ais", ("ais", Dwdm.Ports.Port.Info.G709Info.OduInfo.Ais)), ("lck", ("lck", Dwdm.Ports.Port.Info.G709Info.OduInfo.Lck)), ("bdi", ("bdi", Dwdm.Ports.Port.Info.G709Info.OduInfo.Bdi)), ("eoc", ("eoc", Dwdm.Ports.Port.Info.G709Info.OduInfo.Eoc)), ("ptim", ("ptim", Dwdm.Ports.Port.Info.G709Info.OduInfo.Ptim)), ("tim", ("tim", Dwdm.Ports.Port.Info.G709Info.OduInfo.Tim)), ("sf-ber", ("sf_ber", Dwdm.Ports.Port.Info.G709Info.OduInfo.SfBer)), ("sd-ber", ("sd_ber", Dwdm.Ports.Port.Info.G709Info.OduInfo.SdBer)), ("bbe-tca", ("bbe_tca", Dwdm.Ports.Port.Info.G709Info.OduInfo.BbeTca)), ("es-tca", ("es_tca", Dwdm.Ports.Port.Info.G709Info.OduInfo.EsTca)), ("bbe", ("bbe", Dwdm.Ports.Port.Info.G709Info.OduInfo.Bbe)), ("es", ("es", Dwdm.Ports.Port.Info.G709Info.OduInfo.Es)), ("ses", ("ses", Dwdm.Ports.Port.Info.G709Info.OduInfo.Ses)), ("uas", ("uas", Dwdm.Ports.Port.Info.G709Info.OduInfo.Uas)), ("fc", ("fc", Dwdm.Ports.Port.Info.G709Info.OduInfo.Fc)), ("bber", ("bber", Dwdm.Ports.Port.Info.G709Info.OduInfo.Bber)), ("esr", ("esr", Dwdm.Ports.Port.Info.G709Info.OduInfo.Esr)), ("sesr", ("sesr", Dwdm.Ports.Port.Info.G709Info.OduInfo.Sesr)), ("tti", ("tti", Dwdm.Ports.Port.Info.G709Info.OduInfo.Tti))])
    # Leaf attribute name -> (YLeaf descriptor, accepted Python types).
    self._leafs = OrderedDict([
        ('bip', (YLeaf(YType.uint64, 'bip'), ['int'])),
        ('bei', (YLeaf(YType.uint64, 'bei'), ['int'])),
    ])
    self.bip = None
    self.bei = None
    # Instantiate each child container and parent-link it;
    # _children_name_map maps the Python attribute to the YANG element name.
    self.oci = Dwdm.Ports.Port.Info.G709Info.OduInfo.Oci()
    self.oci.parent = self
    self._children_name_map["oci"] = "oci"
    self.ais = Dwdm.Ports.Port.Info.G709Info.OduInfo.Ais()
    self.ais.parent = self
    self._children_name_map["ais"] = "ais"
    self.lck = Dwdm.Ports.Port.Info.G709Info.OduInfo.Lck()
    self.lck.parent = self
    self._children_name_map["lck"] = "lck"
    self.bdi = Dwdm.Ports.Port.Info.G709Info.OduInfo.Bdi()
    self.bdi.parent = self
    self._children_name_map["bdi"] = "bdi"
    self.eoc = Dwdm.Ports.Port.Info.G709Info.OduInfo.Eoc()
    self.eoc.parent = self
    self._children_name_map["eoc"] = "eoc"
    self.ptim = Dwdm.Ports.Port.Info.G709Info.OduInfo.Ptim()
    self.ptim.parent = self
    self._children_name_map["ptim"] = "ptim"
    self.tim = Dwdm.Ports.Port.Info.G709Info.OduInfo.Tim()
    self.tim.parent = self
    self._children_name_map["tim"] = "tim"
    self.sf_ber = Dwdm.Ports.Port.Info.G709Info.OduInfo.SfBer()
    self.sf_ber.parent = self
    self._children_name_map["sf_ber"] = "sf-ber"
    self.sd_ber = Dwdm.Ports.Port.Info.G709Info.OduInfo.SdBer()
    self.sd_ber.parent = self
    self._children_name_map["sd_ber"] = "sd-ber"
    self.bbe_tca = Dwdm.Ports.Port.Info.G709Info.OduInfo.BbeTca()
    self.bbe_tca.parent = self
    self._children_name_map["bbe_tca"] = "bbe-tca"
    self.es_tca = Dwdm.Ports.Port.Info.G709Info.OduInfo.EsTca()
    self.es_tca.parent = self
    self._children_name_map["es_tca"] = "es-tca"
    self.bbe = Dwdm.Ports.Port.Info.G709Info.OduInfo.Bbe()
    self.bbe.parent = self
    self._children_name_map["bbe"] = "bbe"
    self.es = Dwdm.Ports.Port.Info.G709Info.OduInfo.Es()
    self.es.parent = self
    self._children_name_map["es"] = "es"
    self.ses = Dwdm.Ports.Port.Info.G709Info.OduInfo.Ses()
    self.ses.parent = self
    self._children_name_map["ses"] = "ses"
    self.uas = Dwdm.Ports.Port.Info.G709Info.OduInfo.Uas()
    self.uas.parent = self
    self._children_name_map["uas"] = "uas"
    self.fc = Dwdm.Ports.Port.Info.G709Info.OduInfo.Fc()
    self.fc.parent = self
    self._children_name_map["fc"] = "fc"
    self.bber = Dwdm.Ports.Port.Info.G709Info.OduInfo.Bber()
    self.bber.parent = self
    self._children_name_map["bber"] = "bber"
    self.esr = Dwdm.Ports.Port.Info.G709Info.OduInfo.Esr()
    self.esr.parent = self
    self._children_name_map["esr"] = "esr"
    self.sesr = Dwdm.Ports.Port.Info.G709Info.OduInfo.Sesr()
    self.sesr.parent = self
    self._children_name_map["sesr"] = "sesr"
    self.tti = Dwdm.Ports.Port.Info.G709Info.OduInfo.Tti()
    self.tti.parent = self
    self._children_name_map["tti"] = "tti"
    self._segment_path = lambda: "odu-info"
    # Set last: marks construction complete for the YDK setattr machinery.
    self._is_frozen = True
def __setattr__(self, name, value):
    # Only the two leafs are validated here; child containers are handled
    # through _child_classes by the YDK machinery.
    self._perform_setattr(Dwdm.Ports.Port.Info.G709Info.OduInfo, ['bip', 'bei'], name, value)
class Oci(Entity):
    """
    Open Connection Indication information.

    Generated YDK binding for the read-only "oci" container under
    odu-info (Cisco-IOS-XR-dwdm-ui-oper YANG model).

    .. attribute:: reporting_enabled
        Is reporting enabled?
        **type**\: bool
    .. attribute:: is_detected
        Is defect detected?
        **type**\: bool
    .. attribute:: is_asserted
        Is defect declared?
        **type**\: bool
    .. attribute:: counter
        Alarm counter
        **type**\: int
        **range:** 0..18446744073709551615
    """

    # YANG module prefix and revision this binding was generated from.
    _prefix = 'dwdm-ui-oper'
    _revision = '2015-11-09'

    def __init__(self):
        super(Dwdm.Ports.Port.Info.G709Info.OduInfo.Oci, self).__init__()
        self.yang_name = "oci"
        self.yang_parent_name = "odu-info"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])  # leaf-only container, no children
        # Leaf attribute name -> (YLeaf descriptor, accepted Python types).
        self._leafs = OrderedDict([
            ('reporting_enabled', (YLeaf(YType.boolean, 'reporting-enabled'), ['bool'])),
            ('is_detected', (YLeaf(YType.boolean, 'is-detected'), ['bool'])),
            ('is_asserted', (YLeaf(YType.boolean, 'is-asserted'), ['bool'])),
            ('counter', (YLeaf(YType.uint64, 'counter'), ['int'])),
        ])
        self.reporting_enabled = None
        self.is_detected = None
        self.is_asserted = None
        self.counter = None
        self._segment_path = lambda: "oci"
        # Set last: marks construction complete for the YDK setattr machinery.
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Route writes through YDK validation of the declared leaf names.
        self._perform_setattr(Dwdm.Ports.Port.Info.G709Info.OduInfo.Oci, ['reporting_enabled', 'is_detected', 'is_asserted', 'counter'], name, value)
class Ais(Entity):
    """
    Alarm Indication Signal information.

    Generated YDK binding for the read-only "ais" container under
    odu-info (Cisco-IOS-XR-dwdm-ui-oper YANG model).

    .. attribute:: reporting_enabled
        Is reporting enabled?
        **type**\: bool
    .. attribute:: is_detected
        Is defect detected?
        **type**\: bool
    .. attribute:: is_asserted
        Is defect declared?
        **type**\: bool
    .. attribute:: counter
        Alarm counter
        **type**\: int
        **range:** 0..18446744073709551615
    """

    # YANG module prefix and revision this binding was generated from.
    _prefix = 'dwdm-ui-oper'
    _revision = '2015-11-09'

    def __init__(self):
        super(Dwdm.Ports.Port.Info.G709Info.OduInfo.Ais, self).__init__()
        self.yang_name = "ais"
        self.yang_parent_name = "odu-info"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])  # leaf-only container, no children
        # Leaf attribute name -> (YLeaf descriptor, accepted Python types).
        self._leafs = OrderedDict([
            ('reporting_enabled', (YLeaf(YType.boolean, 'reporting-enabled'), ['bool'])),
            ('is_detected', (YLeaf(YType.boolean, 'is-detected'), ['bool'])),
            ('is_asserted', (YLeaf(YType.boolean, 'is-asserted'), ['bool'])),
            ('counter', (YLeaf(YType.uint64, 'counter'), ['int'])),
        ])
        self.reporting_enabled = None
        self.is_detected = None
        self.is_asserted = None
        self.counter = None
        self._segment_path = lambda: "ais"
        # Set last: marks construction complete for the YDK setattr machinery.
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Route writes through YDK validation of the declared leaf names.
        self._perform_setattr(Dwdm.Ports.Port.Info.G709Info.OduInfo.Ais, ['reporting_enabled', 'is_detected', 'is_asserted', 'counter'], name, value)
class Lck(Entity):
    """
    Upstream Connection Locked information.

    Generated YDK binding for the read-only "lck" container under
    odu-info (Cisco-IOS-XR-dwdm-ui-oper YANG model).

    .. attribute:: reporting_enabled
        Is reporting enabled?
        **type**\: bool
    .. attribute:: is_detected
        Is defect detected?
        **type**\: bool
    .. attribute:: is_asserted
        Is defect declared?
        **type**\: bool
    .. attribute:: counter
        Alarm counter
        **type**\: int
        **range:** 0..18446744073709551615
    """

    # YANG module prefix and revision this binding was generated from.
    _prefix = 'dwdm-ui-oper'
    _revision = '2015-11-09'

    def __init__(self):
        super(Dwdm.Ports.Port.Info.G709Info.OduInfo.Lck, self).__init__()
        self.yang_name = "lck"
        self.yang_parent_name = "odu-info"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])  # leaf-only container, no children
        # Leaf attribute name -> (YLeaf descriptor, accepted Python types).
        self._leafs = OrderedDict([
            ('reporting_enabled', (YLeaf(YType.boolean, 'reporting-enabled'), ['bool'])),
            ('is_detected', (YLeaf(YType.boolean, 'is-detected'), ['bool'])),
            ('is_asserted', (YLeaf(YType.boolean, 'is-asserted'), ['bool'])),
            ('counter', (YLeaf(YType.uint64, 'counter'), ['int'])),
        ])
        self.reporting_enabled = None
        self.is_detected = None
        self.is_asserted = None
        self.counter = None
        self._segment_path = lambda: "lck"
        # Set last: marks construction complete for the YDK setattr machinery.
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Route writes through YDK validation of the declared leaf names.
        self._perform_setattr(Dwdm.Ports.Port.Info.G709Info.OduInfo.Lck, ['reporting_enabled', 'is_detected', 'is_asserted', 'counter'], name, value)
class Bdi(Entity):
    """
    Backward Defect Indication information.

    Generated YDK binding for the read-only "bdi" container under
    odu-info (Cisco-IOS-XR-dwdm-ui-oper YANG model).

    .. attribute:: reporting_enabled
        Is reporting enabled?
        **type**\: bool
    .. attribute:: is_detected
        Is defect detected?
        **type**\: bool
    .. attribute:: is_asserted
        Is defect declared?
        **type**\: bool
    .. attribute:: counter
        Alarm counter
        **type**\: int
        **range:** 0..18446744073709551615
    """

    # YANG module prefix and revision this binding was generated from.
    _prefix = 'dwdm-ui-oper'
    _revision = '2015-11-09'

    def __init__(self):
        super(Dwdm.Ports.Port.Info.G709Info.OduInfo.Bdi, self).__init__()
        self.yang_name = "bdi"
        self.yang_parent_name = "odu-info"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])  # leaf-only container, no children
        # Leaf attribute name -> (YLeaf descriptor, accepted Python types).
        self._leafs = OrderedDict([
            ('reporting_enabled', (YLeaf(YType.boolean, 'reporting-enabled'), ['bool'])),
            ('is_detected', (YLeaf(YType.boolean, 'is-detected'), ['bool'])),
            ('is_asserted', (YLeaf(YType.boolean, 'is-asserted'), ['bool'])),
            ('counter', (YLeaf(YType.uint64, 'counter'), ['int'])),
        ])
        self.reporting_enabled = None
        self.is_detected = None
        self.is_asserted = None
        self.counter = None
        self._segment_path = lambda: "bdi"
        # Set last: marks construction complete for the YDK setattr machinery.
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Route writes through YDK validation of the declared leaf names.
        self._perform_setattr(Dwdm.Ports.Port.Info.G709Info.OduInfo.Bdi, ['reporting_enabled', 'is_detected', 'is_asserted', 'counter'], name, value)
class Eoc(Entity):
    """
    GCC End of Channel information.

    Generated YDK binding for the read-only "eoc" container under
    odu-info (Cisco-IOS-XR-dwdm-ui-oper YANG model).

    .. attribute:: reporting_enabled
        Is reporting enabled?
        **type**\: bool
    .. attribute:: is_detected
        Is defect detected?
        **type**\: bool
    .. attribute:: is_asserted
        Is defect declared?
        **type**\: bool
    .. attribute:: counter
        Alarm counter
        **type**\: int
        **range:** 0..18446744073709551615
    """

    # YANG module prefix and revision this binding was generated from.
    _prefix = 'dwdm-ui-oper'
    _revision = '2015-11-09'

    def __init__(self):
        super(Dwdm.Ports.Port.Info.G709Info.OduInfo.Eoc, self).__init__()
        self.yang_name = "eoc"
        self.yang_parent_name = "odu-info"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])  # leaf-only container, no children
        # Leaf attribute name -> (YLeaf descriptor, accepted Python types).
        self._leafs = OrderedDict([
            ('reporting_enabled', (YLeaf(YType.boolean, 'reporting-enabled'), ['bool'])),
            ('is_detected', (YLeaf(YType.boolean, 'is-detected'), ['bool'])),
            ('is_asserted', (YLeaf(YType.boolean, 'is-asserted'), ['bool'])),
            ('counter', (YLeaf(YType.uint64, 'counter'), ['int'])),
        ])
        self.reporting_enabled = None
        self.is_detected = None
        self.is_asserted = None
        self.counter = None
        self._segment_path = lambda: "eoc"
        # Set last: marks construction complete for the YDK setattr machinery.
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Route writes through YDK validation of the declared leaf names.
        self._perform_setattr(Dwdm.Ports.Port.Info.G709Info.OduInfo.Eoc, ['reporting_enabled', 'is_detected', 'is_asserted', 'counter'], name, value)
class Ptim(Entity):
    """
    Payload Type Identifier Mismatch information.

    Generated YDK binding for the read-only "ptim" container under
    odu-info (Cisco-IOS-XR-dwdm-ui-oper YANG model).

    .. attribute:: reporting_enabled
        Is reporting enabled?
        **type**\: bool
    .. attribute:: is_detected
        Is defect detected?
        **type**\: bool
    .. attribute:: is_asserted
        Is defect declared?
        **type**\: bool
    .. attribute:: counter
        Alarm counter
        **type**\: int
        **range:** 0..18446744073709551615
    """

    # YANG module prefix and revision this binding was generated from.
    _prefix = 'dwdm-ui-oper'
    _revision = '2015-11-09'

    def __init__(self):
        super(Dwdm.Ports.Port.Info.G709Info.OduInfo.Ptim, self).__init__()
        self.yang_name = "ptim"
        self.yang_parent_name = "odu-info"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])  # leaf-only container, no children
        # Leaf attribute name -> (YLeaf descriptor, accepted Python types).
        self._leafs = OrderedDict([
            ('reporting_enabled', (YLeaf(YType.boolean, 'reporting-enabled'), ['bool'])),
            ('is_detected', (YLeaf(YType.boolean, 'is-detected'), ['bool'])),
            ('is_asserted', (YLeaf(YType.boolean, 'is-asserted'), ['bool'])),
            ('counter', (YLeaf(YType.uint64, 'counter'), ['int'])),
        ])
        self.reporting_enabled = None
        self.is_detected = None
        self.is_asserted = None
        self.counter = None
        self._segment_path = lambda: "ptim"
        # Set last: marks construction complete for the YDK setattr machinery.
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Route writes through YDK validation of the declared leaf names.
        self._perform_setattr(Dwdm.Ports.Port.Info.G709Info.OduInfo.Ptim, ['reporting_enabled', 'is_detected', 'is_asserted', 'counter'], name, value)
class Tim(Entity):
    """
    Trace Identifier Mismatch information.

    Generated YDK binding for the read-only "tim" container under
    odu-info (Cisco-IOS-XR-dwdm-ui-oper YANG model).

    .. attribute:: reporting_enabled
        Is reporting enabled?
        **type**\: bool
    .. attribute:: is_detected
        Is defect detected?
        **type**\: bool
    .. attribute:: is_asserted
        Is defect declared?
        **type**\: bool
    .. attribute:: counter
        Alarm counter
        **type**\: int
        **range:** 0..18446744073709551615
    """

    # YANG module prefix and revision this binding was generated from.
    _prefix = 'dwdm-ui-oper'
    _revision = '2015-11-09'

    def __init__(self):
        super(Dwdm.Ports.Port.Info.G709Info.OduInfo.Tim, self).__init__()
        self.yang_name = "tim"
        self.yang_parent_name = "odu-info"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])  # leaf-only container, no children
        # Leaf attribute name -> (YLeaf descriptor, accepted Python types).
        self._leafs = OrderedDict([
            ('reporting_enabled', (YLeaf(YType.boolean, 'reporting-enabled'), ['bool'])),
            ('is_detected', (YLeaf(YType.boolean, 'is-detected'), ['bool'])),
            ('is_asserted', (YLeaf(YType.boolean, 'is-asserted'), ['bool'])),
            ('counter', (YLeaf(YType.uint64, 'counter'), ['int'])),
        ])
        self.reporting_enabled = None
        self.is_detected = None
        self.is_asserted = None
        self.counter = None
        self._segment_path = lambda: "tim"
        # Set last: marks construction complete for the YDK setattr machinery.
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Route writes through YDK validation of the declared leaf names.
        self._perform_setattr(Dwdm.Ports.Port.Info.G709Info.OduInfo.Tim, ['reporting_enabled', 'is_detected', 'is_asserted', 'counter'], name, value)
class SfBer(Entity):
    """
    Signal Fail BER information.

    Generated YDK binding for the read-only "sf-ber" container under
    odu-info (Cisco-IOS-XR-dwdm-ui-oper YANG model).

    .. attribute:: reporting_enabled
        Is reporting enabled?
        **type**\: bool
    .. attribute:: is_detected
        Is defect detected?
        **type**\: bool
    .. attribute:: is_asserted
        Is defect declared?
        **type**\: bool
    .. attribute:: threshold
        Error threshold power
        **type**\: int
        **range:** \-2147483648..2147483647
    .. attribute:: counter
        Error counter
        **type**\: int
        **range:** 0..18446744073709551615
    """

    # YANG module prefix and revision this binding was generated from.
    _prefix = 'dwdm-ui-oper'
    _revision = '2015-11-09'

    def __init__(self):
        super(Dwdm.Ports.Port.Info.G709Info.OduInfo.SfBer, self).__init__()
        self.yang_name = "sf-ber"
        self.yang_parent_name = "odu-info"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])  # leaf-only container, no children
        # Leaf attribute name -> (YLeaf descriptor, accepted Python types).
        self._leafs = OrderedDict([
            ('reporting_enabled', (YLeaf(YType.boolean, 'reporting-enabled'), ['bool'])),
            ('is_detected', (YLeaf(YType.boolean, 'is-detected'), ['bool'])),
            ('is_asserted', (YLeaf(YType.boolean, 'is-asserted'), ['bool'])),
            ('threshold', (YLeaf(YType.int32, 'threshold'), ['int'])),
            ('counter', (YLeaf(YType.uint64, 'counter'), ['int'])),
        ])
        self.reporting_enabled = None
        self.is_detected = None
        self.is_asserted = None
        self.threshold = None
        self.counter = None
        self._segment_path = lambda: "sf-ber"
        # Set last: marks construction complete for the YDK setattr machinery.
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Route writes through YDK validation of the declared leaf names.
        self._perform_setattr(Dwdm.Ports.Port.Info.G709Info.OduInfo.SfBer, ['reporting_enabled', 'is_detected', 'is_asserted', 'threshold', 'counter'], name, value)
class SdBer(Entity):
    """
    Signal Degrade BER information.

    Generated YDK binding for the read-only "sd-ber" container under
    odu-info (Cisco-IOS-XR-dwdm-ui-oper YANG model).

    .. attribute:: reporting_enabled
        Is reporting enabled?
        **type**\: bool
    .. attribute:: is_detected
        Is defect detected?
        **type**\: bool
    .. attribute:: is_asserted
        Is defect declared?
        **type**\: bool
    .. attribute:: threshold
        Error threshold power
        **type**\: int
        **range:** \-2147483648..2147483647
    .. attribute:: counter
        Error counter
        **type**\: int
        **range:** 0..18446744073709551615
    """

    # YANG module prefix and revision this binding was generated from.
    _prefix = 'dwdm-ui-oper'
    _revision = '2015-11-09'

    def __init__(self):
        super(Dwdm.Ports.Port.Info.G709Info.OduInfo.SdBer, self).__init__()
        self.yang_name = "sd-ber"
        self.yang_parent_name = "odu-info"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])  # leaf-only container, no children
        # Leaf attribute name -> (YLeaf descriptor, accepted Python types).
        self._leafs = OrderedDict([
            ('reporting_enabled', (YLeaf(YType.boolean, 'reporting-enabled'), ['bool'])),
            ('is_detected', (YLeaf(YType.boolean, 'is-detected'), ['bool'])),
            ('is_asserted', (YLeaf(YType.boolean, 'is-asserted'), ['bool'])),
            ('threshold', (YLeaf(YType.int32, 'threshold'), ['int'])),
            ('counter', (YLeaf(YType.uint64, 'counter'), ['int'])),
        ])
        self.reporting_enabled = None
        self.is_detected = None
        self.is_asserted = None
        self.threshold = None
        self.counter = None
        self._segment_path = lambda: "sd-ber"
        # Set last: marks construction complete for the YDK setattr machinery.
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Route writes through YDK validation of the declared leaf names.
        self._perform_setattr(Dwdm.Ports.Port.Info.G709Info.OduInfo.SdBer, ['reporting_enabled', 'is_detected', 'is_asserted', 'threshold', 'counter'], name, value)
class BbeTca(Entity):
    """
    Background Block Error TCA information.

    Generated YDK binding for the read-only "bbe-tca" container under
    odu-info (Cisco-IOS-XR-dwdm-ui-oper YANG model).

    .. attribute:: reporting_enabled
        Is reporting enabled?
        **type**\: bool
    .. attribute:: is_detected
        Is defect detected?
        **type**\: bool
    .. attribute:: is_asserted
        Is defect declared?
        **type**\: bool
    .. attribute:: threshold
        Error threshold power
        **type**\: int
        **range:** \-2147483648..2147483647
    .. attribute:: counter
        Error counter
        **type**\: int
        **range:** 0..18446744073709551615
    """

    # YANG module prefix and revision this binding was generated from.
    _prefix = 'dwdm-ui-oper'
    _revision = '2015-11-09'

    def __init__(self):
        super(Dwdm.Ports.Port.Info.G709Info.OduInfo.BbeTca, self).__init__()
        self.yang_name = "bbe-tca"
        self.yang_parent_name = "odu-info"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])  # leaf-only container, no children
        # Leaf attribute name -> (YLeaf descriptor, accepted Python types).
        self._leafs = OrderedDict([
            ('reporting_enabled', (YLeaf(YType.boolean, 'reporting-enabled'), ['bool'])),
            ('is_detected', (YLeaf(YType.boolean, 'is-detected'), ['bool'])),
            ('is_asserted', (YLeaf(YType.boolean, 'is-asserted'), ['bool'])),
            ('threshold', (YLeaf(YType.int32, 'threshold'), ['int'])),
            ('counter', (YLeaf(YType.uint64, 'counter'), ['int'])),
        ])
        self.reporting_enabled = None
        self.is_detected = None
        self.is_asserted = None
        self.threshold = None
        self.counter = None
        self._segment_path = lambda: "bbe-tca"
        # Set last: marks construction complete for the YDK setattr machinery.
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Route writes through YDK validation of the declared leaf names.
        self._perform_setattr(Dwdm.Ports.Port.Info.G709Info.OduInfo.BbeTca, ['reporting_enabled', 'is_detected', 'is_asserted', 'threshold', 'counter'], name, value)
class EsTca(Entity):
    """
    Errored Seconds TCA information.

    Generated YDK binding for the read-only "es-tca" container under
    odu-info (Cisco-IOS-XR-dwdm-ui-oper YANG model).

    .. attribute:: reporting_enabled
        Is reporting enabled?
        **type**\: bool
    .. attribute:: is_detected
        Is defect detected?
        **type**\: bool
    .. attribute:: is_asserted
        Is defect declared?
        **type**\: bool
    .. attribute:: threshold
        Error threshold power
        **type**\: int
        **range:** \-2147483648..2147483647
    .. attribute:: counter
        Error counter
        **type**\: int
        **range:** 0..18446744073709551615
    """

    # YANG module prefix and revision this binding was generated from.
    _prefix = 'dwdm-ui-oper'
    _revision = '2015-11-09'

    def __init__(self):
        super(Dwdm.Ports.Port.Info.G709Info.OduInfo.EsTca, self).__init__()
        self.yang_name = "es-tca"
        self.yang_parent_name = "odu-info"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])  # leaf-only container, no children
        # Leaf attribute name -> (YLeaf descriptor, accepted Python types).
        self._leafs = OrderedDict([
            ('reporting_enabled', (YLeaf(YType.boolean, 'reporting-enabled'), ['bool'])),
            ('is_detected', (YLeaf(YType.boolean, 'is-detected'), ['bool'])),
            ('is_asserted', (YLeaf(YType.boolean, 'is-asserted'), ['bool'])),
            ('threshold', (YLeaf(YType.int32, 'threshold'), ['int'])),
            ('counter', (YLeaf(YType.uint64, 'counter'), ['int'])),
        ])
        self.reporting_enabled = None
        self.is_detected = None
        self.is_asserted = None
        self.threshold = None
        self.counter = None
        self._segment_path = lambda: "es-tca"
        # Set last: marks construction complete for the YDK setattr machinery.
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Route writes through YDK validation of the declared leaf names.
        self._perform_setattr(Dwdm.Ports.Port.Info.G709Info.OduInfo.EsTca, ['reporting_enabled', 'is_detected', 'is_asserted', 'threshold', 'counter'], name, value)
class Bbe(Entity):
    """
    Background Block Error information.

    Generated YDK binding for the read-only "bbe" counter container
    under odu-info (Cisco-IOS-XR-dwdm-ui-oper YANG model).

    .. attribute:: counter
        Performance Monitoring counter
        **type**\: int
        **range:** 0..18446744073709551615
    """

    # YANG module prefix and revision this binding was generated from.
    _prefix = 'dwdm-ui-oper'
    _revision = '2015-11-09'

    def __init__(self):
        super(Dwdm.Ports.Port.Info.G709Info.OduInfo.Bbe, self).__init__()
        self.yang_name = "bbe"
        self.yang_parent_name = "odu-info"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])  # leaf-only container, no children
        # Single uint64 leaf holding the PM counter value.
        self._leafs = OrderedDict([
            ('counter', (YLeaf(YType.uint64, 'counter'), ['int'])),
        ])
        self.counter = None
        self._segment_path = lambda: "bbe"
        # Set last: marks construction complete for the YDK setattr machinery.
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Route writes through YDK validation of the declared leaf names.
        self._perform_setattr(Dwdm.Ports.Port.Info.G709Info.OduInfo.Bbe, ['counter'], name, value)
class Es(Entity):
    """
    Errored Seconds information.

    Generated YDK binding for the read-only "es" counter container
    under odu-info (Cisco-IOS-XR-dwdm-ui-oper YANG model).

    .. attribute:: counter
        Performance Monitoring counter
        **type**\: int
        **range:** 0..18446744073709551615
    """

    # YANG module prefix and revision this binding was generated from.
    _prefix = 'dwdm-ui-oper'
    _revision = '2015-11-09'

    def __init__(self):
        super(Dwdm.Ports.Port.Info.G709Info.OduInfo.Es, self).__init__()
        self.yang_name = "es"
        self.yang_parent_name = "odu-info"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])  # leaf-only container, no children
        # Single uint64 leaf holding the PM counter value.
        self._leafs = OrderedDict([
            ('counter', (YLeaf(YType.uint64, 'counter'), ['int'])),
        ])
        self.counter = None
        self._segment_path = lambda: "es"
        # Set last: marks construction complete for the YDK setattr machinery.
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Route writes through YDK validation of the declared leaf names.
        self._perform_setattr(Dwdm.Ports.Port.Info.G709Info.OduInfo.Es, ['counter'], name, value)
class Ses(Entity):
    """
    Severely Errored Seconds information.

    Generated YDK binding for the read-only "ses" counter container
    under odu-info (Cisco-IOS-XR-dwdm-ui-oper YANG model).

    .. attribute:: counter
        Performance Monitoring counter
        **type**\: int
        **range:** 0..18446744073709551615
    """

    # YANG module prefix and revision this binding was generated from.
    _prefix = 'dwdm-ui-oper'
    _revision = '2015-11-09'

    def __init__(self):
        super(Dwdm.Ports.Port.Info.G709Info.OduInfo.Ses, self).__init__()
        self.yang_name = "ses"
        self.yang_parent_name = "odu-info"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])  # leaf-only container, no children
        # Single uint64 leaf holding the PM counter value.
        self._leafs = OrderedDict([
            ('counter', (YLeaf(YType.uint64, 'counter'), ['int'])),
        ])
        self.counter = None
        self._segment_path = lambda: "ses"
        # Set last: marks construction complete for the YDK setattr machinery.
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Route writes through YDK validation of the declared leaf names.
        self._perform_setattr(Dwdm.Ports.Port.Info.G709Info.OduInfo.Ses, ['counter'], name, value)
class Uas(Entity):
    """
    Unavailability Seconds information.

    Generated YDK binding for the read-only "uas" counter container
    under odu-info (Cisco-IOS-XR-dwdm-ui-oper YANG model).

    .. attribute:: counter
        Performance Monitoring counter
        **type**\: int
        **range:** 0..18446744073709551615
    """

    # YANG module prefix and revision this binding was generated from.
    _prefix = 'dwdm-ui-oper'
    _revision = '2015-11-09'

    def __init__(self):
        super(Dwdm.Ports.Port.Info.G709Info.OduInfo.Uas, self).__init__()
        self.yang_name = "uas"
        self.yang_parent_name = "odu-info"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])  # leaf-only container, no children
        # Single uint64 leaf holding the PM counter value.
        self._leafs = OrderedDict([
            ('counter', (YLeaf(YType.uint64, 'counter'), ['int'])),
        ])
        self.counter = None
        self._segment_path = lambda: "uas"
        # Set last: marks construction complete for the YDK setattr machinery.
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Route writes through YDK validation of the declared leaf names.
        self._perform_setattr(Dwdm.Ports.Port.Info.G709Info.OduInfo.Uas, ['counter'], name, value)
class Fc(Entity):
    """
    Failure count information.

    Generated YDK binding for the read-only "fc" counter container
    under odu-info (Cisco-IOS-XR-dwdm-ui-oper YANG model).

    .. attribute:: counter
        Performance Monitoring counter
        **type**\: int
        **range:** 0..18446744073709551615
    """

    # YANG module prefix and revision this binding was generated from.
    _prefix = 'dwdm-ui-oper'
    _revision = '2015-11-09'

    def __init__(self):
        super(Dwdm.Ports.Port.Info.G709Info.OduInfo.Fc, self).__init__()
        self.yang_name = "fc"
        self.yang_parent_name = "odu-info"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])  # leaf-only container, no children
        # Single uint64 leaf holding the PM counter value.
        self._leafs = OrderedDict([
            ('counter', (YLeaf(YType.uint64, 'counter'), ['int'])),
        ])
        self.counter = None
        self._segment_path = lambda: "fc"
        # Set last: marks construction complete for the YDK setattr machinery.
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Route writes through YDK validation of the declared leaf names.
        self._perform_setattr(Dwdm.Ports.Port.Info.G709Info.OduInfo.Fc, ['counter'], name, value)
class Bber(Entity):
    """
    Background Block Error Rate count information.

    Generated YDK binding for the read-only "bber" counter container
    under odu-info (Cisco-IOS-XR-dwdm-ui-oper YANG model).

    .. attribute:: counter
        Performance Monitoring counter
        **type**\: int
        **range:** 0..18446744073709551615
    """

    # YANG module prefix and revision this binding was generated from.
    _prefix = 'dwdm-ui-oper'
    _revision = '2015-11-09'

    def __init__(self):
        super(Dwdm.Ports.Port.Info.G709Info.OduInfo.Bber, self).__init__()
        self.yang_name = "bber"
        self.yang_parent_name = "odu-info"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])  # leaf-only container, no children
        # Single uint64 leaf holding the PM counter value.
        self._leafs = OrderedDict([
            ('counter', (YLeaf(YType.uint64, 'counter'), ['int'])),
        ])
        self.counter = None
        self._segment_path = lambda: "bber"
        # Set last: marks construction complete for the YDK setattr machinery.
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Route writes through YDK validation of the declared leaf names.
        self._perform_setattr(Dwdm.Ports.Port.Info.G709Info.OduInfo.Bber, ['counter'], name, value)
class Esr(Entity):
    """
    Errored Seconds Rate information.

    Generated YDK binding for the read-only "esr" counter container
    under odu-info (Cisco-IOS-XR-dwdm-ui-oper YANG model).

    .. attribute:: counter
        Performance Monitoring counter
        **type**\: int
        **range:** 0..18446744073709551615
    """

    # YANG module prefix and revision this binding was generated from.
    _prefix = 'dwdm-ui-oper'
    _revision = '2015-11-09'

    def __init__(self):
        super(Dwdm.Ports.Port.Info.G709Info.OduInfo.Esr, self).__init__()
        self.yang_name = "esr"
        self.yang_parent_name = "odu-info"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])  # leaf-only container, no children
        # Single uint64 leaf holding the PM counter value.
        self._leafs = OrderedDict([
            ('counter', (YLeaf(YType.uint64, 'counter'), ['int'])),
        ])
        self.counter = None
        self._segment_path = lambda: "esr"
        # Set last: marks construction complete for the YDK setattr machinery.
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Route writes through YDK validation of the declared leaf names.
        self._perform_setattr(Dwdm.Ports.Port.Info.G709Info.OduInfo.Esr, ['counter'], name, value)
class Sesr(Entity):
    """
    Severely Errored Seconds Rate information.

    Generated YDK binding for the read-only "sesr" counter container
    under odu-info (Cisco-IOS-XR-dwdm-ui-oper YANG model).

    .. attribute:: counter
        Performance Monitoring counter
        **type**\: int
        **range:** 0..18446744073709551615
    """

    # YANG module prefix and revision this binding was generated from.
    _prefix = 'dwdm-ui-oper'
    _revision = '2015-11-09'

    def __init__(self):
        super(Dwdm.Ports.Port.Info.G709Info.OduInfo.Sesr, self).__init__()
        self.yang_name = "sesr"
        self.yang_parent_name = "odu-info"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])  # leaf-only container, no children
        # Single uint64 leaf holding the PM counter value.
        self._leafs = OrderedDict([
            ('counter', (YLeaf(YType.uint64, 'counter'), ['int'])),
        ])
        self.counter = None
        self._segment_path = lambda: "sesr"
        # Set last: marks construction complete for the YDK setattr machinery.
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Route writes through YDK validation of the declared leaf names.
        self._perform_setattr(Dwdm.Ports.Port.Info.G709Info.OduInfo.Sesr, ['counter'], name, value)
class Tti(Entity):
"""
Trail Trace Identifier information
.. attribute:: tx_string_type
Type of String
**type**\: int
**range:** 0..4294967295
.. attribute:: expected_string_type
Type of String
**type**\: int
**range:** 0..4294967295
.. attribute:: rx_string_type
Type of String
**type**\: int
**range:** 0..4294967295
.. attribute:: tx_tti
Tx TTI String
**type**\: str
**length:** 0..129
.. attribute:: tx_sapi0
Tx SAPI[0] Field
**type**\: str
**length:** 0..5
.. attribute:: tx_sapi
Tx SAPI[1\-15] Field
**type**\: str
**length:** 0..16
.. attribute:: tx_sapi_range
Tx SAPI Range String
**type**\: str
**length:** 0..6
.. attribute:: tx_dapi0
Tx DAPI[0] Field
**type**\: str
**length:** 0..5
.. attribute:: tx_dapi
Tx DAPI[1\-15] Field
**type**\: str
**length:** 0..16
.. attribute:: tx_dapi_range
Tx DAPI Range String
**type**\: str
**length:** 0..6
.. attribute:: tx_oper_spec
Tx Operator Specific Field
**type**\: str
**length:** 0..33
.. attribute:: tx_oper_spec_range
Tx Operator Specific Field Range String
**type**\: str
**length:** 0..6
.. attribute:: rx_tti
Rx TTI String
**type**\: str
**length:** 0..129
.. attribute:: rx_sapi0
Rx SAPI[0] Field
**type**\: str
**length:** 0..5
.. attribute:: rx_sapi
Rx SAPI[1\-15] Field
**type**\: str
**length:** 0..16
.. attribute:: rx_sapi_range
Rx SAPI Range String
**type**\: str
**length:** 0..6
.. attribute:: rx_dapi0
Rx DAPI[0] Field
**type**\: str
**length:** 0..5
.. attribute:: rx_dapi
Rx DAPI[1\-15] Field
**type**\: str
**length:** 0..16
.. attribute:: rx_dapi_range
Rx DAPI Range String
**type**\: str
**length:** 0..6
.. attribute:: rx_oper_spec_range
Rx Operator Specific Field Range String
**type**\: str
**length:** 0..6
.. attribute:: rx_oper_spec
Rx Operator Specific Field
**type**\: str
**length:** 0..33
.. attribute:: expected_tti
Expected TTI String
**type**\: str
**length:** 0..129
.. attribute:: expected_sapi0
Expected SAPI[0] Field
**type**\: str
**length:** 0..5
.. attribute:: expected_sapi
Expected SAPI[1\-15] Field
**type**\: str
**length:** 0..16
.. attribute:: exp_sapi_range
Expected SAPI Range String
**type**\: str
**length:** 0..6
.. attribute:: expected_dapi0
Expected DAPI[0] Field
**type**\: str
**length:** 0..5
.. attribute:: expected_dapi
Expected DAPI[1\-15] Field
**type**\: str
**length:** 0..16
.. attribute:: exp_dapi_range
Expected DAPI Range String
**type**\: str
**length:** 0..6
.. attribute:: expected_oper_spec
Expected Operator Specific Field
**type**\: str
**length:** 0..33
.. attribute:: exp_oper_spec_range
Expected Operator Specific Field Range String
**type**\: str
**length:** 0..6
"""
_prefix = 'dwdm-ui-oper'
_revision = '2015-11-09'
def __init__(self):
    """Initialize the TTI (Trail Trace Identifier) container.

    Auto-generated by ydkgen from the Cisco-IOS-XR-dwdm-ui-oper YANG model:
    registers every tx/rx/expected SAPI, DAPI and operator-specific leaf in
    ``self._leafs`` and defaults each matching attribute to None.
    """
    super(Dwdm.Ports.Port.Info.G709Info.OduInfo.Tti, self).__init__()
    self.yang_name = "tti"
    self.yang_parent_name = "odu-info"
    self.is_top_level_class = False
    self.has_list_ancestor = True
    self.ylist_key_names = []
    self._child_classes = OrderedDict([])
    # Maps python attribute name -> (YLeaf descriptor, accepted python types).
    # The YANG leaf names (e.g. 'tx-sapi0') must match the model exactly.
    self._leafs = OrderedDict([
        ('tx_string_type', (YLeaf(YType.uint32, 'tx-string-type'), ['int'])),
        ('expected_string_type', (YLeaf(YType.uint32, 'expected-string-type'), ['int'])),
        ('rx_string_type', (YLeaf(YType.uint32, 'rx-string-type'), ['int'])),
        ('tx_tti', (YLeaf(YType.str, 'tx-tti'), ['str'])),
        ('tx_sapi0', (YLeaf(YType.str, 'tx-sapi0'), ['str'])),
        ('tx_sapi', (YLeaf(YType.str, 'tx-sapi'), ['str'])),
        ('tx_sapi_range', (YLeaf(YType.str, 'tx-sapi-range'), ['str'])),
        ('tx_dapi0', (YLeaf(YType.str, 'tx-dapi0'), ['str'])),
        ('tx_dapi', (YLeaf(YType.str, 'tx-dapi'), ['str'])),
        ('tx_dapi_range', (YLeaf(YType.str, 'tx-dapi-range'), ['str'])),
        ('tx_oper_spec', (YLeaf(YType.str, 'tx-oper-spec'), ['str'])),
        ('tx_oper_spec_range', (YLeaf(YType.str, 'tx-oper-spec-range'), ['str'])),
        ('rx_tti', (YLeaf(YType.str, 'rx-tti'), ['str'])),
        ('rx_sapi0', (YLeaf(YType.str, 'rx-sapi0'), ['str'])),
        ('rx_sapi', (YLeaf(YType.str, 'rx-sapi'), ['str'])),
        ('rx_sapi_range', (YLeaf(YType.str, 'rx-sapi-range'), ['str'])),
        ('rx_dapi0', (YLeaf(YType.str, 'rx-dapi0'), ['str'])),
        ('rx_dapi', (YLeaf(YType.str, 'rx-dapi'), ['str'])),
        ('rx_dapi_range', (YLeaf(YType.str, 'rx-dapi-range'), ['str'])),
        ('rx_oper_spec_range', (YLeaf(YType.str, 'rx-oper-spec-range'), ['str'])),
        ('rx_oper_spec', (YLeaf(YType.str, 'rx-oper-spec'), ['str'])),
        ('expected_tti', (YLeaf(YType.str, 'expected-tti'), ['str'])),
        ('expected_sapi0', (YLeaf(YType.str, 'expected-sapi0'), ['str'])),
        ('expected_sapi', (YLeaf(YType.str, 'expected-sapi'), ['str'])),
        ('exp_sapi_range', (YLeaf(YType.str, 'exp-sapi-range'), ['str'])),
        ('expected_dapi0', (YLeaf(YType.str, 'expected-dapi0'), ['str'])),
        ('expected_dapi', (YLeaf(YType.str, 'expected-dapi'), ['str'])),
        ('exp_dapi_range', (YLeaf(YType.str, 'exp-dapi-range'), ['str'])),
        ('expected_oper_spec', (YLeaf(YType.str, 'expected-oper-spec'), ['str'])),
        ('exp_oper_spec_range', (YLeaf(YType.str, 'exp-oper-spec-range'), ['str'])),
    ])
    # All leaf values start unset (None); they are populated when the
    # entity is read from the device.
    self.tx_string_type = None
    self.expected_string_type = None
    self.rx_string_type = None
    self.tx_tti = None
    self.tx_sapi0 = None
    self.tx_sapi = None
    self.tx_sapi_range = None
    self.tx_dapi0 = None
    self.tx_dapi = None
    self.tx_dapi_range = None
    self.tx_oper_spec = None
    self.tx_oper_spec_range = None
    self.rx_tti = None
    self.rx_sapi0 = None
    self.rx_sapi = None
    self.rx_sapi_range = None
    self.rx_dapi0 = None
    self.rx_dapi = None
    self.rx_dapi_range = None
    self.rx_oper_spec_range = None
    self.rx_oper_spec = None
    self.expected_tti = None
    self.expected_sapi0 = None
    self.expected_sapi = None
    self.exp_sapi_range = None
    self.expected_dapi0 = None
    self.expected_dapi = None
    self.exp_dapi_range = None
    self.expected_oper_spec = None
    self.exp_oper_spec_range = None
    self._segment_path = lambda: "tti"
    # Freeze: subsequent attribute writes go through _perform_setattr.
    self._is_frozen = True
def __setattr__(self, name, value):
    # Delegate every attribute write to Entity._perform_setattr together
    # with the generated list of valid leaf names for this class.
    self._perform_setattr(Dwdm.Ports.Port.Info.G709Info.OduInfo.Tti, ['tx_string_type', 'expected_string_type', 'rx_string_type', 'tx_tti', 'tx_sapi0', 'tx_sapi', 'tx_sapi_range', 'tx_dapi0', 'tx_dapi', 'tx_dapi_range', 'tx_oper_spec', 'tx_oper_spec_range', 'rx_tti', 'rx_sapi0', 'rx_sapi', 'rx_sapi_range', 'rx_dapi0', 'rx_dapi', 'rx_dapi_range', 'rx_oper_spec_range', 'rx_oper_spec', 'expected_tti', 'expected_sapi0', 'expected_sapi', 'exp_sapi_range', 'expected_dapi0', 'expected_dapi', 'exp_dapi_range', 'expected_oper_spec', 'exp_oper_spec_range'], name, value)
class OpticsInfo(Entity):
    """
    Optics operational information.

    Auto-generated by ydkgen from the Cisco-IOS-XR-dwdm-ui-oper YANG model.
    All leaves are read-only operational data populated from the device.

    .. attribute:: optics_type
        Optics type name
        **type**\: str
        **length:** 0..64
    .. attribute:: clock_source
        Actual transmit clock source
        **type**\: int
        **range:** 0..255
    .. attribute:: wave_frequency_progressive_string
        Wave Frequency Information for Progressive Frequencies
        **type**\: str
        **length:** 0..64
    .. attribute:: wavelength_progressive_string
        Wavelength Information for Progressive Frequencies
        **type**\: str
        **length:** 0..64
    .. attribute:: is_wave_frequency_progressive_valid
        True if Progressive Frequency is supported by hw
        **type**\: bool
    .. attribute:: wavelength_progressive
        Wavelength Information for Progressive Frequencies
        **type**\: int
        **range:** 0..4294967295
    .. attribute:: wave_band
        Wavelength band information
        **type**\: int
        **range:** 0..4294967295
    .. attribute:: wave_channel
        Current ITU wavelength channel number
        **type**\: int
        **range:** 0..4294967295
    .. attribute:: wave_frequency
        Wavelength frequency read from hw in the unit 0.01nm
        **type**\: int
        **range:** 0..4294967295
    .. attribute:: is_wave_frequency_valid
        True if hw supported wavelength frequency readback
        **type**\: bool
    .. attribute:: wave_channel_owner
        Owner of current wavelength
        **type**\: :py:class:`DwdmWaveChannelOwner <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.DwdmWaveChannelOwner>`
    .. attribute:: gmpls_set_wave_channel
        Wavelength channel set by GMPLS
        **type**\: int
        **range:** 0..65535
    .. attribute:: configured_wave_channel
        Wavelength channel set from configuration
        **type**\: int
        **range:** 0..65535
    .. attribute:: default_wave_channel
        Wavelength channel default from hardware
        **type**\: int
        **range:** 0..65535
    .. attribute:: transmit_power
        Transmit power in the unit of 0.01dbm
        **type**\: int
        **range:** \-2147483648..2147483647
    .. attribute:: transmit_power_threshold
        Transmit power threshold value
        **type**\: int
        **range:** \-2147483648..2147483647
    .. attribute:: laser_current_bias
        Laser current bias value
        **type**\: int
        **range:** \-2147483648..2147483647
    .. attribute:: laser_current_bias_threshold
        Laser Current Bias threshold value
        **type**\: int
        **range:** \-2147483648..2147483647
    .. attribute:: receive_power
        Transponder receive power
        **type**\: int
        **range:** \-2147483648..2147483647
    .. attribute:: is_rx_los_threshold_supported
        TRUE if Rx LOS threshold configurable
        **type**\: bool
    .. attribute:: rx_los_threshold
        Rx LOS threshold value
        **type**\: int
        **range:** \-2147483648..2147483647
    .. attribute:: transmit_power_min
        Transmit power minimum value in the interval time
        **type**\: int
        **range:** \-2147483648..2147483647
    .. attribute:: transmit_power_max
        Transmit power maximum value in the interval time
        **type**\: int
        **range:** \-2147483648..2147483647
    .. attribute:: transmit_power_avg
        Transmit optical average value in the interval time
        **type**\: int
        **range:** \-2147483648..2147483647
    .. attribute:: receive_power_min
        Receive power minimum value in the interval time
        **type**\: int
        **range:** \-2147483648..2147483647
    .. attribute:: receive_power_max
        Receive power maximum value in the interval time
        **type**\: int
        **range:** \-2147483648..2147483647
    .. attribute:: receive_power_avg
        Receive power average value in the interval time
        **type**\: int
        **range:** \-2147483648..2147483647
    .. attribute:: laser_bias_current_min
        Laser bias current minimum value in the interval time
        **type**\: int
        **range:** \-2147483648..2147483647
    .. attribute:: laser_bias_current_max
        Laser bias current maximum value in the interval time
        **type**\: int
        **range:** \-2147483648..2147483647
    .. attribute:: laser_bias_current_avg
        Laser bias current average value in the interval time
        **type**\: int
        **range:** \-2147483648..2147483647
    .. attribute:: chromatic_dispersion
        Current chromatic dispersion
        **type**\: int
        **range:** \-2147483648..2147483647
    .. attribute:: differential_group_delay
        Current differential group Delay
        **type**\: int
        **range:** \-2147483648..2147483647
    .. attribute:: polarization_mode_dispersion
        Current polarization mode dispersion
        **type**\: int
        **range:** \-2147483648..2147483647
    .. attribute:: signal_to_noise_ratio
        Current optical signal to noise ratio
        **type**\: int
        **range:** \-2147483648..2147483647
    .. attribute:: polarization_dependent_loss
        Current Polarization Dependent loss
        **type**\: int
        **range:** \-2147483648..2147483647
    .. attribute:: polarization_change_rate
        Current Polarization change rate
        **type**\: int
        **range:** 0..4294967295
    .. attribute:: phase_noise
        Current Phase Noise
        **type**\: int
        **range:** 0..4294967295
    .. attribute:: output_power_fail
        Transmit power failure (above/below a threshold) count
        **type**\: int
        **range:** 0..4294967295
    .. attribute:: input_power_fail
        Receive power failure (above/below a threshold) count
        **type**\: int
        **range:** 0..4294967295
    """
    _prefix = 'dwdm-ui-oper'
    _revision = '2015-11-09'

    def __init__(self):
        """Register every optics leaf and default its attribute to None."""
        super(Dwdm.Ports.Port.Info.OpticsInfo, self).__init__()
        self.yang_name = "optics-info"
        self.yang_parent_name = "info"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])
        # Maps python attribute name -> (YLeaf descriptor, accepted python types).
        self._leafs = OrderedDict([
            ('optics_type', (YLeaf(YType.str, 'optics-type'), ['str'])),
            ('clock_source', (YLeaf(YType.uint8, 'clock-source'), ['int'])),
            ('wave_frequency_progressive_string', (YLeaf(YType.str, 'wave-frequency-progressive-string'), ['str'])),
            ('wavelength_progressive_string', (YLeaf(YType.str, 'wavelength-progressive-string'), ['str'])),
            ('is_wave_frequency_progressive_valid', (YLeaf(YType.boolean, 'is-wave-frequency-progressive-valid'), ['bool'])),
            ('wavelength_progressive', (YLeaf(YType.uint32, 'wavelength-progressive'), ['int'])),
            ('wave_band', (YLeaf(YType.uint32, 'wave-band'), ['int'])),
            ('wave_channel', (YLeaf(YType.uint32, 'wave-channel'), ['int'])),
            ('wave_frequency', (YLeaf(YType.uint32, 'wave-frequency'), ['int'])),
            ('is_wave_frequency_valid', (YLeaf(YType.boolean, 'is-wave-frequency-valid'), ['bool'])),
            ('wave_channel_owner', (YLeaf(YType.enumeration, 'wave-channel-owner'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper', 'DwdmWaveChannelOwner', '')])),
            ('gmpls_set_wave_channel', (YLeaf(YType.uint16, 'gmpls-set-wave-channel'), ['int'])),
            ('configured_wave_channel', (YLeaf(YType.uint16, 'configured-wave-channel'), ['int'])),
            ('default_wave_channel', (YLeaf(YType.uint16, 'default-wave-channel'), ['int'])),
            ('transmit_power', (YLeaf(YType.int32, 'transmit-power'), ['int'])),
            ('transmit_power_threshold', (YLeaf(YType.int32, 'transmit-power-threshold'), ['int'])),
            ('laser_current_bias', (YLeaf(YType.int32, 'laser-current-bias'), ['int'])),
            ('laser_current_bias_threshold', (YLeaf(YType.int32, 'laser-current-bias-threshold'), ['int'])),
            ('receive_power', (YLeaf(YType.int32, 'receive-power'), ['int'])),
            ('is_rx_los_threshold_supported', (YLeaf(YType.boolean, 'is-rx-los-threshold-supported'), ['bool'])),
            ('rx_los_threshold', (YLeaf(YType.int32, 'rx-los-threshold'), ['int'])),
            ('transmit_power_min', (YLeaf(YType.int32, 'transmit-power-min'), ['int'])),
            ('transmit_power_max', (YLeaf(YType.int32, 'transmit-power-max'), ['int'])),
            ('transmit_power_avg', (YLeaf(YType.int32, 'transmit-power-avg'), ['int'])),
            ('receive_power_min', (YLeaf(YType.int32, 'receive-power-min'), ['int'])),
            ('receive_power_max', (YLeaf(YType.int32, 'receive-power-max'), ['int'])),
            ('receive_power_avg', (YLeaf(YType.int32, 'receive-power-avg'), ['int'])),
            ('laser_bias_current_min', (YLeaf(YType.int32, 'laser-bias-current-min'), ['int'])),
            ('laser_bias_current_max', (YLeaf(YType.int32, 'laser-bias-current-max'), ['int'])),
            ('laser_bias_current_avg', (YLeaf(YType.int32, 'laser-bias-current-avg'), ['int'])),
            ('chromatic_dispersion', (YLeaf(YType.int32, 'chromatic-dispersion'), ['int'])),
            ('differential_group_delay', (YLeaf(YType.int32, 'differential-group-delay'), ['int'])),
            ('polarization_mode_dispersion', (YLeaf(YType.int32, 'polarization-mode-dispersion'), ['int'])),
            ('signal_to_noise_ratio', (YLeaf(YType.int32, 'signal-to-noise-ratio'), ['int'])),
            ('polarization_dependent_loss', (YLeaf(YType.int32, 'polarization-dependent-loss'), ['int'])),
            ('polarization_change_rate', (YLeaf(YType.uint32, 'polarization-change-rate'), ['int'])),
            ('phase_noise', (YLeaf(YType.uint32, 'phase-noise'), ['int'])),
            ('output_power_fail', (YLeaf(YType.uint32, 'output-power-fail'), ['int'])),
            ('input_power_fail', (YLeaf(YType.uint32, 'input-power-fail'), ['int'])),
        ])
        # All leaf values start unset (None).
        self.optics_type = None
        self.clock_source = None
        self.wave_frequency_progressive_string = None
        self.wavelength_progressive_string = None
        self.is_wave_frequency_progressive_valid = None
        self.wavelength_progressive = None
        self.wave_band = None
        self.wave_channel = None
        self.wave_frequency = None
        self.is_wave_frequency_valid = None
        self.wave_channel_owner = None
        self.gmpls_set_wave_channel = None
        self.configured_wave_channel = None
        self.default_wave_channel = None
        self.transmit_power = None
        self.transmit_power_threshold = None
        self.laser_current_bias = None
        self.laser_current_bias_threshold = None
        self.receive_power = None
        self.is_rx_los_threshold_supported = None
        self.rx_los_threshold = None
        self.transmit_power_min = None
        self.transmit_power_max = None
        self.transmit_power_avg = None
        self.receive_power_min = None
        self.receive_power_max = None
        self.receive_power_avg = None
        self.laser_bias_current_min = None
        self.laser_bias_current_max = None
        self.laser_bias_current_avg = None
        self.chromatic_dispersion = None
        self.differential_group_delay = None
        self.polarization_mode_dispersion = None
        self.signal_to_noise_ratio = None
        self.polarization_dependent_loss = None
        self.polarization_change_rate = None
        self.phase_noise = None
        self.output_power_fail = None
        self.input_power_fail = None
        self._segment_path = lambda: "optics-info"
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Delegate attribute writes to Entity validation with the generated
        # list of valid leaf names.
        self._perform_setattr(Dwdm.Ports.Port.Info.OpticsInfo, ['optics_type', 'clock_source', 'wave_frequency_progressive_string', 'wavelength_progressive_string', 'is_wave_frequency_progressive_valid', 'wavelength_progressive', 'wave_band', 'wave_channel', 'wave_frequency', 'is_wave_frequency_valid', 'wave_channel_owner', 'gmpls_set_wave_channel', 'configured_wave_channel', 'default_wave_channel', 'transmit_power', 'transmit_power_threshold', 'laser_current_bias', 'laser_current_bias_threshold', 'receive_power', 'is_rx_los_threshold_supported', 'rx_los_threshold', 'transmit_power_min', 'transmit_power_max', 'transmit_power_avg', 'receive_power_min', 'receive_power_max', 'receive_power_avg', 'laser_bias_current_min', 'laser_bias_current_max', 'laser_bias_current_avg', 'chromatic_dispersion', 'differential_group_delay', 'polarization_mode_dispersion', 'signal_to_noise_ratio', 'polarization_dependent_loss', 'polarization_change_rate', 'phase_noise', 'output_power_fail', 'input_power_fail'], name, value)
class TdcInfo(Entity):
    """
    TDC operational information.

    .. attribute:: tdc_valid
        TRUE for Valid else Invalid
        **type**\: bool
    .. attribute:: major_alarm
        TRUE for Alarm condition else FALSE
        **type**\: bool
    .. attribute:: operation_mode
        TRUE for MANUAL else AUTO
        **type**\: bool
    .. attribute:: tdc_status
        TRUE if TDC Aquiring else Locked
        **type**\: bool
    .. attribute:: dispersion_offset
        TDC Dispersion Offset
        **type**\: int
        **range:** \-2147483648..2147483647
    .. attribute:: reroute_ber
        Reroute BER
        **type**\: int
        **range:** \-2147483648..2147483647
    .. attribute:: is_reroute_control_enabled
        TRUE for ENABLED else DISABLED
        **type**\: bool
    """
    _prefix = 'dwdm-ui-oper'
    _revision = '2015-11-09'

    def __init__(self):
        """Build the leaf table from a compact spec and freeze the entity."""
        super(Dwdm.Ports.Port.Info.TdcInfo, self).__init__()
        self.yang_name = "tdc-info"
        self.yang_parent_name = "info"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict()
        # (python name, yang leaf name, YANG type, python type) per leaf;
        # order matters and mirrors the YANG model.
        leaf_specs = [
            ('tdc_valid', 'tdc-valid', YType.boolean, 'bool'),
            ('major_alarm', 'major-alarm', YType.boolean, 'bool'),
            ('operation_mode', 'operation-mode', YType.boolean, 'bool'),
            ('tdc_status', 'tdc-status', YType.boolean, 'bool'),
            ('dispersion_offset', 'dispersion-offset', YType.int32, 'int'),
            ('reroute_ber', 'reroute-ber', YType.int32, 'int'),
            ('is_reroute_control_enabled', 'is-reroute-control-enabled', YType.boolean, 'bool'),
        ]
        self._leafs = OrderedDict(
            (attr, (YLeaf(ytype, yname), [ptype]))
            for attr, yname, ytype, ptype in leaf_specs
        )
        # Every leaf attribute starts unset (None), in spec order.
        for attr, _yname, _ytype, _ptype in leaf_specs:
            setattr(self, attr, None)
        self._segment_path = lambda: "tdc-info"
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Route writes through Entity validation with the valid leaf names.
        self._perform_setattr(Dwdm.Ports.Port.Info.TdcInfo, ['tdc_valid', 'major_alarm', 'operation_mode', 'tdc_status', 'dispersion_offset', 'reroute_ber', 'is_reroute_control_enabled'], name, value)
class NetworkSrlgInfo(Entity):
    """
    Network SRLG information.

    .. attribute:: network_srlg
        Network Srlg
        **type**\: list of int
        **range:** 0..4294967295
    """
    _prefix = 'dwdm-ui-oper'
    _revision = '2015-11-09'

    def __init__(self):
        """Register the single network-srlg leaf-list and freeze the entity."""
        super(Dwdm.Ports.Port.Info.NetworkSrlgInfo, self).__init__()
        self.yang_name = "network-srlg-info"
        self.yang_parent_name = "info"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict()
        # Single leaf-list of uint32 SRLG values; built locally, then
        # assigned once so the frozen-entity setattr path sees it whole.
        leafs = OrderedDict()
        leafs['network_srlg'] = (YLeafList(YType.uint32, 'network-srlg'), ['int'])
        self._leafs = leafs
        self.network_srlg = []
        self._segment_path = lambda: "network-srlg-info"
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Route writes through Entity validation with the valid leaf names.
        self._perform_setattr(Dwdm.Ports.Port.Info.NetworkSrlgInfo, ['network_srlg'], name, value)
class Proactive(Entity):
    """
    Proactive protection information.

    Auto-generated by ydkgen from the Cisco-IOS-XR-dwdm-ui-oper YANG model.

    .. attribute:: proactive_feature
        Feature Support
        **type**\: bool
    .. attribute:: proactive_mode
        Proactive Mode
        **type**\: :py:class:`G709ppfsmMode <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.G709ppfsmMode>`
    .. attribute:: proactive_fsm_state
        Proactive FSM State
        **type**\: :py:class:`G709ppfsmState <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.G709ppfsmState>`
    .. attribute:: proactive_fsm_if_state
        Proactive FSM IF State
        **type**\: :py:class:`G709ppintfState <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.G709ppintfState>`
    .. attribute:: tas_state
        TAS State
        **type**\: :py:class:`DwdmtasState <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.DwdmtasState>`
    .. attribute:: trig_thresh_coeff
        Trigger threshold coefficient
        **type**\: int
        **range:** 0..255
    .. attribute:: trig_thresh_power
        Trigger threshold power
        **type**\: int
        **range:** 0..255
    .. attribute:: rvrt_thresh_coeff
        Revert threshold coefficient
        **type**\: int
        **range:** 0..255
    .. attribute:: rvrt_thresh_power
        Revert threshold power
        **type**\: int
        **range:** 0..255
    .. attribute:: default_trig_thresh_coeff
        Default Trigger threshold coefficient
        **type**\: int
        **range:** 0..255
    .. attribute:: default_trig_thresh_power
        Default Trigger threshold power
        **type**\: int
        **range:** 0..255
    .. attribute:: default_rvrt_thresh_coeff
        Default Revert threshold coefficient
        **type**\: int
        **range:** 0..255
    .. attribute:: default_rvrt_thresh_power
        Default Revert threshold power
        **type**\: int
        **range:** 0..255
    .. attribute:: trig_samples
        Required Trigger Samples
        **type**\: int
        **range:** 0..255
    .. attribute:: rvrt_samples
        Required Revert Samples
        **type**\: int
        **range:** 0..255
    .. attribute:: trigger_window
        Trigger Integration window
        **type**\: int
        **range:** 0..4294967295
    .. attribute:: revert_window
        Revert Integration Window
        **type**\: int
        **range:** 0..4294967295
    .. attribute:: protection_trigger
        Protection Trigger State
        **type**\: bool
    .. attribute:: interface_trigger
        Proactive Interface Trigger
        **type**\: bool
    .. attribute:: tx_aps
        Transmitted APS Byte
        **type**\: int
        **range:** 0..255
    .. attribute:: tx_aps_descr
        Tx APS Description
        **type**\: :py:class:`G709apsByte <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.G709apsByte>`
    .. attribute:: rx_aps
        Received APS byte
        **type**\: int
        **range:** 0..255
    .. attribute:: rx_aps_descr
        Rx APS Description
        **type**\: :py:class:`G709apsByte <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.G709apsByte>`
    .. attribute:: alarm_state
        AlarmState
        **type**\: bool
    .. attribute:: trig_ec_cnt
        Trigger EC Cnt
        **type**\: int
        **range:** 0..4294967295
    .. attribute:: rvrt_ec_cnt
        Revert EC Cnt
        **type**\: int
        **range:** 0..4294967295
    .. attribute:: prefec_thresh_crossed
        Prefec Trigger Thresh Crossed
        **type**\: bool
    """
    _prefix = 'dwdm-ui-oper'
    _revision = '2015-11-09'

    def __init__(self):
        """Register every proactive-protection leaf and default it to None."""
        super(Dwdm.Ports.Port.Info.Proactive, self).__init__()
        self.yang_name = "proactive"
        self.yang_parent_name = "info"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])
        # Maps python attribute name -> (YLeaf descriptor, accepted python types).
        self._leafs = OrderedDict([
            ('proactive_feature', (YLeaf(YType.boolean, 'proactive-feature'), ['bool'])),
            ('proactive_mode', (YLeaf(YType.enumeration, 'proactive-mode'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper', 'G709ppfsmMode', '')])),
            ('proactive_fsm_state', (YLeaf(YType.enumeration, 'proactive-fsm-state'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper', 'G709ppfsmState', '')])),
            ('proactive_fsm_if_state', (YLeaf(YType.enumeration, 'proactive-fsm-if-state'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper', 'G709ppintfState', '')])),
            ('tas_state', (YLeaf(YType.enumeration, 'tas-state'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper', 'DwdmtasState', '')])),
            ('trig_thresh_coeff', (YLeaf(YType.uint8, 'trig-thresh-coeff'), ['int'])),
            ('trig_thresh_power', (YLeaf(YType.uint8, 'trig-thresh-power'), ['int'])),
            ('rvrt_thresh_coeff', (YLeaf(YType.uint8, 'rvrt-thresh-coeff'), ['int'])),
            ('rvrt_thresh_power', (YLeaf(YType.uint8, 'rvrt-thresh-power'), ['int'])),
            ('default_trig_thresh_coeff', (YLeaf(YType.uint8, 'default-trig-thresh-coeff'), ['int'])),
            ('default_trig_thresh_power', (YLeaf(YType.uint8, 'default-trig-thresh-power'), ['int'])),
            ('default_rvrt_thresh_coeff', (YLeaf(YType.uint8, 'default-rvrt-thresh-coeff'), ['int'])),
            ('default_rvrt_thresh_power', (YLeaf(YType.uint8, 'default-rvrt-thresh-power'), ['int'])),
            ('trig_samples', (YLeaf(YType.uint8, 'trig-samples'), ['int'])),
            ('rvrt_samples', (YLeaf(YType.uint8, 'rvrt-samples'), ['int'])),
            ('trigger_window', (YLeaf(YType.uint32, 'trigger-window'), ['int'])),
            ('revert_window', (YLeaf(YType.uint32, 'revert-window'), ['int'])),
            ('protection_trigger', (YLeaf(YType.boolean, 'protection-trigger'), ['bool'])),
            ('interface_trigger', (YLeaf(YType.boolean, 'interface-trigger'), ['bool'])),
            ('tx_aps', (YLeaf(YType.uint8, 'tx-aps'), ['int'])),
            ('tx_aps_descr', (YLeaf(YType.enumeration, 'tx-aps-descr'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper', 'G709apsByte', '')])),
            ('rx_aps', (YLeaf(YType.uint8, 'rx-aps'), ['int'])),
            ('rx_aps_descr', (YLeaf(YType.enumeration, 'rx-aps-descr'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper', 'G709apsByte', '')])),
            ('alarm_state', (YLeaf(YType.boolean, 'alarm-state'), ['bool'])),
            ('trig_ec_cnt', (YLeaf(YType.uint32, 'trig-ec-cnt'), ['int'])),
            ('rvrt_ec_cnt', (YLeaf(YType.uint32, 'rvrt-ec-cnt'), ['int'])),
            ('prefec_thresh_crossed', (YLeaf(YType.boolean, 'prefec-thresh-crossed'), ['bool'])),
        ])
        # All leaf values start unset (None).
        self.proactive_feature = None
        self.proactive_mode = None
        self.proactive_fsm_state = None
        self.proactive_fsm_if_state = None
        self.tas_state = None
        self.trig_thresh_coeff = None
        self.trig_thresh_power = None
        self.rvrt_thresh_coeff = None
        self.rvrt_thresh_power = None
        self.default_trig_thresh_coeff = None
        self.default_trig_thresh_power = None
        self.default_rvrt_thresh_coeff = None
        self.default_rvrt_thresh_power = None
        self.trig_samples = None
        self.rvrt_samples = None
        self.trigger_window = None
        self.revert_window = None
        self.protection_trigger = None
        self.interface_trigger = None
        self.tx_aps = None
        self.tx_aps_descr = None
        self.rx_aps = None
        self.rx_aps_descr = None
        self.alarm_state = None
        self.trig_ec_cnt = None
        self.rvrt_ec_cnt = None
        self.prefec_thresh_crossed = None
        self._segment_path = lambda: "proactive"
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Delegate attribute writes to Entity validation with the generated
        # list of valid leaf names.
        self._perform_setattr(Dwdm.Ports.Port.Info.Proactive, ['proactive_feature', 'proactive_mode', 'proactive_fsm_state', 'proactive_fsm_if_state', 'tas_state', 'trig_thresh_coeff', 'trig_thresh_power', 'rvrt_thresh_coeff', 'rvrt_thresh_power', 'default_trig_thresh_coeff', 'default_trig_thresh_power', 'default_rvrt_thresh_coeff', 'default_rvrt_thresh_power', 'trig_samples', 'rvrt_samples', 'trigger_window', 'revert_window', 'protection_trigger', 'interface_trigger', 'tx_aps', 'tx_aps_descr', 'rx_aps', 'rx_aps_descr', 'alarm_state', 'trig_ec_cnt', 'rvrt_ec_cnt', 'prefec_thresh_crossed'], name, value)
class SignalLog(Entity):
    """
    Signal log information.

    .. attribute:: is_log_enabled
        'true' if signal log is enabled 'false' otherwise
        **type**\: bool
    .. attribute:: log_filename
        Log file name
        **type**\: str
        **length:** 0..64
    """
    _prefix = 'dwdm-ui-oper'
    _revision = '2015-11-09'

    def __init__(self):
        """Register the two signal-log leaves and freeze the entity."""
        super(Dwdm.Ports.Port.Info.SignalLog, self).__init__()
        self.yang_name = "signal-log"
        self.yang_parent_name = "info"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict()
        self._leafs = OrderedDict([
            ('is_log_enabled', (YLeaf(YType.boolean, 'is-log-enabled'), ['bool'])),
            ('log_filename', (YLeaf(YType.str, 'log-filename'), ['str'])),
        ])
        # Both leaves start unset (None).
        for leaf_name in ('is_log_enabled', 'log_filename'):
            setattr(self, leaf_name, None)
        self._segment_path = lambda: "signal-log"
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Route writes through Entity validation with the valid leaf names.
        self._perform_setattr(Dwdm.Ports.Port.Info.SignalLog, ['is_log_enabled', 'log_filename'], name, value)
def clone_ptr(self):
    """Create a fresh top-level Dwdm entity, cache it on self, and return it."""
    self._top_entity = Dwdm()
    return self._top_entity
class Vtxp(Entity):
    """
    vtxp.

    Top-level container for DWDM VTXP operational data
    (auto-generated by ydkgen from Cisco-IOS-XR-dwdm-ui-oper).

    .. attribute:: dwdm_vtxp
        DWDM operational data
        **type**\: :py:class:`DwdmVtxp <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Vtxp.DwdmVtxp>`
    """
    _prefix = 'dwdm-ui-oper'
    _revision = '2015-11-09'

    def __init__(self):
        """Wire up the single dwdm-vtxp child container and freeze."""
        super(Vtxp, self).__init__()
        self._top_entity = None
        self.yang_name = "vtxp"
        self.yang_parent_name = "Cisco-IOS-XR-dwdm-ui-oper"
        self.is_top_level_class = True
        self.has_list_ancestor = False
        self.ylist_key_names = []
        self._child_classes = OrderedDict([("dwdm-vtxp", ("dwdm_vtxp", Vtxp.DwdmVtxp))])
        self._leafs = OrderedDict()
        self.dwdm_vtxp = Vtxp.DwdmVtxp()
        self.dwdm_vtxp.parent = self
        self._children_name_map["dwdm_vtxp"] = "dwdm-vtxp"
        self._segment_path = lambda: "Cisco-IOS-XR-dwdm-ui-oper:vtxp"
        self._is_frozen = True

    def __setattr__(self, name, value):
        # No writable leaves at this level; Entity still validates the write.
        self._perform_setattr(Vtxp, [], name, value)

    class DwdmVtxp(Entity):
        """
        DWDM operational data.

        .. attribute:: port_vtxps
            All DWDM Port operational data
            **type**\: :py:class:`PortVtxps <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Vtxp.DwdmVtxp.PortVtxps>`
        """
        _prefix = 'dwdm-ui-oper'
        _revision = '2015-11-09'

        def __init__(self):
            """Wire up the port-vtxps child container and freeze."""
            super(Vtxp.DwdmVtxp, self).__init__()
            self.yang_name = "dwdm-vtxp"
            self.yang_parent_name = "vtxp"
            self.is_top_level_class = False
            self.has_list_ancestor = False
            self.ylist_key_names = []
            self._child_classes = OrderedDict([("port-vtxps", ("port_vtxps", Vtxp.DwdmVtxp.PortVtxps))])
            self._leafs = OrderedDict()
            self.port_vtxps = Vtxp.DwdmVtxp.PortVtxps()
            self.port_vtxps.parent = self
            self._children_name_map["port_vtxps"] = "port-vtxps"
            self._segment_path = lambda: "dwdm-vtxp"
            self._absolute_path = lambda: "Cisco-IOS-XR-dwdm-ui-oper:vtxp/%s" % self._segment_path()
            self._is_frozen = True

        def __setattr__(self, name, value):
            self._perform_setattr(Vtxp.DwdmVtxp, [], name, value)

        class PortVtxps(Entity):
            """
            All DWDM Port operational data.

            .. attribute:: port_vtxp
                DWDM Port operational data
                **type**\: list of :py:class:`PortVtxp <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Vtxp.DwdmVtxp.PortVtxps.PortVtxp>`
            """
            _prefix = 'dwdm-ui-oper'
            _revision = '2015-11-09'

            def __init__(self):
                """Create the empty keyed list of port-vtxp entries and freeze."""
                super(Vtxp.DwdmVtxp.PortVtxps, self).__init__()
                self.yang_name = "port-vtxps"
                self.yang_parent_name = "dwdm-vtxp"
                self.is_top_level_class = False
                self.has_list_ancestor = False
                self.ylist_key_names = []
                self._child_classes = OrderedDict([("port-vtxp", ("port_vtxp", Vtxp.DwdmVtxp.PortVtxps.PortVtxp))])
                self._leafs = OrderedDict()
                self.port_vtxp = YList(self)
                self._segment_path = lambda: "port-vtxps"
                self._absolute_path = lambda: "Cisco-IOS-XR-dwdm-ui-oper:vtxp/dwdm-vtxp/%s" % self._segment_path()
                self._is_frozen = True

            def __setattr__(self, name, value):
                self._perform_setattr(Vtxp.DwdmVtxp.PortVtxps, [], name, value)

            class PortVtxp(Entity):
                """
                DWDM Port operational data.

                .. attribute:: name (key)
                    Port name
                    **type**\: str
                    **pattern:** [a\-zA\-Z0\-9.\_/\-]+
                .. attribute:: info
                    DWDM port operational data
                    **type**\: :py:class:`Info <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dwdm_ui_oper.Vtxp.DwdmVtxp.PortVtxps.PortVtxp.Info>`
                """
                _prefix = 'dwdm-ui-oper'
                _revision = '2015-11-09'

                def __init__(self):
                    """Register the 'name' list key and the info child; freeze."""
                    super(Vtxp.DwdmVtxp.PortVtxps.PortVtxp, self).__init__()
                    self.yang_name = "port-vtxp"
                    self.yang_parent_name = "port-vtxps"
                    self.is_top_level_class = False
                    self.has_list_ancestor = False
                    self.ylist_key_names = ['name']
                    self._child_classes = OrderedDict([("info", ("info", Vtxp.DwdmVtxp.PortVtxps.PortVtxp.Info))])
                    self._leafs = OrderedDict([
                        ('name', (YLeaf(YType.str, 'name'), ['str'])),
                    ])
                    self.name = None
                    self.info = Vtxp.DwdmVtxp.PortVtxps.PortVtxp.Info()
                    self.info.parent = self
                    self._children_name_map["info"] = "info"
                    # List entry path embeds the current key value.
                    self._segment_path = lambda: "port-vtxp" + "[name='" + str(self.name) + "']"
                    self._absolute_path = lambda: "Cisco-IOS-XR-dwdm-ui-oper:vtxp/dwdm-vtxp/port-vtxps/%s" % self._segment_path()
                    self._is_frozen = True

                def __setattr__(self, name, value):
                    self._perform_setattr(Vtxp.DwdmVtxp.PortVtxps.PortVtxp, ['name'], name, value)

                class Info(Entity):
                    """
                    DWDM port operational data.

                    .. attribute:: vtxp_enable
                        Is VTXP attribute enabled
                        **type**\: bool
                    """
                    _prefix = 'dwdm-ui-oper'
                    _revision = '2015-11-09'

                    def __init__(self):
                        """Register the single vtxp-enable leaf and freeze."""
                        super(Vtxp.DwdmVtxp.PortVtxps.PortVtxp.Info, self).__init__()
                        self.yang_name = "info"
                        self.yang_parent_name = "port-vtxp"
                        self.is_top_level_class = False
                        self.has_list_ancestor = True
                        self.ylist_key_names = []
                        self._child_classes = OrderedDict([])
                        self._leafs = OrderedDict([
                            ('vtxp_enable', (YLeaf(YType.boolean, 'vtxp-enable'), ['bool'])),
                        ])
                        self.vtxp_enable = None
                        self._segment_path = lambda: "info"
                        self._is_frozen = True

                    def __setattr__(self, name, value):
                        self._perform_setattr(Vtxp.DwdmVtxp.PortVtxps.PortVtxp.Info, ['vtxp_enable'], name, value)

    def clone_ptr(self):
        """Create a fresh top-level Vtxp entity, cache it on self, and return it."""
        self._top_entity = Vtxp()
        return self._top_entity
| 45.983078
| 1,679
| 0.38827
| 21,057
| 285,325
| 5.002564
| 0.026405
| 0.026125
| 0.040972
| 0.045994
| 0.818396
| 0.780727
| 0.752931
| 0.729587
| 0.690808
| 0.676188
| 0
| 0.037767
| 0.51837
| 285,325
| 6,204
| 1,680
| 45.99049
| 0.728775
| 0.201742
| 0
| 0.678667
| 0
| 0.000444
| 0.134445
| 0.024465
| 0
| 0
| 0
| 0
| 0.04
| 1
| 0.066667
| false
| 0
| 0.002222
| 0
| 0.144
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e19c63a31b1770d2cf9406b9e4a0b8937e55d1df
| 77,076
|
py
|
Python
|
v1alpha1/swagger_client/apis/grafeas_api.py
|
dinagraves/client-python
|
c4f77332b402e61a1e1700fa0c746183008d435c
|
[
"Apache-2.0"
] | 10
|
2017-11-19T20:21:16.000Z
|
2021-04-23T06:59:33.000Z
|
v1alpha1/swagger_client/apis/grafeas_api.py
|
dinagraves/client-python
|
c4f77332b402e61a1e1700fa0c746183008d435c
|
[
"Apache-2.0"
] | 1
|
2021-06-01T21:55:48.000Z
|
2021-06-01T21:55:48.000Z
|
v1alpha1/swagger_client/apis/grafeas_api.py
|
dinagraves/client-python
|
c4f77332b402e61a1e1700fa0c746183008d435c
|
[
"Apache-2.0"
] | 1
|
2019-08-11T16:51:36.000Z
|
2019-08-11T16:51:36.000Z
|
# coding: utf-8
"""
Grafeas API
An API to insert and retrieve annotations on cloud artifacts.
OpenAPI spec version: 0.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class GrafeasApi(object):
    """
    Client for the Grafeas API: inserts and retrieves annotations (notes
    and occurrences) on cloud artifacts via an ApiClient.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    Ref: https://github.com/swagger-api/swagger-codegen
    """
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def create_note(self, projects_id, **kwargs):
"""
Creates a new note.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_note(projects_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str projects_id: Part of `parent`. This field contains the projectId for example: \"project/{project_id} (required)
:param str note_id: The ID to use for this note.
:param Note note: The Note to be inserted
:return: Note
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.create_note_with_http_info(projects_id, **kwargs)
else:
(data) = self.create_note_with_http_info(projects_id, **kwargs)
return data
def create_note_with_http_info(self, projects_id, **kwargs):
"""
Creates a new note.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_note_with_http_info(projects_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str projects_id: Part of `parent`. This field contains the projectId for example: \"project/{project_id} (required)
:param str note_id: The ID to use for this note.
:param Note note: The Note to be inserted
:return: Note
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['projects_id', 'note_id', 'note']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_note" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'projects_id' is set
if ('projects_id' not in params) or (params['projects_id'] is None):
raise ValueError("Missing the required parameter `projects_id` when calling `create_note`")
resource_path = '/v1alpha1/projects/{projectsId}/notes'.replace('{format}', 'json')
path_params = {}
if 'projects_id' in params:
path_params['projectsId'] = params['projects_id']
query_params = {}
if 'note_id' in params:
query_params['noteId'] = params['note_id']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'note' in params:
body_params = params['note']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Note',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def create_occurrence(self, projects_id, **kwargs):
"""
Creates a new occurrence.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_occurrence(projects_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str projects_id: Part of `parent`. This field contains the projectId for example: \"projects/{project_id}\" (required)
:param Occurrence occurrence: The occurrence to be inserted
:return: Occurrence
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.create_occurrence_with_http_info(projects_id, **kwargs)
else:
(data) = self.create_occurrence_with_http_info(projects_id, **kwargs)
return data
def create_occurrence_with_http_info(self, projects_id, **kwargs):
"""
Creates a new occurrence.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_occurrence_with_http_info(projects_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str projects_id: Part of `parent`. This field contains the projectId for example: \"projects/{project_id}\" (required)
:param Occurrence occurrence: The occurrence to be inserted
:return: Occurrence
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['projects_id', 'occurrence']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_occurrence" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'projects_id' is set
if ('projects_id' not in params) or (params['projects_id'] is None):
raise ValueError("Missing the required parameter `projects_id` when calling `create_occurrence`")
resource_path = '/v1alpha1/projects/{projectsId}/occurrences'.replace('{format}', 'json')
path_params = {}
if 'projects_id' in params:
path_params['projectsId'] = params['projects_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'occurrence' in params:
body_params = params['occurrence']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Occurrence',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def delete_note(self, projects_id, notes_id, **kwargs):
"""
Deletes the given note from the system.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_note(projects_id, notes_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str projects_id: Part of `name`. The name of the note in the form \"projects/{project_id}/notes/{note_id}\" (required)
:param str notes_id: Part of `name`. See documentation of `projectsId`. (required)
:return: Empty
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_note_with_http_info(projects_id, notes_id, **kwargs)
else:
(data) = self.delete_note_with_http_info(projects_id, notes_id, **kwargs)
return data
def delete_note_with_http_info(self, projects_id, notes_id, **kwargs):
"""
Deletes the given note from the system.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_note_with_http_info(projects_id, notes_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str projects_id: Part of `name`. The name of the note in the form \"projects/{project_id}/notes/{note_id}\" (required)
:param str notes_id: Part of `name`. See documentation of `projectsId`. (required)
:return: Empty
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['projects_id', 'notes_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_note" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'projects_id' is set
if ('projects_id' not in params) or (params['projects_id'] is None):
raise ValueError("Missing the required parameter `projects_id` when calling `delete_note`")
# verify the required parameter 'notes_id' is set
if ('notes_id' not in params) or (params['notes_id'] is None):
raise ValueError("Missing the required parameter `notes_id` when calling `delete_note`")
resource_path = '/v1alpha1/projects/{projectsId}/notes/{notesId}'.replace('{format}', 'json')
path_params = {}
if 'projects_id' in params:
path_params['projectsId'] = params['projects_id']
if 'notes_id' in params:
path_params['notesId'] = params['notes_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Empty',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def delete_occurrence(self, projects_id, occurrences_id, **kwargs):
"""
Deletes the given occurrence from the system.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_occurrence(projects_id, occurrences_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str projects_id: Part of `name`. The name of the occurrence in the form \"projects/{project_id}/occurrences/{occurrence_id}\" (required)
:param str occurrences_id: Part of `name`. See documentation of `projectsId`. (required)
:return: Empty
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_occurrence_with_http_info(projects_id, occurrences_id, **kwargs)
else:
(data) = self.delete_occurrence_with_http_info(projects_id, occurrences_id, **kwargs)
return data
def delete_occurrence_with_http_info(self, projects_id, occurrences_id, **kwargs):
"""
Deletes the given occurrence from the system.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_occurrence_with_http_info(projects_id, occurrences_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str projects_id: Part of `name`. The name of the occurrence in the form \"projects/{project_id}/occurrences/{occurrence_id}\" (required)
:param str occurrences_id: Part of `name`. See documentation of `projectsId`. (required)
:return: Empty
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['projects_id', 'occurrences_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_occurrence" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'projects_id' is set
if ('projects_id' not in params) or (params['projects_id'] is None):
raise ValueError("Missing the required parameter `projects_id` when calling `delete_occurrence`")
# verify the required parameter 'occurrences_id' is set
if ('occurrences_id' not in params) or (params['occurrences_id'] is None):
raise ValueError("Missing the required parameter `occurrences_id` when calling `delete_occurrence`")
resource_path = '/v1alpha1/projects/{projectsId}/occurrences/{occurrencesId}'.replace('{format}', 'json')
path_params = {}
if 'projects_id' in params:
path_params['projectsId'] = params['projects_id']
if 'occurrences_id' in params:
path_params['occurrencesId'] = params['occurrences_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Empty',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def get_note(self, projects_id, notes_id, **kwargs):
"""
Returns the requested occurrence
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_note(projects_id, notes_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str projects_id: Part of `name`. The name of the note in the form \"projects/{project_id}/notes/{note_id}\" (required)
:param str notes_id: Part of `name`. See documentation of `projectsId`. (required)
:return: Note
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_note_with_http_info(projects_id, notes_id, **kwargs)
else:
(data) = self.get_note_with_http_info(projects_id, notes_id, **kwargs)
return data
def get_note_with_http_info(self, projects_id, notes_id, **kwargs):
"""
Returns the requested occurrence
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_note_with_http_info(projects_id, notes_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str projects_id: Part of `name`. The name of the note in the form \"projects/{project_id}/notes/{note_id}\" (required)
:param str notes_id: Part of `name`. See documentation of `projectsId`. (required)
:return: Note
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['projects_id', 'notes_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_note" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'projects_id' is set
if ('projects_id' not in params) or (params['projects_id'] is None):
raise ValueError("Missing the required parameter `projects_id` when calling `get_note`")
# verify the required parameter 'notes_id' is set
if ('notes_id' not in params) or (params['notes_id'] is None):
raise ValueError("Missing the required parameter `notes_id` when calling `get_note`")
resource_path = '/v1alpha1/projects/{projectsId}/notes/{notesId}'.replace('{format}', 'json')
path_params = {}
if 'projects_id' in params:
path_params['projectsId'] = params['projects_id']
if 'notes_id' in params:
path_params['notesId'] = params['notes_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Note',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def get_occurrence(self, projects_id, occurrences_id, **kwargs):
"""
Returns the requested occurrence
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_occurrence(projects_id, occurrences_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str projects_id: Part of `name`. The name of the occurrence in the form \"projects/{project_id}/occurrences/{occurrence_id}\" (required)
:param str occurrences_id: Part of `name`. See documentation of `projectsId`. (required)
:return: Occurrence
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_occurrence_with_http_info(projects_id, occurrences_id, **kwargs)
else:
(data) = self.get_occurrence_with_http_info(projects_id, occurrences_id, **kwargs)
return data
def get_occurrence_with_http_info(self, projects_id, occurrences_id, **kwargs):
"""
Returns the requested occurrence
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_occurrence_with_http_info(projects_id, occurrences_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str projects_id: Part of `name`. The name of the occurrence in the form \"projects/{project_id}/occurrences/{occurrence_id}\" (required)
:param str occurrences_id: Part of `name`. See documentation of `projectsId`. (required)
:return: Occurrence
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['projects_id', 'occurrences_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_occurrence" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'projects_id' is set
if ('projects_id' not in params) or (params['projects_id'] is None):
raise ValueError("Missing the required parameter `projects_id` when calling `get_occurrence`")
# verify the required parameter 'occurrences_id' is set
if ('occurrences_id' not in params) or (params['occurrences_id'] is None):
raise ValueError("Missing the required parameter `occurrences_id` when calling `get_occurrence`")
resource_path = '/v1alpha1/projects/{projectsId}/occurrences/{occurrencesId}'.replace('{format}', 'json')
path_params = {}
if 'projects_id' in params:
path_params['projectsId'] = params['projects_id']
if 'occurrences_id' in params:
path_params['occurrencesId'] = params['occurrences_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Occurrence',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def get_occurrence_note(self, projects_id, occurrences_id, **kwargs):
"""
Gets the note that this occurrence is attached to.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_occurrence_note(projects_id, occurrences_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str projects_id: Part of `name`. The name of the occurrence in the form \"projects/{project_id}/occurrences/{occurrence_id}\" (required)
:param str occurrences_id: Part of `name`. See documentation of `projectsId`. (required)
:return: Note
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_occurrence_note_with_http_info(projects_id, occurrences_id, **kwargs)
else:
(data) = self.get_occurrence_note_with_http_info(projects_id, occurrences_id, **kwargs)
return data
def get_occurrence_note_with_http_info(self, projects_id, occurrences_id, **kwargs):
"""
Gets the note that this occurrence is attached to.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_occurrence_note_with_http_info(projects_id, occurrences_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str projects_id: Part of `name`. The name of the occurrence in the form \"projects/{project_id}/occurrences/{occurrence_id}\" (required)
:param str occurrences_id: Part of `name`. See documentation of `projectsId`. (required)
:return: Note
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['projects_id', 'occurrences_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_occurrence_note" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'projects_id' is set
if ('projects_id' not in params) or (params['projects_id'] is None):
raise ValueError("Missing the required parameter `projects_id` when calling `get_occurrence_note`")
# verify the required parameter 'occurrences_id' is set
if ('occurrences_id' not in params) or (params['occurrences_id'] is None):
raise ValueError("Missing the required parameter `occurrences_id` when calling `get_occurrence_note`")
resource_path = '/v1alpha1/projects/{projectsId}/occurrences/{occurrencesId}/notes'.replace('{format}', 'json')
path_params = {}
if 'projects_id' in params:
path_params['projectsId'] = params['projects_id']
if 'occurrences_id' in params:
path_params['occurrencesId'] = params['occurrences_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Note',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def get_operation(self, projects_id, operations_id, **kwargs):
"""
Returns the requested occurrence
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_operation(projects_id, operations_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str projects_id: Part of `name`. The name of the operation in the form \"projects/{project_id}/operations/{operation_id}\" (required)
:param str operations_id: Part of `name`. See documentation of `projectsId`. (required)
:return: Operation
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_operation_with_http_info(projects_id, operations_id, **kwargs)
else:
(data) = self.get_operation_with_http_info(projects_id, operations_id, **kwargs)
return data
def get_operation_with_http_info(self, projects_id, operations_id, **kwargs):
"""
Returns the requested occurrence
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_operation_with_http_info(projects_id, operations_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str projects_id: Part of `name`. The name of the operation in the form \"projects/{project_id}/operations/{operation_id}\" (required)
:param str operations_id: Part of `name`. See documentation of `projectsId`. (required)
:return: Operation
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['projects_id', 'operations_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_operation" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'projects_id' is set
if ('projects_id' not in params) or (params['projects_id'] is None):
raise ValueError("Missing the required parameter `projects_id` when calling `get_operation`")
# verify the required parameter 'operations_id' is set
if ('operations_id' not in params) or (params['operations_id'] is None):
raise ValueError("Missing the required parameter `operations_id` when calling `get_operation`")
resource_path = '/v1alpha1/projects/{projectsId}/operations/{operationsId}'.replace('{format}', 'json')
path_params = {}
if 'projects_id' in params:
path_params['projectsId'] = params['projects_id']
if 'operations_id' in params:
path_params['operationsId'] = params['operations_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Operation',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def list_note_occurrences(self, projects_id, notes_id, **kwargs):
"""
Lists the names of Occurrences linked to a particular Note.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_note_occurrences(projects_id, notes_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str projects_id: Part of `name`. The name field will contain the note name for example: \"project/{project_id}/notes/{note_id}\" (required)
:param str notes_id: Part of `name`. See documentation of `projectsId`. (required)
:param str filter: The filter expression.
:param int page_size: Number of notes to return in the list.
:param str page_token: Token to provide to skip to a particular spot in the list.
:return: ListNoteOccurrencesResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.list_note_occurrences_with_http_info(projects_id, notes_id, **kwargs)
else:
(data) = self.list_note_occurrences_with_http_info(projects_id, notes_id, **kwargs)
return data
def list_note_occurrences_with_http_info(self, projects_id, notes_id, **kwargs):
    """
    Lists the names of Occurrences linked to a particular Note.

    Synchronous unless a `callback` callable is supplied via kwargs, in
    which case the underlying client returns the request thread.

    :param str projects_id: Part of `name`; contains the project id. (required)
    :param str notes_id: Part of `name`; see documentation of `projectsId`. (required)
    :param str filter: The filter expression.
    :param int page_size: Number of occurrences to return in the list.
    :param str page_token: Token identifying a spot in the list to resume from.
    :param callback: Optional callable invoked with the response. (optional)
    :return: ListNoteOccurrencesResponse, or the request thread when async.
    :raises TypeError: if an unrecognised keyword argument is supplied.
    :raises ValueError: if a required parameter is missing or None.
    """
    all_params = ['projects_id', 'notes_id', 'filter', 'page_size',
                  'page_token', 'callback', '_return_http_data_only']
    # Fold positional and keyword arguments into one mapping, rejecting
    # anything this endpoint does not recognise.
    params = {'projects_id': projects_id, 'notes_id': notes_id}
    for key, val in iteritems(kwargs):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_note_occurrences" % key
            )
        params[key] = val
    for required in ('projects_id', 'notes_id'):
        if params.get(required) is None:
            raise ValueError(
                "Missing the required parameter `%s` when calling "
                "`list_note_occurrences`" % required)
    resource_path = '/v1alpha1/projects/{projectsId}/notes/{notesId}/occurrences'.replace('{format}', 'json')
    path_params = {'projectsId': params['projects_id'],
                   'notesId': params['notes_id']}
    # Map snake_case argument names onto their wire-format query keys,
    # preserving the original insertion order.
    query_params = {}
    for arg, wire_key in (('filter', 'filter'),
                          ('page_size', 'pageSize'),
                          ('page_token', 'pageToken')):
        if arg in params:
            query_params[wire_key] = params[arg]
    header_params = {}
    # Only send an Accept header when the client selects a non-empty one.
    accept = self.api_client.select_header_accept([])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = self.api_client.select_header_content_type([])
    # No authentication settings are configured for this endpoint.
    auth_settings = []
    return self.api_client.call_api(resource_path, 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=None,
                                    post_params=[],
                                    files={},
                                    response_type='ListNoteOccurrencesResponse',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'))
def list_notes(self, projects_id, **kwargs):
    """
    Lists all notes for a given project.

    The request is synchronous unless a `callback` callable is supplied,
    in which case the request runs asynchronously, the callback receives
    the response, and the request thread is returned instead.

    :param str projects_id: Part of `parent`; contains the project id. (required)
    :param str filter: The filter expression.
    :param int page_size: Number of notes to return in the list.
    :param str page_token: Token identifying a spot in the list to resume from.
    :param callback: Optional callable invoked with the response. (optional)
    :return: ListNotesResponse, or the request thread when async.
    """
    # Sync and async paths both delegate to the same helper with identical
    # arguments, so one call covers both cases.
    kwargs['_return_http_data_only'] = True
    return self.list_notes_with_http_info(projects_id, **kwargs)
def list_notes_with_http_info(self, projects_id, **kwargs):
    """
    Lists all notes for a given project.

    Synchronous unless a `callback` callable is supplied via kwargs, in
    which case the underlying client returns the request thread.

    :param str projects_id: Part of `parent`; contains the project id. (required)
    :param str filter: The filter expression.
    :param int page_size: Number of notes to return in the list.
    :param str page_token: Token identifying a spot in the list to resume from.
    :param callback: Optional callable invoked with the response. (optional)
    :return: ListNotesResponse, or the request thread when async.
    :raises TypeError: if an unrecognised keyword argument is supplied.
    :raises ValueError: if `projects_id` is missing or None.
    """
    all_params = ['projects_id', 'filter', 'page_size', 'page_token',
                  'callback', '_return_http_data_only']
    # Fold positional and keyword arguments into one mapping, rejecting
    # anything this endpoint does not recognise.
    params = {'projects_id': projects_id}
    for key, val in iteritems(kwargs):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_notes" % key
            )
        params[key] = val
    if params.get('projects_id') is None:
        raise ValueError("Missing the required parameter `projects_id` when calling `list_notes`")
    resource_path = '/v1alpha1/projects/{projectsId}/notes'.replace('{format}', 'json')
    path_params = {'projectsId': params['projects_id']}
    # Map snake_case argument names onto their wire-format query keys,
    # preserving the original insertion order.
    query_params = {}
    for arg, wire_key in (('filter', 'filter'),
                          ('page_size', 'pageSize'),
                          ('page_token', 'pageToken')):
        if arg in params:
            query_params[wire_key] = params[arg]
    header_params = {}
    # Only send an Accept header when the client selects a non-empty one.
    accept = self.api_client.select_header_accept([])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = self.api_client.select_header_content_type([])
    # No authentication settings are configured for this endpoint.
    auth_settings = []
    return self.api_client.call_api(resource_path, 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=None,
                                    post_params=[],
                                    files={},
                                    response_type='ListNotesResponse',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'))
def list_occurrences(self, projects_id, **kwargs):
    """
    Lists active occurrences for a given project/Digest.

    The request is synchronous unless a `callback` callable is supplied,
    in which case the request runs asynchronously, the callback receives
    the response, and the request thread is returned instead.

    :param str projects_id: Part of `parent`; contains the project id. (required)
    :param str filter: The filter expression.
    :param int page_size: Number of occurrences to return in the list.
    :param str page_token: Token identifying a spot in the list to resume from.
    :param callback: Optional callable invoked with the response. (optional)
    :return: ListOccurrencesResponse, or the request thread when async.
    """
    # Sync and async paths both delegate to the same helper with identical
    # arguments, so one call covers both cases.
    kwargs['_return_http_data_only'] = True
    return self.list_occurrences_with_http_info(projects_id, **kwargs)
def list_occurrences_with_http_info(self, projects_id, **kwargs):
    """
    Lists active occurrences for a given project/Digest.

    Synchronous unless a `callback` callable is supplied via kwargs, in
    which case the underlying client returns the request thread.

    :param str projects_id: Part of `parent`; contains the project id. (required)
    :param str filter: The filter expression.
    :param int page_size: Number of occurrences to return in the list.
    :param str page_token: Token identifying a spot in the list to resume from.
    :param callback: Optional callable invoked with the response. (optional)
    :return: ListOccurrencesResponse, or the request thread when async.
    :raises TypeError: if an unrecognised keyword argument is supplied.
    :raises ValueError: if `projects_id` is missing or None.
    """
    all_params = ['projects_id', 'filter', 'page_size', 'page_token',
                  'callback', '_return_http_data_only']
    # Fold positional and keyword arguments into one mapping, rejecting
    # anything this endpoint does not recognise.
    params = {'projects_id': projects_id}
    for key, val in iteritems(kwargs):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_occurrences" % key
            )
        params[key] = val
    if params.get('projects_id') is None:
        raise ValueError("Missing the required parameter `projects_id` when calling `list_occurrences`")
    resource_path = '/v1alpha1/projects/{projectsId}/occurrences'.replace('{format}', 'json')
    path_params = {'projectsId': params['projects_id']}
    # Map snake_case argument names onto their wire-format query keys,
    # preserving the original insertion order.
    query_params = {}
    for arg, wire_key in (('filter', 'filter'),
                          ('page_size', 'pageSize'),
                          ('page_token', 'pageToken')):
        if arg in params:
            query_params[wire_key] = params[arg]
    header_params = {}
    # Only send an Accept header when the client selects a non-empty one.
    accept = self.api_client.select_header_accept([])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = self.api_client.select_header_content_type([])
    # No authentication settings are configured for this endpoint.
    auth_settings = []
    return self.api_client.call_api(resource_path, 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=None,
                                    post_params=[],
                                    files={},
                                    response_type='ListOccurrencesResponse',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'))
def list_operations(self, projects_id, **kwargs):
    """
    Lists all operations for a given project.

    The request is synchronous unless a `callback` callable is supplied,
    in which case the request runs asynchronously, the callback receives
    the response, and the request thread is returned instead.

    :param str projects_id: Part of `parent`; contains the project id. (required)
    :param str filter: The filter expression.
    :param int page_size: Number of operations to return in the list.
    :param str page_token: Token identifying a spot in the list to resume from.
    :param callback: Optional callable invoked with the response. (optional)
    :return: ListOperationsResponse, or the request thread when async.
    """
    # Sync and async paths both delegate to the same helper with identical
    # arguments, so one call covers both cases.
    kwargs['_return_http_data_only'] = True
    return self.list_operations_with_http_info(projects_id, **kwargs)
def list_operations_with_http_info(self, projects_id, **kwargs):
    """
    Lists all operations for a given project.

    Synchronous unless a `callback` callable is supplied via kwargs, in
    which case the underlying client returns the request thread.

    :param str projects_id: Part of `parent`; contains the project id. (required)
    :param str filter: The filter expression.
    :param int page_size: Number of operations to return in the list.
    :param str page_token: Token identifying a spot in the list to resume from.
    :param callback: Optional callable invoked with the response. (optional)
    :return: ListOperationsResponse, or the request thread when async.
    :raises TypeError: if an unrecognised keyword argument is supplied.
    :raises ValueError: if `projects_id` is missing or None.
    """
    all_params = ['projects_id', 'filter', 'page_size', 'page_token',
                  'callback', '_return_http_data_only']
    # Fold positional and keyword arguments into one mapping, rejecting
    # anything this endpoint does not recognise.
    params = {'projects_id': projects_id}
    for key, val in iteritems(kwargs):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_operations" % key
            )
        params[key] = val
    if params.get('projects_id') is None:
        raise ValueError("Missing the required parameter `projects_id` when calling `list_operations`")
    resource_path = '/v1alpha1/projects/{projectsId}/operations'.replace('{format}', 'json')
    path_params = {'projectsId': params['projects_id']}
    # Map snake_case argument names onto their wire-format query keys,
    # preserving the original insertion order.
    query_params = {}
    for arg, wire_key in (('filter', 'filter'),
                          ('page_size', 'pageSize'),
                          ('page_token', 'pageToken')):
        if arg in params:
            query_params[wire_key] = params[arg]
    header_params = {}
    # Only send an Accept header when the client selects a non-empty one.
    accept = self.api_client.select_header_accept([])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = self.api_client.select_header_content_type([])
    # No authentication settings are configured for this endpoint.
    auth_settings = []
    return self.api_client.call_api(resource_path, 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=None,
                                    post_params=[],
                                    files={},
                                    response_type='ListOperationsResponse',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'))
def update_note(self, projects_id, notes_id, **kwargs):
    """
    Updates an existing note.

    The request is synchronous unless a `callback` callable is supplied,
    in which case the request runs asynchronously, the callback receives
    the response, and the request thread is returned instead.

    :param str projects_id: Part of `name`; contains the project id. (required)
    :param str notes_id: Part of `name`; see documentation of `projectsId`. (required)
    :param Note note: The updated note.
    :param callback: Optional callable invoked with the response. (optional)
    :return: Note, or the request thread when async.
    """
    # Sync and async paths both delegate to the same helper with identical
    # arguments, so one call covers both cases.
    kwargs['_return_http_data_only'] = True
    return self.update_note_with_http_info(projects_id, notes_id, **kwargs)
def update_note_with_http_info(self, projects_id, notes_id, **kwargs):
    """
    Updates an existing note.

    Synchronous unless a `callback` callable is supplied via kwargs, in
    which case the underlying client returns the request thread.

    :param str projects_id: Part of `name`; contains the project id. (required)
    :param str notes_id: Part of `name`; see documentation of `projectsId`. (required)
    :param Note note: The updated note; sent as the request body.
    :param callback: Optional callable invoked with the response. (optional)
    :return: Note, or the request thread when async.
    :raises TypeError: if an unrecognised keyword argument is supplied.
    :raises ValueError: if a required parameter is missing or None.
    """
    all_params = ['projects_id', 'notes_id', 'note',
                  'callback', '_return_http_data_only']
    # Fold positional and keyword arguments into one mapping, rejecting
    # anything this endpoint does not recognise.
    params = {'projects_id': projects_id, 'notes_id': notes_id}
    for key, val in iteritems(kwargs):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method update_note" % key
            )
        params[key] = val
    for required in ('projects_id', 'notes_id'):
        if params.get(required) is None:
            raise ValueError(
                "Missing the required parameter `%s` when calling "
                "`update_note`" % required)
    resource_path = '/v1alpha1/projects/{projectsId}/notes/{notesId}'.replace('{format}', 'json')
    path_params = {'projectsId': params['projects_id'],
                   'notesId': params['notes_id']}
    # The optional `note` argument, when given, becomes the PUT body.
    body_params = params.get('note')
    header_params = {}
    # Only send an Accept header when the client selects a non-empty one.
    accept = self.api_client.select_header_accept([])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = self.api_client.select_header_content_type([])
    # No authentication settings are configured for this endpoint.
    auth_settings = []
    return self.api_client.call_api(resource_path, 'PUT',
                                    path_params,
                                    {},
                                    header_params,
                                    body=body_params,
                                    post_params=[],
                                    files={},
                                    response_type='Note',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'))
def update_occurrence(self, projects_id, occurrences_id, **kwargs):
    """
    Updates an existing occurrence.

    The request is synchronous unless a `callback` callable is supplied,
    in which case the request runs asynchronously, the callback receives
    the response, and the request thread is returned instead.

    :param str projects_id: Part of `name`; contains the project id. (required)
    :param str occurrences_id: Part of `name`; see documentation of `projectsId`. (required)
    :param Occurrence occurrence: The updated occurrence.
    :param callback: Optional callable invoked with the response. (optional)
    :return: Occurrence, or the request thread when async.
    """
    # Sync and async paths both delegate to the same helper with identical
    # arguments, so one call covers both cases.
    kwargs['_return_http_data_only'] = True
    return self.update_occurrence_with_http_info(projects_id, occurrences_id, **kwargs)
def update_occurrence_with_http_info(self, projects_id, occurrences_id, **kwargs):
    """
    Updates an existing occurrence.

    Synchronous unless a `callback` callable is supplied via kwargs, in
    which case the underlying client returns the request thread.

    :param str projects_id: Part of `name`; contains the project id. (required)
    :param str occurrences_id: Part of `name`; see documentation of `projectsId`. (required)
    :param Occurrence occurrence: The updated occurrence; sent as the request body.
    :param callback: Optional callable invoked with the response. (optional)
    :return: Occurrence, or the request thread when async.
    :raises TypeError: if an unrecognised keyword argument is supplied.
    :raises ValueError: if a required parameter is missing or None.
    """
    all_params = ['projects_id', 'occurrences_id', 'occurrence',
                  'callback', '_return_http_data_only']
    # Fold positional and keyword arguments into one mapping, rejecting
    # anything this endpoint does not recognise.
    params = {'projects_id': projects_id, 'occurrences_id': occurrences_id}
    for key, val in iteritems(kwargs):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method update_occurrence" % key
            )
        params[key] = val
    for required in ('projects_id', 'occurrences_id'):
        if params.get(required) is None:
            raise ValueError(
                "Missing the required parameter `%s` when calling "
                "`update_occurrence`" % required)
    resource_path = '/v1alpha1/projects/{projectsId}/occurrences/{occurrencesId}'.replace('{format}', 'json')
    path_params = {'projectsId': params['projects_id'],
                   'occurrencesId': params['occurrences_id']}
    # The optional `occurrence` argument, when given, becomes the PUT body.
    body_params = params.get('occurrence')
    header_params = {}
    # Only send an Accept header when the client selects a non-empty one.
    accept = self.api_client.select_header_accept([])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = self.api_client.select_header_content_type([])
    # No authentication settings are configured for this endpoint.
    auth_settings = []
    return self.api_client.call_api(resource_path, 'PUT',
                                    path_params,
                                    {},
                                    header_params,
                                    body=body_params,
                                    post_params=[],
                                    files={},
                                    response_type='Occurrence',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'))
def update_operation(self, projects_id, operations_id, **kwargs):
    """
    Updates an existing operation; returns an error if the operation does
    not exist. The only valid updates are marking the done bit and
    changing the result.

    The request is synchronous unless a `callback` callable is supplied,
    in which case the request runs asynchronously, the callback receives
    the response, and the request thread is returned instead.

    :param str projects_id: Part of `name`; contains the project id. (required)
    :param str operations_id: Part of `name`; see documentation of `projectsId`. (required)
    :param UpdateOperationRequest body: The request body.
    :param callback: Optional callable invoked with the response. (optional)
    :return: Operation, or the request thread when async.
    """
    # Sync and async paths both delegate to the same helper with identical
    # arguments, so one call covers both cases.
    kwargs['_return_http_data_only'] = True
    return self.update_operation_with_http_info(projects_id, operations_id, **kwargs)
def update_operation_with_http_info(self, projects_id, operations_id, **kwargs):
    """
    Updates an existing operation; returns an error if the operation does
    not exist. The only valid updates are marking the done bit and
    changing the result.

    Synchronous unless a `callback` callable is supplied via kwargs, in
    which case the underlying client returns the request thread.

    :param str projects_id: Part of `name`; contains the project id. (required)
    :param str operations_id: Part of `name`; see documentation of `projectsId`. (required)
    :param UpdateOperationRequest body: The request body; sent as the PUT body.
    :param callback: Optional callable invoked with the response. (optional)
    :return: Operation, or the request thread when async.
    :raises TypeError: if an unrecognised keyword argument is supplied.
    :raises ValueError: if a required parameter is missing or None.
    """
    all_params = ['projects_id', 'operations_id', 'body',
                  'callback', '_return_http_data_only']
    # Fold positional and keyword arguments into one mapping, rejecting
    # anything this endpoint does not recognise.
    params = {'projects_id': projects_id, 'operations_id': operations_id}
    for key, val in iteritems(kwargs):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method update_operation" % key
            )
        params[key] = val
    for required in ('projects_id', 'operations_id'):
        if params.get(required) is None:
            raise ValueError(
                "Missing the required parameter `%s` when calling "
                "`update_operation`" % required)
    resource_path = '/v1alpha1/projects/{projectsId}/operations/{operationsId}'.replace('{format}', 'json')
    path_params = {'projectsId': params['projects_id'],
                   'operationsId': params['operations_id']}
    # The optional `body` argument, when given, becomes the PUT body.
    body_params = params.get('body')
    header_params = {}
    # Only send an Accept header when the client selects a non-empty one.
    accept = self.api_client.select_header_accept([])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = self.api_client.select_header_content_type([])
    # No authentication settings are configured for this endpoint.
    auth_settings = []
    return self.api_client.call_api(resource_path, 'PUT',
                                    path_params,
                                    {},
                                    header_params,
                                    body=body_params,
                                    post_params=[],
                                    files={},
                                    response_type='Operation',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'))
| 43.917949
| 163
| 0.583606
| 8,209
| 77,076
| 5.268973
| 0.03216
| 0.05202
| 0.019421
| 0.024969
| 0.972996
| 0.970152
| 0.9668
| 0.960049
| 0.959772
| 0.956812
| 0
| 0.000758
| 0.332633
| 77,076
| 1,754
| 164
| 43.942987
| 0.840118
| 0.36704
| 0
| 0.82104
| 1
| 0
| 0.184586
| 0.043791
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037485
| false
| 0
| 0.008464
| 0
| 0.101572
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
e1b83a167077297ec358af37373e458ebda6d7a6
| 20,612
|
py
|
Python
|
tests/library/series/seriesinfo/test_suffix_us.py
|
stampedeboss/DadVision2
|
572d377086f7f356d24f60493cdbb655f5729e8d
|
[
"Apache-2.0"
] | 1
|
2021-02-26T19:43:06.000Z
|
2021-02-26T19:43:06.000Z
|
tests/library/series/seriesinfo/test_suffix_us.py
|
stampedeboss/DadVision2
|
572d377086f7f356d24f60493cdbb655f5729e8d
|
[
"Apache-2.0"
] | null | null | null |
tests/library/series/seriesinfo/test_suffix_us.py
|
stampedeboss/DadVision2
|
572d377086f7f356d24f60493cdbb655f5729e8d
|
[
"Apache-2.0"
] | null | null | null |
import unittest
import logger
from series import SeriesInfo
# Custom logging level, more detailed than DEBUG (10).
TRACE = 5
# Custom logging level, more detailed than INFO (20); sits between DEBUG and INFO.
VERBOSE = 15
class KnownValues(unittest.TestCase):
    """Shared fixture data for the series-suffix lookup tests below."""
    # Series under test; individual tests overwrite this class attribute.
    SeriesData = {'SeriesName': 'Suits'}
    # Expected full lookup record for 'Suits'.
    Suits_Data = {'SeriesName': 'Suits',
                  'tvdb_id': 247808,
                  'status': 'Continuing',
                  'top_show': 'Unknown',
                  'source': 'tvdb',
                  'imdb_id': 'tt1632701'}
    # Expected TVDB ids keyed by the canonical series name (including any
    # country or year suffix).
    TVDB_ID = {'Suits': 247808,
               'Married at First Sight (US)': 283196,
               'The Office (US)': 73244,
               'The Tomorrow People (US)': 268591,
               'The Voice (US)': 247824,
               'Who Do You Think You Are? (US)': 146651,
               'Battlestar Galactica (2003)': 73545,
               'Castle (2009)': 83462,
               'Doctor Who (2005)': 78804,
               'Last Man Standing (2011)': 248834,
               'Legends (2014)': 265074,
               'Once Upon a Time (2011)': 248835,
               'Parenthood (2010)': 94551,
               'Partners (2012)': 259092,
               'Pawn Stars': 111051,
               'Rush (2014)': 280939,
               'Scandal (2012)': 248841,
               'So You Think You Can Dance': 78956,
               'The Americans (2013)': 261690,
               'The Bridge (2013)': 264085,
               'The Newsroom (2012)': 256227,
               'The Twilight Zone': 73587,
               'Zero Hour (2013)': 258773,
               }
class SeriesInfoSuffix(unittest.TestCase):
def setUp(self):
    """Initialise logging and build a fresh SeriesInfo library per test."""
    logger.initialize(unit_test=True, level=VERBOSE)
    self.library = SeriesInfo(rtnDict=True)
    # Parse the same CLI options the production entry point would use.
    cli_args = ["/usr/local/bin/episode.py", "--tvdb", "--so"]
    self.library.args = self.library.options.parser.parse_args(cli_args)
# @unittest.expectedFailure
def test_suffix_us_and_year_100(self):
    """A bare 'Suits' lookup returns the expected tvdb_id."""
    found = self.library.getShowInfo({'SeriesName': "Suits"})
    self.assertEqual(found['tvdb_id'], KnownValues.Suits_Data['tvdb_id'])
# @unittest.expectedFailure
def test_suffix_us_and_year_110(self):
    """'(US)'-suffixed and bare names resolve to the same series."""
    KnownValues.SeriesData = {'SeriesName': "Married at First Sight (US)"}
    KnownValues.SeriesData_alt = {'SeriesName': "Married at First Sight"}
    suffixed = self.library.getShowInfo(KnownValues.SeriesData)
    bare = self.library.getShowInfo(KnownValues.SeriesData_alt)
    self.assertEqual(suffixed['tvdb_id'], bare['tvdb_id'])
def test_suffix_us_and_year_111(self):
    """'(US)'-suffixed name maps to its known TVDB id."""
    KnownValues.SeriesData = {'SeriesName': "Married at First Sight (US)"}
    found = self.library.getShowInfo(KnownValues.SeriesData)
    self.assertEqual(found['tvdb_id'],
                     KnownValues.TVDB_ID["Married at First Sight (US)"])
def test_suffix_us_and_year_112(self):
    """Bare name maps to the '(US)'-suffixed entry's TVDB id."""
    KnownValues.SeriesData = {'SeriesName': "Married at First Sight"}
    found = self.library.getShowInfo(KnownValues.SeriesData)
    self.assertEqual(found['tvdb_id'],
                     KnownValues.TVDB_ID["Married at First Sight (US)"])
def test_suffix_us_and_year_120(self):
    """'(US)'-suffixed and bare names resolve to the same series."""
    KnownValues.SeriesData = {'SeriesName': "The Office (US)"}
    KnownValues.SeriesData_alt = {'SeriesName': "The Office"}
    suffixed = self.library.getShowInfo(KnownValues.SeriesData)
    bare = self.library.getShowInfo(KnownValues.SeriesData_alt)
    self.assertEqual(suffixed['tvdb_id'], bare['tvdb_id'])
def test_suffix_us_and_year_121(self):
    """'(US)'-suffixed name maps to its known TVDB id."""
    KnownValues.SeriesData = {'SeriesName': "The Office (US)"}
    found = self.library.getShowInfo(KnownValues.SeriesData)
    self.assertEqual(found['tvdb_id'], KnownValues.TVDB_ID["The Office (US)"])
def test_suffix_us_and_year_122(self):
    """Bare name maps to the '(US)'-suffixed entry's TVDB id."""
    KnownValues.SeriesData = {'SeriesName': "The Office"}
    found = self.library.getShowInfo(KnownValues.SeriesData)
    self.assertEqual(found['tvdb_id'], KnownValues.TVDB_ID["The Office (US)"])
def test_suffix_us_and_year_130(self):
    """'(US)'-suffixed and bare names resolve to the same series."""
    KnownValues.SeriesData = {'SeriesName': "The Tomorrow People (US)"}
    KnownValues.SeriesData_alt = {'SeriesName': "The Tomorrow People"}
    suffixed = self.library.getShowInfo(KnownValues.SeriesData)
    bare = self.library.getShowInfo(KnownValues.SeriesData_alt)
    self.assertEqual(suffixed['tvdb_id'], bare['tvdb_id'])
def test_suffix_us_and_year_131(self):
    """'(US)'-suffixed name maps to its known TVDB id."""
    KnownValues.SeriesData = {'SeriesName': "The Tomorrow People (US)"}
    found = self.library.getShowInfo(KnownValues.SeriesData)
    self.assertEqual(found['tvdb_id'],
                     KnownValues.TVDB_ID["The Tomorrow People (US)"])
def test_suffix_us_and_year_132(self):
    """Bare name maps to the '(US)'-suffixed entry's TVDB id."""
    KnownValues.SeriesData = {'SeriesName': "The Tomorrow People"}
    found = self.library.getShowInfo(KnownValues.SeriesData)
    self.assertEqual(found['tvdb_id'],
                     KnownValues.TVDB_ID["The Tomorrow People (US)"])
def test_suffix_us_and_year_140(self):
    """'(US)'-suffixed and bare names resolve to the same series."""
    KnownValues.SeriesData = {'SeriesName': "The Voice (US)"}
    KnownValues.SeriesData_alt = {'SeriesName': "The Voice"}
    suffixed = self.library.getShowInfo(KnownValues.SeriesData)
    bare = self.library.getShowInfo(KnownValues.SeriesData_alt)
    self.assertEqual(suffixed['tvdb_id'], bare['tvdb_id'])
def test_suffix_us_and_year_141(self):
    """'(US)'-suffixed name maps to its known TVDB id."""
    KnownValues.SeriesData = {'SeriesName': "The Voice (US)"}
    found = self.library.getShowInfo(KnownValues.SeriesData)
    self.assertEqual(found['tvdb_id'], KnownValues.TVDB_ID["The Voice (US)"])
def test_suffix_us_and_year_142(self):
    """Bare name maps to the '(US)'-suffixed entry's TVDB id."""
    KnownValues.SeriesData = {'SeriesName': "The Voice"}
    found = self.library.getShowInfo(KnownValues.SeriesData)
    self.assertEqual(found['tvdb_id'], KnownValues.TVDB_ID["The Voice (US)"])
def test_suffix_us_and_year_150(self):
KnownValues.SeriesData = {'SeriesName': "Who Do You Think You Are? (US)"}
KnownValues.SeriesData_alt = {'SeriesName': "Who Do You Think You Are?"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
self.library.getShowInfo(KnownValues.SeriesData_alt)['tvdb_id'])
def test_suffix_us_and_year_151(self):
KnownValues.SeriesData = {'SeriesName': "Who Do You Think You Are? (US)"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
KnownValues.TVDB_ID[ "Who Do You Think You Are? (US)"])
def test_suffix_us_and_year_152(self):
KnownValues.SeriesData = {'SeriesName': "Who Do You Think You Are?"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
KnownValues.TVDB_ID[ "Who Do You Think You Are? (US)"])
def test_suffix_us_and_year_153(self):
KnownValues.SeriesData = {'SeriesName': "Who Do You Think You Are (US)"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
KnownValues.TVDB_ID[ "Who Do You Think You Are? (US)"])
def test_suffix_us_and_year_154(self):
KnownValues.SeriesData = {'SeriesName': "Who Do You Think You Are"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
KnownValues.TVDB_ID[ "Who Do You Think You Are? (US)"])
# --- "(YYYY)" year-suffix resolution --------------------------------------
# Same pattern as the 1xx tests, but the canonical name carries a year
# suffix; getShowInfo() must resolve the bare title to the year-suffixed
# tvdb_id entry.
def test_suffix_us_and_year_200(self):
KnownValues.SeriesData = {'SeriesName': "Battlestar Galactica (2003)"}
KnownValues.SeriesData_alt = {'SeriesName': "Battlestar Galactica"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
self.library.getShowInfo(KnownValues.SeriesData_alt)['tvdb_id'])
def test_suffix_us_and_year_201(self):
KnownValues.SeriesData = {'SeriesName': "Battlestar Galactica (2003)"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
KnownValues.TVDB_ID["Battlestar Galactica (2003)"])
def test_suffix_us_and_year_202(self):
KnownValues.SeriesData = {'SeriesName': "Battlestar Galactica"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
KnownValues.TVDB_ID["Battlestar Galactica (2003)"])
def test_suffix_us_and_year_210(self):
KnownValues.SeriesData = {'SeriesName': "Castle (2009)"}
KnownValues.SeriesData_alt = {'SeriesName': "Castle"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
self.library.getShowInfo(KnownValues.SeriesData_alt)['tvdb_id'])
def test_suffix_us_and_year_211(self):
KnownValues.SeriesData = {'SeriesName': "Castle (2009)"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
KnownValues.TVDB_ID["Castle (2009)"])
def test_suffix_us_and_year_212(self):
KnownValues.SeriesData = {'SeriesName': "Castle"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
KnownValues.TVDB_ID["Castle (2009)"])
def test_suffix_us_and_year_220(self):
KnownValues.SeriesData = {'SeriesName': "Doctor Who (2005)"}
KnownValues.SeriesData_alt = {'SeriesName': "Doctor Who"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
self.library.getShowInfo(KnownValues.SeriesData_alt)['tvdb_id'])
def test_suffix_us_and_year_221(self):
KnownValues.SeriesData = {'SeriesName': "Doctor Who (2005)"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
KnownValues.TVDB_ID["Doctor Who (2005)"])
def test_suffix_us_and_year_222(self):
KnownValues.SeriesData = {'SeriesName': "Doctor Who"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
KnownValues.TVDB_ID["Doctor Who (2005)"])
def test_suffix_us_and_year_230(self):
KnownValues.SeriesData = {'SeriesName': "Last Man Standing (2011)"}
KnownValues.SeriesData_alt = {'SeriesName': "Last Man Standing"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
self.library.getShowInfo(KnownValues.SeriesData_alt)['tvdb_id'])
def test_suffix_us_and_year_231(self):
KnownValues.SeriesData = {'SeriesName': "Last Man Standing (2011)"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
KnownValues.TVDB_ID["Last Man Standing (2011)"])
def test_suffix_us_and_year_232(self):
KnownValues.SeriesData = {'SeriesName': "Last Man Standing"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
KnownValues.TVDB_ID["Last Man Standing (2011)"])
# 240–243: a year-suffixed show must also resolve from a "(US)" alias.
def test_suffix_us_and_year_240(self):
KnownValues.SeriesData = {'SeriesName': "Legends (2014)"}
KnownValues.SeriesData_alt = {'SeriesName': "Legends"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
self.library.getShowInfo(KnownValues.SeriesData_alt)['tvdb_id'])
def test_suffix_us_and_year_241(self):
KnownValues.SeriesData = {'SeriesName': "Legends (2014)"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
KnownValues.TVDB_ID["Legends (2014)"])
def test_suffix_us_and_year_242(self):
KnownValues.SeriesData = {'SeriesName': "Legends (US)"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
KnownValues.TVDB_ID["Legends (2014)"])
def test_suffix_us_and_year_243(self):
KnownValues.SeriesData = {'SeriesName': "Legends"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
KnownValues.TVDB_ID["Legends (2014)"])
# --- year-suffixed shows, continued ---------------------------------------
def test_suffix_us_and_year_250(self):
KnownValues.SeriesData = {'SeriesName': "Once Upon a Time (2011)"}
KnownValues.SeriesData_alt = {'SeriesName': "Once Upon a Time"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
self.library.getShowInfo(KnownValues.SeriesData_alt)['tvdb_id'])
def test_suffix_us_and_year_251(self):
KnownValues.SeriesData = {'SeriesName': "Once Upon a Time (2011)"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
KnownValues.TVDB_ID["Once Upon a Time (2011)"])
def test_suffix_us_and_year_252(self):
KnownValues.SeriesData = {'SeriesName': "Once Upon a Time"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
KnownValues.TVDB_ID["Once Upon a Time (2011)"])
# NOTE(review): _253 is an exact duplicate of _252; by the pattern of the
# sibling groups it was probably meant to use "Once Upon a Time (US)" —
# confirm the intent before changing it.
def test_suffix_us_and_year_253(self):
KnownValues.SeriesData = {'SeriesName': "Once Upon a Time"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
KnownValues.TVDB_ID["Once Upon a Time (2011)"])
def test_suffix_us_and_year_260(self):
KnownValues.SeriesData = {'SeriesName': "Parenthood (2010)"}
KnownValues.SeriesData_alt = {'SeriesName': "Parenthood"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
self.library.getShowInfo(KnownValues.SeriesData_alt)['tvdb_id'])
def test_suffix_us_and_year_261(self):
KnownValues.SeriesData = {'SeriesName': "Parenthood (2010)"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
KnownValues.TVDB_ID["Parenthood (2010)"])
def test_suffix_us_and_year_262(self):
KnownValues.SeriesData = {'SeriesName': "Parenthood"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
KnownValues.TVDB_ID["Parenthood (2010)"])
def test_suffix_us_and_year_270(self):
KnownValues.SeriesData = {'SeriesName': "Partners (2012)"}
KnownValues.SeriesData_alt = {'SeriesName': "Partners"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
self.library.getShowInfo(KnownValues.SeriesData_alt)['tvdb_id'])
def test_suffix_us_and_year_271(self):
KnownValues.SeriesData = {'SeriesName': "Partners (2012)"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
KnownValues.TVDB_ID["Partners (2012)"])
def test_suffix_us_and_year_272(self):
KnownValues.SeriesData = {'SeriesName': "Partners"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
KnownValues.TVDB_ID["Partners (2012)"])
# 280–283: year-suffixed show with a "(US)" alias as well.
def test_suffix_us_and_year_280(self):
KnownValues.SeriesData = {'SeriesName': "Rush (2014)"}
KnownValues.SeriesData_alt = {'SeriesName': "Rush"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
self.library.getShowInfo(KnownValues.SeriesData_alt)['tvdb_id'])
def test_suffix_us_and_year_281(self):
KnownValues.SeriesData = {'SeriesName': "Rush (2014)"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
KnownValues.TVDB_ID["Rush (2014)"])
def test_suffix_us_and_year_282(self):
KnownValues.SeriesData = {'SeriesName': "Rush (US)"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
KnownValues.TVDB_ID["Rush (2014)"])
def test_suffix_us_and_year_283(self):
KnownValues.SeriesData = {'SeriesName': "Rush"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
KnownValues.TVDB_ID["Rush (2014)"])
# --- shows where both "(YYYY)" and "(US)" aliases must converge ------------
# The x0 test of each group asserts the two aliases resolve to the same id;
# x1/x2/x3 pin each spelling against the canonical TVDB_ID entry.
def test_suffix_us_and_year_290(self):
KnownValues.SeriesData = {'SeriesName': "Scandal (2012)"}
KnownValues.SeriesData_alt = {'SeriesName': "Scandal (US)"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
self.library.getShowInfo(KnownValues.SeriesData_alt)['tvdb_id'])
def test_suffix_us_and_year_291(self):
KnownValues.SeriesData = {'SeriesName': "Scandal (2012)"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
KnownValues.TVDB_ID["Scandal (2012)"])
def test_suffix_us_and_year_292(self):
KnownValues.SeriesData = {'SeriesName': "Scandal (US)"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
KnownValues.TVDB_ID["Scandal (2012)"])
def test_suffix_us_and_year_293(self):
KnownValues.SeriesData = {'SeriesName': "Scandal"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
KnownValues.TVDB_ID["Scandal (2012)"])
def test_suffix_us_and_year_300(self):
KnownValues.SeriesData = {'SeriesName': "The Americans (2013)"}
KnownValues.SeriesData_alt = {'SeriesName': "The Americans (US)"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
self.library.getShowInfo(KnownValues.SeriesData_alt)['tvdb_id'])
def test_suffix_us_and_year_301(self):
KnownValues.SeriesData = {'SeriesName': "The Americans (2013)"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
KnownValues.TVDB_ID["The Americans (2013)"])
def test_suffix_us_and_year_302(self):
KnownValues.SeriesData = {'SeriesName': "The Americans (US)"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
KnownValues.TVDB_ID["The Americans (2013)"])
def test_suffix_us_and_year_303(self):
KnownValues.SeriesData = {'SeriesName': "The Americans"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
KnownValues.TVDB_ID["The Americans (2013)"])
def test_suffix_us_and_year_310(self):
KnownValues.SeriesData = {'SeriesName': "The Bridge (2013)"}
KnownValues.SeriesData_alt = {'SeriesName': "The Bridge (US)"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
self.library.getShowInfo(KnownValues.SeriesData_alt)['tvdb_id'])
def test_suffix_us_and_year_311(self):
KnownValues.SeriesData = {'SeriesName': "The Bridge (2013)"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
KnownValues.TVDB_ID["The Bridge (2013)"])
def test_suffix_us_and_year_312(self):
KnownValues.SeriesData = {'SeriesName': "The Bridge (US)"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
KnownValues.TVDB_ID["The Bridge (2013)"])
def test_suffix_us_and_year_313(self):
KnownValues.SeriesData = {'SeriesName': "The Bridge"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
KnownValues.TVDB_ID["The Bridge (2013)"])
def test_suffix_us_and_year_320(self):
KnownValues.SeriesData = {'SeriesName': "The Newsroom (2012)"}
KnownValues.SeriesData_alt = {'SeriesName': "The Newsroom (US)"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
self.library.getShowInfo(KnownValues.SeriesData_alt)['tvdb_id'])
def test_suffix_us_and_year_321(self):
KnownValues.SeriesData = {'SeriesName': "The Newsroom (2012)"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
KnownValues.TVDB_ID["The Newsroom (2012)"])
def test_suffix_us_and_year_322(self):
KnownValues.SeriesData = {'SeriesName': "The Newsroom (US)"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
KnownValues.TVDB_ID["The Newsroom (2012)"])
def test_suffix_us_and_year_323(self):
KnownValues.SeriesData = {'SeriesName': "The Newsroom"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
KnownValues.TVDB_ID["The Newsroom (2012)"])
def test_suffix_us_and_year_330(self):
KnownValues.SeriesData = {'SeriesName': "Zero Hour (2013)"}
KnownValues.SeriesData_alt = {'SeriesName': "Zero Hour (US)"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
self.library.getShowInfo(KnownValues.SeriesData_alt)['tvdb_id'])
def test_suffix_us_and_year_331(self):
KnownValues.SeriesData = {'SeriesName': "Zero Hour (2013)"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
KnownValues.TVDB_ID["Zero Hour (2013)"])
def test_suffix_us_and_year_332(self):
KnownValues.SeriesData = {'SeriesName': "Zero Hour (US)"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
KnownValues.TVDB_ID["Zero Hour (2013)"])
def test_suffix_us_and_year_333(self):
KnownValues.SeriesData = {'SeriesName': "Zero Hour"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
KnownValues.TVDB_ID["Zero Hour (2013)"])
# --- unsuffixed canonical names -------------------------------------------
# 340–343 check a show whose canonical entry carries no suffix, including a
# lookup by acronym ("SYTYCD"); 350 is a plain control case.
def test_suffix_us_and_year_340(self):
KnownValues.SeriesData = {'SeriesName': "So You Think You Can Dance"}
KnownValues.SeriesData_alt = {'SeriesName': "So You Think You Can Dance (US)"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
self.library.getShowInfo(KnownValues.SeriesData_alt)['tvdb_id'])
def test_suffix_us_and_year_341(self):
KnownValues.SeriesData = {'SeriesName': "So You Think You Can Dance"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
KnownValues.TVDB_ID["So You Think You Can Dance"])
def test_suffix_us_and_year_342(self):
KnownValues.SeriesData = {'SeriesName': "So You Think You Can Dance (US)"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
KnownValues.TVDB_ID["So You Think You Can Dance"])
def test_suffix_us_and_year_343(self):
KnownValues.SeriesData = {'SeriesName': "SYTYCD"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
KnownValues.TVDB_ID["So You Think You Can Dance"])
def test_suffix_us_and_year_350(self):
KnownValues.SeriesData = {'SeriesName': "The Twilight Zone"}
self.assertEqual(self.library.getShowInfo(KnownValues.SeriesData)['tvdb_id'],
KnownValues.TVDB_ID["The Twilight Zone"])
@classmethod
def theSuite(cls):
    """Return a unittest suite containing every test of this TestCase.

    Declared as a classmethod because the module's __main__ block invokes it
    on the class itself (``SeriesInfoSuffix.theSuite()``); as a plain
    instance method that call raises TypeError, and passing ``self`` to
    ``loadTestsFromTestCase`` (which expects a TestCase *class*) would be
    wrong anyway.
    """
    return unittest.TestLoader().loadTestsFromTestCase(cls)
if __name__ == '__main__':
    # Build the suite via TestLoader directly: the original code called
    # SeriesInfoSuffix.theSuite() on the class, which raises TypeError for a
    # plain instance method (no instance is supplied).
    suite = unittest.TestLoader().loadTestsFromTestCase(SeriesInfoSuffix)
    unittest.TextTestRunner(verbosity=1).run(suite)
| 45.501104
| 90
| 0.743644
| 2,521
| 20,612
| 5.855216
| 0.08687
| 0.261771
| 0.138609
| 0.205677
| 0.904478
| 0.859088
| 0.787481
| 0.730777
| 0.70876
| 0.694533
| 0
| 0.037208
| 0.114642
| 20,612
| 452
| 91
| 45.60177
| 0.771659
| 0.005628
| 0
| 0.51105
| 0
| 0
| 0.227319
| 0.00122
| 0
| 0
| 0
| 0
| 0.201657
| 1
| 0.207182
| false
| 0
| 0.008287
| 0
| 0.232044
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
8313c5c4a7504c5b73eda17a22163e974eb775d9
| 218
|
py
|
Python
|
src/models/td_models/__init__.py
|
volkancirik/refer360
|
8ae7e739b812345204aa83514cdf5a271dfa812c
|
[
"MIT"
] | 7
|
2020-06-03T16:08:09.000Z
|
2021-09-30T07:30:34.000Z
|
src/models/td_models/__init__.py
|
volkancirik/refer360
|
8ae7e739b812345204aa83514cdf5a271dfa812c
|
[
"MIT"
] | null | null | null |
src/models/td_models/__init__.py
|
volkancirik/refer360
|
8ae7e739b812345204aa83514cdf5a271dfa812c
|
[
"MIT"
] | 1
|
2021-01-25T14:11:07.000Z
|
2021-01-25T14:11:07.000Z
|
from models.td_models.model import clones
from models.td_models.model import Concat
from models.td_models.model import ConcatConv
from models.td_models.model import RNN2Conv
from models.td_models.model import LingUNet
| 36.333333
| 45
| 0.862385
| 35
| 218
| 5.228571
| 0.285714
| 0.273224
| 0.327869
| 0.491803
| 0.79235
| 0.79235
| 0
| 0
| 0
| 0
| 0
| 0.005051
| 0.091743
| 218
| 5
| 46
| 43.6
| 0.919192
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
835930364703f99ff810e22acf9b9207a23aab69
| 2,890
|
py
|
Python
|
morpheus_mcpi/spectator_mode.py
|
bigjango13/Morpheus-2
|
4aa7799b1af7e68333459d1ba48f4f3263bd4d8f
|
[
"MIT"
] | 6
|
2021-11-16T21:02:11.000Z
|
2022-02-24T16:53:58.000Z
|
morpheus_mcpi/spectator_mode.py
|
bigjango13/Nebuchadnezzar-2-hack
|
4aa7799b1af7e68333459d1ba48f4f3263bd4d8f
|
[
"MIT"
] | 2
|
2022-01-27T23:21:43.000Z
|
2022-02-15T17:43:54.000Z
|
morpheus_mcpi/spectator_mode.py
|
bigjango13/Nebuchadnezzar-2-hack
|
4aa7799b1af7e68333459d1ba48f4f3263bd4d8f
|
[
"MIT"
] | 3
|
2022-01-23T20:39:33.000Z
|
2022-02-10T05:33:13.000Z
|
import mcpi.minecraft as minecraft
import keyboard
from time import sleep
def switch():
    """Free-roam "spectator" camera for Minecraft Pi Edition.

    Pins the player at the position it had when the function was called and
    moves a detached fixed camera around with the keyboard:

      * w/s/a/d, space, shift : move the camera (fast repeat, 0.01 s delay)
      * arrow keys, o, l      : move the camera (slow repeat, 0.1 s delay)
      * esc                   : restore the normal camera and return

    Blocks until esc is pressed. Replaces twelve copy-pasted elif branches
    that differed only in key name, axis/sign and repeat delay.
    """
    # key -> (dx, dy, dz) camera delta. The two tables differ only in the
    # repeat delay and the (preserved) order of the player/camera calls.
    fast_moves = {
        "w": (0, 0, 1),
        "s": (0, 0, -1),
        "d": (-1, 0, 0),
        "a": (1, 0, 0),
        "space": (0, 1, 0),
        "shift": (0, -1, 0),
    }
    slow_moves = {
        "up": (0, 0, 1),
        "down": (0, 0, -1),
        "right": (-1, 0, 0),
        "left": (1, 0, 0),
        "o": (0, 1, 0),
        "l": (0, -1, 0),
    }
    mc = minecraft.Minecraft.create()
    x, y, z = mc.player.getPos()
    cx, cy, cz = x, y, z
    mc.camera.setFixed()
    mc.camera.setPos(x, y, z)
    # NOTE(review): this loop busy-waits at 100% CPU while no key is held;
    # consider a tiny idle sleep if that matters on the Pi.
    while True:
        if keyboard.is_pressed("esc"):
            mc.player.setPos(x, y, z)
            mc.camera.setNormal()
            return
        for key, (dx, dy, dz) in fast_moves.items():
            if keyboard.is_pressed(key):
                cx, cy, cz = cx + dx, cy + dy, cz + dz
                # Re-pin the player on both sides of the delay so the server
                # does not move it while the camera travels (as the original
                # branches did).
                mc.player.setPos(x, y, z)
                mc.camera.setPos(cx, cy, cz)
                sleep(0.01)
                mc.player.setPos(x, y, z)
                break
        else:
            for key, (dx, dy, dz) in slow_moves.items():
                if keyboard.is_pressed(key):
                    cx, cy, cz = cx + dx, cy + dy, cz + dz
                    mc.camera.setPos(cx, cy, cz)
                    mc.player.setPos(x, y, z)
                    sleep(0.1)
                    mc.player.setPos(x, y, z)
                    break
# Start the spectator-camera loop when run as a script.
if __name__ == "__main__":
switch()
| 30.744681
| 42
| 0.465398
| 407
| 2,890
| 3.253071
| 0.120393
| 0.042296
| 0.063444
| 0.176737
| 0.804381
| 0.796073
| 0.796073
| 0.796073
| 0.77719
| 0.77719
| 0
| 0.024027
| 0.395156
| 2,890
| 93
| 43
| 31.075269
| 0.73341
| 0
| 0
| 0.693182
| 0
| 0
| 0.014533
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.011364
| false
| 0
| 0.034091
| 0
| 0.045455
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
83597a0c29de484aa7966d77d2334b0f20ec1f2c
| 1,229
|
py
|
Python
|
struct/data/boss/RUN.py
|
naonori/hitomi
|
02b188eb8ada4d39a10801bf3193581b9bc9c310
|
[
"MIT"
] | 6
|
2021-09-28T04:00:56.000Z
|
2022-03-23T03:49:19.000Z
|
struct/data/boss/RUN.py
|
naonori/hitomi
|
02b188eb8ada4d39a10801bf3193581b9bc9c310
|
[
"MIT"
] | null | null | null |
struct/data/boss/RUN.py
|
naonori/hitomi
|
02b188eb8ada4d39a10801bf3193581b9bc9c310
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import subprocess
## For galaxies
#for NS in ["North", "South"]:
# for zbin in [1,2,3]:
# for Weight in [0,1,2,3]:
## for Weight in [0]:
#
# fr = open("run_base.sh", "r")
# AA = fr.readlines()
# fr.close()
#
# AA = [AA[i].replace("NS zbin Weight", "%s %d %d" % (NS, zbin, Weight)) for i in range(len(AA))]
#
# fw = open("run_new.sh", "w")
# fw.writelines(AA)
# fw.close()
#
# subprocess.call(["chmod", "u+x", "run_new.sh"])
# subprocess.call(["qsub", "run_new.sh"])
#
## For mocks
#for NS in ["North", "South"]:
# for zbin in [1,2,3]:
# for NR in range(0, 21):
## for NR in [0]:
#
# fr = open("run_base.sh", "r")
# AA = fr.readlines()
# fr.close()
#
# AA = [AA[i].replace("NS zbin NR", "%s %d %d" % (NS, zbin, NR)) for i in range(len(AA))]
#
# fw = open("run_new.sh", "w")
# fw.writelines(AA)
# fw.close()
#
# subprocess.call(["chmod", "u+x", "run_new.sh"])
# subprocess.call(["qsub", "run_new.sh"])
#
| 27.931818
| 108
| 0.432872
| 166
| 1,229
| 3.156627
| 0.289157
| 0.068702
| 0.091603
| 0.034351
| 0.822519
| 0.788168
| 0.788168
| 0.748092
| 0.748092
| 0.748092
| 0
| 0.020151
| 0.353946
| 1,229
| 43
| 109
| 28.581395
| 0.639798
| 0.915378
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 10
|
55fa91e340fed4a094261adf7d22e6833d79b5d3
| 72
|
py
|
Python
|
src/data/dcgan32/__init__.py
|
ndeutschmann/face_gans
|
84f2e92f2f674de5052aeee1dac8c20a29190be4
|
[
"MIT"
] | null | null | null |
src/data/dcgan32/__init__.py
|
ndeutschmann/face_gans
|
84f2e92f2f674de5052aeee1dac8c20a29190be4
|
[
"MIT"
] | null | null | null |
src/data/dcgan32/__init__.py
|
ndeutschmann/face_gans
|
84f2e92f2f674de5052aeee1dac8c20a29190be4
|
[
"MIT"
] | null | null | null |
from .create_inversion_dataset import generate_dcgan32_inversion_dataset
| 72
| 72
| 0.944444
| 9
| 72
| 7
| 0.777778
| 0.507937
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.028986
| 0.041667
| 72
| 1
| 72
| 72
| 0.884058
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
367a22068cb7be1db211aa33f30db249f7f73961
| 209
|
py
|
Python
|
Data-Analysis/venv_macos/lib/python3.8/site-packages/pyecharts_jupyter_installer/__init__.py
|
Qiaozhi94/Python-Projects
|
aefc6cf49c1f4f2cc9beba8dbe80cfa826ba75c4
|
[
"MIT"
] | null | null | null |
Data-Analysis/venv_macos/lib/python3.8/site-packages/pyecharts_jupyter_installer/__init__.py
|
Qiaozhi94/Python-Projects
|
aefc6cf49c1f4f2cc9beba8dbe80cfa826ba75c4
|
[
"MIT"
] | null | null | null |
Data-Analysis/venv_macos/lib/python3.8/site-packages/pyecharts_jupyter_installer/__init__.py
|
Qiaozhi94/Python-Projects
|
aefc6cf49c1f4f2cc9beba8dbe80cfa826ba75c4
|
[
"MIT"
] | null | null | null |
# flake8: noqa
from pyecharts_jupyter_installer._version import __version__
from pyecharts_jupyter_installer._version import __author__
from pyecharts_jupyter_installer.jupyter_install import install_cmd_for
| 34.833333
| 71
| 0.899522
| 26
| 209
| 6.5
| 0.461538
| 0.230769
| 0.35503
| 0.514793
| 0.497041
| 0.497041
| 0
| 0
| 0
| 0
| 0
| 0.005181
| 0.076555
| 209
| 5
| 72
| 41.8
| 0.870466
| 0.057416
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
367e1e8290261af684c2e8fe8dd8f8770720643b
| 5,169
|
py
|
Python
|
conans/test/integration/conanfile_errors_test.py
|
datalogics-kam/conan
|
7bf230cd5f8ef68eb804908777ebaad75e951b16
|
[
"MIT"
] | null | null | null |
conans/test/integration/conanfile_errors_test.py
|
datalogics-kam/conan
|
7bf230cd5f8ef68eb804908777ebaad75e951b16
|
[
"MIT"
] | null | null | null |
conans/test/integration/conanfile_errors_test.py
|
datalogics-kam/conan
|
7bf230cd5f8ef68eb804908777ebaad75e951b16
|
[
"MIT"
] | null | null | null |
import unittest
from conans.test.utils.tools import TestClient
# Integration tests asserting that conan reports a clear, line-numbered error
# when a recipe method (package, package_info, configure, source) raises, and
# that duplicated requirements are rejected. Each test writes a deliberately
# broken conanfile into a fresh TestClient workspace, runs export + install,
# and checks the captured user output.
# NOTE(review): the "*_test" suffix naming is the old nose-style convention
# used by conan's own suite; unittest discovery would normally want "test_*".
class ConanfileErrorsTest(unittest.TestCase):
# Recipe calls the non-existent self.copy2 in package(): expect the failing
# line, its source text, and the AttributeError in the output.
def copy_error_test(self):
client = TestClient()
conanfile = '''
from conans import ConanFile
class HelloConan(ConanFile):
name = "Hello"
version = "0.1"
exports = "*"
def package(self):
self.copy2("*.h", dst="include", src=["include","platform"])
'''
files = {"conanfile.py": conanfile, "test.txt": "Hello world"}
client.save(files)
client.run("export . lasote/stable")
client.run("install Hello/0.1@lasote/stable --build", ignore_error=True)
self.assertIn("Hello/0.1@lasote/stable: Error in package() method, line 9",
client.user_io.out)
self.assertIn('self.copy2("*.h", dst="include", src=["include","platform"]',
client.user_io.out)
self.assertIn("'HelloConan' object has no attribute 'copy2'",
client.user_io.out)
# Valid self.copy but with a list where a string is expected: the location
# must still be reported even though the exact exception text varies.
def copy_error2_test(self):
client = TestClient()
conanfile = '''
from conans import ConanFile
class HelloConan(ConanFile):
name = "Hello"
version = "0.1"
exports = "*"
def package(self):
self.copy("*.h", dst="include", src=["include","platform"])
'''
files = {"conanfile.py": conanfile, "test.txt": "Hello world"}
client.save(files)
client.run("export . lasote/stable")
client.run("install Hello/0.1@lasote/stable --build", ignore_error=True)
self.assertIn("Hello/0.1@lasote/stable: Error in package() method, line 9",
client.user_io.out)
self.assertIn('self.copy("*.h", dst="include", src=["include","platform"]',
client.user_io.out)
# It results that the error is different in different Python2/3 and OSs
# self.assertIn("'list' object has no attribute 'replace'", client.user_io.out)
# Same failure mode raised from package_info().
def package_info_error_test(self):
client = TestClient()
conanfile = '''
from conans import ConanFile
class HelloConan(ConanFile):
name = "Hello"
version = "0.1"
exports = "*"
def package_info(self):
self.copy2()
'''
files = {"conanfile.py": conanfile, "test.txt": "Hello world"}
client.save(files)
client.run("export . lasote/stable")
client.run("install Hello/0.1@lasote/stable --build", ignore_error=True)
self.assertIn("Hello/0.1@lasote/stable: Error in package_info() method, line 9",
client.user_io.out)
self.assertIn('self.copy2()',
client.user_io.out)
self.assertIn("'HelloConan' object has no attribute 'copy2'",
client.user_io.out)
# Same failure mode raised from configure(); here the full three-line error
# block is matched at once.
def config_error_test(self):
client = TestClient()
conanfile = '''
from conans import ConanFile
class HelloConan(ConanFile):
name = "Hello"
version = "0.1"
exports = "*"
def configure(self):
self.copy2()
'''
files = {"conanfile.py": conanfile, "test.txt": "Hello world"}
client.save(files)
client.run("export . lasote/stable")
client.run("install Hello/0.1@lasote/stable --build", ignore_error=True)
self.assertIn("""ERROR: Hello/0.1@lasote/stable: Error in configure() method, line 9
self.copy2()
AttributeError: 'HelloConan' object has no attribute 'copy2'""", client.user_io.out)
# Same failure mode raised from source().
def source_error_test(self):
client = TestClient()
conanfile = '''
from conans import ConanFile
class HelloConan(ConanFile):
name = "Hello"
version = "0.1"
exports = "*"
def source(self):
self.copy2()
'''
files = {"conanfile.py": conanfile, "test.txt": "Hello world"}
client.save(files)
client.run("export . lasote/stable")
client.run("install Hello/0.1@lasote/stable --build", ignore_error=True)
self.assertIn("Hello/0.1@lasote/stable: Error in source() method, line 9",
client.user_io.out)
self.assertIn('self.copy2()',
client.user_io.out)
self.assertIn("'HelloConan' object has no attribute 'copy2'",
client.user_io.out)
# Duplicate requirement in a conanfile.txt must abort the install.
def duplicate_requires_test(self):
client = TestClient()
conanfile = '''
[requires]
foo/0.1@user/testing
foo/0.2@user/testing
'''
files = {"conanfile.txt": conanfile}
client.save(files)
error = client.run("install . --build", ignore_error=True)
self.assertTrue(error)
self.assertIn("ERROR: Duplicated requirement", client.user_io.out)
# Duplicate requirement declared in a conanfile.py must abort as well.
def duplicate_requires_py_test(self):
client = TestClient()
conanfile = '''
from conans import ConanFile
class HelloConan(ConanFile):
name = "Hello"
version = "0.1"
requires = "foo/0.1@user/testing", "foo/0.2@user/testing"
'''
files = {"conanfile.py": conanfile}
client.save(files)
error = client.run("install . --build", ignore_error=True)
self.assertTrue(error)
self.assertIn("Error while initializing requirements. Duplicated requirement",
client.user_io.out)
| 34.691275
| 92
| 0.60534
| 606
| 5,169
| 5.09736
| 0.140264
| 0.011654
| 0.058271
| 0.072839
| 0.887666
| 0.871156
| 0.852703
| 0.827452
| 0.823244
| 0.823244
| 0
| 0.01552
| 0.25208
| 5,169
| 148
| 93
| 34.925676
| 0.783497
| 0.028439
| 0
| 0.776923
| 0
| 0.007692
| 0.474597
| 0.103208
| 0
| 0
| 0
| 0
| 0.123077
| 1
| 0.053846
| false
| 0
| 0.061538
| 0
| 0.123077
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
36b2f20ce33a7233ab1e7bb33a9eeb527c511659
| 86
|
py
|
Python
|
icevision/models/mmseg/__init__.py
|
lgvaz/mantisshrimp2
|
743cb7df0dae7eb1331fc2bb66fc9ca09db496cd
|
[
"Apache-2.0"
] | 17
|
2020-07-31T22:09:07.000Z
|
2020-08-30T11:18:36.000Z
|
icevision/models/mmseg/__init__.py
|
Borda/icevision
|
743cb7df0dae7eb1331fc2bb66fc9ca09db496cd
|
[
"Apache-2.0"
] | 115
|
2020-08-01T09:19:54.000Z
|
2020-09-04T18:51:28.000Z
|
icevision/models/mmseg/__init__.py
|
lgvaz/mantisshrimp2
|
743cb7df0dae7eb1331fc2bb66fc9ca09db496cd
|
[
"Apache-2.0"
] | 1
|
2020-08-25T06:04:34.000Z
|
2020-08-25T06:04:34.000Z
|
from icevision.models.mmseg import common
from icevision.models.mmseg.models import *
| 28.666667
| 43
| 0.837209
| 12
| 86
| 6
| 0.5
| 0.361111
| 0.527778
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.093023
| 86
| 2
| 44
| 43
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
36cd3d4368474cb3eeacff92c9a112e00809090c
| 20,360
|
py
|
Python
|
cfg/detection/object_detection/obj_6_cornernet_lite/train_obj_6_cornernet_lite.py
|
THEFASHIONGEEK/Monk_Gui
|
7c23cdd3487ae5a5b28b0a3419e4e64022b11e06
|
[
"Apache-2.0"
] | 129
|
2020-01-30T22:08:05.000Z
|
2022-03-04T06:33:14.000Z
|
cfg/detection/object_detection/obj_6_cornernet_lite/train_obj_6_cornernet_lite.py
|
netwrkspider/Monk_Gui
|
05ce1bbef0199fbd38519220cc71fb6904c59e7c
|
[
"Apache-2.0"
] | 2
|
2020-04-04T14:57:49.000Z
|
2020-06-13T14:13:01.000Z
|
cfg/detection/object_detection/obj_6_cornernet_lite/train_obj_6_cornernet_lite.py
|
netwrkspider/Monk_Gui
|
05ce1bbef0199fbd38519220cc71fb6904c59e7c
|
[
"Apache-2.0"
] | 46
|
2020-01-31T00:23:21.000Z
|
2022-01-31T01:21:51.000Z
|
import os
import sys
import numpy as np
import pandas as pd
import cv2
import xmltodict
import json
def isnotebook():
    """Detect whether the code is running inside a Jupyter/qtconsole kernel.

    Returns True only for a ZMQ-based IPython kernel (Jupyter notebook or
    qtconsole). A terminal IPython session, or any plain interpreter where
    ``get_ipython`` is undefined, yields False.
    """
    try:
        shell_name = get_ipython().__class__.__name__
    except NameError:
        # get_ipython() only exists inside IPython environments.
        return False
    return shell_name == 'ZMQInteractiveShell'
# Pick the notebook-aware tqdm progress bar when running inside Jupyter,
# the plain terminal one otherwise.
if(isnotebook()):
from tqdm.notebook import tqdm
else:
from tqdm import tqdm as tqdm
from pycocotools.coco import COCO
class Unbuffered(object):
    """File-like proxy that flushes the wrapped stream after every write.

    Wrapping sys.stdout in this makes progress output appear immediately
    even when the stream is block-buffered (e.g. piped). Any attribute not
    defined here is delegated to the underlying stream.
    """

    def __init__(self, stream):
        self.stream = stream

    def write(self, data):
        # Write, then flush right away so nothing sits in the buffer.
        self.stream.write(data)
        self.stream.flush()

    def writelines(self, datas):
        self.stream.writelines(datas)
        self.stream.flush()

    def __getattr__(self, attr):
        # Delegate everything else (encoding, isatty, close, ...) to the
        # wrapped stream.
        return getattr(self.stream, attr)
sys.stdout = Unbuffered(sys.stdout)
print("Training....")
with open('obj_6_cornernet_lite.json') as json_file:
system = json.load(json_file)
system["batch_size"] = int(system["batch_size"]);
system["iterations"] = int(system["iterations"]);
system["lr"] = float(system["lr"]);
system["val_interval"] = int(system["val_interval"]);
system["print_interval"] = int(system["print_interval"]);
sys.path.append("Monk_Object_Detection/6_cornernet_lite/lib/")
from train_detector import Detector
if(system["anno_type"] == "monk"):
root_dir = system["monk_root_dir"];
img_dir = system["monk_img_dir"];
anno_file = system["monk_anno_file"];
dataset_path = root_dir;
images_folder = root_dir + "/" + img_dir;
annotations_path = root_dir + "/annotations/";
if not os.path.isdir(annotations_path):
os.mkdir(annotations_path)
input_images_folder = images_folder;
input_annotations_path = root_dir + "/" + anno_file;
output_dataset_path = root_dir;
output_image_folder = input_images_folder;
output_annotation_folder = annotations_path;
tmp = img_dir.replace("/", "");
output_annotation_file = output_annotation_folder + "/instances_" + tmp + ".json";
output_classes_file = output_annotation_folder + "/classes.txt";
if not os.path.isdir(output_annotation_folder):
os.mkdir(output_annotation_folder);
df = pd.read_csv(input_annotations_path);
columns = df.columns
delimiter = " ";
list_dict = [];
anno = [];
for i in range(len(df)):
img_name = df[columns[0]][i];
labels = df[columns[1]][i];
tmp = labels.split(delimiter);
for j in range(len(tmp)//5):
label = tmp[j*5+4];
if(label not in anno):
anno.append(label);
anno = sorted(anno)
for i in tqdm(range(len(anno))):
tmp = {};
tmp["supercategory"] = "master";
tmp["id"] = i;
tmp["name"] = anno[i];
list_dict.append(tmp);
anno_f = open(output_classes_file, 'w');
for i in range(len(anno)):
anno_f.write(anno[i] + "\n");
anno_f.close();
coco_data = {};
coco_data["type"] = "instances";
coco_data["images"] = [];
coco_data["annotations"] = [];
coco_data["categories"] = list_dict;
image_id = 0;
annotation_id = 0;
for i in tqdm(range(len(df))):
img_name = df[columns[0]][i];
labels = df[columns[1]][i];
tmp = labels.split(delimiter);
image_in_path = input_images_folder + "/" + img_name;
img = cv2.imread(image_in_path, 1);
h, w, c = img.shape;
images_tmp = {};
images_tmp["file_name"] = img_name;
images_tmp["height"] = h;
images_tmp["width"] = w;
images_tmp["id"] = image_id;
coco_data["images"].append(images_tmp);
for j in range(len(tmp)//5):
x1 = int(tmp[j*5+0]);
y1 = int(tmp[j*5+1]);
x2 = int(tmp[j*5+2]);
y2 = int(tmp[j*5+3]);
label = tmp[j*5+4];
annotations_tmp = {};
annotations_tmp["id"] = annotation_id;
annotation_id += 1;
annotations_tmp["image_id"] = image_id;
annotations_tmp["segmentation"] = [];
annotations_tmp["ignore"] = 0;
annotations_tmp["area"] = (x2-x1)*(y2-y1);
annotations_tmp["iscrowd"] = 0;
annotations_tmp["bbox"] = [x1, y1, x2-x1, y2-y1];
annotations_tmp["category_id"] = anno.index(label);
coco_data["annotations"].append(annotations_tmp)
image_id += 1;
outfile = open(output_annotation_file, 'w');
json_str = json.dumps(coco_data, indent=4);
outfile.write(json_str);
outfile.close();
root_dir = system["monk_root_dir"];
coco_dir = "";
img_dir = "";
set_dir = system["monk_img_dir"];
elif(system["anno_type"] == "voc"):
root_dir = system["voc_root_dir"];
img_dir = system["voc_img_dir"];
anno_dir = system["voc_anno_dir"];
files = os.listdir(root_dir + "/" + anno_dir);
combined = [];
for i in tqdm(range(len(files))):
annoFile = root_dir + "/" + anno_dir + "/" + files[i];
f = open(annoFile, 'r');
my_xml = f.read();
anno = dict(dict(xmltodict.parse(my_xml))["annotation"])
fname = anno["filename"];
label_str = "";
if(type(anno["object"]) == list):
for j in range(len(anno["object"])):
obj = dict(anno["object"][j]);
label = anno["object"][j]["name"];
bbox = dict(anno["object"][j]["bndbox"])
x1 = bbox["xmin"];
y1 = bbox["ymin"];
x2 = bbox["xmax"];
y2 = bbox["ymax"];
if(j == len(anno["object"])-1):
label_str += x1 + " " + y1 + " " + x2 + " " + y2 + " " + label;
else:
label_str += x1 + " " + y1 + " " + x2 + " " + y2 + " " + label + " ";
else:
obj = dict(anno["object"]);
label = anno["object"]["name"];
bbox = dict(anno["object"]["bndbox"])
x1 = bbox["xmin"];
y1 = bbox["ymin"];
x2 = bbox["xmax"];
y2 = bbox["ymax"];
label_str += x1 + " " + y1 + " " + x2 + " " + y2 + " " + label;
combined.append([fname, label_str])
df = pd.DataFrame(combined, columns = ['ID', 'Label']);
df.to_csv(root_dir + "/train_labels.csv", index=False);
anno_file = "train_labels.csv";
dataset_path = root_dir;
images_folder = root_dir + "/" + img_dir;
annotations_path = root_dir + "/annotations/";
if not os.path.isdir(annotations_path):
os.mkdir(annotations_path)
input_images_folder = images_folder;
input_annotations_path = root_dir + "/" + anno_file;
output_dataset_path = root_dir;
output_image_folder = input_images_folder;
output_annotation_folder = annotations_path;
tmp = img_dir.replace("/", "");
output_annotation_file = output_annotation_folder + "/instances_" + tmp + ".json";
output_classes_file = output_annotation_folder + "/classes.txt";
if not os.path.isdir(output_annotation_folder):
os.mkdir(output_annotation_folder);
df = pd.read_csv(input_annotations_path);
columns = df.columns
delimiter = " ";
list_dict = [];
anno = [];
for i in range(len(df)):
img_name = df[columns[0]][i];
labels = df[columns[1]][i];
tmp = labels.split(delimiter);
for j in range(len(tmp)//5):
label = tmp[j*5+4];
if(label not in anno):
anno.append(label);
anno = sorted(anno)
for i in tqdm(range(len(anno))):
tmp = {};
tmp["supercategory"] = "master";
tmp["id"] = i;
tmp["name"] = anno[i];
list_dict.append(tmp);
anno_f = open(output_classes_file, 'w');
for i in range(len(anno)):
anno_f.write(anno[i] + "\n");
anno_f.close();
coco_data = {};
coco_data["type"] = "instances";
coco_data["images"] = [];
coco_data["annotations"] = [];
coco_data["categories"] = list_dict;
image_id = 0;
annotation_id = 0;
for i in tqdm(range(len(df))):
img_name = df[columns[0]][i];
labels = df[columns[1]][i];
tmp = labels.split(delimiter);
image_in_path = input_images_folder + "/" + img_name;
img = cv2.imread(image_in_path, 1);
h, w, c = img.shape;
images_tmp = {};
images_tmp["file_name"] = img_name;
images_tmp["height"] = h;
images_tmp["width"] = w;
images_tmp["id"] = image_id;
coco_data["images"].append(images_tmp);
for j in range(len(tmp)//5):
x1 = int(tmp[j*5+0]);
y1 = int(tmp[j*5+1]);
x2 = int(tmp[j*5+2]);
y2 = int(tmp[j*5+3]);
label = tmp[j*5+4];
annotations_tmp = {};
annotations_tmp["id"] = annotation_id;
annotation_id += 1;
annotations_tmp["image_id"] = image_id;
annotations_tmp["segmentation"] = [];
annotations_tmp["ignore"] = 0;
annotations_tmp["area"] = (x2-x1)*(y2-y1);
annotations_tmp["iscrowd"] = 0;
annotations_tmp["bbox"] = [x1, y1, x2-x1, y2-y1];
annotations_tmp["category_id"] = anno.index(label);
coco_data["annotations"].append(annotations_tmp)
image_id += 1;
outfile = open(output_annotation_file, 'w');
json_str = json.dumps(coco_data, indent=4);
outfile.write(json_str);
outfile.close();
root_dir = system["monk_root_dir"];
coco_dir = "";
img_dir = "";
set_dir = system["monk_img_dir"];
else:
root_dir = system["coco_root_dir"];
coco_dir = system["coco_coco_dir"];
img_dir = system["coco_img_dir"];
set_dir = system["coco_set_dir"];
if(system["val_data"] == "yes"):
if(system["val_anno_type"] == "monk"):
val_root_dir = system["val_monk_root_dir"];
val_img_dir = system["val_monk_img_dir"];
anno_file = system["val_monk_anno_file"];
dataset_path = val_root_dir;
images_folder = val_root_dir + "/" + val_img_dir;
annotations_path = val_root_dir + "/annotations/";
if not os.path.isdir(annotations_path):
os.mkdir(annotations_path)
input_images_folder = images_folder;
input_annotations_path = val_root_dir + "/" + anno_file;
output_dataset_path = val_root_dir;
output_image_folder = input_images_folder;
output_annotation_folder = annotations_path;
tmp = val_img_dir.replace("/", "");
output_annotation_file = output_annotation_folder + "/instances_" + tmp + ".json";
output_classes_file = output_annotation_folder + "/classes.txt";
if not os.path.isdir(output_annotation_folder):
os.mkdir(output_annotation_folder);
df = pd.read_csv(input_annotations_path);
columns = df.columns
delimiter = " ";
list_dict = [];
anno = [];
for i in range(len(df)):
img_name = df[columns[0]][i];
labels = df[columns[1]][i];
tmp = labels.split(delimiter);
for j in range(len(tmp)//5):
label = tmp[j*5+4];
if(label not in anno):
anno.append(label);
anno = sorted(anno)
for i in tqdm(range(len(anno))):
tmp = {};
tmp["supercategory"] = "master";
tmp["id"] = i;
tmp["name"] = anno[i];
list_dict.append(tmp);
anno_f = open(output_classes_file, 'w');
for i in range(len(anno)):
anno_f.write(anno[i] + "\n");
anno_f.close();
coco_data = {};
coco_data["type"] = "instances";
coco_data["images"] = [];
coco_data["annotations"] = [];
coco_data["categories"] = list_dict;
image_id = 0;
annotation_id = 0;
for i in tqdm(range(len(df))):
img_name = df[columns[0]][i];
labels = df[columns[1]][i];
tmp = labels.split(delimiter);
image_in_path = input_images_folder + "/" + img_name;
img = cv2.imread(image_in_path, 1);
h, w, c = img.shape;
images_tmp = {};
images_tmp["file_name"] = img_name;
images_tmp["height"] = h;
images_tmp["width"] = w;
images_tmp["id"] = image_id;
coco_data["images"].append(images_tmp);
for j in range(len(tmp)//5):
x1 = int(tmp[j*5+0]);
y1 = int(tmp[j*5+1]);
x2 = int(tmp[j*5+2]);
y2 = int(tmp[j*5+3]);
label = tmp[j*5+4];
annotations_tmp = {};
annotations_tmp["id"] = annotation_id;
annotation_id += 1;
annotations_tmp["image_id"] = image_id;
annotations_tmp["segmentation"] = [];
annotations_tmp["ignore"] = 0;
annotations_tmp["area"] = (x2-x1)*(y2-y1);
annotations_tmp["iscrowd"] = 0;
annotations_tmp["bbox"] = [x1, y1, x2-x1, y2-y1];
annotations_tmp["category_id"] = anno.index(label);
coco_data["annotations"].append(annotations_tmp)
image_id += 1;
outfile = open(output_annotation_file, 'w');
json_str = json.dumps(coco_data, indent=4);
outfile.write(json_str);
outfile.close();
val_root_dir = system["val_monk_root_dir"];
val_coco_dir = "";
val_img_dir = "";
val_set_dir = system["val_monk_img_dir"];
elif(system["val_anno_type"] == "voc"):
val_root_dir = system["val_voc_root_dir"];
val_img_dir = system["val_voc_img_dir"];
anno_dir = system["val_voc_anno_dir"];
files = os.listdir(val_root_dir + "/" + anno_dir);
combined = [];
for i in tqdm(range(len(files))):
annoFile = val_root_dir + "/" + anno_dir + "/" + files[i];
f = open(annoFile, 'r');
my_xml = f.read();
anno = dict(dict(xmltodict.parse(my_xml))["annotation"])
fname = anno["filename"];
label_str = "";
if(type(anno["object"]) == list):
for j in range(len(anno["object"])):
obj = dict(anno["object"][j]);
label = anno["object"][j]["name"];
bbox = dict(anno["object"][j]["bndbox"])
x1 = bbox["xmin"];
y1 = bbox["ymin"];
x2 = bbox["xmax"];
y2 = bbox["ymax"];
if(j == len(anno["object"])-1):
label_str += x1 + " " + y1 + " " + x2 + " " + y2 + " " + label;
else:
label_str += x1 + " " + y1 + " " + x2 + " " + y2 + " " + label + " ";
else:
obj = dict(anno["object"]);
label = anno["object"]["name"];
bbox = dict(anno["object"]["bndbox"])
x1 = bbox["xmin"];
y1 = bbox["ymin"];
x2 = bbox["xmax"];
y2 = bbox["ymax"];
label_str += x1 + " " + y1 + " " + x2 + " " + y2 + " " + label;
combined.append([fname, label_str])
df = pd.DataFrame(combined, columns = ['ID', 'Label']);
df.to_csv(val_root_dir + "/train_labels.csv", index=False);
anno_file = "train_labels.csv";
dataset_path = val_root_dir;
images_folder = val_root_dir + "/" + val_img_dir;
annotations_path = val_root_dir + "/annotations/";
if not os.path.isdir(annotations_path):
os.mkdir(annotations_path)
input_images_folder = images_folder;
input_annotations_path = val_root_dir + "/" + anno_file;
output_dataset_path = val_root_dir;
output_image_folder = input_images_folder;
output_annotation_folder = annotations_path;
tmp = val_img_dir.replace("/", "");
output_annotation_file = output_annotation_folder + "/instances_" + tmp + ".json";
output_classes_file = output_annotation_folder + "/classes.txt";
if not os.path.isdir(output_annotation_folder):
os.mkdir(output_annotation_folder);
df = pd.read_csv(input_annotations_path);
columns = df.columns
delimiter = " ";
list_dict = [];
anno = [];
for i in range(len(df)):
img_name = df[columns[0]][i];
labels = df[columns[1]][i];
tmp = labels.split(delimiter);
for j in range(len(tmp)//5):
label = tmp[j*5+4];
if(label not in anno):
anno.append(label);
anno = sorted(anno)
for i in tqdm(range(len(anno))):
tmp = {};
tmp["supercategory"] = "master";
tmp["id"] = i;
tmp["name"] = anno[i];
list_dict.append(tmp);
anno_f = open(output_classes_file, 'w');
for i in range(len(anno)):
anno_f.write(anno[i] + "\n");
anno_f.close();
coco_data = {};
coco_data["type"] = "instances";
coco_data["images"] = [];
coco_data["annotations"] = [];
coco_data["categories"] = list_dict;
image_id = 0;
annotation_id = 0;
for i in tqdm(range(len(df))):
img_name = df[columns[0]][i];
labels = df[columns[1]][i];
tmp = labels.split(delimiter);
image_in_path = input_images_folder + "/" + img_name;
img = cv2.imread(image_in_path, 1);
h, w, c = img.shape;
images_tmp = {};
images_tmp["file_name"] = img_name;
images_tmp["height"] = h;
images_tmp["width"] = w;
images_tmp["id"] = image_id;
coco_data["images"].append(images_tmp);
for j in range(len(tmp)//5):
x1 = int(tmp[j*5+0]);
y1 = int(tmp[j*5+1]);
x2 = int(tmp[j*5+2]);
y2 = int(tmp[j*5+3]);
label = tmp[j*5+4];
annotations_tmp = {};
annotations_tmp["id"] = annotation_id;
annotation_id += 1;
annotations_tmp["image_id"] = image_id;
annotations_tmp["segmentation"] = [];
annotations_tmp["ignore"] = 0;
annotations_tmp["area"] = (x2-x1)*(y2-y1);
annotations_tmp["iscrowd"] = 0;
annotations_tmp["bbox"] = [x1, y1, x2-x1, y2-y1];
annotations_tmp["category_id"] = anno.index(label);
coco_data["annotations"].append(annotations_tmp)
image_id += 1;
outfile = open(output_annotation_file, 'w');
json_str = json.dumps(coco_data, indent=4);
outfile.write(json_str);
outfile.close();
val_root_dir = system["val_monk_root_dir"];
val_coco_dir = "";
val_img_dir = "";
val_set_dir = system["val_monk_img_dir"];
else:
val_root_dir = system["val_coco_root_dir"];
val_coco_dir = system["val_coco_coco_dir"];
val_img_dir = system["val_coco_img_dir"];
val_set_dir = system["val_coco_set_dir"];
gtf = Detector();
gtf.Train_Dataset(root_dir, coco_dir, img_dir, set_dir,
batch_size=system["batch_size"],
num_workers=4)
if(system["val_data"] == "yes"):
gtf.Val_Dataset(val_root_dir, val_coco_dir, val_img_dir, val_set_dir)
gtf.Model(model_name=system["model"]);
gtf.Hyper_Params(lr=system["lr"],
total_iterations=system["iterations"],
val_interval=system["val_interval"])
gtf.Setup();
gtf.Train(display_interval=system["print_interval"]);
print("Completed");
| 31.083969
| 93
| 0.532367
| 2,429
| 20,360
| 4.202141
| 0.074928
| 0.032919
| 0.011757
| 0.01254
| 0.861272
| 0.842265
| 0.82414
| 0.81787
| 0.815323
| 0.81209
| 0
| 0.015125
| 0.318075
| 20,360
| 654
| 94
| 31.131498
| 0.720037
| 0.003389
| 0
| 0.842424
| 0
| 0
| 0.096569
| 0.004535
| 0
| 0
| 0
| 0
| 0
| 1
| 0.010101
| false
| 0
| 0.022222
| 0.00202
| 0.044444
| 0.008081
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
36e951fbcaa79eccff5fe0d2c1b47a9114c2923d
| 2,266
|
py
|
Python
|
tests/changes/api/test_project_commit_details.py
|
vault-the/changes
|
37e23c3141b75e4785cf398d015e3dbca41bdd56
|
[
"Apache-2.0"
] | 443
|
2015-01-03T16:28:39.000Z
|
2021-04-26T16:39:46.000Z
|
tests/changes/api/test_project_commit_details.py
|
vault-the/changes
|
37e23c3141b75e4785cf398d015e3dbca41bdd56
|
[
"Apache-2.0"
] | 12
|
2015-07-30T19:07:16.000Z
|
2016-11-07T23:11:21.000Z
|
tests/changes/api/test_project_commit_details.py
|
vault-the/changes
|
37e23c3141b75e4785cf398d015e3dbca41bdd56
|
[
"Apache-2.0"
] | 47
|
2015-01-09T10:04:00.000Z
|
2020-11-18T17:58:19.000Z
|
from uuid import uuid4
from changes.testutils import APITestCase
class ProjectCommitDetailsTest(APITestCase):
def test_simple(self):
fake_commit_id = uuid4()
project = self.create_project()
build = self.create_build(project)
self.create_job(build)
project2 = self.create_project()
revision = self.create_revision(repository=project2.repository)
source = self.create_source(project2, revision_sha=revision.sha)
build = self.create_build(project2, source=source)
path = '/api/0/projects/{0}/commits/{1}/'.format(
project.id.hex, fake_commit_id)
resp = self.client.get(path)
assert resp.status_code == 404
path = '/api/0/projects/{0}/commits/{1}/'.format(
project2.id.hex, revision.sha)
resp = self.client.get(path)
assert resp.status_code == 200
data = self.unserialize(resp)
assert data['id'] == revision.sha
def test_prefix(self):
fake_commit_id = uuid4()
project = self.create_project()
build = self.create_build(project)
self.create_job(build)
project2 = self.create_project()
revision = self.create_revision(repository=project2.repository)
source = self.create_source(project2, revision_sha=revision.sha)
build = self.create_build(project2, source=source)
path = '/api/0/projects/{0}/commits/{1}/'.format(
project.id.hex, fake_commit_id.hex[:39])
resp = self.client.get(path)
assert resp.status_code == 404
path = '/api/0/projects/{0}/commits/{1}/'.format(
project2.id.hex, revision.sha[:39])
resp = self.client.get(path)
assert resp.status_code == 200
data = self.unserialize(resp)
assert data['id'] == revision.sha
def test_missing_author(self):
project = self.create_project()
revision = self.create_revision(repository=project.repository, author=None)
path = '/api/0/projects/{0}/commits/{1}/'.format(
project.id.hex, revision.sha)
resp = self.client.get(path)
assert resp.status_code == 200
data = self.unserialize(resp)
assert data['id'] == revision.sha
| 32.371429
| 83
| 0.630627
| 273
| 2,266
| 5.106227
| 0.175824
| 0.114778
| 0.060976
| 0.057389
| 0.878766
| 0.878766
| 0.878766
| 0.878766
| 0.840746
| 0.840746
| 0
| 0.027469
| 0.244925
| 2,266
| 69
| 84
| 32.84058
| 0.787259
| 0
| 0
| 0.76
| 0
| 0
| 0.073257
| 0.070609
| 0
| 0
| 0
| 0
| 0.16
| 1
| 0.06
| false
| 0
| 0.04
| 0
| 0.12
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7fca38177b4a378d29e20254f38e24310edbc26a
| 58,149
|
py
|
Python
|
pythonx/neovim_rpc_server_api_info.py
|
CrossR/vim-hug-neovim-rpc
|
35ffb2a1b6f69da5018ce285504b992459bd7682
|
[
"MIT"
] | 2
|
2022-03-26T09:14:20.000Z
|
2022-03-26T17:04:43.000Z
|
pythonx/neovim_rpc_server_api_info.py
|
CrossR/vim-hug-neovim-rpc
|
35ffb2a1b6f69da5018ce285504b992459bd7682
|
[
"MIT"
] | 1
|
2019-12-05T23:47:24.000Z
|
2019-12-06T00:24:35.000Z
|
pythonx/neovim_rpc_server_api_info.py
|
CrossR/vim-hug-neovim-rpc
|
35ffb2a1b6f69da5018ce285504b992459bd7682
|
[
"MIT"
] | 1
|
2019-12-05T14:08:27.000Z
|
2019-12-05T14:08:27.000Z
|
# result of neovim `api_info()`
API_INFO = {
"version": {
"major": 0,
"api_level": 1,
"api_prerelease": False,
"patch": 7,
"api_compatible": 0,
"minor": 1
},
"types": {
"Window": {
"id": 1,
"prefix": "nvim_win_"
},
"Tabpage": {
"id": 2,
"prefix": "nvim_tabpage_"
},
"Buffer": {
"id": 0,
"prefix": "nvim_buf_"
}
},
"functions": [
{
"method": True,
"name": "nvim_buf_line_count",
"return_type": "Integer",
"parameters": [
[
"Buffer",
"buffer"
]
],
"since": 1
},
{
"method": False,
"deprecated_since": 1,
"name": "buffer_get_line",
"return_type": "String",
"parameters": [
[
"Buffer",
"buffer"
],
[
"Integer",
"index"
]
],
"since": 0
},
{
"method": False,
"deprecated_since": 1,
"name": "buffer_set_line",
"return_type": "void",
"parameters": [
[
"Buffer",
"buffer"
],
[
"Integer",
"index"
],
[
"String",
"line"
]
],
"since": 0
},
{
"method": False,
"deprecated_since": 1,
"name": "buffer_del_line",
"return_type": "void",
"parameters": [
[
"Buffer",
"buffer"
],
[
"Integer",
"index"
]
],
"since": 0
},
{
"method": False,
"deprecated_since": 1,
"name": "buffer_get_line_slice",
"return_type": "ArrayOf(String)",
"parameters": [
[
"Buffer",
"buffer"
],
[
"Integer",
"start"
],
[
"Integer",
"end"
],
[
"Boolean",
"include_start"
],
[
"Boolean",
"include_end"
]
],
"since": 0
},
{
"method": True,
"name": "nvim_buf_get_lines",
"return_type": "ArrayOf(String)",
"parameters": [
[
"Buffer",
"buffer"
],
[
"Integer",
"start"
],
[
"Integer",
"end"
],
[
"Boolean",
"strict_indexing"
]
],
"since": 1
},
{
"method": False,
"deprecated_since": 1,
"name": "buffer_set_line_slice",
"return_type": "void",
"parameters": [
[
"Buffer",
"buffer"
],
[
"Integer",
"start"
],
[
"Integer",
"end"
],
[
"Boolean",
"include_start"
],
[
"Boolean",
"include_end"
],
[
"ArrayOf(String)",
"replacement"
]
],
"since": 0
},
{
"method": True,
"name": "nvim_buf_set_lines",
"return_type": "void",
"parameters": [
[
"Buffer",
"buffer"
],
[
"Integer",
"start"
],
[
"Integer",
"end"
],
[
"Boolean",
"strict_indexing"
],
[
"ArrayOf(String)",
"replacement"
]
],
"since": 1
},
{
"method": True,
"name": "nvim_buf_get_var",
"return_type": "Object",
"parameters": [
[
"Buffer",
"buffer"
],
[
"String",
"name"
]
],
"since": 1
},
{
"method": True,
"name": "nvim_buf_set_var",
"return_type": "void",
"parameters": [
[
"Buffer",
"buffer"
],
[
"String",
"name"
],
[
"Object",
"value"
]
],
"since": 1
},
{
"method": True,
"name": "nvim_buf_del_var",
"return_type": "void",
"parameters": [
[
"Buffer",
"buffer"
],
[
"String",
"name"
]
],
"since": 1
},
{
"method": False,
"deprecated_since": 1,
"name": "buffer_set_var",
"return_type": "Object",
"parameters": [
[
"Buffer",
"buffer"
],
[
"String",
"name"
],
[
"Object",
"value"
]
],
"since": 0
},
{
"method": False,
"deprecated_since": 1,
"name": "buffer_del_var",
"return_type": "Object",
"parameters": [
[
"Buffer",
"buffer"
],
[
"String",
"name"
]
],
"since": 0
},
{
"method": True,
"name": "nvim_buf_get_option",
"return_type": "Object",
"parameters": [
[
"Buffer",
"buffer"
],
[
"String",
"name"
]
],
"since": 1
},
{
"method": True,
"name": "nvim_buf_set_option",
"return_type": "void",
"parameters": [
[
"Buffer",
"buffer"
],
[
"String",
"name"
],
[
"Object",
"value"
]
],
"since": 1
},
{
"method": True,
"name": "nvim_buf_get_number",
"return_type": "Integer",
"parameters": [
[
"Buffer",
"buffer"
]
],
"since": 1
},
{
"method": True,
"name": "nvim_buf_get_name",
"return_type": "String",
"parameters": [
[
"Buffer",
"buffer"
]
],
"since": 1
},
{
"method": True,
"name": "nvim_buf_set_name",
"return_type": "void",
"parameters": [
[
"Buffer",
"buffer"
],
[
"String",
"name"
]
],
"since": 1
},
{
"method": True,
"name": "nvim_buf_is_valid",
"return_type": "Boolean",
"parameters": [
[
"Buffer",
"buffer"
]
],
"since": 1
},
{
"method": False,
"deprecated_since": 1,
"name": "buffer_insert",
"return_type": "void",
"parameters": [
[
"Buffer",
"buffer"
],
[
"Integer",
"lnum"
],
[
"ArrayOf(String)",
"lines"
]
],
"since": 0
},
{
"method": True,
"name": "nvim_buf_get_mark",
"return_type": "ArrayOf(Integer, 2)",
"parameters": [
[
"Buffer",
"buffer"
],
[
"String",
"name"
]
],
"since": 1
},
{
"method": True,
"name": "nvim_buf_add_highlight",
"return_type": "Integer",
"parameters": [
[
"Buffer",
"buffer"
],
[
"Integer",
"src_id"
],
[
"String",
"hl_group"
],
[
"Integer",
"line"
],
[
"Integer",
"col_start"
],
[
"Integer",
"col_end"
]
],
"since": 1
},
{
"method": True,
"name": "nvim_buf_clear_highlight",
"return_type": "void",
"parameters": [
[
"Buffer",
"buffer"
],
[
"Integer",
"src_id"
],
[
"Integer",
"line_start"
],
[
"Integer",
"line_end"
]
],
"since": 1
},
{
"method": True,
"name": "nvim_tabpage_list_wins",
"return_type": "ArrayOf(Window)",
"parameters": [
[
"Tabpage",
"tabpage"
]
],
"since": 1
},
{
"method": True,
"name": "nvim_tabpage_get_var",
"return_type": "Object",
"parameters": [
[
"Tabpage",
"tabpage"
],
[
"String",
"name"
]
],
"since": 1
},
{
"method": True,
"name": "nvim_tabpage_set_var",
"return_type": "void",
"parameters": [
[
"Tabpage",
"tabpage"
],
[
"String",
"name"
],
[
"Object",
"value"
]
],
"since": 1
},
{
"method": True,
"name": "nvim_tabpage_del_var",
"return_type": "void",
"parameters": [
[
"Tabpage",
"tabpage"
],
[
"String",
"name"
]
],
"since": 1
},
{
"method": False,
"deprecated_since": 1,
"name": "tabpage_set_var",
"return_type": "Object",
"parameters": [
[
"Tabpage",
"tabpage"
],
[
"String",
"name"
],
[
"Object",
"value"
]
],
"since": 0
},
{
"method": False,
"deprecated_since": 1,
"name": "tabpage_del_var",
"return_type": "Object",
"parameters": [
[
"Tabpage",
"tabpage"
],
[
"String",
"name"
]
],
"since": 0
},
{
"method": True,
"name": "nvim_tabpage_get_win",
"return_type": "Window",
"parameters": [
[
"Tabpage",
"tabpage"
]
],
"since": 1
},
{
"method": True,
"name": "nvim_tabpage_get_number",
"return_type": "Integer",
"parameters": [
[
"Tabpage",
"tabpage"
]
],
"since": 1
},
{
"method": True,
"name": "nvim_tabpage_is_valid",
"return_type": "Boolean",
"parameters": [
[
"Tabpage",
"tabpage"
]
],
"since": 1
},
{
"method": False,
"name": "nvim_ui_attach",
"return_type": "void",
"parameters": [
[
"Integer",
"width"
],
[
"Integer",
"height"
],
[
"Dictionary",
"options"
]
],
"since": 1
},
{
"method": False,
"deprecated_since": 1,
"name": "ui_attach",
"return_type": "void",
"parameters": [
[
"Integer",
"width"
],
[
"Integer",
"height"
],
[
"Boolean",
"enable_rgb"
]
],
"since": 0
},
{
"method": False,
"name": "nvim_ui_detach",
"return_type": "void",
"parameters": [],
"since": 1
},
{
"method": False,
"name": "nvim_ui_try_resize",
"return_type": "void",
"parameters": [
[
"Integer",
"width"
],
[
"Integer",
"height"
]
],
"since": 1
},
{
"method": False,
"name": "nvim_ui_set_option",
"return_type": "void",
"parameters": [
[
"String",
"name"
],
[
"Object",
"value"
]
],
"since": 1
},
{
"method": False,
"name": "nvim_command",
"return_type": "void",
"parameters": [
[
"String",
"command"
]
],
"since": 1
},
{
"method": False,
"name": "nvim_feedkeys",
"return_type": "void",
"parameters": [
[
"String",
"keys"
],
[
"String",
"mode"
],
[
"Boolean",
"escape_csi"
]
],
"since": 1
},
{
"method": False,
"name": "nvim_input",
"return_type": "Integer",
"parameters": [
[
"String",
"keys"
]
],
"since": 1
},
{
"method": False,
"name": "nvim_replace_termcodes",
"return_type": "String",
"parameters": [
[
"String",
"str"
],
[
"Boolean",
"from_part"
],
[
"Boolean",
"do_lt"
],
[
"Boolean",
"special"
]
],
"since": 1
},
{
"method": False,
"name": "nvim_command_output",
"return_type": "String",
"parameters": [
[
"String",
"str"
]
],
"since": 1
},
{
"method": False,
"name": "nvim_eval",
"return_type": "Object",
"parameters": [
[
"String",
"expr"
]
],
"since": 1
},
{
"method": False,
"name": "nvim_call_function",
"return_type": "Object",
"parameters": [
[
"String",
"fname"
],
[
"Array",
"args"
]
],
"since": 1
},
{
"method": False,
"name": "nvim_strwidth",
"return_type": "Integer",
"parameters": [
[
"String",
"str"
]
],
"since": 1
},
{
"method": False,
"name": "nvim_list_runtime_paths",
"return_type": "ArrayOf(String)",
"parameters": [],
"since": 1
},
{
"method": False,
"name": "nvim_set_current_dir",
"return_type": "void",
"parameters": [
[
"String",
"dir"
]
],
"since": 1
},
{
"method": False,
"name": "nvim_get_current_line",
"return_type": "String",
"parameters": [],
"since": 1
},
{
"method": False,
"name": "nvim_set_current_line",
"return_type": "void",
"parameters": [
[
"String",
"line"
]
],
"since": 1
},
{
"method": False,
"name": "nvim_del_current_line",
"return_type": "void",
"parameters": [],
"since": 1
},
{
"method": False,
"name": "nvim_get_var",
"return_type": "Object",
"parameters": [
[
"String",
"name"
]
],
"since": 1
},
{
"method": False,
"name": "nvim_set_var",
"return_type": "void",
"parameters": [
[
"String",
"name"
],
[
"Object",
"value"
]
],
"since": 1
},
{
"method": False,
"name": "nvim_del_var",
"return_type": "void",
"parameters": [
[
"String",
"name"
]
],
"since": 1
},
{
"method": False,
"deprecated_since": 1,
"name": "vim_set_var",
"return_type": "Object",
"parameters": [
[
"String",
"name"
],
[
"Object",
"value"
]
],
"since": 0
},
{
"method": False,
"deprecated_since": 1,
"name": "vim_del_var",
"return_type": "Object",
"parameters": [
[
"String",
"name"
]
],
"since": 0
},
{
"method": False,
"name": "nvim_get_vvar",
"return_type": "Object",
"parameters": [
[
"String",
"name"
]
],
"since": 1
},
{
"method": False,
"name": "nvim_get_option",
"return_type": "Object",
"parameters": [
[
"String",
"name"
]
],
"since": 1
},
{
"method": False,
"name": "nvim_set_option",
"return_type": "void",
"parameters": [
[
"String",
"name"
],
[
"Object",
"value"
]
],
"since": 1
},
{
"method": False,
"name": "nvim_out_write",
"return_type": "void",
"parameters": [
[
"String",
"str"
]
],
"since": 1
},
{
"method": False,
"name": "nvim_err_write",
"return_type": "void",
"parameters": [
[
"String",
"str"
]
],
"since": 1
},
{
"method": False,
"name": "nvim_err_writeln",
"return_type": "void",
"parameters": [
[
"String",
"str"
]
],
"since": 1
},
{
"method": False,
"name": "nvim_list_bufs",
"return_type": "ArrayOf(Buffer)",
"parameters": [],
"since": 1
},
{
"method": False,
"name": "nvim_get_current_buf",
"return_type": "Buffer",
"parameters": [],
"since": 1
},
{
"method": False,
"name": "nvim_set_current_buf",
"return_type": "void",
"parameters": [
[
"Buffer",
"buffer"
]
],
"since": 1
},
{
"method": False,
"name": "nvim_list_wins",
"return_type": "ArrayOf(Window)",
"parameters": [],
"since": 1
},
{
"method": False,
"name": "nvim_get_current_win",
"return_type": "Window",
"parameters": [],
"since": 1
},
{
"method": False,
"name": "nvim_set_current_win",
"return_type": "void",
"parameters": [
[
"Window",
"window"
]
],
"since": 1
},
{
"method": False,
"name": "nvim_list_tabpages",
"return_type": "ArrayOf(Tabpage)",
"parameters": [],
"since": 1
},
{
"method": False,
"name": "nvim_get_current_tabpage",
"return_type": "Tabpage",
"parameters": [],
"since": 1
},
{
"method": False,
"name": "nvim_set_current_tabpage",
"return_type": "void",
"parameters": [
[
"Tabpage",
"tabpage"
]
],
"since": 1
},
{
"method": False,
"name": "nvim_subscribe",
"return_type": "void",
"parameters": [
[
"String",
"event"
]
],
"since": 1
},
{
"method": False,
"name": "nvim_unsubscribe",
"return_type": "void",
"parameters": [
[
"String",
"event"
]
],
"since": 1
},
{
"method": False,
"name": "nvim_get_color_by_name",
"return_type": "Integer",
"parameters": [
[
"String",
"name"
]
],
"since": 1
},
{
"method": False,
"name": "nvim_get_color_map",
"return_type": "Dictionary",
"parameters": [],
"since": 1
},
{
"method": False,
"name": "nvim_get_api_info",
"return_type": "Array",
"parameters": [],
"since": 1
},
{
"method": False,
"name": "nvim_call_atomic",
"return_type": "Array",
"parameters": [
[
"Array",
"calls"
]
],
"since": 1
},
{
"method": True,
"name": "nvim_win_get_buf",
"return_type": "Buffer",
"parameters": [
[
"Window",
"window"
]
],
"since": 1
},
{
"method": True,
"name": "nvim_win_get_cursor",
"return_type": "ArrayOf(Integer, 2)",
"parameters": [
[
"Window",
"window"
]
],
"since": 1
},
{
"method": True,
"name": "nvim_win_set_cursor",
"return_type": "void",
"parameters": [
[
"Window",
"window"
],
[
"ArrayOf(Integer, 2)",
"pos"
]
],
"since": 1
},
{
"method": True,
"name": "nvim_win_get_height",
"return_type": "Integer",
"parameters": [
[
"Window",
"window"
]
],
"since": 1
},
{
"method": True,
"name": "nvim_win_set_height",
"return_type": "void",
"parameters": [
[
"Window",
"window"
],
[
"Integer",
"height"
]
],
"since": 1
},
{
"method": True,
"name": "nvim_win_get_width",
"return_type": "Integer",
"parameters": [
[
"Window",
"window"
]
],
"since": 1
},
{
"method": True,
"name": "nvim_win_set_width",
"return_type": "void",
"parameters": [
[
"Window",
"window"
],
[
"Integer",
"width"
]
],
"since": 1
},
{
"method": True,
"name": "nvim_win_get_var",
"return_type": "Object",
"parameters": [
[
"Window",
"window"
],
[
"String",
"name"
]
],
"since": 1
},
{
"method": True,
"name": "nvim_win_set_var",
"return_type": "void",
"parameters": [
[
"Window",
"window"
],
[
"String",
"name"
],
[
"Object",
"value"
]
],
"since": 1
},
{
"method": True,
"name": "nvim_win_del_var",
"return_type": "void",
"parameters": [
[
"Window",
"window"
],
[
"String",
"name"
]
],
"since": 1
},
{
"method": False,
"deprecated_since": 1,
"name": "window_set_var",
"return_type": "Object",
"parameters": [
[
"Window",
"window"
],
[
"String",
"name"
],
[
"Object",
"value"
]
],
"since": 0
},
{
"method": False,
"deprecated_since": 1,
"name": "window_del_var",
"return_type": "Object",
"parameters": [
[
"Window",
"window"
],
[
"String",
"name"
]
],
"since": 0
},
{
"method": True,
"name": "nvim_win_get_option",
"return_type": "Object",
"parameters": [
[
"Window",
"window"
],
[
"String",
"name"
]
],
"since": 1
},
{
"method": True,
"name": "nvim_win_set_option",
"return_type": "void",
"parameters": [
[
"Window",
"window"
],
[
"String",
"name"
],
[
"Object",
"value"
]
],
"since": 1
},
{
"method": True,
"name": "nvim_win_get_position",
"return_type": "ArrayOf(Integer, 2)",
"parameters": [
[
"Window",
"window"
]
],
"since": 1
},
{
"method": True,
"name": "nvim_win_get_tabpage",
"return_type": "Tabpage",
"parameters": [
[
"Window",
"window"
]
],
"since": 1
},
{
"method": True,
"name": "nvim_win_get_number",
"return_type": "Integer",
"parameters": [
[
"Window",
"window"
]
],
"since": 1
},
{
"method": True,
"name": "nvim_win_is_valid",
"return_type": "Boolean",
"parameters": [
[
"Window",
"window"
]
],
"since": 1
},
{
"method": True,
"deprecated_since": 1,
"name": "buffer_line_count",
"return_type": "Integer",
"parameters": [
[
"Buffer",
"buffer"
]
],
"since": 0
},
{
"method": True,
"deprecated_since": 1,
"name": "buffer_get_lines",
"return_type": "ArrayOf(String)",
"parameters": [
[
"Buffer",
"buffer"
],
[
"Integer",
"start"
],
[
"Integer",
"end"
],
[
"Boolean",
"strict_indexing"
]
],
"since": 0
},
{
"method": True,
"deprecated_since": 1,
"name": "buffer_set_lines",
"return_type": "void",
"parameters": [
[
"Buffer",
"buffer"
],
[
"Integer",
"start"
],
[
"Integer",
"end"
],
[
"Boolean",
"strict_indexing"
],
[
"ArrayOf(String)",
"replacement"
]
],
"since": 0
},
{
"method": True,
"deprecated_since": 1,
"name": "buffer_get_var",
"return_type": "Object",
"parameters": [
[
"Buffer",
"buffer"
],
[
"String",
"name"
]
],
"since": 0
},
{
"method": True,
"deprecated_since": 1,
"name": "buffer_get_option",
"return_type": "Object",
"parameters": [
[
"Buffer",
"buffer"
],
[
"String",
"name"
]
],
"since": 0
},
{
"method": True,
"deprecated_since": 1,
"name": "buffer_set_option",
"return_type": "void",
"parameters": [
[
"Buffer",
"buffer"
],
[
"String",
"name"
],
[
"Object",
"value"
]
],
"since": 0
},
{
"method": True,
"deprecated_since": 1,
"name": "buffer_get_number",
"return_type": "Integer",
"parameters": [
[
"Buffer",
"buffer"
]
],
"since": 0
},
{
"method": True,
"deprecated_since": 1,
"name": "buffer_get_name",
"return_type": "String",
"parameters": [
[
"Buffer",
"buffer"
]
],
"since": 0
},
{
"method": True,
"deprecated_since": 1,
"name": "buffer_set_name",
"return_type": "void",
"parameters": [
[
"Buffer",
"buffer"
],
[
"String",
"name"
]
],
"since": 0
},
{
"method": True,
"deprecated_since": 1,
"name": "buffer_is_valid",
"return_type": "Boolean",
"parameters": [
[
"Buffer",
"buffer"
]
],
"since": 0
},
{
"method": True,
"deprecated_since": 1,
"name": "buffer_get_mark",
"return_type": "ArrayOf(Integer, 2)",
"parameters": [
[
"Buffer",
"buffer"
],
[
"String",
"name"
]
],
"since": 0
},
{
"method": True,
"deprecated_since": 1,
"name": "buffer_add_highlight",
"return_type": "Integer",
"parameters": [
[
"Buffer",
"buffer"
],
[
"Integer",
"src_id"
],
[
"String",
"hl_group"
],
[
"Integer",
"line"
],
[
"Integer",
"col_start"
],
[
"Integer",
"col_end"
]
],
"since": 0
},
{
"method": True,
"deprecated_since": 1,
"name": "buffer_clear_highlight",
"return_type": "void",
"parameters": [
[
"Buffer",
"buffer"
],
[
"Integer",
"src_id"
],
[
"Integer",
"line_start"
],
[
"Integer",
"line_end"
]
],
"since": 0
},
{
"method": True,
"deprecated_since": 1,
"name": "tabpage_get_windows",
"return_type": "ArrayOf(Window)",
"parameters": [
[
"Tabpage",
"tabpage"
]
],
"since": 0
},
{
"method": True,
"deprecated_since": 1,
"name": "tabpage_get_var",
"return_type": "Object",
"parameters": [
[
"Tabpage",
"tabpage"
],
[
"String",
"name"
]
],
"since": 0
},
{
"method": True,
"deprecated_since": 1,
"name": "tabpage_get_window",
"return_type": "Window",
"parameters": [
[
"Tabpage",
"tabpage"
]
],
"since": 0
},
{
"method": True,
"deprecated_since": 1,
"name": "tabpage_is_valid",
"return_type": "Boolean",
"parameters": [
[
"Tabpage",
"tabpage"
]
],
"since": 0
},
{
"method": False,
"deprecated_since": 1,
"name": "ui_detach",
"return_type": "void",
"parameters": [],
"since": 0
},
{
"method": False,
"deprecated_since": 1,
"name": "ui_try_resize",
"return_type": "Object",
"parameters": [
[
"Integer",
"width"
],
[
"Integer",
"height"
]
],
"since": 0
},
{
"method": False,
"deprecated_since": 1,
"name": "vim_command",
"return_type": "void",
"parameters": [
[
"String",
"command"
]
],
"since": 0
},
{
"method": False,
"deprecated_since": 1,
"name": "vim_feedkeys",
"return_type": "void",
"parameters": [
[
"String",
"keys"
],
[
"String",
"mode"
],
[
"Boolean",
"escape_csi"
]
],
"since": 0
},
{
"method": False,
"deprecated_since": 1,
"name": "vim_input",
"return_type": "Integer",
"parameters": [
[
"String",
"keys"
]
],
"since": 0
},
{
"method": False,
"deprecated_since": 1,
"name": "vim_replace_termcodes",
"return_type": "String",
"parameters": [
[
"String",
"str"
],
[
"Boolean",
"from_part"
],
[
"Boolean",
"do_lt"
],
[
"Boolean",
"special"
]
],
"since": 0
},
{
"method": False,
"deprecated_since": 1,
"name": "vim_command_output",
"return_type": "String",
"parameters": [
[
"String",
"str"
]
],
"since": 0
},
{
"method": False,
"deprecated_since": 1,
"name": "vim_eval",
"return_type": "Object",
"parameters": [
[
"String",
"expr"
]
],
"since": 0
},
{
"method": False,
"deprecated_since": 1,
"name": "vim_call_function",
"return_type": "Object",
"parameters": [
[
"String",
"fname"
],
[
"Array",
"args"
]
],
"since": 0
},
{
"method": False,
"deprecated_since": 1,
"name": "vim_strwidth",
"return_type": "Integer",
"parameters": [
[
"String",
"str"
]
],
"since": 0
},
{
"method": False,
"deprecated_since": 1,
"name": "vim_list_runtime_paths",
"return_type": "ArrayOf(String)",
"parameters": [],
"since": 0
},
{
"method": False,
"deprecated_since": 1,
"name": "vim_change_directory",
"return_type": "void",
"parameters": [
[
"String",
"dir"
]
],
"since": 0
},
{
"method": False,
"deprecated_since": 1,
"name": "vim_get_current_line",
"return_type": "String",
"parameters": [],
"since": 0
},
{
"method": False,
"deprecated_since": 1,
"name": "vim_set_current_line",
"return_type": "void",
"parameters": [
[
"String",
"line"
]
],
"since": 0
},
{
"method": False,
"deprecated_since": 1,
"name": "vim_del_current_line",
"return_type": "void",
"parameters": [],
"since": 0
},
{
"method": False,
"deprecated_since": 1,
"name": "vim_get_var",
"return_type": "Object",
"parameters": [
[
"String",
"name"
]
],
"since": 0
},
{
"method": False,
"deprecated_since": 1,
"name": "vim_get_vvar",
"return_type": "Object",
"parameters": [
[
"String",
"name"
]
],
"since": 0
},
{
"method": False,
"deprecated_since": 1,
"name": "vim_get_option",
"return_type": "Object",
"parameters": [
[
"String",
"name"
]
],
"since": 0
},
{
"method": False,
"deprecated_since": 1,
"name": "vim_set_option",
"return_type": "void",
"parameters": [
[
"String",
"name"
],
[
"Object",
"value"
]
],
"since": 0
},
{
"method": False,
"deprecated_since": 1,
"name": "vim_out_write",
"return_type": "void",
"parameters": [
[
"String",
"str"
]
],
"since": 0
},
{
"method": False,
"deprecated_since": 1,
"name": "vim_err_write",
"return_type": "void",
"parameters": [
[
"String",
"str"
]
],
"since": 0
},
{
"method": False,
"deprecated_since": 1,
"name": "vim_report_error",
"return_type": "void",
"parameters": [
[
"String",
"str"
]
],
"since": 0
},
{
"method": False,
"deprecated_since": 1,
"name": "vim_get_buffers",
"return_type": "ArrayOf(Buffer)",
"parameters": [],
"since": 0
},
{
"method": False,
"deprecated_since": 1,
"name": "vim_get_current_buffer",
"return_type": "Buffer",
"parameters": [],
"since": 0
},
{
"method": False,
"deprecated_since": 1,
"name": "vim_set_current_buffer",
"return_type": "void",
"parameters": [
[
"Buffer",
"buffer"
]
],
"since": 0
},
{
"method": False,
"deprecated_since": 1,
"name": "vim_get_windows",
"return_type": "ArrayOf(Window)",
"parameters": [],
"since": 0
},
{
"method": False,
"deprecated_since": 1,
"name": "vim_get_current_window",
"return_type": "Window",
"parameters": [],
"since": 0
},
{
"method": False,
"deprecated_since": 1,
"name": "vim_set_current_window",
"return_type": "void",
"parameters": [
[
"Window",
"window"
]
],
"since": 0
},
{
"method": False,
"deprecated_since": 1,
"name": "vim_get_tabpages",
"return_type": "ArrayOf(Tabpage)",
"parameters": [],
"since": 0
},
{
"method": False,
"deprecated_since": 1,
"name": "vim_get_current_tabpage",
"return_type": "Tabpage",
"parameters": [],
"since": 0
},
{
"method": False,
"deprecated_since": 1,
"name": "vim_set_current_tabpage",
"return_type": "void",
"parameters": [
[
"Tabpage",
"tabpage"
]
],
"since": 0
},
{
"method": False,
"deprecated_since": 1,
"name": "vim_subscribe",
"return_type": "void",
"parameters": [
[
"String",
"event"
]
],
"since": 0
},
{
"method": False,
"deprecated_since": 1,
"name": "vim_unsubscribe",
"return_type": "void",
"parameters": [
[
"String",
"event"
]
],
"since": 0
},
{
"method": False,
"deprecated_since": 1,
"name": "vim_name_to_color",
"return_type": "Integer",
"parameters": [
[
"String",
"name"
]
],
"since": 0
},
{
"method": False,
"deprecated_since": 1,
"name": "vim_get_color_map",
"return_type": "Dictionary",
"parameters": [],
"since": 0
},
{
"method": False,
"deprecated_since": 1,
"name": "vim_get_api_info",
"return_type": "Array",
"parameters": [],
"since": 0
},
{
"method": True,
"deprecated_since": 1,
"name": "window_get_buffer",
"return_type": "Buffer",
"parameters": [
[
"Window",
"window"
]
],
"since": 0
},
{
"method": True,
"deprecated_since": 1,
"name": "window_get_cursor",
"return_type": "ArrayOf(Integer, 2)",
"parameters": [
[
"Window",
"window"
]
],
"since": 0
},
{
"method": True,
"deprecated_since": 1,
"name": "window_set_cursor",
"return_type": "void",
"parameters": [
[
"Window",
"window"
],
[
"ArrayOf(Integer, 2)",
"pos"
]
],
"since": 0
},
{
"method": True,
"deprecated_since": 1,
"name": "window_get_height",
"return_type": "Integer",
"parameters": [
[
"Window",
"window"
]
],
"since": 0
},
{
"method": True,
"deprecated_since": 1,
"name": "window_set_height",
"return_type": "void",
"parameters": [
[
"Window",
"window"
],
[
"Integer",
"height"
]
],
"since": 0
},
{
"method": True,
"deprecated_since": 1,
"name": "window_get_width",
"return_type": "Integer",
"parameters": [
[
"Window",
"window"
]
],
"since": 0
},
{
"method": True,
"deprecated_since": 1,
"name": "window_set_width",
"return_type": "void",
"parameters": [
[
"Window",
"window"
],
[
"Integer",
"width"
]
],
"since": 0
},
{
"method": True,
"deprecated_since": 1,
"name": "window_get_var",
"return_type": "Object",
"parameters": [
[
"Window",
"window"
],
[
"String",
"name"
]
],
"since": 0
},
{
"method": True,
"deprecated_since": 1,
"name": "window_get_option",
"return_type": "Object",
"parameters": [
[
"Window",
"window"
],
[
"String",
"name"
]
],
"since": 0
},
{
"method": True,
"deprecated_since": 1,
"name": "window_set_option",
"return_type": "void",
"parameters": [
[
"Window",
"window"
],
[
"String",
"name"
],
[
"Object",
"value"
]
],
"since": 0
},
{
"method": True,
"deprecated_since": 1,
"name": "window_get_position",
"return_type": "ArrayOf(Integer, 2)",
"parameters": [
[
"Window",
"window"
]
],
"since": 0
},
{
"method": True,
"deprecated_since": 1,
"name": "window_get_tabpage",
"return_type": "Tabpage",
"parameters": [
[
"Window",
"window"
]
],
"since": 0
},
{
"method": True,
"deprecated_since": 1,
"name": "window_is_valid",
"return_type": "Boolean",
"parameters": [
[
"Window",
"window"
]
],
"since": 0
}
],
"error_types": {
"Validation": {
"id": 1
},
"Exception": {
"id": 0
}
}
}
| 24.504425
| 49
| 0.257941
| 2,829
| 58,149
| 5.070343
| 0.049487
| 0.111545
| 0.090351
| 0.112939
| 0.971138
| 0.962354
| 0.943949
| 0.883854
| 0.812395
| 0.717094
| 0
| 0.01186
| 0.62443
| 58,149
| 2,372
| 50
| 24.514755
| 0.644947
| 0.000499
| 0
| 0.618305
| 0
| 0
| 0.241698
| 0.009498
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3d117e930339acf54d69f37441294dbab4c58c09
| 274
|
py
|
Python
|
p06.py
|
AI-Rabbit/Python-problems
|
3782abaa78b7835c197d008531001b50c5e632c6
|
[
"MIT"
] | null | null | null |
p06.py
|
AI-Rabbit/Python-problems
|
3782abaa78b7835c197d008531001b50c5e632c6
|
[
"MIT"
] | null | null | null |
p06.py
|
AI-Rabbit/Python-problems
|
3782abaa78b7835c197d008531001b50c5e632c6
|
[
"MIT"
] | null | null | null |
# p06.py
# Print a block-letter banner. Each row string already ends in '\n', so
# print() leaves a blank line after every row — preserved deliberately.
BANNER_ROWS = (
    '* ****** ****** ******\n',
    '* * * * * *\n',
    '* * * * * *\n',
    '* * * * * ******\n',
    '* * * * * *\n',
    '* * * * * *\n',
    '* ****** ****** ******\n',
)
for _row in BANNER_ROWS:
    print(_row)
| 34.25
| 37
| 0.171533
| 16
| 274
| 2.9375
| 0.25
| 0.893617
| 1.404255
| 1.531915
| 0.893617
| 0.893617
| 0.893617
| 0.893617
| 0.893617
| 0.893617
| 0
| 0.012739
| 0.427007
| 274
| 8
| 38
| 34.25
| 0.286624
| 0.021898
| 0
| 0.857143
| 0
| 0
| 0.726923
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 16
|
3d31dea12004b1c08b1f23c235816f75c1b3ad95
| 1,759
|
py
|
Python
|
gwapi/data.py
|
genwch/gwapi
|
00d0f631062207ccf47cc12ffd4d3597605fa570
|
[
"MIT"
] | null | null | null |
gwapi/data.py
|
genwch/gwapi
|
00d0f631062207ccf47cc12ffd4d3597605fa570
|
[
"MIT"
] | null | null | null |
gwapi/data.py
|
genwch/gwapi
|
00d0f631062207ccf47cc12ffd4d3597605fa570
|
[
"MIT"
] | null | null | null |
import gwcomm as comm
lg = comm.logger(__name__)
comm.add_env(["API_HTTP", "API_HOST", "API_PORT",
"API_DATA", "API_USR", "API_PWD"])
def get(url):
    """GET ``url`` (a path) from the configured data API and return its JSON.

    The base URL comes from ``conf["api"]["data"]`` when set, otherwise it is
    assembled from the individual ``api_http``/``api_host``/``api_port``/
    ``api_data`` settings with local-dev defaults.

    Returns the decoded JSON body on HTTP 200; on a connection failure or a
    non-200 response, logs the error, clears the cached auth token (forcing a
    re-login on the next call) and returns ``{}``.
    """
    import requests
    from .auth import get_header
    conf = comm.sysconf
    # Prefer an explicitly configured data URL; evaluate the lookup once
    # instead of the original's double-evaluated ternary.
    dataurl = conf.get("api", {}).get("data", "")
    if dataurl == "":
        dataurl = "{}://{}:{}{}".format(
            conf.get("api_http", "http"), conf.get("api_host", "127.0.0.1"),
            conf.get("api_port", "5000"), conf.get("api_data", "/"))
    url = "{}{}".format(dataurl, url)
    header = get_header(conf.get("token", None))
    lg.info(f"init - url: {url}")
    try:
        res = requests.get(url, headers=header)
    except requests.exceptions.RequestException:
        # Narrowed from a bare ``except:`` so programming errors and
        # KeyboardInterrupt/SystemExit propagate instead of being swallowed.
        lg.error(f"Error - connection fail - {url}")
        comm.sysconf["token"] = None
        return {}
    if res.status_code != 200:
        lg.error(f"Error - {res.json()}")
        comm.sysconf["token"] = None
        return {}
    return res.json()
def upsert(url, data):
    """POST ``data`` as JSON to ``url`` (a path) on the configured data API.

    Builds the base URL exactly like :func:`get`: an explicit
    ``conf["api"]["data"]`` wins, otherwise the ``api_*`` settings are
    stitched together with local-dev defaults.

    Returns ``True`` on HTTP 200. On a connection failure or any other
    status, logs the error, clears the cached auth token and returns
    ``False``.
    """
    import requests
    from .auth import get_header
    conf = comm.sysconf
    # Prefer an explicitly configured data URL; evaluate the lookup once
    # instead of the original's double-evaluated ternary.
    dataurl = conf.get("api", {}).get("data", "")
    if dataurl == "":
        dataurl = "{}://{}:{}{}".format(
            conf.get("api_http", "http"), conf.get("api_host", "127.0.0.1"),
            conf.get("api_port", "5000"), conf.get("api_data", "/"))
    url = "{}{}".format(dataurl, url)
    header = get_header(conf.get("token", None))
    lg.info(f"init - url: {url}")
    try:
        res = requests.post(url, json=data, headers=header)
    except requests.exceptions.RequestException:
        # Narrowed from a bare ``except:`` so programming errors and
        # KeyboardInterrupt/SystemExit propagate instead of being swallowed.
        lg.error(f"Error - connection fail - {url}")
        comm.sysconf["token"] = None
        return False
    if res.status_code != 200:
        lg.error(f"Error - {res.json()}")
        comm.sysconf["token"] = None
        return False
    return True
| 34.490196
| 174
| 0.555998
| 237
| 1,759
| 4.021097
| 0.227848
| 0.102833
| 0.125918
| 0.054565
| 0.839454
| 0.839454
| 0.828961
| 0.828961
| 0.828961
| 0.828961
| 0
| 0.019146
| 0.22797
| 1,759
| 50
| 175
| 35.18
| 0.682622
| 0
| 0
| 0.772727
| 0
| 0
| 0.211484
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.113636
| 0
| 0.295455
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3d4ae2784a5e9c2eaf49eb09db9820975c71de4d
| 425
|
py
|
Python
|
wilmes/__init__.py
|
suutari/wilmes
|
2f2782a36206174ba160f1a8931550ac6b2971f6
|
[
"MIT"
] | null | null | null |
wilmes/__init__.py
|
suutari/wilmes
|
2f2782a36206174ba160f1a8931550ac6b2971f6
|
[
"MIT"
] | null | null | null |
wilmes/__init__.py
|
suutari/wilmes
|
2f2782a36206174ba160f1a8931550ac6b2971f6
|
[
"MIT"
] | null | null | null |
"""Public namespace for the ``wilmes`` package.

Re-exports the client classes and data types from the private ``_client``
and ``_types`` modules so callers can import everything directly from
``wilmes``.
"""
from ._client import Client, Connection
from ._types import (
    Message,
    MessageId,
    MessageInfo,
    NewsItem,
    NewsItemId,
    NewsItemInfo,
    Person,
    Pupil,
    PupilId,
    ReplyMessage,
)
# Explicit public API for ``from wilmes import *``; mirrors the imports
# above — keep both lists in sync when adding a re-export.
__all__ = [
    'Client',
    'Connection',
    'Message',
    'MessageId',
    'MessageInfo',
    'NewsItem',
    'NewsItemId',
    'NewsItemInfo',
    'Person',
    'Pupil',
    'PupilId',
    'ReplyMessage',
]
| 14.655172
| 39
| 0.578824
| 31
| 425
| 7.741935
| 0.516129
| 0.133333
| 0.225
| 0.291667
| 0.725
| 0.725
| 0.725
| 0.725
| 0.725
| 0.725
| 0
| 0
| 0.294118
| 425
| 28
| 40
| 15.178571
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0.242353
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.074074
| 0
| 0.074074
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
43e48f6cce01b3b96d54c79f237c7f88d61198a3
| 3,363
|
py
|
Python
|
tests/test_dfe.py
|
msohaibalam/forest-benchmarking
|
40f5fd5235803204b34fa8ba1ced4ef2e0f3098d
|
[
"Apache-2.0"
] | null | null | null |
tests/test_dfe.py
|
msohaibalam/forest-benchmarking
|
40f5fd5235803204b34fa8ba1ced4ef2e0f3098d
|
[
"Apache-2.0"
] | null | null | null |
tests/test_dfe.py
|
msohaibalam/forest-benchmarking
|
40f5fd5235803204b34fa8ba1ced4ef2e0f3098d
|
[
"Apache-2.0"
] | null | null | null |
from math import pi
import numpy as np
from pyquil import Program
from pyquil.gates import CZ, RX, CNOT, H
from forest_benchmarking.dfe import generate_process_dfe_experiment, acquire_dfe_data, \
direct_fidelity_estimate, generate_state_dfe_experiment, ratio_variance
def test_exhaustive_gate_dfe_noiseless_qvm(qvm, benchmarker):
    """Process DFE on a noiseless QVM must report fidelity exactly 1.0 with
    zero variance for RX(pi/2), CZ and CNOT."""
    qvm.qam.random_seed = 1

    def _assert_perfect_process(program):
        # One DFE round trip for a single gate program; all expectations and
        # calibrations must be exactly +/-1 on a noiseless simulator.
        process_exp = generate_process_dfe_experiment(program, compiler=benchmarker)
        data, cal = acquire_dfe_data(process_exp, qvm, var=0.01)
        est = direct_fidelity_estimate(data, cal, 'process')
        assert est.fid_point_est == 1.0
        assert est.fid_var_est == 0.0
        assert all(exp == 1.0 for exp in data.expectation)
        # Loop variable renamed: the original wrote ``for cal in
        # cal.expectation``, shadowing ``cal`` inside its own generator.
        assert all(np.abs(c) == 1.0 for c in cal.expectation)

    _assert_perfect_process(Program([RX(pi / 2, 0)]))
    _assert_perfect_process(Program([CZ(0, 1)]))
    _assert_perfect_process(Program([CNOT(0, 1)]))
def test_exhaustive_state_dfe_noiseless_qvm(qvm, benchmarker):
    """State DFE on a noiseless QVM must report fidelity exactly 1.0 with
    zero variance for an RX(pi/2) state and two Bell-type preparations."""
    qvm.qam.random_seed = 1

    def _assert_perfect_state(program):
        # One DFE round trip for a single state-preparation program; all
        # expectations and calibrations must be exactly +/-1 without noise.
        state_exp = generate_state_dfe_experiment(program, compiler=benchmarker)
        data, cal = acquire_dfe_data(state_exp, qvm, var=0.01)
        est = direct_fidelity_estimate(data, cal, 'state')
        assert est.fid_point_est == 1.0
        assert est.fid_var_est == 0.0
        assert all(exp == 1.0 for exp in data.expectation)
        # Loop variable renamed: the original wrote ``for cal in
        # cal.expectation``, shadowing ``cal`` inside its own generator.
        assert all(np.abs(c) == 1.0 for c in cal.expectation)

    _assert_perfect_state(Program([RX(pi / 2, 0)]))
    _assert_perfect_state(Program([H(0), H(1), CZ(0, 1)]))
    _assert_perfect_state(Program([H(0), CNOT(0, 1)]))
def test_ratio_variance():
    """Degenerate-uncertainty sanity checks for ratio_variance."""
    cases = [
        # Zero uncertainty in both inputs -> zero uncertainty in the ratio.
        ((1, 0, 1, 0), 0),
        # Exact denominator equal to one -> ratio variance is just the
        # numerator's variance ...
        ((1, 1, 1, 0), 1),
        # ... independent of the numerator's expectation value.
        ((2, 1, 1, 0), 1),
    ]
    for args, expected in cases:
        assert ratio_variance(*args) == expected
| 46.708333
| 100
| 0.709188
| 557
| 3,363
| 4.089767
| 0.140036
| 0.019315
| 0.063213
| 0.068481
| 0.799824
| 0.799824
| 0.744513
| 0.72432
| 0.72432
| 0.72432
| 0
| 0.03641
| 0.175141
| 3,363
| 71
| 101
| 47.366197
| 0.784787
| 0.089801
| 0
| 0.678571
| 1
| 0
| 0.01178
| 0
| 0
| 0
| 0
| 0
| 0.482143
| 1
| 0.053571
| false
| 0
| 0.089286
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a1000e4bed6374c7a25630b041fcd352c8a305f6
| 206
|
py
|
Python
|
Udemy/python-django-dev-to-deployment/btre_django/pages/views.py
|
mohammedelzanaty/myRoad2BeFullStack
|
eea3a5edb6c6a999136b04fdaea6ce0c81137a58
|
[
"MIT"
] | 2
|
2021-04-21T12:05:01.000Z
|
2022-01-19T09:58:38.000Z
|
Udemy/python-django-dev-to-deployment/btre_django/pages/views.py
|
mohammedelzanaty/myRoad2BeFullStack
|
eea3a5edb6c6a999136b04fdaea6ce0c81137a58
|
[
"MIT"
] | 34
|
2019-12-26T11:21:42.000Z
|
2022-02-27T19:55:10.000Z
|
Udemy/python-django-dev-to-deployment/btre_django/pages/views.py
|
mohammedelzanaty/myRoad2BeFullStack
|
eea3a5edb6c6a999136b04fdaea6ce0c81137a58
|
[
"MIT"
] | 2
|
2021-08-15T07:59:36.000Z
|
2022-01-16T06:17:32.000Z
|
from django.http import request
from django.shortcuts import render
def index(request):
    """Render the site landing page."""
    template_name = 'pages/index.html'
    return render(request, template_name)
def about(request):
    """Render the static about page."""
    template_name = 'pages/about.html'
    return render(request, template_name)
| 18.727273
| 46
| 0.747573
| 28
| 206
| 5.5
| 0.464286
| 0.12987
| 0.246753
| 0.337662
| 0.402597
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.145631
| 206
| 10
| 47
| 20.6
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0.15534
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
a118608ad6201bc6386acd6cfacc509378542ad9
| 65,331
|
py
|
Python
|
support_oppose_deciding/controllers.py
|
ranadeepmitra21/WeVoteServer_Ranadeep
|
505945209aad5cef21e118b5bfa7f63d0bd462da
|
[
"MIT"
] | 2
|
2021-11-11T11:02:02.000Z
|
2021-11-11T11:02:15.000Z
|
support_oppose_deciding/controllers.py
|
ranadeepmitra21/WeVoteServer_Ranadeep
|
505945209aad5cef21e118b5bfa7f63d0bd462da
|
[
"MIT"
] | null | null | null |
support_oppose_deciding/controllers.py
|
ranadeepmitra21/WeVoteServer_Ranadeep
|
505945209aad5cef21e118b5bfa7f63d0bd462da
|
[
"MIT"
] | null | null | null |
# support_oppose_deciding/controllers.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from ballot.controllers import figure_out_google_civic_election_id_voter_is_watching
from ballot.models import CANDIDATE, MEASURE, OFFICE, BallotItemListManager
from candidate.models import CandidateManager, CandidateListManager
from friend.models import FriendManager
from measure.models import ContestMeasureManager
from django.http import HttpResponse
from follow.models import FollowOrganizationList
import json
from position.controllers import update_or_create_position_network_score_wrapper
from position.models import ANY_STANCE, FRIENDS_ONLY, SUPPORT, OPPOSE, PositionManager, PositionListManager, PUBLIC_ONLY
from voter.models import fetch_voter_id_from_voter_device_link, VoterManager
import wevote_functions.admin
from wevote_functions.functions import convert_to_int, is_voter_device_id_valid, positive_value_exists
logger = wevote_functions.admin.get_logger(__name__)
def position_oppose_count_for_ballot_item_for_api(voter_device_id,
                                                  candidate_id, candidate_we_vote_id,
                                                  measure_id, measure_we_vote_id):
    """Count OPPOSE positions on one ballot item (candidate or measure)
    within this voter's network.

    Thin convenience wrapper: delegates to positions_count_for_api with the
    stance pinned to OPPOSE.
    """
    return positions_count_for_api(
        voter_device_id,
        candidate_id, candidate_we_vote_id,
        measure_id, measure_we_vote_id,
        OPPOSE)
def positions_count_for_api(voter_device_id,
                            candidate_id, candidate_we_vote_id,
                            measure_id, measure_we_vote_id,
                            stance_we_are_looking_for):
    """Dispatch a ballot-item position count to the candidate or measure
    helper, after resolving the voter from ``voter_device_id``.

    Every exit point is an HttpResponse carrying a JSON payload; validation
    failures short-circuit with an error status and ``success: False``.
    """
    def _json_response(payload):
        # All branches reply with the same JSON content type.
        return HttpResponse(json.dumps(payload), content_type='application/json')

    # Get voter_id from the voter_device_id so we can know who is supporting/opposing
    results = is_voter_device_id_valid(voter_device_id)
    if not results['success']:
        return _json_response({
            'status': 'VALID_VOTER_DEVICE_ID_MISSING',
            'success': False,
        })

    voter_id = fetch_voter_id_from_voter_device_link(voter_device_id)
    if not positive_value_exists(voter_id):
        return _json_response({
            'status': "VALID_VOTER_ID_MISSING ",
            'success': False,
        })

    show_positions_this_voter_follows = True
    if positive_value_exists(candidate_id) or positive_value_exists(candidate_we_vote_id):
        results = positions_count_for_candidate(voter_id,
                                                candidate_id, candidate_we_vote_id,
                                                stance_we_are_looking_for,
                                                show_positions_this_voter_follows)
        return _json_response(results['json_data'])
    elif positive_value_exists(measure_id) or positive_value_exists(measure_we_vote_id):
        results = positions_count_for_contest_measure(voter_id,
                                                      measure_id, measure_we_vote_id,
                                                      stance_we_are_looking_for,
                                                      show_positions_this_voter_follows)
        return _json_response(results['json_data'])
    else:
        # Neither a candidate nor a measure identifier was supplied.
        return _json_response({
            'status': 'UNABLE_TO_RETRIEVE-CANDIDATE_ID_AND_MEASURE_ID_MISSING',
            'success': False,
        })
def positions_count_for_candidate(voter_id, candidate_id, candidate_we_vote_id, stance_we_are_looking_for,
                                  show_positions_this_voter_follows=True):
    """
    We want to return a JSON file with the number of orgs, friends and public figures the voter follows who support
    this particular candidate's campaign

    Returns ``{'json_data': {...}}`` where json_data carries status, success,
    the count, both candidate identifiers and kind_of_ballot_item.
    When ``show_positions_this_voter_follows`` is False, the count instead
    covers public positions from organizations the voter does NOT follow.
    """
    # Since we can take in either candidate_id or candidate_we_vote_id, we need to retrieve the candidate object
    # so we make sure we have both of these values to return
    if positive_value_exists(candidate_id):
        candidate_manager = CandidateManager()
        results = candidate_manager.retrieve_candidate_from_id(candidate_id)
        if results['candidate_found']:
            candidate = results['candidate']
            candidate_we_vote_id = candidate.we_vote_id
    elif positive_value_exists(candidate_we_vote_id):
        candidate_manager = CandidateManager()
        results = candidate_manager.retrieve_candidate_from_we_vote_id(candidate_we_vote_id)
        if results['candidate_found']:
            candidate = results['candidate']
            candidate_id = candidate.id
    # NOTE(review): if neither lookup succeeds, the caller-supplied id values
    # are used as-is in the queries below.
    position_list_manager = PositionListManager()
    ############################
    # Retrieve public positions
    retrieve_public_positions_now = True  # The alternate is positions for friends-only
    most_recent_only = True
    public_positions_list_for_candidate = \
        position_list_manager.retrieve_all_positions_for_candidate(
            retrieve_public_positions_now, candidate_id, candidate_we_vote_id,
            stance_we_are_looking_for, most_recent_only
        )
    # Only resolve the voter's followed organizations when there is at least
    # one public position to filter against.
    organizations_followed_by_voter_by_id = []
    if len(public_positions_list_for_candidate):
        follow_organization_list_manager = FollowOrganizationList()
        organizations_followed_by_voter_by_id = \
            follow_organization_list_manager.retrieve_follow_organization_by_voter_id_simple_id_array(voter_id)
    if show_positions_this_voter_follows:
        position_objects = position_list_manager.calculate_positions_followed_by_voter(
            voter_id, public_positions_list_for_candidate, organizations_followed_by_voter_by_id)
        ##################################
        # Now retrieve friend's positions
        voter_manager = VoterManager()
        voter_results = voter_manager.retrieve_voter_by_id(voter_id)
        if voter_results['voter_found']:
            voter = voter_results['voter']
            voter_we_vote_id = voter.we_vote_id
        else:
            voter_we_vote_id = ""
        friends_we_vote_id_list = []
        if positive_value_exists(voter_we_vote_id):
            # Same retrieval entry point as above, re-run in friends-only
            # mode (the two flag variables are deliberately reassigned here).
            retrieve_public_positions_now = False  # Retrieve positions intended for friends-only
            most_recent_only = False
            friend_manager = FriendManager()
            friend_results = friend_manager.retrieve_friends_we_vote_id_list(voter_we_vote_id)
            if friend_results['friends_we_vote_id_list_found']:
                friends_we_vote_id_list = friend_results['friends_we_vote_id_list']
            # Add yourself as a friend so your opinions show up
            friends_we_vote_id_list.append(voter_we_vote_id)
            friends_positions_list_for_candidate = \
                position_list_manager.retrieve_all_positions_for_candidate(
                    retrieve_public_positions_now, candidate_id, candidate_we_vote_id,
                    stance_we_are_looking_for, most_recent_only,
                    friends_we_vote_id_list)
            if len(friends_positions_list_for_candidate):
                # Friends' positions are prepended to the followed public ones.
                position_objects = friends_positions_list_for_candidate + position_objects
        positions_followed_count = len(position_objects)
        json_data = {
            'status': 'SUCCESSFUL_RETRIEVE_OF_POSITIONS_FOLLOWED_COUNT_FOR_CANDIDATE',
            'success': True,
            'count': positions_followed_count,
            'ballot_item_id': convert_to_int(candidate_id),
            'ballot_item_we_vote_id': candidate_we_vote_id,
            'kind_of_ballot_item': CANDIDATE,
        }
        results = {
            'json_data': json_data,
        }
        return results
    else:
        # Inverse query: count public positions from organizations the voter
        # does NOT follow (friends-only positions are not consulted here).
        positions_not_followed = position_list_manager.calculate_positions_not_followed_by_voter(
            public_positions_list_for_candidate, organizations_followed_by_voter_by_id)
        positions_not_followed_count = len(positions_not_followed)
        json_data = {
            'status': 'SUCCESSFUL_RETRIEVE_OF_POSITIONS_NOT_FOLLOWED_COUNT_FOR_CANDIDATE',
            'success': True,
            'count': positions_not_followed_count,
            'ballot_item_id': convert_to_int(candidate_id),
            'ballot_item_we_vote_id': candidate_we_vote_id,
            'kind_of_ballot_item': CANDIDATE,
        }
        results = {
            'json_data': json_data,
        }
        return results
def positions_count_for_contest_measure(voter_id, measure_id, measure_we_vote_id, stance_we_are_looking_for,
                                        show_positions_this_voter_follows=True):
    """
    We want to return a JSON file with the number of orgs, friends and public figures the voter follows who support
    this particular measure.

    :param voter_id: local id of the voter whose network the count is scoped to
    :param measure_id: local id of the contest measure (either this or measure_we_vote_id may be supplied)
    :param measure_we_vote_id: we_vote_id of the contest measure
    :param stance_we_are_looking_for: stance constant to count (e.g. SUPPORT or OPPOSE)
    :param show_positions_this_voter_follows: when True, count positions from the voter's network
        (followed orgs plus friends); when False, count public positions from orgs NOT followed
    :return: {'json_data': ...} where json_data carries 'count' plus both measure identifiers
    """
    # Since we can take in either measure_id or measure_we_vote_id, we need to retrieve the measure object
    # so we make sure we have both of these values to return
    if positive_value_exists(measure_id):
        contest_measure_manager = ContestMeasureManager()
        results = contest_measure_manager.retrieve_contest_measure_from_id(measure_id)
        if results['contest_measure_found']:
            contest_measure = results['contest_measure']
            measure_we_vote_id = contest_measure.we_vote_id
    elif positive_value_exists(measure_we_vote_id):
        contest_measure_manager = ContestMeasureManager()
        results = contest_measure_manager.retrieve_contest_measure_from_we_vote_id(measure_we_vote_id)
        if results['contest_measure_found']:
            contest_measure = results['contest_measure']
            measure_id = contest_measure.id
    position_list_manager = PositionListManager()
    ############################
    # Retrieve public positions
    retrieve_public_positions_now = True  # The alternate is positions for friends-only
    most_recent_only = True
    public_positions_list_for_contest_measure = \
        position_list_manager.retrieve_all_positions_for_contest_measure(
            retrieve_public_positions_now, measure_id, measure_we_vote_id,
            stance_we_are_looking_for, most_recent_only)
    # Only look up the voter's followed organizations when there are public positions to filter
    organizations_followed_by_voter_by_id = []
    if len(public_positions_list_for_contest_measure):
        follow_organization_list_manager = FollowOrganizationList()
        organizations_followed_by_voter_by_id = \
            follow_organization_list_manager.retrieve_follow_organization_by_voter_id_simple_id_array(voter_id)
    if show_positions_this_voter_follows:
        # Reduce the public positions to those from organizations this voter follows
        position_objects = position_list_manager.calculate_positions_followed_by_voter(
            voter_id, public_positions_list_for_contest_measure, organizations_followed_by_voter_by_id)
        ##################################
        # Now retrieve friend's positions
        voter_manager = VoterManager()
        voter_results = voter_manager.retrieve_voter_by_id(voter_id)
        if voter_results['voter_found']:
            voter = voter_results['voter']
            voter_we_vote_id = voter.we_vote_id
        else:
            voter_we_vote_id = ""
        friends_we_vote_id_list = []
        if positive_value_exists(voter_we_vote_id):
            retrieve_public_positions_now = False  # Retrieve positions intended for friends-only
            most_recent_only = False
            friend_manager = FriendManager()
            friend_results = friend_manager.retrieve_friends_we_vote_id_list(voter_we_vote_id)
            if friend_results['friends_we_vote_id_list_found']:
                friends_we_vote_id_list = friend_results['friends_we_vote_id_list']
            # Add yourself as a friend so your opinions show up
            friends_we_vote_id_list.append(voter_we_vote_id)
            friends_positions_list_for_contest_measure = \
                position_list_manager.retrieve_all_positions_for_contest_measure(
                    retrieve_public_positions_now, measure_id, measure_we_vote_id,
                    stance_we_are_looking_for, most_recent_only,
                    friends_we_vote_id_list)
            if len(friends_positions_list_for_contest_measure):
                # Friends' positions count alongside the followed public positions
                position_objects = friends_positions_list_for_contest_measure + position_objects
        positions_followed_count = len(position_objects)
        json_data = {
            'status': 'SUCCESSFUL_RETRIEVE_OF_POSITION_COUNT_FOR_CONTEST_MEASURE',
            'success': True,
            'count': positions_followed_count,
            'ballot_item_id': convert_to_int(measure_id),
            'ballot_item_we_vote_id': measure_we_vote_id,
            'kind_of_ballot_item': MEASURE,
        }
        results = {
            'json_data': json_data,
        }
        return results
    else:
        # Count public positions from organizations the voter is NOT following
        positions_not_followed = position_list_manager.calculate_positions_not_followed_by_voter(
            public_positions_list_for_contest_measure, organizations_followed_by_voter_by_id)
        positions_not_followed_count = len(positions_not_followed)
        json_data = {
            'status': 'SUCCESSFUL_RETRIEVE_OF_POSITIONS_NOT_FOLLOWED_COUNT_FOR_CONTEST_MEASURE',
            'success': True,
            'count': positions_not_followed_count,
            'ballot_item_id': convert_to_int(measure_id),
            'ballot_item_we_vote_id': measure_we_vote_id,
            'kind_of_ballot_item': MEASURE,
        }
        results = {
            'json_data': json_data,
        }
        return results
def position_support_count_for_ballot_item_for_api(voter_device_id,
                                                   candidate_id, candidate_we_vote_id,
                                                   measure_id, measure_we_vote_id):
    """Thin wrapper: count SUPPORT positions for one ballot item via positions_count_for_api."""
    return positions_count_for_api(
        voter_device_id,
        candidate_id, candidate_we_vote_id,
        measure_id, measure_we_vote_id,
        SUPPORT)
def position_public_oppose_count_for_ballot_item_for_api(candidate_id, candidate_we_vote_id,
                                                         measure_id, measure_we_vote_id):
    """Thin wrapper: count public OPPOSE positions for one ballot item."""
    return positions_public_count_for_api(
        candidate_id, candidate_we_vote_id,
        measure_id, measure_we_vote_id,
        OPPOSE)
def position_public_support_count_for_ballot_item_for_api(candidate_id, candidate_we_vote_id,
                                                          measure_id, measure_we_vote_id):
    """Thin wrapper: count public SUPPORT positions for one ballot item."""
    return positions_public_count_for_api(
        candidate_id, candidate_we_vote_id,
        measure_id, measure_we_vote_id,
        SUPPORT)
def positions_count_for_all_ballot_items_for_api(  # positionsCountForAllBallotItems
        voter_device_id, google_civic_election_id=0):
    """
    We want to return a JSON file with the list of the support and oppose counts from the orgs, friends and
    public figures the voter follows.

    :param voter_device_id: device identifier used to resolve the voter
    :param google_civic_election_id: election to count within; when 0, the election the voter is
        currently watching is looked up instead
    :return: json_data dict with 'position_counts_list', one entry per candidate or measure
    """
    status = ""
    # Get voter_id from the voter_device_id so we can know whose bookmarks to retrieve
    results = is_voter_device_id_valid(voter_device_id)
    if not results['success']:
        json_data = {
            'status': "VALID_VOTER_DEVICE_ID_MISSING-COUNT_FOR_ALL_BALLOT_ITEMS",
            'success': False,
            'google_civic_election_id': google_civic_election_id,
            'position_counts_list': [],
        }
        return json_data
    voter_manager = VoterManager()
    voter_results = voter_manager.retrieve_voter_from_voter_device_id(voter_device_id)
    if voter_results['voter_found']:
        voter = voter_results['voter']
        voter_id = voter.id
        voter_we_vote_id = voter.we_vote_id
    else:
        voter_id = 0
        voter_we_vote_id = ""
    if not positive_value_exists(voter_id):
        json_data = {
            'status': "VALID_VOTER_ID_MISSING-COUNT_FOR_ALL_BALLOT_ITEMS",
            'success': False,
            'google_civic_election_id': google_civic_election_id,
            'position_counts_list': [],
        }
        return json_data
    position_list_manager = PositionListManager()
    candidate_list_object = CandidateListManager()
    follow_organization_list_manager = FollowOrganizationList()
    return_we_vote_id = True
    organizations_followed_by_voter_by_we_vote_id = \
        follow_organization_list_manager.retrieve_follow_organization_by_voter_id_simple_id_array(
            voter_id, return_we_vote_id, read_only=True)
    # Get a list of all candidates and measures from this election (in the active election)
    ballot_item_list_manager = BallotItemListManager()
    if positive_value_exists(google_civic_election_id):
        results = ballot_item_list_manager.retrieve_all_ballot_items_for_voter(
            voter_id, google_civic_election_id, read_only=True)
        status += results['status']
        ballot_item_list = results['ballot_item_list']
    else:
        # Look up the current google_civic_election_id for this voter
        results = figure_out_google_civic_election_id_voter_is_watching(voter_device_id)
        google_civic_election_id_local_scope = results['google_civic_election_id']
        # A google_civic_election_id is required to do any more work here (This API requires too much CPU work
        # when a google_civic_election_id is missing.)
        if not positive_value_exists(google_civic_election_id_local_scope):
            json_data = {
                'status': "VALID_GOOGLE_CIVIC_ELECTION_ID_MISSING-COUNT_FOR_ALL_BALLOT_ITEMS",
                'success': False,
                'google_civic_election_id': google_civic_election_id_local_scope,
                'position_counts_list': [],
            }
            return json_data
        results = ballot_item_list_manager.retrieve_all_ballot_items_for_voter(
            voter_id, google_civic_election_id_local_scope, read_only=True)
        status += results['status']
        ballot_item_list = results['ballot_item_list']
        google_civic_election_id = google_civic_election_id_local_scope
    # The list where we capture results
    position_counts_list_results = []
    friends_we_vote_id_list = []
    if positive_value_exists(voter_we_vote_id):
        friend_manager = FriendManager()
        friend_results = friend_manager.retrieve_friends_we_vote_id_list(voter_we_vote_id)
        if friend_results['friends_we_vote_id_list_found']:
            friends_we_vote_id_list = friend_results['friends_we_vote_id_list']
        # Add yourself as a friend so your opinions show up
        friends_we_vote_id_list.append(voter_we_vote_id)
    # ballot_item_list is populated with contest_office and contest_measure entries
    for one_ballot_item in ballot_item_list:
        # Retrieve all positions for each ballot item
        if one_ballot_item.is_contest_office():
            # For an office, counts are reported per-candidate rather than for the office itself
            results = candidate_list_object.retrieve_all_candidates_for_office(
                office_we_vote_id=one_ballot_item.contest_office_we_vote_id, read_only=True)
            success = results['success']
            candidate_list = results['candidate_list']
            if success:
                for candidate in candidate_list:
                    # Loop through all candidates under this office
                    # Public Positions
                    public_support_count_for_one_ballot_item = \
                        position_list_manager.fetch_positions_count_for_candidate(
                            0,
                            candidate.we_vote_id,
                            SUPPORT,
                            PUBLIC_ONLY,
                            organizations_followed_we_vote_id_list=organizations_followed_by_voter_by_we_vote_id
                        )
                    public_oppose_count_for_one_ballot_item = \
                        position_list_manager.fetch_positions_count_for_candidate(
                            0,
                            candidate.we_vote_id,
                            OPPOSE,
                            PUBLIC_ONLY,
                            organizations_followed_we_vote_id_list=organizations_followed_by_voter_by_we_vote_id
                        )
                    # Friend's-only Positions
                    friends_only_support_count_for_one_ballot_item = \
                        position_list_manager.fetch_positions_count_for_candidate(
                            0,
                            candidate.we_vote_id,
                            SUPPORT,
                            FRIENDS_ONLY,
                            friends_we_vote_id_list=friends_we_vote_id_list
                        )
                    friends_only_oppose_count_for_one_ballot_item = \
                        position_list_manager.fetch_positions_count_for_candidate(
                            0,
                            candidate.we_vote_id,
                            OPPOSE,
                            FRIENDS_ONLY,
                            friends_we_vote_id_list=friends_we_vote_id_list
                        )
                    # Totals for this candidate are public counts plus friends-only counts
                    support_count_for_one_ballot_item = public_support_count_for_one_ballot_item + \
                        friends_only_support_count_for_one_ballot_item
                    oppose_count_for_one_ballot_item = public_oppose_count_for_one_ballot_item + \
                        friends_only_oppose_count_for_one_ballot_item
                    one_ballot_item_results = {
                        'ballot_item_we_vote_id': candidate.we_vote_id,
                        'support_count': support_count_for_one_ballot_item,
                        'oppose_count': oppose_count_for_one_ballot_item,
                    }
                    position_counts_list_results.append(one_ballot_item_results)
        elif one_ballot_item.is_contest_measure():
            # Measures retrieve full position lists (not count fetches) and combine them below
            # Public Positions
            retrieve_public_positions_now = True  # The alternate is positions for friends-only
            most_recent_only = True
            public_support_positions_list_for_one_ballot_item = \
                position_list_manager.retrieve_all_positions_for_contest_measure(
                    retrieve_public_positions_now, 0, one_ballot_item.contest_measure_we_vote_id,
                    SUPPORT, most_recent_only,
                    organizations_followed_we_vote_id_list=organizations_followed_by_voter_by_we_vote_id,
                    read_only=True)
            public_oppose_positions_list_for_one_ballot_item = \
                position_list_manager.retrieve_all_positions_for_contest_measure(
                    retrieve_public_positions_now, 0, one_ballot_item.contest_measure_we_vote_id,
                    OPPOSE, most_recent_only,
                    organizations_followed_we_vote_id_list=organizations_followed_by_voter_by_we_vote_id,
                    read_only=True)
            # Friend's-only Positions
            retrieve_public_positions_now = False  # Return friends-only positions counts
            most_recent_only = True
            friends_support_positions_list_for_one_ballot_item = \
                position_list_manager.retrieve_all_positions_for_contest_measure(
                    retrieve_public_positions_now, 0, one_ballot_item.contest_measure_we_vote_id,
                    SUPPORT, most_recent_only, friends_we_vote_id_list, read_only=True)
            friends_oppose_positions_list_for_one_ballot_item = \
                position_list_manager.retrieve_all_positions_for_contest_measure(
                    retrieve_public_positions_now, 0, one_ballot_item.contest_measure_we_vote_id,
                    OPPOSE, most_recent_only, friends_we_vote_id_list, read_only=True)
            support_positions_list_for_one_ballot_item = public_support_positions_list_for_one_ballot_item + \
                friends_support_positions_list_for_one_ballot_item
            oppose_positions_list_for_one_ballot_item = public_oppose_positions_list_for_one_ballot_item + \
                friends_oppose_positions_list_for_one_ballot_item
            one_ballot_item_results = {
                'ballot_item_we_vote_id': one_ballot_item.contest_measure_we_vote_id,
                'support_count': len(support_positions_list_for_one_ballot_item),
                'oppose_count': len(oppose_positions_list_for_one_ballot_item),
            }
            position_counts_list_results.append(one_ballot_item_results)
        else:
            # Skip the rest of this loop
            continue
    json_data = {
        'success': True,
        'status': "POSITIONS_COUNT_FOR_ALL_BALLOT_ITEMS",
        'google_civic_election_id': google_civic_election_id,
        'position_counts_list': position_counts_list_results,
    }
    return json_data
def positions_count_for_one_ballot_item_for_api(voter_device_id, ballot_item_we_vote_id):
    """
    Return a JSON-ready dict with the support and oppose counts (and who holds those positions)
    for one ballot item, drawn from the orgs, friends and public figures the voter follows.

    :param voter_device_id: device identifier used to resolve the voter
    :param ballot_item_we_vote_id: we_vote_id of one candidate ("cand" in the id) or
        measure ("meas" in the id)
    :return: json_data dict with 'position_counts_list' holding at most one entry
    """
    status = "POSITIONS_COUNT_FOR_ONE_BALLOT_ITEM-ENTERING "
    # Get voter_id from the voter_device_id so we can know whose follows/friends to use
    results = is_voter_device_id_valid(voter_device_id)
    if not results['success']:
        json_data = {
            'status': "VALID_VOTER_DEVICE_ID_MISSING-COUNT_FOR_ONE_BALLOT_ITEM",
            'success': False,
            'ballot_item_we_vote_id': ballot_item_we_vote_id,
            'position_counts_list': [],
        }
        return json_data
    voter_manager = VoterManager()
    voter_results = voter_manager.retrieve_voter_from_voter_device_id(voter_device_id)
    if voter_results['voter_found']:
        voter = voter_results['voter']
        voter_id = voter.id
        voter_we_vote_id = voter.we_vote_id
    else:
        voter_id = 0
        voter_we_vote_id = ""
    if not positive_value_exists(voter_id):
        json_data = {
            'status': "VALID_VOTER_ID_MISSING-COUNT_FOR_ONE_BALLOT_ITEM",
            'success': False,
            'ballot_item_we_vote_id': ballot_item_we_vote_id,
            'position_counts_list': [],
        }
        return json_data
    if not positive_value_exists(ballot_item_we_vote_id):
        json_data = {
            'status': "VALID_BALLOT_ITEM_WE_VOTE_ID_MISSING-COUNT_FOR_ONE_BALLOT_ITEM",
            'success': False,
            'ballot_item_we_vote_id': ballot_item_we_vote_id,
            'position_counts_list': [],
        }
        return json_data
    position_list_manager = PositionListManager()
    show_positions_this_voter_follows = True
    position_counts_list_results = []
    follow_organization_list_manager = FollowOrganizationList()
    organizations_followed_by_voter_by_id = \
        follow_organization_list_manager.retrieve_follow_organization_by_voter_id_simple_id_array(voter_id)
    friends_we_vote_id_list = []
    if positive_value_exists(voter_we_vote_id):
        friend_manager = FriendManager()
        friend_results = friend_manager.retrieve_friends_we_vote_id_list(voter_we_vote_id)
        if friend_results['friends_we_vote_id_list_found']:
            friends_we_vote_id_list = friend_results['friends_we_vote_id_list']
        # Add yourself as a friend so your opinions show up
        friends_we_vote_id_list.append(voter_we_vote_id)
    # Figure out if this ballot_item is a candidate or measure. The counting logic is
    # identical for both; only the PositionListManager retrieval method differs, so the
    # shared work lives in _tally_positions_for_one_ballot_item below.
    if "cand" in ballot_item_we_vote_id:  # Is a Candidate
        retrieve_all_positions = position_list_manager.retrieve_all_positions_for_candidate
    elif "meas" in ballot_item_we_vote_id:  # Is a Measure
        retrieve_all_positions = position_list_manager.retrieve_all_positions_for_contest_measure
    else:
        retrieve_all_positions = None
    if retrieve_all_positions is not None:
        one_ballot_item_results = _tally_positions_for_one_ballot_item(
            retrieve_all_positions, ballot_item_we_vote_id,
            voter_id, voter_we_vote_id, show_positions_this_voter_follows,
            organizations_followed_by_voter_by_id, friends_we_vote_id_list)
        position_counts_list_results.append(one_ballot_item_results)
        success = True
    else:
        # The ballot_item_we_vote_id is not for a candidate or measure
        success = False
    json_data = {
        'success': success,
        'status': status,
        'ballot_item_we_vote_id': ballot_item_we_vote_id,
        'position_counts_list': position_counts_list_results,
    }
    return json_data


def _record_positions_for_count(position_list, we_vote_id_list, name_list,
                                voter_id, voter_we_vote_id, id_attribute):
    """
    Append each position's speaker identifier (read via id_attribute) and display name to the
    output lists, and refresh the voter's position network score for that position.
    """
    for one_position in position_list:
        we_vote_id_list.append(getattr(one_position, id_attribute))
        name_list.append(one_position.speaker_display_name)
        update_or_create_position_network_score_wrapper(
            voter_id, voter_we_vote_id, one_position)


def _tally_positions_for_one_ballot_item(retrieve_all_positions, ballot_item_we_vote_id,
                                         voter_id, voter_we_vote_id, show_positions_this_voter_follows,
                                         organizations_followed_by_voter_by_id, friends_we_vote_id_list):
    """
    Count and identify the support/oppose positions for one candidate or measure.

    :param retrieve_all_positions: the bound PositionListManager retrieval method for this
        ballot item type (retrieve_all_positions_for_candidate or ..._for_contest_measure)
    :return: one position_counts_list entry dict for this ballot item
    """
    support_we_vote_id_list = []
    support_name_list = []
    oppose_we_vote_id_list = []
    oppose_name_list = []
    # #################
    # Public Positions
    retrieve_public_positions_now = True  # The alternate is positions for friends-only
    most_recent_only = True
    public_support_positions_list_for_one_ballot_item = retrieve_all_positions(
        retrieve_public_positions_now, 0, ballot_item_we_vote_id,
        SUPPORT, most_recent_only)
    public_oppose_positions_list_for_one_ballot_item = retrieve_all_positions(
        retrieve_public_positions_now, 0, ballot_item_we_vote_id,
        OPPOSE, most_recent_only)
    # Filter to show positions of the orgs you are following
    public_results = finalize_support_and_oppose_positions_count(
        voter_id, show_positions_this_voter_follows,
        organizations_followed_by_voter_by_id, friends_we_vote_id_list,
        public_support_positions_list_for_one_ballot_item,
        public_oppose_positions_list_for_one_ballot_item)
    _record_positions_for_count(
        public_results['support_positions_followed'], support_we_vote_id_list, support_name_list,
        voter_id, voter_we_vote_id, 'organization_we_vote_id')
    _record_positions_for_count(
        public_results['oppose_positions_followed'], oppose_we_vote_id_list, oppose_name_list,
        voter_id, voter_we_vote_id, 'organization_we_vote_id')
    # ########################
    # Friend's-only Positions
    retrieve_public_positions_now = False  # Return friends-only positions counts
    most_recent_only = True
    friends_only_support_positions_list_for_one_ballot_item = retrieve_all_positions(
        retrieve_public_positions_now, 0, ballot_item_we_vote_id,
        SUPPORT, most_recent_only, friends_we_vote_id_list=friends_we_vote_id_list)
    friends_only_oppose_positions_list_for_one_ballot_item = retrieve_all_positions(
        retrieve_public_positions_now, 0, ballot_item_we_vote_id,
        OPPOSE, most_recent_only, friends_we_vote_id_list=friends_we_vote_id_list)
    # Filter to show friend's positions
    friends_results = finalize_support_and_oppose_positions_count(
        voter_id, show_positions_this_voter_follows,
        organizations_followed_by_voter_by_id, friends_we_vote_id_list,
        friends_only_support_positions_list_for_one_ballot_item,
        friends_only_oppose_positions_list_for_one_ballot_item)
    # TODO: I think we might want to use organization_we_vote_id instead of voter_we_vote_id
    #  for friends' positions, but this needs to be checked
    _record_positions_for_count(
        friends_results['support_positions_followed'], support_we_vote_id_list, support_name_list,
        voter_id, voter_we_vote_id, 'voter_we_vote_id')
    _record_positions_for_count(
        friends_results['oppose_positions_followed'], oppose_we_vote_id_list, oppose_name_list,
        voter_id, voter_we_vote_id, 'voter_we_vote_id')
    # Now calculate the total counts over the combined public + friends-only lists
    finalize_results = finalize_support_and_oppose_positions_count(
        voter_id, show_positions_this_voter_follows,
        organizations_followed_by_voter_by_id, friends_we_vote_id_list,
        public_support_positions_list_for_one_ballot_item +
        friends_only_support_positions_list_for_one_ballot_item,
        public_oppose_positions_list_for_one_ballot_item +
        friends_only_oppose_positions_list_for_one_ballot_item)
    return {
        'ballot_item_we_vote_id': ballot_item_we_vote_id,
        'support_count': finalize_results['support_positions_count'],
        'oppose_count': finalize_results['oppose_positions_count'],
        'support_we_vote_id_list': support_we_vote_id_list,
        'support_name_list': support_name_list,
        'oppose_we_vote_id_list': oppose_we_vote_id_list,
        'oppose_name_list': oppose_name_list,
    }
def finalize_support_and_oppose_positions_count(voter_id, show_positions_this_voter_follows,
                                                organizations_followed_by_voter_by_id, friends_we_vote_id_list,
                                                support_positions_list_for_one_ballot_item,
                                                oppose_positions_list_for_one_ballot_item):
    """
    Filter the incoming support and oppose position lists against the voter's network and
    return the resulting counts.

    When show_positions_this_voter_follows is True, the counts cover positions from followed
    organizations and friends, and the filtered position lists are returned as well. When
    False, the counts cover positions NOT in the voter's network, and the returned
    '..._followed' lists stay empty.
    """
    position_list_manager = PositionListManager()
    support_positions_followed = []
    oppose_positions_followed = []
    if show_positions_this_voter_follows:
        support_positions_followed = position_list_manager.calculate_positions_followed_by_voter(
            voter_id, support_positions_list_for_one_ballot_item, organizations_followed_by_voter_by_id,
            friends_we_vote_id_list)
        oppose_positions_followed = position_list_manager.calculate_positions_followed_by_voter(
            voter_id, oppose_positions_list_for_one_ballot_item, organizations_followed_by_voter_by_id,
            friends_we_vote_id_list)
        support_positions_count = len(support_positions_followed)
        oppose_positions_count = len(oppose_positions_followed)
    else:
        # Only the counts are needed here; the filtered lists are not returned in this mode
        support_positions_count = len(position_list_manager.calculate_positions_not_followed_by_voter(
            support_positions_list_for_one_ballot_item, organizations_followed_by_voter_by_id,
            friends_we_vote_id_list))
        oppose_positions_count = len(position_list_manager.calculate_positions_not_followed_by_voter(
            oppose_positions_list_for_one_ballot_item, organizations_followed_by_voter_by_id,
            friends_we_vote_id_list))
    return {
        'support_positions_count': support_positions_count,
        'support_positions_followed': support_positions_followed,
        'oppose_positions_count': oppose_positions_count,
        'oppose_positions_followed': oppose_positions_followed,
    }
def positions_public_count_for_api(candidate_id, candidate_we_vote_id, measure_id, measure_we_vote_id,
                                   stance_we_are_looking_for):
    """
    Route a public-position count request to the candidate or measure handler and wrap the
    result as a JSON HttpResponse.

    :param candidate_id: local candidate id (candidate identifiers take precedence when set)
    :param candidate_we_vote_id: candidate we_vote_id
    :param measure_id: local contest measure id
    :param measure_we_vote_id: contest measure we_vote_id
    :param stance_we_are_looking_for: stance constant to count (e.g. SUPPORT or OPPOSE)
    :return: HttpResponse with the count payload, or an error payload when no identifier was given
    """
    if positive_value_exists(candidate_id) or positive_value_exists(candidate_we_vote_id):
        results = positions_public_count_for_candidate(candidate_id, candidate_we_vote_id,
                                                       stance_we_are_looking_for)
        return HttpResponse(json.dumps(results['json_data']), content_type='application/json')
    if positive_value_exists(measure_id) or positive_value_exists(measure_we_vote_id):
        results = positions_public_count_for_contest_measure(measure_id, measure_we_vote_id,
                                                             stance_we_are_looking_for)
        return HttpResponse(json.dumps(results['json_data']), content_type='application/json')
    # Neither a candidate nor a measure identifier was supplied
    # (removed a dead `else: pass` that previously fell through to this error payload)
    json_data = {
        'status': 'UNABLE_TO_RETRIEVE-CANDIDATE_ID_AND_MEASURE_ID_MISSING',
        'success': False,
    }
    return HttpResponse(json.dumps(json_data), content_type='application/json')
def positions_public_count_for_candidate(candidate_id, candidate_we_vote_id, stance_we_are_looking_for):
    """
    Build {'json_data': ...} with the number of public positions (orgs and public figures)
    holding the requested stance on this candidate's campaign.

    Designed to make only two database calls; all other work happens in the application layer.
    """
    # One count query covers the entire public position set for this candidate
    position_list_manager = PositionListManager()
    all_positions_count_for_candidate = \
        position_list_manager.fetch_public_positions_count_for_candidate(
            candidate_id,
            candidate_we_vote_id,
            stance_we_are_looking_for)
    # Callers may pass either identifier; fill in whichever one is missing so both are returned
    if positive_value_exists(candidate_id) or positive_value_exists(candidate_we_vote_id):
        candidate_manager = CandidateManager()
        if positive_value_exists(candidate_id):
            candidate_we_vote_id = candidate_manager.fetch_candidate_we_vote_id_from_id(candidate_id)
        elif positive_value_exists(candidate_we_vote_id):
            candidate_id = candidate_manager.fetch_candidate_id_from_we_vote_id(candidate_we_vote_id)
    json_data = {
        'status': 'SUCCESSFUL_RETRIEVE_OF_PUBLIC_POSITION_COUNT_RE_CANDIDATE',
        'success': True,
        'count': all_positions_count_for_candidate,
        'ballot_item_id': convert_to_int(candidate_id),
        'ballot_item_we_vote_id': candidate_we_vote_id,
        'kind_of_ballot_item': CANDIDATE,
    }
    return {
        'json_data': json_data,
    }
def positions_public_count_for_contest_measure(measure_id, measure_we_vote_id, stance_we_are_looking_for):
    """
    Build {'json_data': ...} with the number of public positions (orgs and public figures)
    holding the requested stance on this measure.

    Designed to make only two database calls; all other work happens in the application layer.
    """
    # One count query covers the entire public position set for this measure
    position_list_manager = PositionListManager()
    all_positions_count_for_contest_measure = \
        position_list_manager.fetch_public_positions_count_for_contest_measure(
            measure_id, measure_we_vote_id, stance_we_are_looking_for)
    # Callers may pass either identifier; fill in whichever one is missing so both are returned
    if positive_value_exists(measure_id) or positive_value_exists(measure_we_vote_id):
        contest_measure_manager = ContestMeasureManager()
        if positive_value_exists(measure_id):
            measure_we_vote_id = contest_measure_manager.fetch_contest_measure_we_vote_id_from_id(measure_id)
        elif positive_value_exists(measure_we_vote_id):
            measure_id = contest_measure_manager.fetch_contest_measure_id_from_we_vote_id(measure_we_vote_id)
    json_data = {
        'status': 'SUCCESSFUL_RETRIEVE_OF_PUBLIC_POSITION_COUNT_FOR_CONTEST_MEASURE',
        'success': True,
        'count': all_positions_count_for_contest_measure,
        'ballot_item_id': convert_to_int(measure_id),
        'ballot_item_we_vote_id': measure_we_vote_id,
        'kind_of_ballot_item': MEASURE,
    }
    return {
        'json_data': json_data,
    }
def voter_opposing_save(voter_device_id, candidate_id, candidate_we_vote_id,  # voterOpposingSave
                        measure_id, measure_we_vote_id, user_agent_string, user_agent_object):
    """
    Record that this voter opposes one ballot item (candidate OR measure) and return
    the outcome as a JSON HttpResponse.
    """
    def _json_response(status, success, ballot_item_id=0, ballot_item_we_vote_id='',
                       kind_of_ballot_item='', position_we_vote_id=''):
        # Every exit path returns the same JSON shape; build it in one place.
        return HttpResponse(json.dumps({
            'status': status,
            'success': success,
            'ballot_item_id': ballot_item_id,
            'ballot_item_we_vote_id': ballot_item_we_vote_id,
            'kind_of_ballot_item': kind_of_ballot_item,
            'position_we_vote_id': position_we_vote_id,
        }), content_type='application/json')

    # Resolve the voter from the device id so we know who is supporting/opposing
    device_results = is_voter_device_id_valid(voter_device_id)
    if not device_results['success']:
        return _json_response('VALID_VOTER_DEVICE_ID_MISSING', False)
    voter_id = fetch_voter_id_from_voter_device_link(voter_device_id)
    if not positive_value_exists(voter_id):
        return _json_response("VALID_VOTER_ID_MISSING", False)
    position_manager = PositionManager()
    if positive_value_exists(candidate_id) or positive_value_exists(candidate_we_vote_id):
        # Either identifier may arrive; look up the one that is missing
        candidate_manager = CandidateManager()
        if positive_value_exists(candidate_id):
            candidate_we_vote_id = candidate_manager.fetch_candidate_we_vote_id_from_id(candidate_id)
        elif positive_value_exists(candidate_we_vote_id):
            candidate_id = candidate_manager.fetch_candidate_id_from_we_vote_id(candidate_we_vote_id)
        toggle_results = position_manager.toggle_on_voter_oppose_for_candidate(
            voter_id, candidate_id, user_agent_string, user_agent_object)
        return _json_response("OPPOSING_CANDIDATE " + toggle_results['status'],
                              toggle_results['success'],
                              ballot_item_id=convert_to_int(candidate_id),
                              ballot_item_we_vote_id=candidate_we_vote_id,
                              kind_of_ballot_item=CANDIDATE,
                              position_we_vote_id=toggle_results['position_we_vote_id'])
    elif positive_value_exists(measure_id) or positive_value_exists(measure_we_vote_id):
        # Either identifier may arrive; look up the one that is missing
        contest_measure_manager = ContestMeasureManager()
        if positive_value_exists(measure_id):
            measure_we_vote_id = contest_measure_manager.fetch_contest_measure_we_vote_id_from_id(measure_id)
        elif positive_value_exists(measure_we_vote_id):
            measure_id = contest_measure_manager.fetch_contest_measure_id_from_we_vote_id(measure_we_vote_id)
        toggle_results = position_manager.toggle_on_voter_oppose_for_contest_measure(
            voter_id, measure_id, user_agent_string, user_agent_object)
        return _json_response("OPPOSING_MEASURE " + toggle_results['status'],
                              toggle_results['success'],
                              ballot_item_id=convert_to_int(measure_id),
                              ballot_item_we_vote_id=measure_we_vote_id,
                              kind_of_ballot_item=MEASURE,
                              position_we_vote_id=toggle_results['position_we_vote_id'])
    else:
        return _json_response('UNABLE_TO_SAVE-CANDIDATE_ID_AND_MEASURE_ID_MISSING', False)
def voter_stop_opposing_save(voter_device_id, candidate_id, candidate_we_vote_id,  # voterStopOpposingSave
                             measure_id, measure_we_vote_id, user_agent_string, user_agent_object):
    """
    Remove this voter's opposition to one ballot item (candidate OR measure).

    :param voter_device_id: device identifier used to resolve the acting voter
    :param candidate_id: local candidate id (this or candidate_we_vote_id may be supplied)
    :param candidate_we_vote_id: we_vote_id of the candidate
    :param measure_id: local contest measure id (this or measure_we_vote_id may be supplied)
    :param measure_we_vote_id: we_vote_id of the contest measure
    :param user_agent_string: raw user agent string stored with the position
    :param user_agent_object: parsed user agent stored with the position
    :return: HttpResponse whose JSON body carries status, success, the ballot item
             identifiers, and the we_vote_id of the affected position
    """
    # Get voter_id from the voter_device_id so we can know who is supporting/opposing
    results = is_voter_device_id_valid(voter_device_id)
    if not results['success']:
        json_data = {
            'status': 'VALID_VOTER_DEVICE_ID_MISSING',
            'success': False,
            'ballot_item_id': 0,
            'ballot_item_we_vote_id': '',
            'kind_of_ballot_item': '',
            'position_we_vote_id': '',
        }
        return HttpResponse(json.dumps(json_data), content_type='application/json')
    voter_id = fetch_voter_id_from_voter_device_link(voter_device_id)
    if not positive_value_exists(voter_id):
        json_data = {
            'status': "VALID_VOTER_ID_MISSING ",
            'success': False,
            'ballot_item_id': 0,
            'ballot_item_we_vote_id': '',
            'kind_of_ballot_item': '',
            'position_we_vote_id': '',
        }
        return HttpResponse(json.dumps(json_data), content_type='application/json')
    position_manager = PositionManager()
    if positive_value_exists(candidate_id) or positive_value_exists(candidate_we_vote_id):
        candidate_manager = CandidateManager()
        # Since we can take in either candidate_id or candidate_we_vote_id, we need to retrieve the value we don't have
        if positive_value_exists(candidate_id):
            candidate_we_vote_id = candidate_manager.fetch_candidate_we_vote_id_from_id(candidate_id)
        elif positive_value_exists(candidate_we_vote_id):
            candidate_id = candidate_manager.fetch_candidate_id_from_we_vote_id(candidate_we_vote_id)
        results = position_manager.toggle_off_voter_oppose_for_candidate(voter_id, candidate_id,
                                                                         user_agent_string, user_agent_object)
        status = "STOP_OPPOSING_CANDIDATE " + results['status']
        success = results['success']
        json_data = {
            'status': status,
            'success': success,
            'ballot_item_id': convert_to_int(candidate_id),
            'ballot_item_we_vote_id': candidate_we_vote_id,
            'kind_of_ballot_item': CANDIDATE,
            'position_we_vote_id': results['position_we_vote_id'],
        }
        return HttpResponse(json.dumps(json_data), content_type='application/json')
    elif positive_value_exists(measure_id) or positive_value_exists(measure_we_vote_id):
        contest_measure_manager = ContestMeasureManager()
        # Since we can take in either measure_id or measure_we_vote_id, we need to retrieve the value we don't have
        if positive_value_exists(measure_id):
            measure_we_vote_id = contest_measure_manager.fetch_contest_measure_we_vote_id_from_id(measure_id)
        elif positive_value_exists(measure_we_vote_id):
            measure_id = contest_measure_manager.fetch_contest_measure_id_from_we_vote_id(measure_we_vote_id)
        results = position_manager.toggle_off_voter_oppose_for_contest_measure(voter_id, measure_id,
                                                                               user_agent_string, user_agent_object)
        # BUG FIX: the token previously lacked its trailing space ("STOP_OPPOSING_MEASURE"),
        # fusing it with results['status']; every sibling endpoint separates tokens with a space.
        status = "STOP_OPPOSING_MEASURE " + results['status']
        success = results['success']
        json_data = {
            'status': status,
            'success': success,
            'ballot_item_id': convert_to_int(measure_id),
            'ballot_item_we_vote_id': measure_we_vote_id,
            'kind_of_ballot_item': MEASURE,
            'position_we_vote_id': results['position_we_vote_id'],
        }
        return HttpResponse(json.dumps(json_data), content_type='application/json')
    else:
        status = 'UNABLE_TO_SAVE-CANDIDATE_ID_AND_MEASURE_ID_MISSING'
        success = False
        json_data = {
            'status': status,
            'success': success,
            'ballot_item_id': 0,
            'ballot_item_we_vote_id': '',
            'kind_of_ballot_item': '',
            'position_we_vote_id': '',
        }
        return HttpResponse(json.dumps(json_data), content_type='application/json')
def voter_stop_supporting_save(voter_device_id, candidate_id, candidate_we_vote_id,  # voterStopSupportingSave
                               measure_id, measure_we_vote_id, user_agent_string, user_agent_object):
    """
    Withdraw this voter's support for one ballot item (candidate OR measure) and
    return the outcome as a JSON HttpResponse.
    """
    def _reply(status, success, ballot_item_id=0, ballot_item_we_vote_id='',
               kind_of_ballot_item='', position_we_vote_id=''):
        # Shared JSON envelope used by every return path below.
        payload = {
            'status': status,
            'success': success,
            'ballot_item_id': ballot_item_id,
            'ballot_item_we_vote_id': ballot_item_we_vote_id,
            'kind_of_ballot_item': kind_of_ballot_item,
            'position_we_vote_id': position_we_vote_id,
        }
        return HttpResponse(json.dumps(payload), content_type='application/json')

    # Resolve the voter from the device id so we know who is supporting/opposing
    validity_results = is_voter_device_id_valid(voter_device_id)
    if not validity_results['success']:
        return _reply('VALID_VOTER_DEVICE_ID_MISSING', False)
    voter_id = fetch_voter_id_from_voter_device_link(voter_device_id)
    if not positive_value_exists(voter_id):
        return _reply("VALID_VOTER_ID_MISSING ", False)
    position_manager = PositionManager()
    if positive_value_exists(candidate_id) or positive_value_exists(candidate_we_vote_id):
        # Either identifier may arrive; look up the one that is missing
        candidate_manager = CandidateManager()
        if positive_value_exists(candidate_id):
            candidate_we_vote_id = candidate_manager.fetch_candidate_we_vote_id_from_id(candidate_id)
        elif positive_value_exists(candidate_we_vote_id):
            candidate_id = candidate_manager.fetch_candidate_id_from_we_vote_id(candidate_we_vote_id)
        toggle_results = position_manager.toggle_off_voter_support_for_candidate(
            voter_id, candidate_id, user_agent_string, user_agent_object)
        return _reply("STOP_SUPPORTING_CANDIDATE " + toggle_results['status'],
                      toggle_results['success'],
                      ballot_item_id=convert_to_int(candidate_id),
                      ballot_item_we_vote_id=candidate_we_vote_id,
                      kind_of_ballot_item=CANDIDATE,
                      position_we_vote_id=toggle_results['position_we_vote_id'])
    elif positive_value_exists(measure_id) or positive_value_exists(measure_we_vote_id):
        # Either identifier may arrive; look up the one that is missing
        contest_measure_manager = ContestMeasureManager()
        if positive_value_exists(measure_id):
            measure_we_vote_id = contest_measure_manager.fetch_contest_measure_we_vote_id_from_id(measure_id)
        elif positive_value_exists(measure_we_vote_id):
            measure_id = contest_measure_manager.fetch_contest_measure_id_from_we_vote_id(measure_we_vote_id)
        toggle_results = position_manager.toggle_off_voter_support_for_contest_measure(
            voter_id, measure_id, user_agent_string, user_agent_object)
        return _reply("STOP_SUPPORTING_MEASURE " + toggle_results['status'],
                      toggle_results['success'],
                      ballot_item_id=convert_to_int(measure_id),
                      ballot_item_we_vote_id=measure_we_vote_id,
                      kind_of_ballot_item=MEASURE,
                      position_we_vote_id=toggle_results['position_we_vote_id'])
    else:
        return _reply('UNABLE_TO_SAVE-CANDIDATE_ID_AND_MEASURE_ID_MISSING', False)
def voter_supporting_save_for_api(voter_device_id,  # voterSupportingSave
                                  candidate_id, candidate_we_vote_id,
                                  measure_id, measure_we_vote_id, user_agent_string, user_agent_object):
    """
    Record that this voter supports one ballot item (candidate OR measure).
    Default to this being a private position.

    :param voter_device_id: device identifier used to resolve the acting voter
    :param candidate_id: local candidate id (this or candidate_we_vote_id may be supplied)
    :param candidate_we_vote_id: we_vote_id of the candidate
    :param measure_id: local contest measure id (this or measure_we_vote_id may be supplied)
    :param measure_we_vote_id: we_vote_id of the contest measure
    :param user_agent_string: raw user agent string stored with the position
    :param user_agent_object: parsed user agent stored with the position
    :return: HttpResponse whose JSON body carries status/success and ballot item identifiers
    """
    def _as_http_json(status, success, ballot_item_id=0, ballot_item_we_vote_id='',
                      kind_of_ballot_item='', position_we_vote_id=''):
        # Single place that serializes the shared response envelope.
        return HttpResponse(json.dumps({
            'status': status,
            'success': success,
            'ballot_item_id': ballot_item_id,
            'ballot_item_we_vote_id': ballot_item_we_vote_id,
            'kind_of_ballot_item': kind_of_ballot_item,
            'position_we_vote_id': position_we_vote_id,
        }), content_type='application/json')

    status = ""
    # Get voter_id from the voter_device_id so we can know who is supporting/opposing
    device_results = is_voter_device_id_valid(voter_device_id)
    if not device_results['success']:
        return _as_http_json('VALID_VOTER_DEVICE_ID_MISSING ', False)
    voter_id = fetch_voter_id_from_voter_device_link(voter_device_id)
    if not positive_value_exists(voter_id):
        return _as_http_json("VALID_VOTER_ID_MISSING ", False)
    position_manager = PositionManager()
    if positive_value_exists(candidate_id) or positive_value_exists(candidate_we_vote_id):
        # Either identifier may arrive; look up the one that is missing
        candidate_manager = CandidateManager()
        if positive_value_exists(candidate_id):
            candidate_we_vote_id = candidate_manager.fetch_candidate_we_vote_id_from_id(candidate_id)
        elif positive_value_exists(candidate_we_vote_id):
            candidate_id = candidate_manager.fetch_candidate_id_from_we_vote_id(candidate_we_vote_id)
        toggle_results = position_manager.toggle_on_voter_support_for_candidate(
            voter_id, candidate_id, user_agent_string, user_agent_object)
        status += "SUPPORTING_CANDIDATE " + toggle_results['status'] + " "
        return _as_http_json(status, toggle_results['success'],
                             ballot_item_id=convert_to_int(candidate_id),
                             ballot_item_we_vote_id=candidate_we_vote_id,
                             kind_of_ballot_item=CANDIDATE,
                             position_we_vote_id=toggle_results['position_we_vote_id'])
    elif positive_value_exists(measure_id) or positive_value_exists(measure_we_vote_id):
        # Either identifier may arrive; look up the one that is missing
        contest_measure_manager = ContestMeasureManager()
        if positive_value_exists(measure_id):
            measure_we_vote_id = contest_measure_manager.fetch_contest_measure_we_vote_id_from_id(measure_id)
        elif positive_value_exists(measure_we_vote_id):
            measure_id = contest_measure_manager.fetch_contest_measure_id_from_we_vote_id(measure_we_vote_id)
        toggle_results = position_manager.toggle_on_voter_support_for_contest_measure(
            voter_id, measure_id, user_agent_string, user_agent_object)
        status += "SUPPORTING_MEASURE: " + toggle_results['status'] + " "
        return _as_http_json(status, toggle_results['success'],
                             ballot_item_id=convert_to_int(measure_id),
                             ballot_item_we_vote_id=measure_we_vote_id,
                             kind_of_ballot_item=MEASURE,
                             position_we_vote_id=toggle_results['position_we_vote_id'])
    else:
        status += 'UNABLE_TO_SAVE-CANDIDATE_ID_AND_MEASURE_ID_MISSING '
        return _as_http_json(status, False)
| 50.604957
| 120
| 0.676264
| 7,858
| 65,331
| 5.060193
| 0.031433
| 0.055076
| 0.073234
| 0.022031
| 0.943163
| 0.93084
| 0.915022
| 0.898172
| 0.883057
| 0.874481
| 0
| 0.000665
| 0.263703
| 65,331
| 1,290
| 121
| 50.644186
| 0.825957
| 0.087753
| 0
| 0.777886
| 0
| 0
| 0.110295
| 0.055554
| 0
| 0
| 0
| 0.000775
| 0
| 1
| 0.016489
| false
| 0.00097
| 0.012609
| 0
| 0.074685
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a1e483b090db799c3ae71510436182a0eed459f0
| 29,078
|
py
|
Python
|
fhirclient/r4models/consent_tests.py
|
Healthedata1/Flask-PL
|
88a2f40ca430c4cbb9fbded7fc92fdc166ebb9f1
|
[
"MIT"
] | null | null | null |
fhirclient/r4models/consent_tests.py
|
Healthedata1/Flask-PL
|
88a2f40ca430c4cbb9fbded7fc92fdc166ebb9f1
|
[
"MIT"
] | null | null | null |
fhirclient/r4models/consent_tests.py
|
Healthedata1/Flask-PL
|
88a2f40ca430c4cbb9fbded7fc92fdc166ebb9f1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b on 2019-05-07.
# 2019, SMART Health IT.
import os
import io
import unittest
import json
from . import consent
from .fhirdate import FHIRDate
class ConsentTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("Consent", js["resourceType"])
return consent.Consent(js)
def testConsent1(self):
inst = self.instantiate_from("consent-example-notThis.json")
self.assertIsNotNone(inst, "Must have instantiated a Consent instance")
self.implConsent1(inst)
js = inst.as_json()
self.assertEqual("Consent", js["resourceType"])
inst2 = consent.Consent(js)
self.implConsent1(inst2)
    def implConsent1(self, inst):
        """Assert the field values expected from consent-example-notThis.json."""
        self.assertEqual(inst.category[0].coding[0].code, "59284-0")
        self.assertEqual(inst.category[0].coding[0].system, "http://loinc.org")
        self.assertEqual(inst.dateTime.date, FHIRDate("2015-11-18").date)
        self.assertEqual(inst.dateTime.as_json(), "2015-11-18")
        self.assertEqual(inst.id, "consent-example-notThis")
        self.assertEqual(inst.meta.tag[0].code, "HTEST")
        self.assertEqual(inst.meta.tag[0].display, "test health data")
        self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
        self.assertEqual(inst.policyRule.coding[0].code, "OPTIN")
        self.assertEqual(inst.policyRule.coding[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActCode")
        self.assertEqual(inst.provision.data[0].meaning, "related")
        self.assertEqual(inst.scope.coding[0].code, "patient-privacy")
        self.assertEqual(inst.scope.coding[0].system, "http://terminology.hl7.org/CodeSystem/consentscope")
        self.assertEqual(inst.sourceAttachment.title, "The terms of the consent in lawyer speak.")
        self.assertEqual(inst.status, "active")
        self.assertEqual(inst.text.status, "generated")
def testConsent2(self):
inst = self.instantiate_from("consent-example-smartonfhir.json")
self.assertIsNotNone(inst, "Must have instantiated a Consent instance")
self.implConsent2(inst)
js = inst.as_json()
self.assertEqual("Consent", js["resourceType"])
inst2 = consent.Consent(js)
self.implConsent2(inst2)
    def implConsent2(self, inst):
        """Assert the field values expected from consent-example-smartonfhir.json."""
        self.assertEqual(inst.category[0].coding[0].code, "59284-0")
        self.assertEqual(inst.category[0].coding[0].system, "http://loinc.org")
        self.assertEqual(inst.dateTime.date, FHIRDate("2016-06-23T17:02:33+10:00").date)
        self.assertEqual(inst.dateTime.as_json(), "2016-06-23T17:02:33+10:00")
        self.assertEqual(inst.id, "consent-example-smartonfhir")
        self.assertEqual(inst.meta.tag[0].code, "HTEST")
        self.assertEqual(inst.meta.tag[0].display, "test health data")
        self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
        self.assertEqual(inst.policyRule.coding[0].code, "OPTIN")
        self.assertEqual(inst.policyRule.coding[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActCode")
        # Provision validity window
        self.assertEqual(inst.provision.period.end.date, FHIRDate("2016-06-23T17:32:33+10:00").date)
        self.assertEqual(inst.provision.period.end.as_json(), "2016-06-23T17:32:33+10:00")
        self.assertEqual(inst.provision.period.start.date, FHIRDate("2016-06-23T17:02:33+10:00").date)
        self.assertEqual(inst.provision.period.start.as_json(), "2016-06-23T17:02:33+10:00")
        # Nested provision permitting access to MedicationRequest resources
        self.assertEqual(inst.provision.provision[0].action[0].coding[0].code, "access")
        self.assertEqual(inst.provision.provision[0].action[0].coding[0].system, "http://terminology.hl7.org/CodeSystem/consentaction")
        self.assertEqual(inst.provision.provision[0].class_fhir[0].code, "MedicationRequest")
        self.assertEqual(inst.provision.provision[0].class_fhir[0].system, "http://hl7.org/fhir/resource-types")
        self.assertEqual(inst.provision.provision[0].type, "permit")
        self.assertEqual(inst.scope.coding[0].code, "patient-privacy")
        self.assertEqual(inst.scope.coding[0].system, "http://terminology.hl7.org/CodeSystem/consentscope")
        self.assertEqual(inst.status, "active")
        self.assertEqual(inst.text.status, "generated")
def testConsent3(self):
inst = self.instantiate_from("consent-example-notAuthor.json")
self.assertIsNotNone(inst, "Must have instantiated a Consent instance")
self.implConsent3(inst)
js = inst.as_json()
self.assertEqual("Consent", js["resourceType"])
inst2 = consent.Consent(js)
self.implConsent3(inst2)
    def implConsent3(self, inst):
        """Assert the field values expected from consent-example-notAuthor.json."""
        self.assertEqual(inst.category[0].coding[0].code, "59284-0")
        self.assertEqual(inst.category[0].coding[0].system, "http://loinc.org")
        self.assertEqual(inst.dateTime.date, FHIRDate("2015-11-18").date)
        self.assertEqual(inst.dateTime.as_json(), "2015-11-18")
        self.assertEqual(inst.id, "consent-example-notAuthor")
        self.assertEqual(inst.meta.tag[0].code, "HTEST")
        self.assertEqual(inst.meta.tag[0].display, "test health data")
        self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
        self.assertEqual(inst.policyRule.coding[0].code, "OPTIN")
        self.assertEqual(inst.policyRule.coding[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActCode")
        # The excluded actor is the custodian (CST)
        self.assertEqual(inst.provision.actor[0].role.coding[0].code, "CST")
        self.assertEqual(inst.provision.actor[0].role.coding[0].system, "http://terminology.hl7.org/CodeSystem/v3-ParticipationType")
        self.assertEqual(inst.scope.coding[0].code, "patient-privacy")
        self.assertEqual(inst.scope.coding[0].system, "http://terminology.hl7.org/CodeSystem/consentscope")
        self.assertEqual(inst.sourceAttachment.title, "The terms of the consent in lawyer speak.")
        self.assertEqual(inst.status, "active")
        self.assertEqual(inst.text.status, "generated")
def testConsent4(self):
inst = self.instantiate_from("consent-example-notTime.json")
self.assertIsNotNone(inst, "Must have instantiated a Consent instance")
self.implConsent4(inst)
js = inst.as_json()
self.assertEqual("Consent", js["resourceType"])
inst2 = consent.Consent(js)
self.implConsent4(inst2)
    def implConsent4(self, inst):
        """Assert the field values expected from consent-example-notTime.json."""
        self.assertEqual(inst.category[0].coding[0].code, "59284-0")
        self.assertEqual(inst.category[0].coding[0].system, "http://loinc.org")
        self.assertEqual(inst.dateTime.date, FHIRDate("2015-11-18").date)
        self.assertEqual(inst.dateTime.as_json(), "2015-11-18")
        self.assertEqual(inst.id, "consent-example-notTime")
        self.assertEqual(inst.meta.tag[0].code, "HTEST")
        self.assertEqual(inst.meta.tag[0].display, "test health data")
        self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
        self.assertEqual(inst.policyRule.coding[0].code, "OPTIN")
        self.assertEqual(inst.policyRule.coding[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActCode")
        # Provision excluded for this date range
        self.assertEqual(inst.provision.period.end.date, FHIRDate("2015-02-01").date)
        self.assertEqual(inst.provision.period.end.as_json(), "2015-02-01")
        self.assertEqual(inst.provision.period.start.date, FHIRDate("2015-01-01").date)
        self.assertEqual(inst.provision.period.start.as_json(), "2015-01-01")
        self.assertEqual(inst.scope.coding[0].code, "patient-privacy")
        self.assertEqual(inst.scope.coding[0].system, "http://terminology.hl7.org/CodeSystem/consentscope")
        self.assertEqual(inst.sourceAttachment.title, "The terms of the consent in lawyer speak.")
        self.assertEqual(inst.status, "active")
        self.assertEqual(inst.text.status, "generated")
def testConsent5(self):
inst = self.instantiate_from("consent-example-signature.json")
self.assertIsNotNone(inst, "Must have instantiated a Consent instance")
self.implConsent5(inst)
js = inst.as_json()
self.assertEqual("Consent", js["resourceType"])
inst2 = consent.Consent(js)
self.implConsent5(inst2)
    def implConsent5(self, inst):
        """Assert the field values expected from consent-example-signature.json."""
        self.assertEqual(inst.category[0].coding[0].code, "npp")
        self.assertEqual(inst.category[0].coding[0].system, "http://terminology.hl7.org/CodeSystem/consentcategorycodes")
        self.assertEqual(inst.dateTime.date, FHIRDate("2016-05-26T00:41:10-04:00").date)
        self.assertEqual(inst.dateTime.as_json(), "2016-05-26T00:41:10-04:00")
        self.assertEqual(inst.id, "consent-example-signature")
        self.assertEqual(inst.identifier[0].system, "urn:oid:2.16.840.1.113883.3.72.5.9.1")
        self.assertEqual(inst.identifier[0].value, "494e0c7a-a69e-4fb4-9d02-6aae747790d7")
        self.assertEqual(inst.meta.tag[0].code, "HTEST")
        self.assertEqual(inst.meta.tag[0].display, "test health data")
        self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
        self.assertEqual(inst.policyRule.coding[0].code, "OPTIN")
        self.assertEqual(inst.policyRule.coding[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActCode")
        self.assertEqual(inst.provision.actor[0].role.coding[0].code, "PRCP")
        self.assertEqual(inst.provision.actor[0].role.coding[0].system, "http://terminology.hl7.org/CodeSystem/v3-ParticipationType")
        # Top-level provision validity window
        self.assertEqual(inst.provision.period.end.date, FHIRDate("2016-10-10").date)
        self.assertEqual(inst.provision.period.end.as_json(), "2016-10-10")
        self.assertEqual(inst.provision.period.start.date, FHIRDate("2015-10-10").date)
        self.assertEqual(inst.provision.period.start.as_json(), "2015-10-10")
        # Nested provision permitting specific CDA document types for the author (AUT)
        self.assertEqual(inst.provision.provision[0].actor[0].role.coding[0].code, "AUT")
        self.assertEqual(inst.provision.provision[0].actor[0].role.coding[0].system, "http://terminology.hl7.org/CodeSystem/v3-ParticipationType")
        self.assertEqual(inst.provision.provision[0].class_fhir[0].code, "application/hl7-cda+xml")
        self.assertEqual(inst.provision.provision[0].class_fhir[0].system, "urn:ietf:bcp:13")
        self.assertEqual(inst.provision.provision[0].code[0].coding[0].code, "34133-9")
        self.assertEqual(inst.provision.provision[0].code[0].coding[0].system, "http://loinc.org")
        self.assertEqual(inst.provision.provision[0].code[1].coding[0].code, "18842-5")
        self.assertEqual(inst.provision.provision[0].code[1].coding[0].system, "http://loinc.org")
        self.assertEqual(inst.provision.provision[0].type, "permit")
        self.assertEqual(inst.scope.coding[0].code, "patient-privacy")
        self.assertEqual(inst.scope.coding[0].system, "http://terminology.hl7.org/CodeSystem/consentscope")
        self.assertEqual(inst.status, "active")
        self.assertEqual(inst.text.status, "generated")
def testConsent6(self):
inst = self.instantiate_from("consent-example-notThem.json")
self.assertIsNotNone(inst, "Must have instantiated a Consent instance")
self.implConsent6(inst)
js = inst.as_json()
self.assertEqual("Consent", js["resourceType"])
inst2 = consent.Consent(js)
self.implConsent6(inst2)
    def implConsent6(self, inst):
        """Assert the field values expected from consent-example-notThem.json."""
        self.assertEqual(inst.category[0].coding[0].code, "59284-0")
        self.assertEqual(inst.category[0].coding[0].system, "http://loinc.org")
        self.assertEqual(inst.dateTime.date, FHIRDate("2015-11-18").date)
        self.assertEqual(inst.dateTime.as_json(), "2015-11-18")
        self.assertEqual(inst.id, "consent-example-notThem")
        self.assertEqual(inst.meta.tag[0].code, "HTEST")
        self.assertEqual(inst.meta.tag[0].display, "test health data")
        self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
        self.assertEqual(inst.policyRule.coding[0].code, "OPTIN")
        self.assertEqual(inst.policyRule.coding[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActCode")
        # Provision covers both "access" and "correct" actions for the recipient (PRCP)
        self.assertEqual(inst.provision.action[0].coding[0].code, "access")
        self.assertEqual(inst.provision.action[0].coding[0].system, "http://terminology.hl7.org/CodeSystem/consentaction")
        self.assertEqual(inst.provision.action[1].coding[0].code, "correct")
        self.assertEqual(inst.provision.action[1].coding[0].system, "http://terminology.hl7.org/CodeSystem/consentaction")
        self.assertEqual(inst.provision.actor[0].role.coding[0].code, "PRCP")
        self.assertEqual(inst.provision.actor[0].role.coding[0].system, "http://terminology.hl7.org/CodeSystem/v3-ParticipationType")
        self.assertEqual(inst.scope.coding[0].code, "patient-privacy")
        self.assertEqual(inst.scope.coding[0].system, "http://terminology.hl7.org/CodeSystem/consentscope")
        self.assertEqual(inst.sourceAttachment.title, "The terms of the consent in lawyer speak.")
        self.assertEqual(inst.status, "active")
        self.assertEqual(inst.text.status, "generated")
def testConsent7(self):
inst = self.instantiate_from("consent-example-grantor.json")
self.assertIsNotNone(inst, "Must have instantiated a Consent instance")
self.implConsent7(inst)
js = inst.as_json()
self.assertEqual("Consent", js["resourceType"])
inst2 = consent.Consent(js)
self.implConsent7(inst2)
    def implConsent7(self, inst):
        """Assert the field values expected from consent-example-grantor.json."""
        self.assertEqual(inst.category[0].coding[0].code, "INFAO")
        self.assertEqual(inst.category[0].coding[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActCode")
        self.assertEqual(inst.dateTime.date, FHIRDate("2015-11-18").date)
        self.assertEqual(inst.dateTime.as_json(), "2015-11-18")
        self.assertEqual(inst.id, "consent-example-grantor")
        self.assertEqual(inst.meta.tag[0].code, "HTEST")
        self.assertEqual(inst.meta.tag[0].display, "test health data")
        self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
        # Opt-out base policy in this example
        self.assertEqual(inst.policyRule.coding[0].code, "OPTOUT")
        self.assertEqual(inst.policyRule.coding[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActCode")
        self.assertEqual(inst.provision.action[0].coding[0].code, "access")
        self.assertEqual(inst.provision.action[0].coding[0].system, "http://terminology.hl7.org/CodeSystem/consentaction")
        # Two actors: custodian (CST) and recipient (PRCP)
        self.assertEqual(inst.provision.actor[0].role.coding[0].code, "CST")
        self.assertEqual(inst.provision.actor[0].role.coding[0].system, "http://terminology.hl7.org/CodeSystem/v3-ParticipationType")
        self.assertEqual(inst.provision.actor[1].role.coding[0].code, "PRCP")
        self.assertEqual(inst.provision.actor[1].role.coding[0].system, "http://terminology.hl7.org/CodeSystem/v3-ParticipationType")
        self.assertEqual(inst.scope.coding[0].code, "patient-privacy")
        self.assertEqual(inst.scope.coding[0].system, "http://terminology.hl7.org/CodeSystem/consentscope")
        self.assertEqual(inst.sourceAttachment.title, "The terms of the consent in lawyer speak.")
        self.assertEqual(inst.status, "active")
        self.assertEqual(inst.text.status, "generated")
def testConsent8(self):
inst = self.instantiate_from("consent-example-notOrg.json")
self.assertIsNotNone(inst, "Must have instantiated a Consent instance")
self.implConsent8(inst)
js = inst.as_json()
self.assertEqual("Consent", js["resourceType"])
inst2 = consent.Consent(js)
self.implConsent8(inst2)
    def implConsent8(self, inst):
        """Assert the field values expected from consent-example-notOrg.json."""
        self.assertEqual(inst.category[0].coding[0].code, "59284-0")
        self.assertEqual(inst.category[0].coding[0].system, "http://loinc.org")
        self.assertEqual(inst.dateTime.date, FHIRDate("2015-11-18").date)
        self.assertEqual(inst.dateTime.as_json(), "2015-11-18")
        self.assertEqual(inst.id, "consent-example-notOrg")
        self.assertEqual(inst.meta.tag[0].code, "HTEST")
        self.assertEqual(inst.meta.tag[0].display, "test health data")
        self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
        self.assertEqual(inst.policyRule.coding[0].code, "OPTIN")
        self.assertEqual(inst.policyRule.coding[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActCode")
        # A "deny" provision covering access and correct for the recipient (PRCP)
        self.assertEqual(inst.provision.action[0].coding[0].code, "access")
        self.assertEqual(inst.provision.action[0].coding[0].system, "http://terminology.hl7.org/CodeSystem/consentaction")
        self.assertEqual(inst.provision.action[1].coding[0].code, "correct")
        self.assertEqual(inst.provision.action[1].coding[0].system, "http://terminology.hl7.org/CodeSystem/consentaction")
        self.assertEqual(inst.provision.actor[0].role.coding[0].code, "PRCP")
        self.assertEqual(inst.provision.actor[0].role.coding[0].system, "http://terminology.hl7.org/CodeSystem/v3-ParticipationType")
        self.assertEqual(inst.provision.type, "deny")
        self.assertEqual(inst.scope.coding[0].code, "patient-privacy")
        self.assertEqual(inst.scope.coding[0].system, "http://terminology.hl7.org/CodeSystem/consentscope")
        self.assertEqual(inst.sourceAttachment.title, "The terms of the consent in lawyer speak.")
        self.assertEqual(inst.status, "active")
        self.assertEqual(inst.text.status, "generated")
def testConsent9(self):
inst = self.instantiate_from("consent-example-pkb.json")
self.assertIsNotNone(inst, "Must have instantiated a Consent instance")
self.implConsent9(inst)
js = inst.as_json()
self.assertEqual("Consent", js["resourceType"])
inst2 = consent.Consent(js)
self.implConsent9(inst2)
def implConsent9(self, inst):
    """Assert the parsed field values of the 'consent-example-pkb' fixture."""
    CONSENTACTION = "http://terminology.hl7.org/CodeSystem/consentaction"
    PARTICIPATION = "http://terminology.hl7.org/CodeSystem/v3-ParticipationType"
    ACTCODE = "http://terminology.hl7.org/CodeSystem/v3-ActCode"
    CONFIDENTIALITY = "http://terminology.hl7.org/CodeSystem/v3-Confidentiality"
    self.assertEqual(inst.id, "consent-example-pkb")
    self.assertEqual(inst.status, "active")
    self.assertEqual(inst.text.status, "generated")
    self.assertEqual(inst.dateTime.date, FHIRDate("2016-06-16").date)
    self.assertEqual(inst.dateTime.as_json(), "2016-06-16")
    self.assertEqual(inst.category[0].coding[0].code, "59284-0")
    self.assertEqual(inst.category[0].coding[0].system, "http://loinc.org")
    tag = inst.meta.tag[0]
    self.assertEqual(tag.code, "HTEST")
    self.assertEqual(tag.display, "test health data")
    self.assertEqual(tag.system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
    self.assertEqual(inst.policyRule.coding[0].code, "OPTOUT")
    self.assertEqual(inst.policyRule.coding[0].system, ACTCODE)
    # Top-level provision: one 'access' action, one 'PRCP' actor, label 'N'.
    self.assertEqual(inst.provision.action[0].coding[0].code, "access")
    self.assertEqual(inst.provision.action[0].coding[0].system, CONSENTACTION)
    self.assertEqual(inst.provision.actor[0].role.coding[0].code, "PRCP")
    self.assertEqual(inst.provision.actor[0].role.coding[0].system, PARTICIPATION)
    self.assertEqual(inst.provision.securityLabel[0].code, "N")
    self.assertEqual(inst.provision.securityLabel[0].system, CONFIDENTIALITY)
    # The ten nested provisions share the same action/actor; only the
    # securityLabel (code, system) pair differs per entry.
    expected_labels = [
        ("PSY", ACTCODE),
        ("SPI", ACTCODE),
        ("N", CONFIDENTIALITY),
        ("PSY", ACTCODE),
        ("SPI", ACTCODE),
        ("SEX", ACTCODE),
        ("N", CONFIDENTIALITY),
        ("PSY", ACTCODE),
        ("SPI", ACTCODE),
        ("SEX", ACTCODE),
    ]
    for idx, (code, system) in enumerate(expected_labels):
        sub = inst.provision.provision[idx]
        self.assertEqual(sub.action[0].coding[0].code, "access")
        self.assertEqual(sub.action[0].coding[0].system, CONSENTACTION)
        self.assertEqual(sub.actor[0].role.coding[0].code, "PRCP")
        self.assertEqual(sub.actor[0].role.coding[0].system, PARTICIPATION)
        self.assertEqual(sub.securityLabel[0].code, code)
        self.assertEqual(sub.securityLabel[0].system, system)
    self.assertEqual(inst.scope.coding[0].code, "patient-privacy")
    self.assertEqual(inst.scope.coding[0].system, "http://terminology.hl7.org/CodeSystem/consentscope")
def testConsent10(self):
    """Round-trip the consent-example fixture through JSON and re-check it."""
    inst = self.instantiate_from("consent-example.json")
    self.assertIsNotNone(inst, "Must have instantiated a Consent instance")
    self.implConsent10(inst)
    js = inst.as_json()
    self.assertEqual("Consent", js["resourceType"])
    # Rebuild from the serialized form and verify the same field values.
    self.implConsent10(consent.Consent(js))
def implConsent10(self, inst):
    """Assert the parsed field values of the 'consent-example-basic' fixture."""
    self.assertEqual(inst.id, "consent-example-basic")
    self.assertEqual(inst.status, "active")
    self.assertEqual(inst.text.status, "generated")
    self.assertEqual(inst.dateTime.date, FHIRDate("2016-05-11").date)
    self.assertEqual(inst.dateTime.as_json(), "2016-05-11")
    self.assertEqual(inst.sourceAttachment.title, "The terms of the consent in lawyer speak.")
    # Consent provision covers 1964-01-01 through 2016-01-01.
    period = inst.provision.period
    self.assertEqual(period.end.date, FHIRDate("2016-01-01").date)
    self.assertEqual(period.end.as_json(), "2016-01-01")
    self.assertEqual(period.start.date, FHIRDate("1964-01-01").date)
    self.assertEqual(period.start.as_json(), "1964-01-01")
    # (coding, expected code, expected system) triples checked in one pass.
    for coding, code, system in [
        (inst.category[0].coding[0], "59284-0", "http://loinc.org"),
        (inst.policyRule.coding[0], "OPTIN", "http://terminology.hl7.org/CodeSystem/v3-ActCode"),
        (inst.scope.coding[0], "patient-privacy", "http://terminology.hl7.org/CodeSystem/consentscope"),
    ]:
        self.assertEqual(coding.code, code)
        self.assertEqual(coding.system, system)
    tag = inst.meta.tag[0]
    self.assertEqual(tag.code, "HTEST")
    self.assertEqual(tag.display, "test health data")
    self.assertEqual(tag.system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
| 70.40678
| 147
| 0.687977
| 3,629
| 29,078
| 5.500138
| 0.056489
| 0.210421
| 0.256062
| 0.168337
| 0.934068
| 0.929158
| 0.925251
| 0.89975
| 0.891633
| 0.844639
| 0
| 0.042294
| 0.150285
| 29,078
| 412
| 148
| 70.57767
| 0.765542
| 0.004092
| 0
| 0.532258
| 1
| 0.002688
| 0.250587
| 0.027434
| 0
| 0
| 0
| 0
| 0.77957
| 1
| 0.056452
| false
| 0
| 0.016129
| 0
| 0.077957
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
a1f1cb7c042d25f552c660ef24b446d2b9d12af0
| 674
|
py
|
Python
|
week09-password-cracking/step0903/benchmark.py
|
NPaspallis/CO1417
|
a91483d858b457ae3de8c032e9eb48e6aa223dfd
|
[
"MIT"
] | null | null | null |
week09-password-cracking/step0903/benchmark.py
|
NPaspallis/CO1417
|
a91483d858b457ae3de8c032e9eb48e6aa223dfd
|
[
"MIT"
] | null | null | null |
week09-password-cracking/step0903/benchmark.py
|
NPaspallis/CO1417
|
a91483d858b457ae3de8c032e9eb48e6aa223dfd
|
[
"MIT"
] | null | null | null |
import hashlib
import time

# Number of hash invocations per measurement.
LOOPS = 1000000


def time_hash(algorithm, loops=LOOPS):
    """Return the seconds spent computing *loops* hex digests of b'hello'.

    algorithm: a hashlib constructor name such as 'md5' or 'sha256'.
    loops: how many digests to compute (defaults to LOOPS).
    """
    constructor = getattr(hashlib, algorithm)
    data = "hello".encode('utf-8')  # hoisted: encoding is loop-invariant
    start = time.perf_counter()     # time at this point (begin measuring)
    # '_' instead of the original loop variable 'int', which shadowed the builtin.
    for _ in range(loops):
        constructor(data).hexdigest()
    end = time.perf_counter()       # time at this point (end measuring)
    return end - start              # difference of 'begin' and 'end', in seconds


def main():
    """Benchmark MD5 against SHA256 and print each timing."""
    print("MD5 time: %f" % time_hash("md5"))
    print("SHA256 time: %f" % time_hash("sha256"))


if __name__ == "__main__":
    main()
| 35.473684
| 70
| 0.695846
| 102
| 674
| 4.558824
| 0.323529
| 0.068817
| 0.129032
| 0.163441
| 0.843011
| 0.843011
| 0.843011
| 0.843011
| 0.843011
| 0.843011
| 0
| 0.030249
| 0.166172
| 674
| 18
| 71
| 37.444444
| 0.797153
| 0.354599
| 0
| 0.533333
| 0
| 0
| 0.109813
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.133333
| 0
| 0.133333
| 0.133333
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
629c901c23e45c09bd44a7341218cd89e3333b8a
| 39
|
py
|
Python
|
lvq/__init__.py
|
SundayProgrammer/LearningVectorQuantization
|
4550d5530cd9a65b67cb40c3f52fd4ad00999b75
|
[
"MIT"
] | 3
|
2021-04-06T07:59:53.000Z
|
2021-12-06T08:33:23.000Z
|
lvq/__init__.py
|
SundayProgrammer/LearningVectorQuantization
|
4550d5530cd9a65b67cb40c3f52fd4ad00999b75
|
[
"MIT"
] | null | null | null |
lvq/__init__.py
|
SundayProgrammer/LearningVectorQuantization
|
4550d5530cd9a65b67cb40c3f52fd4ad00999b75
|
[
"MIT"
] | 1
|
2020-07-03T04:00:18.000Z
|
2020-07-03T04:00:18.000Z
|
"""Learning Vector Quantization package: expose the lvq_1 and lvq_2 submodules."""
from . import lvq_1
from . import lvq_2
| 19.5
| 19
| 0.769231
| 8
| 39
| 3.5
| 0.625
| 0.714286
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0625
| 0.179487
| 39
| 2
| 20
| 19.5
| 0.8125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
62b1de92451d1fc2de6d709ae8014efa04b7eccc
| 2,881
|
py
|
Python
|
tests/test_decoders_tensorflow.py
|
shar999/mead-baseline
|
bd9cd02c0a1d9c0df91aca171774a6967e6ce190
|
[
"Apache-2.0"
] | 241
|
2016-04-25T20:02:31.000Z
|
2019-09-03T05:44:09.000Z
|
tests/test_decoders_tensorflow.py
|
shar999/mead-baseline
|
bd9cd02c0a1d9c0df91aca171774a6967e6ce190
|
[
"Apache-2.0"
] | 42
|
2017-08-21T16:04:36.000Z
|
2019-09-30T20:45:17.000Z
|
tests/test_decoders_tensorflow.py
|
shar999/mead-baseline
|
bd9cd02c0a1d9c0df91aca171774a6967e6ce190
|
[
"Apache-2.0"
] | 75
|
2016-06-28T01:18:58.000Z
|
2019-08-29T06:47:22.000Z
|
import pytest
import numpy as np
from eight_mile.utils import get_version
from eight_mile.embeddings import RandomInitVecModel
from collections import namedtuple
import string
# Skip this whole module at collection time when TensorFlow is not installed.
tf = pytest.importorskip('tensorflow')
# Also skip under TF 1.x: these decoder tests target the TF 2 API surface.
pytestmark = pytest.mark.skipif(get_version(tf) < 2, reason='TF1.X')
from eight_mile.utils import Offsets
def test_rnn_decode_shapes():
    """RNNDecoder output should be shaped [batch, decode steps, vocab size]."""
    from baseline.tf.embeddings import LookupTableEmbeddingsModel
    from baseline.tf.seq2seq.decoders import RNNDecoder
    # namedtuple type used as a plain attribute bag for the fake encoder output.
    encoder = namedtuple("EncoderOutput", "output src_mask")
    batchsz, temporal, temporal_output = 2, 7, 4
    hsz, dsz, layers = 20, 10, 1
    # Vocabulary is the ascii letters; the model adds the reserved Offsets entries.
    wv = RandomInitVecModel(
        dsz, {letter: 1 for letter in list(string.ascii_letters)}
    )
    assert wv.get_vsz() == len(string.ascii_letters) + len(Offsets.VALUES)
    encoder.output = tf.cast(np.random.randn(batchsz, temporal, hsz), dtype=tf.float32)
    encoder.hidden = (
        tf.cast(np.random.randn(layers, batchsz, hsz), dtype=tf.float32),
        tf.cast(np.random.randn(layers, batchsz, hsz), dtype=tf.float32),
    )
    encoder.src_mask = np.zeros((batchsz, temporal), dtype=np.uint8)
    tgt_embed = LookupTableEmbeddingsModel.create(wv, 'output')
    decoder = RNNDecoder(tgt_embed, hsz=hsz, tie_weights=False)
    decode_start = np.full((batchsz, temporal_output), Offsets.GO, dtype=np.int64)
    output = decoder(encoder, decode_start)
    assert output.shape[0] == batchsz
    assert output.shape[1] == temporal_output
    assert output.shape[2] == wv.get_vsz()
def test_rnn_attn_decode_shapes():
    """RNNDecoderWithAttn output should be shaped [batch, decode steps, vocab size]."""
    from baseline.tf.embeddings import LookupTableEmbeddingsModel
    from baseline.tf.seq2seq.decoders import RNNDecoderWithAttn
    # namedtuple type used as a plain attribute bag for the fake encoder output.
    encoder = namedtuple("EncoderOutput", "output src_mask")
    batchsz, temporal, temporal_output = 2, 7, 4
    hsz, dsz, layers = 20, 10, 1
    # Vocabulary is the ascii letters; the model adds the reserved Offsets entries.
    wv = RandomInitVecModel(
        dsz, {letter: 1 for letter in list(string.ascii_letters)}
    )
    assert wv.get_vsz() == len(string.ascii_letters) + len(Offsets.VALUES)
    encoder.output = tf.cast(np.random.randn(batchsz, temporal, hsz), dtype=tf.float32)
    encoder.hidden = (
        tf.cast(np.random.randn(layers, batchsz, hsz), dtype=tf.float32),
        tf.cast(np.random.randn(layers, batchsz, hsz), dtype=tf.float32),
    )
    encoder.src_mask = np.zeros((batchsz, temporal), dtype=np.uint8)
    tgt_embed = LookupTableEmbeddingsModel.create(wv, 'output')
    decoder = RNNDecoderWithAttn(tgt_embed, hsz=hsz, attn_type='sdpx', tie_weights=False)
    decode_start = np.full((batchsz, temporal_output), Offsets.GO, dtype=np.int64)
    output = decoder(encoder, decode_start)
    assert output.shape[0] == batchsz
    assert output.shape[1] == temporal_output
    assert output.shape[2] == wv.get_vsz()
| 41.753623
| 89
| 0.708087
| 384
| 2,881
| 5.205729
| 0.242188
| 0.042021
| 0.024012
| 0.042021
| 0.833417
| 0.809405
| 0.809405
| 0.809405
| 0.809405
| 0.809405
| 0
| 0.019475
| 0.180146
| 2,881
| 68
| 90
| 42.367647
| 0.826842
| 0.03714
| 0
| 0.711864
| 0
| 0
| 0.031419
| 0
| 0
| 0
| 0
| 0
| 0.135593
| 1
| 0.033898
| false
| 0
| 0.20339
| 0
| 0.237288
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1a1868b20b7e0bd03f2be1af3ea65290424bf32c
| 31,296
|
py
|
Python
|
settings/jyxf/extensions/jssllv/get_sql_from_file_copy.py
|
Cookie-YY/cooshow
|
fe487ff27a4d5fa0a2f832c45694fb4526d9771b
|
[
"MIT"
] | null | null | null |
settings/jyxf/extensions/jssllv/get_sql_from_file_copy.py
|
Cookie-YY/cooshow
|
fe487ff27a4d5fa0a2f832c45694fb4526d9771b
|
[
"MIT"
] | null | null | null |
settings/jyxf/extensions/jssllv/get_sql_from_file_copy.py
|
Cookie-YY/cooshow
|
fe487ff27a4d5fa0a2f832c45694fb4526d9771b
|
[
"MIT"
] | null | null | null |
# City-level (Guangdong, region codes '44__00000000') acceptance statistics:
# per region, total items (zs), accepted (ysl), accepted within the 15-day
# deadline (jssl), accepted late (cqsl), and overdue-but-unaccepted (cqwsl).
# '{start}'/'{end}' placeholders are filled in by the caller via str.format;
# '%%' is an escaped literal '%' for the DB driver's parameter style.
sql_1_2_all = """(select
r.region_name,
case when instr(r.region_code,'0000000000')>0 then '省级'
when instr(r.region_code,'00000000')>0 then '市级'
when instr(r.region_code,'000000')>0 then '县级'
end bmjb,
r.region_code,
count(distinct(t.xfjbh)) as zs,
count(distinct(case
when (t.sjslsj is not null or t.shijslsj is not null or t.xjslsj is not null)
then t.xfjbh
end)) as ysl,
count(distinct(case
when least(ifnull(t.sjslsj,SYSDATE()),ifnull(t.shijslsj,SYSDATE()),ifnull(t.xjslsj,SYSDATE())) <= DATE_ADD( t.djsj, INTERVAL 15 DAY )
and least(ifnull(t.sjslsj,SYSDATE()),ifnull(t.shijslsj,SYSDATE()),ifnull(t.xjslsj,SYSDATE())) <> SYSDATE()
then t.xfjbh
end)) as jssl,
count(distinct(case
when least(ifnull(t.sjslsj,SYSDATE()),ifnull(t.shijslsj,SYSDATE()),ifnull(t.xjslsj,SYSDATE())) > DATE_ADD( t.djsj, INTERVAL 15 DAY )
and least(ifnull(t.sjslsj,SYSDATE()),ifnull(t.shijslsj,SYSDATE()),ifnull(t.xjslsj,SYSDATE())) <> SYSDATE()
then t.xfjbh
end)) as cqsl,
count(distinct(case
when (t.sjslsj is null and t.shijslsj is null and t.xjslsj is null)
and SYSDATE() > DATE_ADD( t.djsj, INTERVAL 15 DAY )
then t.xfjbh
end)) as cqwsl
from rpt_region r
left join
(select
distinct x.xfjbh,
substr(IFNULL(IFNULL(e.qxjgdm,y.zjbljgdm),x.wtsddm), 1, 4) as wtsddm,
x.sjslsj,
x.shijslsj,
x.xjslsj,
x.ddzrdwsj,
x.djsj
from rpt_xfjxx x
LEFT JOIN xf_dbxx e ON e.xfjbh = x.xfjbh
LEFT JOIN xf_ybinfo y ON x.xfjbh = y.xfjbh
where 1=1
and DATE_ADD( x.djsj, INTERVAL 15 DAY ) >= '{start}'
and DATE_ADD( x.djsj, INTERVAL 15 DAY ) <= '{end}'
and x.check_flag = 0
and x.djjglbdm in ('0000','0100','0200','0300')
and x.xfrs >= 1 and x.xfrs <= 9999
and x.sfnrtj = 1
and x.xfxsdm in ('100','200','300','303','600')
) t
on r.region_code = concat(t.wtsddm,'00000000')
where
r.region_code like '44__00000000%%'
group by r.region_code)"""
# Same city-level acceptance statistics as sql_1_2_all, but additionally
# grouped by submission channel (t.xfxs) and restricted to the online
# channel ('网信') via the case filter in the subquery.
sql_1_2_xfxs = """(select
r.region_name,
case when instr(r.region_code,'0000000000')>0 then '省级'
when instr(r.region_code,'00000000')>0 then '市级'
when instr(r.region_code,'000000')>0 then '县级'
end bmjb,
r.region_code,
t.xfxs,
count(distinct(t.xfjbh)) as zs,
count(distinct(case
when (t.sjslsj is not null or t.shijslsj is not null or t.xjslsj is not null)
then t.xfjbh
end)) as ysl,
count(distinct(case
when least(ifnull(t.sjslsj,SYSDATE()),ifnull(t.shijslsj,SYSDATE()),ifnull(t.xjslsj,SYSDATE())) <= DATE_ADD( t.djsj, INTERVAL 15 DAY )
and least(ifnull(t.sjslsj,SYSDATE()),ifnull(t.shijslsj,SYSDATE()),ifnull(t.xjslsj,SYSDATE())) <> SYSDATE()
then t.xfjbh
end)) as jssl,
count(distinct(case
when least(ifnull(t.sjslsj,SYSDATE()),ifnull(t.shijslsj,SYSDATE()),ifnull(t.xjslsj,SYSDATE())) > DATE_ADD( t.djsj, INTERVAL 15 DAY )
and least(ifnull(t.sjslsj,SYSDATE()),ifnull(t.shijslsj,SYSDATE()),ifnull(t.xjslsj,SYSDATE())) <> SYSDATE()
then t.xfjbh
end)) as cqsl,
count(distinct(case
when (t.sjslsj is null and t.shijslsj is null and t.xjslsj is null)
and SYSDATE() > DATE_ADD( t.djsj, INTERVAL 15 DAY )
then t.xfjbh
end)) as cqwsl
from rpt_region r
left join
(select
distinct x.xfjbh,
case when x.xfxsmc='来信' then '来信' when x.xfxsmc='来访' then '来访' else '网信' end xfxs,
substr(IFNULL(IFNULL(e.qxjgdm,y.zjbljgdm),x.wtsddm), 1, 4) as wtsddm,
x.sjslsj,
x.shijslsj,
x.xjslsj,
x.ddzrdwsj,
x.djsj
from rpt_xfjxx x
LEFT JOIN xf_dbxx e ON e.xfjbh = x.xfjbh
LEFT JOIN xf_ybinfo y ON x.xfjbh = y.xfjbh
where 1=1
and DATE_ADD( x.djsj, INTERVAL 15 DAY ) >= '{start}'
and DATE_ADD( x.djsj, INTERVAL 15 DAY ) <= '{end}'
and (case x.xfxsmc when '来信' then '来信' when '来访' then '来访' else '网信' end)='网信'
and x.check_flag = 0
and x.djjglbdm in ('0000','0100','0200','0300')
and x.xfrs >= 1 and x.xfrs <= 9999
and x.sfnrtj = 1
and x.xfxsdm in ('100','200','300','303','600')
) t
on r.region_code = concat(t.wtsddm,'00000000')
where
r.region_code like '44__00000000%%'
group by r.region_code,t.xfxs)"""
# County-level variant of sql_1_2_all: matches on a 6-digit region prefix
# (substr ... 1, 6 / concat '000000') and restricts to counties inside the
# city whose rpt_region name is '广州市' (Guangzhou).
sql_1_4_all = """(select
r.region_name,
case when instr(r.region_code,'0000000000')>0 then '省级'
when instr(r.region_code,'00000000')>0 then '市级'
when instr(r.region_code,'000000')>0 then '县级'
end bmjb,
r.region_code,
-- 总数
count(distinct(t.xfjbh)) as zs,
-- 已受理
count(distinct(case
when (t.sjslsj is not null or t.shijslsj is not null or t.xjslsj is not null)
then t.xfjbh
end)) as ysl,
-- 及时受理
count(distinct(case
when least(ifnull(t.sjslsj,SYSDATE()),ifnull(t.shijslsj,SYSDATE()),ifnull(t.xjslsj,SYSDATE())) <= DATE_ADD( t.djsj, INTERVAL 15 DAY )
and least(ifnull(t.sjslsj,SYSDATE()),ifnull(t.shijslsj,SYSDATE()),ifnull(t.xjslsj,SYSDATE())) <> SYSDATE()
then t.xfjbh
end)) as jssl,
-- 超期受理
count(distinct(case
when least(ifnull(t.sjslsj,SYSDATE()),ifnull(t.shijslsj,SYSDATE()),ifnull(t.xjslsj,SYSDATE())) > DATE_ADD( t.djsj, INTERVAL 15 DAY )
and least(ifnull(t.sjslsj,SYSDATE()),ifnull(t.shijslsj,SYSDATE()),ifnull(t.xjslsj,SYSDATE())) <> SYSDATE()
then t.xfjbh
end)) as cqsl,
-- 超期未受理
count(distinct(case
when (t.sjslsj is null and t.shijslsj is null and t.xjslsj is null)
and SYSDATE() > DATE_ADD( t.djsj, INTERVAL 15 DAY )
then t.xfjbh
end)) as cqwsl
from rpt_region r
left join
(select
distinct x.xfjbh,
substr(IFNULL(IFNULL(e.qxjgdm,y.zjbljgdm),x.wtsddm), 1, 6) as wtsddm,
x.sjslsj,
x.shijslsj,
x.xjslsj,
x.ddzrdwsj,
x.djsj
from rpt_xfjxx x
LEFT JOIN xf_dbxx e ON e.xfjbh = x.xfjbh
LEFT JOIN xf_ybinfo y ON x.xfjbh = y.xfjbh
where 1=1
and DATE_ADD( x.djsj, INTERVAL 15 DAY ) >= '{start}'
and DATE_ADD( x.djsj, INTERVAL 15 DAY ) <= '{end}'
and x.check_flag = 0
and x.djjglbdm in ('0000','0100','0200','0300')
and x.xfrs >= 1 and x.xfrs <= 9999
and x.sfnrtj = 1
and x.xfxsdm in ('100','200','300','303','600')
) t
on r.region_code = concat(t.wtsddm,'000000')
where
r.region_code like '44____000000%%'
and r.region_code<>'440000000000'
and substr(r.region_code,1,4) =(select substr(region_code,1,4) from rpt_region where region_name='广州市')
group by r.region_code)"""
# County-level (Guangzhou) variant of sql_1_2_xfxs: grouped by submission
# channel (t.xfxs) and restricted to the online channel ('网信').
sql_1_4_xfxs = """(select
r.region_name,
case when instr(r.region_code,'0000000000')>0 then '省级'
when instr(r.region_code,'00000000')>0 then '市级'
when instr(r.region_code,'000000')>0 then '县级'
end bmjb,
r.region_code,
t.xfxs,
-- 总数
count(distinct(t.xfjbh)) as zs,
-- 已受理
count(distinct(case
when (t.sjslsj is not null or t.shijslsj is not null or t.xjslsj is not null)
then t.xfjbh
end)) as ysl,
-- 及时受理
count(distinct(case
when least(ifnull(t.sjslsj,SYSDATE()),ifnull(t.shijslsj,SYSDATE()),ifnull(t.xjslsj,SYSDATE())) <= DATE_ADD( t.djsj, INTERVAL 15 DAY )
and least(ifnull(t.sjslsj,SYSDATE()),ifnull(t.shijslsj,SYSDATE()),ifnull(t.xjslsj,SYSDATE())) <> SYSDATE()
then t.xfjbh
end)) as jssl,
-- 超期受理
count(distinct(case
when least(ifnull(t.sjslsj,SYSDATE()),ifnull(t.shijslsj,SYSDATE()),ifnull(t.xjslsj,SYSDATE())) > DATE_ADD( t.djsj, INTERVAL 15 DAY )
and least(ifnull(t.sjslsj,SYSDATE()),ifnull(t.shijslsj,SYSDATE()),ifnull(t.xjslsj,SYSDATE())) <> SYSDATE()
then t.xfjbh
end)) as cqsl,
-- 超期未受理
count(distinct(case
when (t.sjslsj is null and t.shijslsj is null and t.xjslsj is null)
and SYSDATE() > DATE_ADD( t.djsj, INTERVAL 15 DAY )
then t.xfjbh
end)) as cqwsl
from rpt_region r
left join
(select
distinct x.xfjbh,
case when x.xfxsmc='来信' then '来信' when x.xfxsmc='来访' then '来访' else '网信' end xfxs,
substr(IFNULL(IFNULL(e.qxjgdm,y.zjbljgdm),x.wtsddm), 1, 6) as wtsddm,
x.sjslsj,
x.shijslsj,
x.xjslsj,
x.ddzrdwsj,
x.djsj
from rpt_xfjxx x
LEFT JOIN xf_dbxx e ON e.xfjbh = x.xfjbh
LEFT JOIN xf_ybinfo y ON x.xfjbh = y.xfjbh
where 1=1
and DATE_ADD( x.djsj, INTERVAL 15 DAY ) >= '{start}'
and DATE_ADD( x.djsj, INTERVAL 15 DAY ) <= '{end}'
and (case x.xfxsmc when '来信' then '来信' when '来访' then '来访' else '网信' end)='网信'
and x.check_flag = 0
and x.djjglbdm in ('0000','0100','0200','0300')
and x.xfrs >= 1 and x.xfrs <= 9999
and x.sfnrtj = 1
and x.xfxsdm in ('100','200','300','303','600')
) t
on r.region_code = concat(t.wtsddm,'000000')
where
r.region_code like '44____000000%%'
and r.region_code<>'440000000000'
and substr(r.region_code,1,4) =(select substr(region_code,1,4) from rpt_region where region_name='广州市')
group by r.region_code,t.xfxs)"""
# Per-organization variant of sql_1_2_all: joins xf_org (is_petition=1) so the
# city-level counts are broken down by company_name as well as region_code.
sql_1_5_all = """(select
r.region_name,
case when instr(r.region_code,'0000000000')>0 then '省级'
when instr(r.region_code,'00000000')>0 then '市级'
when instr(r.region_code,'000000')>0 then '县级'
end bmjb,
r.region_code,
r.company_name,
-- 总数
count(distinct(t.xfjbh)) as zs,
-- 已受理
count(distinct(case
when (t.sjslsj is not null or t.shijslsj is not null or t.xjslsj is not null)
then t.xfjbh
end)) as ysl,
-- 及时受理
count(distinct(case
when least(ifnull(t.sjslsj,SYSDATE()),ifnull(t.shijslsj,SYSDATE()),ifnull(t.xjslsj,SYSDATE())) <= DATE_ADD( t.djsj, INTERVAL 15 DAY )
and least(ifnull(t.sjslsj,SYSDATE()),ifnull(t.shijslsj,SYSDATE()),ifnull(t.xjslsj,SYSDATE())) <> SYSDATE()
then t.xfjbh
end)) as jssl,
-- 超期受理
count(distinct(case
when least(ifnull(t.sjslsj,SYSDATE()),ifnull(t.shijslsj,SYSDATE()),ifnull(t.xjslsj,SYSDATE())) > DATE_ADD( t.djsj, INTERVAL 15 DAY )
and least(ifnull(t.sjslsj,SYSDATE()),ifnull(t.shijslsj,SYSDATE()),ifnull(t.xjslsj,SYSDATE())) <> SYSDATE()
then t.xfjbh
end)) as cqsl,
-- 超期未受理
count(distinct(case
when (t.sjslsj is null and t.shijslsj is null and t.xjslsj is null)
and SYSDATE() > DATE_ADD( t.djsj, INTERVAL 15 DAY )
then t.xfjbh
end)) as cqwsl
from (select company_name,org_code,b.region_code,b.region_name from xf_org a,rpt_region b where a.region_code=b.region_code and a.is_petition=1) r
left join
(select
distinct x.xfjbh,
substr(IFNULL(IFNULL(e.qxjgdm,y.zjbljgdm),x.wtsddm), 1, 4) as wtsddm,
x.sjslsj,
x.shijslsj,
x.xjslsj,
x.ddzrdwsj,
x.djsj
from rpt_xfjxx x
LEFT JOIN xf_dbxx e ON e.xfjbh = x.xfjbh
LEFT JOIN xf_ybinfo y ON x.xfjbh = y.xfjbh
where 1=1
and DATE_ADD( x.djsj, INTERVAL 15 DAY ) >= '{start}'
and DATE_ADD( x.djsj, INTERVAL 15 DAY ) <= '{end}'
and x.check_flag = 0
and x.djjglbdm in ('0000','0100','0200','0300')
and x.xfrs >= 1 and x.xfrs <= 9999
and x.sfnrtj = 1
and x.xfxsdm in ('100','200','300','303','600')
) t
on r.region_code = concat(t.wtsddm,'00000000')
where r.region_code like '44__00000000%%'
group by r.region_code,r.company_name)"""
# Per-organization, per-channel variant of sql_1_5_all: additionally grouped
# by submission channel (t.xfxs), restricted to the online channel ('网信').
sql_1_5_xfxs = """(select
r.region_name,
case when instr(r.region_code,'0000000000')>0 then '省级'
when instr(r.region_code,'00000000')>0 then '市级'
when instr(r.region_code,'000000')>0 then '县级'
end bmjb,
r.region_code,
r.company_name,
t.xfxs,
-- 总数
count(distinct(t.xfjbh)) as zs,
-- 已受理
count(distinct(case
when (t.sjslsj is not null or t.shijslsj is not null or t.xjslsj is not null)
then t.xfjbh
end)) as ysl,
-- 及时受理
count(distinct(case
when least(ifnull(t.sjslsj,SYSDATE()),ifnull(t.shijslsj,SYSDATE()),ifnull(t.xjslsj,SYSDATE())) <= DATE_ADD( t.djsj, INTERVAL 15 DAY )
and least(ifnull(t.sjslsj,SYSDATE()),ifnull(t.shijslsj,SYSDATE()),ifnull(t.xjslsj,SYSDATE())) <> SYSDATE()
then t.xfjbh
end)) as jssl,
-- 超期受理
count(distinct(case
when least(ifnull(t.sjslsj,SYSDATE()),ifnull(t.shijslsj,SYSDATE()),ifnull(t.xjslsj,SYSDATE())) > DATE_ADD( t.djsj, INTERVAL 15 DAY )
and least(ifnull(t.sjslsj,SYSDATE()),ifnull(t.shijslsj,SYSDATE()),ifnull(t.xjslsj,SYSDATE())) <> SYSDATE()
then t.xfjbh
end)) as cqsl,
-- 超期未受理
count(distinct(case
when (t.sjslsj is null and t.shijslsj is null and t.xjslsj is null)
and SYSDATE() > DATE_ADD( t.djsj, INTERVAL 15 DAY )
then t.xfjbh
end)) as cqwsl
from (select company_name,org_code,b.region_code,b.region_name from xf_org a,rpt_region b where a.region_code=b.region_code and a.is_petition=1) r
left join
(select
distinct x.xfjbh,
case when x.xfxsmc='来信' then '来信' when x.xfxsmc='来访' then '来访' else '网信' end xfxs,
substr(IFNULL(IFNULL(e.qxjgdm,y.zjbljgdm),x.wtsddm), 1, 4) as wtsddm,
x.sjslsj,
x.shijslsj,
x.xjslsj,
x.ddzrdwsj,
x.djsj
from rpt_xfjxx x
LEFT JOIN xf_dbxx e ON e.xfjbh = x.xfjbh
LEFT JOIN xf_ybinfo y ON x.xfjbh = y.xfjbh
where 1=1
and DATE_ADD( x.djsj, INTERVAL 15 DAY ) >= '{start}'
and DATE_ADD( x.djsj, INTERVAL 15 DAY ) <= '{end}'
and (case x.xfxsmc when '来信' then '来信' when '来访' then '来访' else '网信' end)='网信'
and x.check_flag = 0
and x.djjglbdm in ('0000','0100','0200','0300')
and x.xfrs >= 1 and x.xfrs <= 9999
and x.sfnrtj = 1
and x.xfxsdm in ('100','200','300','303','600')
) t
on r.region_code = concat(t.wtsddm,'00000000')
where r.region_code like '44__00000000%%'
group by r.region_code,r.company_name,t.xfxs)"""
# County-level per-organization variant: like sql_1_5_all but matching on a
# 6-digit region prefix and restricted to counties inside Guangzhou ('广州市').
sql_1_6_all = """(select
r.region_name,
case when instr(r.region_code,'0000000000')>0 then '省级'
when instr(r.region_code,'00000000')>0 then '市级'
when instr(r.region_code,'000000')>0 then '县级'
end bmjb,
r.region_code,
r.company_name,
-- 总数
count(distinct(t.xfjbh)) as zs,
-- 已受理
count(distinct(case
when (t.sjslsj is not null or t.shijslsj is not null or t.xjslsj is not null)
then t.xfjbh
end)) as ysl,
-- 及时受理
count(distinct(case
when least(ifnull(t.sjslsj,SYSDATE()),ifnull(t.shijslsj,SYSDATE()),ifnull(t.xjslsj,SYSDATE())) <= DATE_ADD( t.djsj, INTERVAL 15 DAY )
and least(ifnull(t.sjslsj,SYSDATE()),ifnull(t.shijslsj,SYSDATE()),ifnull(t.xjslsj,SYSDATE())) <> SYSDATE()
then t.xfjbh
end)) as jssl,
-- 超期受理
count(distinct(case
when least(ifnull(t.sjslsj,SYSDATE()),ifnull(t.shijslsj,SYSDATE()),ifnull(t.xjslsj,SYSDATE())) > DATE_ADD( t.djsj, INTERVAL 15 DAY )
and least(ifnull(t.sjslsj,SYSDATE()),ifnull(t.shijslsj,SYSDATE()),ifnull(t.xjslsj,SYSDATE())) <> SYSDATE()
then t.xfjbh
end)) as cqsl,
-- 超期未受理
count(distinct(case
when (t.sjslsj is null and t.shijslsj is null and t.xjslsj is null)
and SYSDATE() > DATE_ADD( t.djsj, INTERVAL 15 DAY )
then t.xfjbh
end)) as cqwsl
from (select company_name,org_code,b.region_code,b.region_name from xf_org a,rpt_region b where a.region_code=b.region_code and a.is_petition=1) r
left join
(select
distinct x.xfjbh,
substr(IFNULL(IFNULL(e.qxjgdm,y.zjbljgdm),x.wtsddm), 1, 6) as wtsddm,
x.sjslsj,
x.shijslsj,
x.xjslsj,
x.ddzrdwsj,
x.djsj
from rpt_xfjxx x
LEFT JOIN xf_dbxx e ON e.xfjbh = x.xfjbh
LEFT JOIN xf_ybinfo y ON x.xfjbh = y.xfjbh
where 1=1
and DATE_ADD( x.djsj, INTERVAL 15 DAY ) >= '{start}'
and DATE_ADD( x.djsj, INTERVAL 15 DAY ) <= '{end}'
and x.check_flag = 0
and x.djjglbdm in ('0000','0100','0200','0300')
and x.xfrs >= 1 and x.xfrs <= 9999
and x.sfnrtj = 1
and x.xfxsdm in ('100','200','300','303','600')
) t
on r.region_code = concat(t.wtsddm,'000000')
where
r.region_code like '44____000000%%'
and r.region_code<>'440000000000'
and substr(r.region_code,1,4) =(select substr(region_code,1,4) from rpt_region where region_name='广州市')
group by r.region_code,r.company_name)"""
# County-level (Guangzhou) per-organization, per-channel variant: grouped by
# submission channel (t.xfxs) and restricted to the online channel ('网信').
sql_1_6_xfxs = """(select
r.region_name,
case when instr(r.region_code,'0000000000')>0 then '省级'
when instr(r.region_code,'00000000')>0 then '市级'
when instr(r.region_code,'000000')>0 then '县级'
end bmjb,
r.region_code,
r.company_name,
t.xfxs,
-- 总数
count(distinct(t.xfjbh)) as zs,
-- 已受理
count(distinct(case
when (t.sjslsj is not null or t.shijslsj is not null or t.xjslsj is not null)
then t.xfjbh
end)) as ysl,
-- 及时受理
count(distinct(case
when least(ifnull(t.sjslsj,SYSDATE()),ifnull(t.shijslsj,SYSDATE()),ifnull(t.xjslsj,SYSDATE())) <= DATE_ADD( t.djsj, INTERVAL 15 DAY )
and least(ifnull(t.sjslsj,SYSDATE()),ifnull(t.shijslsj,SYSDATE()),ifnull(t.xjslsj,SYSDATE())) <> SYSDATE()
then t.xfjbh
end)) as jssl,
-- 超期受理
count(distinct(case
when least(ifnull(t.sjslsj,SYSDATE()),ifnull(t.shijslsj,SYSDATE()),ifnull(t.xjslsj,SYSDATE())) > DATE_ADD( t.djsj, INTERVAL 15 DAY )
and least(ifnull(t.sjslsj,SYSDATE()),ifnull(t.shijslsj,SYSDATE()),ifnull(t.xjslsj,SYSDATE())) <> SYSDATE()
then t.xfjbh
end)) as cqsl,
-- 超期未受理
count(distinct(case
when (t.sjslsj is null and t.shijslsj is null and t.xjslsj is null)
and SYSDATE() > DATE_ADD( t.djsj, INTERVAL 15 DAY )
then t.xfjbh
end)) as cqwsl
from (select company_name,org_code,b.region_code,b.region_name from xf_org a,rpt_region b where a.region_code=b.region_code and a.is_petition=1) r
left join
(select
distinct x.xfjbh,
case when x.xfxsmc='来信' then '来信' when x.xfxsmc='来访' then '来访' else '网信' end xfxs,
substr(IFNULL(IFNULL(e.qxjgdm,y.zjbljgdm),x.wtsddm), 1, 6) as wtsddm,
x.sjslsj,
x.shijslsj,
x.xjslsj,
x.ddzrdwsj,
x.djsj
from rpt_xfjxx x
LEFT JOIN xf_dbxx e ON e.xfjbh = x.xfjbh
LEFT JOIN xf_ybinfo y ON x.xfjbh = y.xfjbh
where 1=1
and DATE_ADD( x.djsj, INTERVAL 15 DAY ) >= '{start}'
and DATE_ADD( x.djsj, INTERVAL 15 DAY ) <= '{end}'
and (case x.xfxsmc when '来信' then '来信' when '来访' then '来访' else '网信' end)='网信'
and x.check_flag = 0
and x.djjglbdm in ('0000','0100','0200','0300')
and x.xfrs >= 1 and x.xfrs <= 9999
and x.sfnrtj = 1
and x.xfxsdm in ('100','200','300','303','600')
) t
on r.region_code = concat(t.wtsddm,'000000')
where
r.region_code like '44____000000%%'
and r.region_code<>'440000000000'
and substr(r.region_code,1,4) =(select substr(region_code,1,4) from rpt_region where region_name='广州市')
group by r.region_code,r.company_name,t.xfxs)"""
sql_2_2_all = """(select
r.region_name,
case when instr(r.region_code,'0000000000')>0 then '省级'
when instr(r.region_code,'00000000')>0 then '市级'
when instr(r.region_code,'000000')>0 then '县级'
end bmjb,
r.region_code,
#总数
count(distinct(t.xfjbh)) as zs,
#已受理
count(distinct(case
when t.zrdwslsj is not null then t.xfjbh
end)) as ysl,
#及时受理
count(distinct(case
when (t.zrdwslsj is not null and t.zrdwslsj <= DATE_ADD( t.djsj, INTERVAL 30 DAY ))
then t.xfjbh
end)) as jssl,
#超期受理
count(distinct(case
when (t.zrdwslsj is not null and t.zrdwslsj > DATE_ADD( t.djsj, INTERVAL 30 DAY ))
then t.xfjbh
end)) as cqsl,
#超期未受理
count(distinct(case
when t.zrdwslsj is null and SYSDATE() > DATE_ADD( t.djsj, INTERVAL 30 DAY )
then t.xfjbh
end)) as cqwsl
from rpt_region r
left join
(select
distinct(x.xfjbh) as xfjbh,
substr(IFNULL(IFNULL(e.qxjgdm,y.zjbljgdm),x.wtsddm), 1, 4) as wtsddm,
x.zrdwslsj,
x.djsj
from rpt_xfjxx x
left JOIN xf_dbxx e ON e.xfjbh = x.xfjbh
LEFT JOIN xf_ybinfo y ON x.xfjbh = y.xfjbh
where 1=1
and x.check_flag = 0
and DATE_ADD( x.djsj, INTERVAL 30 DAY ) >= '{start}'
and DATE_ADD( x.djsj, INTERVAL 30 DAY ) <= '{end}'
and x.djjglbdm in ('0000','0100','0200','0300','1000','1100','1200','1300','1400')
and x.xfrs >= 1 and x.xfrs <= 9999
and x.qxsfzrdw = 1
and x.sfnrtj = 1
and x.xfxsdm in ('100','200','300','303','600')
) t
on r.region_code = concat(t.wtsddm,'00000000')
where
r.region_code like '44__00000000%%'
group by r.region_code)"""
sql_2_2_xfxs = """(select
r.region_name,
case when instr(r.region_code,'0000000000')>0 then '省级'
when instr(r.region_code,'00000000')>0 then '市级'
when instr(r.region_code,'000000')>0 then '县级'
end bmjb,
r.region_code,
xfxs,
#总数
count(distinct(t.xfjbh)) as zs,
#已受理
count(distinct(case
when t.zrdwslsj is not null then t.xfjbh
end)) as ysl,
#及时受理
count(distinct(case
when (t.zrdwslsj is not null and t.zrdwslsj <= DATE_ADD( t.djsj, INTERVAL 30 DAY ))
then t.xfjbh
end)) as jssl,
#超期受理
count(distinct(case
when (t.zrdwslsj is not null and t.zrdwslsj > DATE_ADD( t.djsj, INTERVAL 30 DAY ))
then t.xfjbh
end)) as cqsl,
#超期未受理
count(distinct(case
when t.zrdwslsj is null and SYSDATE() > DATE_ADD( t.djsj, INTERVAL 30 DAY )
then t.xfjbh
end)) as cqwsl
from rpt_region r
left join
(select
distinct(x.xfjbh) as xfjbh,
case when x.xfxsmc='来信' then '来信' when x.xfxsmc='来访' then '来访' else '网信' end xfxs,
substr(IFNULL(IFNULL(e.qxjgdm,y.zjbljgdm),x.wtsddm), 1, 4) as wtsddm,
x.zrdwslsj,
x.djsj
from rpt_xfjxx x
left JOIN xf_dbxx e ON e.xfjbh = x.xfjbh
LEFT JOIN xf_ybinfo y ON x.xfjbh = y.xfjbh
where 1=1
and x.check_flag = 0
and DATE_ADD( x.djsj, INTERVAL 30 DAY ) >= '{start}'
and DATE_ADD( x.djsj, INTERVAL 30 DAY ) <= '{end}'
and (case x.xfxsmc when '来信' then '来信' when '来访' then '来访' else '网信' end)='网信'
and x.djjglbdm in ('0000','0100','0200','0300','1000','1100','1200','1300','1400')
and x.xfrs >= 1 and x.xfrs <= 9999
and x.qxsfzrdw = 1
and x.sfnrtj = 1
and x.xfxsdm in ('100','200','300','303','600')
) t
on r.region_code = concat(t.wtsddm,'00000000')
where
r.region_code like '44__00000000%%'
group by r.region_code,xfxs)"""
sql_2_4_all = """(select
r.region_name,
case when instr(r.region_code,'0000000000')>0 then '省级'
when instr(r.region_code,'00000000')>0 then '市级'
when instr(r.region_code,'000000')>0 then '县级'
end bmjb,
r.region_code,
#总数
count(distinct(t.xfjbh)) as zs,
#已受理
count(distinct(case
when t.zrdwslsj is not null then t.xfjbh
end)) as ysl,
#及时受理
count(distinct(case
when (t.zrdwslsj is not null and t.zrdwslsj <= DATE_ADD( t.djsj, INTERVAL 30 DAY ))
then t.xfjbh
end)) as jssl,
#超期受理
count(distinct(case
when (t.zrdwslsj is not null and t.zrdwslsj > DATE_ADD( t.djsj, INTERVAL 30 DAY ))
then t.xfjbh
end)) as cqsl,
#超期未受理
count(distinct(case
when t.zrdwslsj is null and SYSDATE() > DATE_ADD( t.djsj, INTERVAL 30 DAY )
then t.xfjbh
end)) as cqwsl
from rpt_region r
left join
(select
distinct(x.xfjbh) as xfjbh,
substr(IFNULL(IFNULL(e.qxjgdm,y.zjbljgdm),x.wtsddm), 1, 6) as wtsddm,
x.zrdwslsj,
x.djsj
from rpt_xfjxx x
left JOIN xf_dbxx e ON e.xfjbh = x.xfjbh
LEFT JOIN xf_ybinfo y ON x.xfjbh = y.xfjbh
where 1=1
and x.check_flag = 0
and DATE_ADD( x.djsj, INTERVAL 30 DAY ) >= '{start}'
and DATE_ADD( x.djsj, INTERVAL 30 DAY ) <= '{end}'
and x.djjglbdm in ('0000','0100','0200','0300','1000','1100','1200','1300','1400')
and x.xfrs >= 1 and x.xfrs <= 9999
and x.qxsfzrdw = 1
and x.sfnrtj = 1
and x.xfxsdm in ('100','200','300','303','600')
) t
on r.region_code = concat(t.wtsddm,'000000')
where
r.region_code like '44____000000%%'
and r.region_code<>'440000000000'
and substr(r.region_code,1,4) =(select substr(region_code,1,4) from rpt_region where region_name='广州市')
group by r.region_code)"""
sql_2_4_xfxs = """(select
r.region_name,
case when instr(r.region_code,'0000000000')>0 then '省级'
when instr(r.region_code,'00000000')>0 then '市级'
when instr(r.region_code,'000000')>0 then '县级'
end bmjb,
r.region_code,
xfxs,
#总数
count(distinct(t.xfjbh)) as zs,
#已受理
count(distinct(case
when t.zrdwslsj is not null then t.xfjbh
end)) as ysl,
#及时受理
count(distinct(case
when (t.zrdwslsj is not null and t.zrdwslsj <= DATE_ADD( t.djsj, INTERVAL 30 DAY ))
then t.xfjbh
end)) as jssl,
#超期受理
count(distinct(case
when (t.zrdwslsj is not null and t.zrdwslsj > DATE_ADD( t.djsj, INTERVAL 30 DAY ))
then t.xfjbh
end)) as cqsl,
#超期未受理
count(distinct(case
when t.zrdwslsj is null and SYSDATE() > DATE_ADD( t.djsj, INTERVAL 30 DAY )
then t.xfjbh
end)) as cqwsl
from rpt_region r
left join
(select
distinct(x.xfjbh) as xfjbh,
case when x.xfxsmc='来信' then '来信' when x.xfxsmc='来访' then '来访' else '网信' end xfxs,
substr(IFNULL(IFNULL(e.qxjgdm,y.zjbljgdm),x.wtsddm), 1, 6) as wtsddm,
x.zrdwslsj,
x.djsj
from rpt_xfjxx x
left JOIN xf_dbxx e ON e.xfjbh = x.xfjbh
LEFT JOIN xf_ybinfo y ON x.xfjbh = y.xfjbh
where 1=1
and x.check_flag = 0
and DATE_ADD( x.djsj, INTERVAL 30 DAY ) >= '{start}'
and DATE_ADD( x.djsj, INTERVAL 30 DAY ) <= '{end}'
and (case x.xfxsmc when '来信' then '来信' when '来访' then '来访' else '网信' end)='网信'
and x.djjglbdm in ('0000','0100','0200','0300','1000','1100','1200','1300','1400')
and x.xfrs >= 1 and x.xfrs <= 9999
and x.qxsfzrdw = 1
and x.sfnrtj = 1
and x.xfxsdm in ('100','200','300','303','600')
) t
on r.region_code = concat(t.wtsddm,'000000')
where
r.region_code like '44____000000%%'
and r.region_code<>'440000000000'
and substr(r.region_code,1,4) =(select substr(region_code,1,4) from rpt_region where region_name='广州市')
group by r.region_code,xfxs)"""
sql_2_5_all = """(select
r.region_name,
r.company_name,
r.org_code,
#总数
count(distinct(t.xfjbh)) as zs,
#已受理
count(distinct(case
when t.zrdwslsj is not null then t.xfjbh
end)) as ysl,
#及时受理
count(distinct(case
when (t.zrdwslsj is not null and t.zrdwslsj <= DATE_ADD( t.djsj, INTERVAL 30 DAY ))
then t.xfjbh
end)) as jssl,
#超期受理
count(distinct(case
when (t.zrdwslsj is not null and t.zrdwslsj > DATE_ADD( t.djsj, INTERVAL 30 DAY ))
then t.xfjbh
end)) as cqsl,
#超期未受理
count(distinct(case
when t.zrdwslsj is null and SYSDATE() > DATE_ADD( t.djsj, INTERVAL 30 DAY )
then t.xfjbh
end)) as cqwsl
from (select company_name,org_code,b.region_code,b.region_name from xf_org a,xf_region b where a.region_code=b.region_code and a.is_petition=0) r
left join
(select
distinct(x.xfjbh) as xfjbh,
x.wtsddm,
x.zrdwslsj,
x.djsj,
x.zjqxdm,
x.djjgdm,
b.qxmcdm
from rpt_xfjxx x
left join rpt_blfsxx b on x.xfjbh = b.xfjbh
where 1=1
and DATE_ADD( x.djsj, INTERVAL 30 DAY ) >= '{start}'
and DATE_ADD( x.djsj, INTERVAL 30 DAY ) <= '{end}'
and x.check_flag = 0
and x.djjglbdm in ('0000','0100','0200','0300','1100','1200','1300','1400')
and x.xfrs >= 1 and x.xfrs <= 9999
and x.qxsfzrdw = 1
and x.sfnrtj = 1
and x.xfxsdm in ('100','200','300','303','600')
) t
on (t.qxmcdm = r.org_code or t.djjgdm = r.org_code)
where r.region_name={Cqh}
group by r.region_name,r.org_code)"""
sql_2_5_xfxs = """(select
r.region_name,
r.company_name,
r.org_code,
xfxs,
#总数
count(distinct(t.xfjbh)) as zs,
#已受理
count(distinct(case
when t.zrdwslsj is not null then t.xfjbh
end)) as ysl,
#及时受理
count(distinct(case
when (t.zrdwslsj is not null and t.zrdwslsj <= DATE_ADD( t.djsj, INTERVAL 30 DAY ))
then t.xfjbh
end)) as jssl,
#超期受理
count(distinct(case
when (t.zrdwslsj is not null and t.zrdwslsj > DATE_ADD( t.djsj, INTERVAL 30 DAY ))
then t.xfjbh
end)) as cqsl,
#超期未受理
count(distinct(case
when t.zrdwslsj is null and SYSDATE() > DATE_ADD( t.djsj, INTERVAL 30 DAY )
then t.xfjbh
end)) as cqwsl
from (select company_name,org_code,b.region_code,b.region_name from xf_org a,xf_region b where a.region_code=b.region_code and a.is_petition=0) r
left join
(select
distinct(x.xfjbh) as xfjbh,
case when x.xfxsmc='来信' then '来信' when x.xfxsmc='来访' then '来访' else '网信' end xfxs,
x.wtsddm,
x.zrdwslsj,
x.djsj,
x.zjqxdm,
x.djjgdm,
b.qxmcdm
from rpt_xfjxx x
left join rpt_blfsxx b on x.xfjbh = b.xfjbh
where 1=1
and DATE_ADD( x.djsj, INTERVAL 30 DAY ) >= '{start}'
and DATE_ADD( x.djsj, INTERVAL 30 DAY ) <= '{end}'
and (case x.xfxsmc when '来信' then '来信' when '来访' then '来访' else '网信' end)='网信'
and x.check_flag = 0
and x.djjglbdm in ('0000','0100','0200','0300','1100','1200','1300','1400')
and x.xfrs >= 1 and x.xfrs <= 9999
and x.qxsfzrdw = 1
and x.sfnrtj = 1
and x.xfxsdm in ('100','200','300','303','600')
) t
on (t.qxmcdm = r.org_code or t.djjgdm = r.org_code)
where r.region_name={Cqh}
group by r.region_name,r.org_code,xfxs)"""
| 38.637037
| 148
| 0.590267
| 4,857
| 31,296
| 3.718139
| 0.02553
| 0.066449
| 0.058475
| 0.06512
| 0.999336
| 0.999114
| 0.998505
| 0.998283
| 0.997397
| 0.996401
| 0
| 0.065591
| 0.275115
| 31,296
| 810
| 149
| 38.637037
| 0.730451
| 0
| 0
| 0.979925
| 0
| 0.120452
| 0.989967
| 0.239288
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
1a27158f971e77ad1bdc1d3b9c3761f0dfbbcb26
| 105
|
py
|
Python
|
AAATOMD/__init__.py
|
tomduckering/livedjscript
|
f84156d782e93a6d53f5083d8211f7eacee541d0
|
[
"MIT"
] | 1
|
2016-12-04T17:23:51.000Z
|
2016-12-04T17:23:51.000Z
|
AAATOMD/__init__.py
|
tomduckering/livedjscript
|
f84156d782e93a6d53f5083d8211f7eacee541d0
|
[
"MIT"
] | null | null | null |
AAATOMD/__init__.py
|
tomduckering/livedjscript
|
f84156d782e93a6d53f5083d8211f7eacee541d0
|
[
"MIT"
] | null | null | null |
import Live
from AAATOMD import AAATOMD
def create_instance(c_instance):
return AAATOMD(c_instance)
| 17.5
| 32
| 0.809524
| 15
| 105
| 5.466667
| 0.6
| 0.219512
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 105
| 5
| 33
| 21
| 0.911111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
a7ed95a702374d321acbffed23461b2835ed528b
| 4,297
|
py
|
Python
|
blender/2.79/scripts/addons/rigify/legacy/rigs/pitchipoy/limbs/ui.py
|
uzairakbar/bpy2.79
|
3a3e0004ac6783c4e4b89d939e4432de99026a85
|
[
"MIT"
] | 3
|
2019-09-16T10:29:19.000Z
|
2022-02-11T14:43:18.000Z
|
engine/2.80/scripts/addons/rigify/legacy/rigs/pitchipoy/limbs/ui.py
|
byteinc/Phasor
|
f7d23a489c2b4bcc3c1961ac955926484ff8b8d9
|
[
"Unlicense"
] | null | null | null |
engine/2.80/scripts/addons/rigify/legacy/rigs/pitchipoy/limbs/ui.py
|
byteinc/Phasor
|
f7d23a489c2b4bcc3c1961ac955926484ff8b8d9
|
[
"Unlicense"
] | 4
|
2020-02-19T20:02:26.000Z
|
2022-02-11T18:47:56.000Z
|
script_arm = """
controls = [%s]
tweaks = [%s]
ik_ctrl = [%s]
fk_ctrl = '%s'
parent = '%s'
# IK/FK Switch on all Control Bones
if is_selected( controls ):
layout.prop( pose_bones[ parent ], '["%s"]', slider = True )
props = layout.operator("pose.rigify_arm_fk2ik_" + rig_id, text="Snap FK->IK (" + fk_ctrl + ")")
props.uarm_fk = controls[1]
props.farm_fk = controls[2]
props.hand_fk = controls[3]
props.uarm_ik = controls[0]
props.farm_ik = ik_ctrl[1]
props.hand_ik = controls[4]
props = layout.operator("pose.rigify_arm_ik2fk_" + rig_id, text="Snap IK->FK (" + fk_ctrl + ")")
props.uarm_fk = controls[1]
props.farm_fk = controls[2]
props.hand_fk = controls[3]
props.uarm_ik = controls[0]
props.farm_ik = ik_ctrl[1]
props.hand_ik = controls[4]
props.pole = ""
# BBone rubber hose on each Respective Tweak
for t in tweaks:
if is_selected( t ):
layout.prop( pose_bones[ t ], '["%s"]', slider = True )
# IK Stretch on IK Control bone
if is_selected( ik_ctrl ):
layout.prop( pose_bones[ parent ], '["%s"]', slider = True )
# FK limb follow
if is_selected( fk_ctrl ):
layout.prop( pose_bones[ parent ], '["%s"]', slider = True )
"""
script_leg = """
controls = [%s]
tweaks = [%s]
ik_ctrl = [%s]
fk_ctrl = '%s'
parent = '%s'
# IK/FK Switch on all Control Bones
if is_selected( controls ):
layout.prop( pose_bones[ parent ], '["%s"]', slider = True )
props = layout.operator("pose.rigify_leg_fk2ik_" + rig_id, text="Snap FK->IK (" + fk_ctrl + ")")
props.thigh_fk = controls[1]
props.shin_fk = controls[2]
props.foot_fk = controls[3]
props.mfoot_fk = controls[7]
props.thigh_ik = controls[0]
props.shin_ik = ik_ctrl[1]
props.foot_ik = ik_ctrl[2]
props.mfoot_ik = ik_ctrl[2]
props = layout.operator("pose.rigify_leg_ik2fk_" + rig_id, text="Snap IK->FK (" + fk_ctrl + ")")
props.thigh_fk = controls[1]
props.shin_fk = controls[2]
props.foot_fk = controls[3]
props.mfoot_fk = controls[7]
props.thigh_ik = controls[0]
props.shin_ik = ik_ctrl[1]
props.foot_ik = controls[6]
props.pole = ""
props.footroll = controls[5]
props.mfoot_ik = ik_ctrl[2]
# BBone rubber hose on each Respective Tweak
for t in tweaks:
if is_selected( t ):
layout.prop( pose_bones[ t ], '["%s"]', slider = True )
# IK Stretch on IK Control bone
if is_selected( ik_ctrl ):
layout.prop( pose_bones[ parent ], '["%s"]', slider = True )
# FK limb follow
if is_selected( fk_ctrl ):
layout.prop( pose_bones[ parent ], '["%s"]', slider = True )
"""
def create_script( bones, limb_type=None):
# All ctrls have IK/FK switch
controls = [ bones['ik']['ctrl']['limb'] ] + bones['fk']['ctrl']
controls += bones['ik']['ctrl']['terminal']
controls += [ bones['fk']['mch'] ]
controls_string = ", ".join(["'" + x + "'" for x in controls])
# All tweaks have their own bbone prop
tweaks = bones['tweak']['ctrl'][1:-1]
tweaks_string = ", ".join(["'" + x + "'" for x in tweaks])
# IK ctrl has IK stretch
ik_ctrl = [ bones['ik']['ctrl']['terminal'][-1] ]
ik_ctrl += [ bones['ik']['mch_ik'] ]
ik_ctrl += [ bones['ik']['mch_target'] ]
ik_ctrl_string = ", ".join(["'" + x + "'" for x in ik_ctrl])
if limb_type == 'arm':
return script_arm % (
controls_string,
tweaks_string,
ik_ctrl_string,
bones['fk']['ctrl'][0],
bones['parent'],
'IK/FK',
'rubber_tweak',
'IK_Strertch',
'FK_limb_follow'
)
elif limb_type == 'leg':
return script_leg % (
controls_string,
tweaks_string,
ik_ctrl_string,
bones['fk']['ctrl'][0],
bones['parent'],
'IK/FK',
'rubber_tweak',
'IK_Strertch',
'FK_limb_follow'
)
elif limb_type == 'paw':
return script_leg % (
controls_string,
tweaks_string,
ik_ctrl_string,
bones['fk']['ctrl'][0],
bones['parent'],
'IK/FK',
'rubber_tweak',
'IK_Strertch',
'FK_limb_follow'
)
| 29.840278
| 100
| 0.565278
| 577
| 4,297
| 3.996534
| 0.140381
| 0.059844
| 0.041631
| 0.065915
| 0.838248
| 0.821336
| 0.754553
| 0.754553
| 0.754553
| 0.754553
| 0
| 0.01248
| 0.272748
| 4,297
| 143
| 101
| 30.048951
| 0.72544
| 0.020247
| 0
| 0.778689
| 0
| 0.032787
| 0.678079
| 0.038041
| 0
| 0
| 0
| 0
| 0
| 1
| 0.008197
| false
| 0
| 0
| 0
| 0.032787
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
c501992406c7cde094153808bfad429997a617fe
| 2,629
|
py
|
Python
|
testcode/PHY161autoGrader/alm_hw1check-checkpoint+grader.py
|
mtesseracted/TestBed
|
b96a655ed460b5af236ef0e51c68fc31e9c6f5d4
|
[
"BSD-3-Clause"
] | null | null | null |
testcode/PHY161autoGrader/alm_hw1check-checkpoint+grader.py
|
mtesseracted/TestBed
|
b96a655ed460b5af236ef0e51c68fc31e9c6f5d4
|
[
"BSD-3-Clause"
] | null | null | null |
testcode/PHY161autoGrader/alm_hw1check-checkpoint+grader.py
|
mtesseracted/TestBed
|
b96a655ed460b5af236ef0e51c68fc31e9c6f5d4
|
[
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
# In[66]:
import pandas as pd
import sys
#xdata = np.linspace(0.01,10.,200)
#ydata = xdata**1.15
#print 'Number of arguments:', len(sys.argv), 'arguments.'
#print 'Argument List:', str(sys.argv)
if( len(sys.argv) < 2):
print "No cmd line arg for dummy txt save data\n"
sys.exit()
dumfile = str(sys.argv[1])
#print dumfile
def alm_llsquares(xd, yd):
xm = xd.mean()
ym = yd.mean()
m = np.sum( (xd-xm)*(yd-ym) ) / np.sum( (xd-xm)**2 )
b = ym - m*xm
return (m,b) #dummy return
# In[89]:
mym, myb = alm_llsquares(xdata,ydata)
studm, studb = llsquares(xdata,ydata)
dm = np.abs(mym-studm)
db = np.abs(myb-studb)
gra1 = 0.0
com1 = str("")
if( dm < 1.e-12):
gra1 += 2.5
else :
com1 += " Slope off by "
com1 += "{:.2E}".format(dm)
com1 += "."
if( db < 1.e-12):
gra1 += 2.5
else :
com1 += " Y-int off by "
com1 += "{:.2E}".format(db)
com1 += "."
# In[90]:
#dumfile="dum4.csv"
d1=[{'Grade': gra1, 'Comment': com1}]
pd.DataFrame(data=d1).to_csv(dumfile)
#print(pd.DataFrame(data=d1))
# dum = pd.read_csv(dumfile, skipinitialspace=True)
# print(dum)
# ckey = dum.keys()[1]
# gkey = dum.keys()[2]
# gread = str(dum.get_value(0,gkey))
# cread = str(dum.get_value(0,ckey))
# In[ ]:
# In[ ]:
# coding: utf-8
# In[66]:
import pandas as pd
import sys
#xdata = np.linspace(0.01,10.,200)
#ydata = xdata**1.15
#print 'Number of arguments:', len(sys.argv), 'arguments.'
#print 'Argument List:', str(sys.argv)
if( len(sys.argv) < 2):
print "No cmd line arg for dummy txt save data\n"
sys.exit()
dumfile = str(sys.argv[1])
print ("Dummy file passed: "+dumfile)
def alm_llsquares(xd, yd):
xm = xd.mean()
ym = yd.mean()
m = np.sum( (xd-xm)*(yd-ym) ) / np.sum( (xd-xm)**2 )
b = ym - m*xm
return (m,b) #dummy return
# In[89]:
mym, myb = alm_llsquares(xdata,ydata)
studm, studb = llsquares(xdata,ydata)
dm = np.abs(mym-studm)
db = np.abs(myb-studb)
gra1 = 0.0
com1 = str("")
if( dm < 1.e-12):
gra1 += 2.5
else :
com1 += " Slope off by "
com1 += "{:.2E}".format(dm)
com1 += "."
if( db < 1.e-12):
gra1 += 2.5
else :
com1 += " Y-int off by "
com1 += "{:.2E}".format(db)
com1 += "."
# In[90]:
#dumfile="dum4.csv"
d1=[{'Grade': gra1, 'Comment': com1}]
pd.DataFrame(data=d1).to_csv(dumfile)
#print(pd.DataFrame(data=d1))
# dum = pd.read_csv(dumfile, skipinitialspace=True)
# print(dum)
# ckey = dum.keys()[1]
# gkey = dum.keys()[2]
# gread = str(dum.get_value(0,gkey))
# cread = str(dum.get_value(0,ckey))
# In[ ]:
# In[ ]:
| 16.745223
| 58
| 0.566375
| 429
| 2,629
| 3.44289
| 0.226107
| 0.037915
| 0.027082
| 0.024374
| 0.989844
| 0.989844
| 0.989844
| 0.989844
| 0.989844
| 0.989844
| 0
| 0.053816
| 0.222518
| 2,629
| 156
| 59
| 16.852564
| 0.668787
| 0.330924
| 0
| 0.984615
| 0
| 0
| 0.121866
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.015385
| 0.061538
| null | null | 0.046154
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
c504ff28aa17500a7d362ef0d988bd6122647faf
| 14,098
|
py
|
Python
|
tests/http/testroute.py
|
LowieHuyghe/edmunds-python
|
236d087746cb8802a8854b2706b8d3ff009e9209
|
[
"Apache-2.0"
] | 4
|
2017-09-07T13:39:50.000Z
|
2018-05-31T16:14:50.000Z
|
tests/http/testroute.py
|
LowieHuyghe/edmunds-python
|
236d087746cb8802a8854b2706b8d3ff009e9209
|
[
"Apache-2.0"
] | 103
|
2017-03-19T15:58:21.000Z
|
2018-07-11T20:36:17.000Z
|
tests/http/testroute.py
|
LowieHuyghe/edmunds-python
|
236d087746cb8802a8854b2706b8d3ff009e9209
|
[
"Apache-2.0"
] | 2
|
2017-10-14T15:20:11.000Z
|
2018-04-20T09:55:44.000Z
|
from tests.testcase import TestCase
from edmunds.http.controller import Controller
from edmunds.http.requestmiddleware import RequestMiddleware
from flask import Response
class TestRoute(TestCase):
"""
Test the Request Routing
"""
cache = None
def set_up(self):
"""
Set up the test case
"""
super(TestRoute, self).set_up()
TestRoute.cache = dict()
TestRoute.cache['timeline'] = []
def test_original_routing(self):
"""
Test original routing
"""
rule = '/' + self.rand_str(20)
# Add route
@self.app.route(rule)
def handle_route():
TestRoute.cache['timeline'].append('handle_route')
return ''
# Call route
with self.app.test_client() as c:
c.get(rule)
self.assert_equal(1, len(TestRoute.cache['timeline']))
self.assert_in('handle_route', TestRoute.cache['timeline'])
self.assert_equal(0, TestRoute.cache['timeline'].index('handle_route'))
def test_original_routing_with_parameter(self):
"""
Test original routing with parameter
"""
rule = '/' + self.rand_str(20)
rule_with_param = rule + '/<param>'
param = 'myparam'
# Add route
@self.app.route(rule_with_param)
def handle_route(param=None):
TestRoute.cache['timeline'].append('handle_route')
TestRoute.cache['param'] = param
return ''
# Call route
with self.app.test_client() as c:
c.get(rule + '/' + param)
self.assert_equal(1, len(TestRoute.cache['timeline']))
self.assert_in('handle_route', TestRoute.cache['timeline'])
self.assert_equal(0, TestRoute.cache['timeline'].index('handle_route'))
self.assert_in('param', TestRoute.cache)
self.assert_equal(param, TestRoute.cache['param'])
def test_new_routing(self):
"""
Test new routing
"""
rule = '/' + self.rand_str(20)
# Add route
self.app.route(rule, uses=(MyController, 'get'))
# Call route
with self.app.test_client() as c:
c.get(rule)
self.assert_equal(1, len(TestRoute.cache['timeline']))
self.assert_in('handle_route', TestRoute.cache['timeline'])
self.assert_equal(0, TestRoute.cache['timeline'].index('handle_route'))
def test_new_routing_with_parameter(self):
"""
Test new routing with parameter
"""
rule = '/' + self.rand_str(20)
rule_with_param = rule + '/<param>'
param = 'myparam'
# Add route
self.app.route(rule_with_param, uses=(MyController, 'get_with_param'))
# Call route
with self.app.test_client() as c:
c.get(rule + '/' + param)
self.assert_equal(1, len(TestRoute.cache['timeline']))
self.assert_in('handle_route', TestRoute.cache['timeline'])
self.assert_equal(0, TestRoute.cache['timeline'].index('handle_route'))
self.assert_in('param', TestRoute.cache)
self.assert_equal(param, TestRoute.cache['param'])
def test_initialize(self):
"""
Test initialize
"""
rule = '/' + self.rand_str(20)
# Add route
self.app.route(rule, uses=(MyController, 'get'))
# Call route
with self.app.test_client() as c:
c.get(rule)
self.assert_in('init_params', TestRoute.cache)
self.assert_equal(0, len(TestRoute.cache['init_params']))
def test_initialize_with_parameter(self):
"""
Test initialize with parameter
"""
rule = '/' + self.rand_str(20)
rule_with_param = rule + '/<param>'
param = 'myparam'
# Add route
self.app.route(rule_with_param, uses=(MyController, 'get_with_param'))
# Call route
with self.app.test_client() as c:
c.get(rule + '/' + param)
self.assert_in('init_params', TestRoute.cache)
self.assert_equal(1, len(TestRoute.cache['init_params']))
self.assert_in('param', TestRoute.cache['init_params'])
self.assert_equal(param, TestRoute.cache['init_params']['param'])
def test_faulty_routing(self):
"""
Test faulty routing
"""
rule = '/' + self.rand_str(20)
# Add route with both uses and handler
with self.assert_raises_regexp(TypeError, "'Route' object is not callable"):
@self.app.route(rule, uses=(MyController, 'get'))
def handle_route():
pass
def test_middleware_no_middleware(self):
"""
Test route with no middleware
"""
rule = '/' + self.rand_str(20)
# Add route
@self.app.route(rule)
def handle_route():
TestRoute.cache['timeline'].append('handle_route')
return ''
# Call route
with self.app.test_client() as c:
c.get(rule)
self.assert_equal(1, len(TestRoute.cache['timeline']))
self.assert_in('handle_route', TestRoute.cache['timeline'])
self.assert_equal(0, TestRoute.cache['timeline'].index('handle_route'))
def test_middleware_registering(self):
"""
Test registering the request middleware
"""
rule = '/' + self.rand_str(20)
rule2 = '/' + self.rand_str(20)
self.assert_not_equal(rule, rule2)
# Add route
@self.app.route(rule, middleware=[MyRequestMiddleware])
def handle_route():
TestRoute.cache['timeline'].append('handle_route')
return ''
# Call route
with self.app.test_request_context(rule):
self.app.preprocess_request()
rv = self.app.dispatch_request()
response = self.app.make_response(rv)
response = self.app.process_response(response)
self.assert_equal(3, len(TestRoute.cache['timeline']))
self.assert_in(MyRequestMiddleware.__name__ + '.before', TestRoute.cache['timeline'])
self.assert_equal(0, TestRoute.cache['timeline'].index(MyRequestMiddleware.__name__ + '.before'))
self.assert_in('handle_route', TestRoute.cache['timeline'])
self.assert_equal(1, TestRoute.cache['timeline'].index('handle_route'))
self.assert_in(MyRequestMiddleware.__name__ + '.after', TestRoute.cache['timeline'])
self.assert_equal(2, TestRoute.cache['timeline'].index(MyRequestMiddleware.__name__ + '.after'))
# Add second route
@self.app.route(rule2, middleware=[MyRequestMiddleware, (MySecondRequestMiddleware, 'arg1')])
def handleecond_route():
TestRoute.cache['timeline'].append('handle_route')
return ''
# Call route
TestRoute.cache = dict()
TestRoute.cache['timeline'] = []
with self.app.test_request_context(rule2):
self.app.preprocess_request()
rv = self.app.dispatch_request()
response = self.app.make_response(rv)
response = self.app.process_response(response)
self.assert_equal(5, len(TestRoute.cache['timeline']))
self.assert_in(MyRequestMiddleware.__name__ + '.before', TestRoute.cache['timeline'])
self.assert_equal(0, TestRoute.cache['timeline'].index(MyRequestMiddleware.__name__ + '.before'))
self.assert_in(MySecondRequestMiddleware.__name__ + '.before', TestRoute.cache['timeline'])
self.assert_equal(1, TestRoute.cache['timeline'].index(MySecondRequestMiddleware.__name__ + '.before'))
self.assert_in('handle_route', TestRoute.cache['timeline'])
self.assert_equal(2, TestRoute.cache['timeline'].index('handle_route'))
self.assert_in(MySecondRequestMiddleware.__name__ + '.after', TestRoute.cache['timeline'])
self.assert_equal(3, TestRoute.cache['timeline'].index(MySecondRequestMiddleware.__name__ + '.after'))
self.assert_in(MyRequestMiddleware.__name__ + '.after', TestRoute.cache['timeline'])
self.assert_equal(4, TestRoute.cache['timeline'].index(MyRequestMiddleware.__name__ + '.after'))
def test_middleware_overwriting(self):
"""
Test overwriting of middleware
"""
rule = '/' + self.rand_str(20)
rule2 = '/' + self.rand_str(20)
# Add route
@self.app.route(rule, middleware=[MyRequestMiddleware])
def handle_route():
pass
# Overwrite route
@self.app.route(rule2, middleware=[MyRequestMiddleware, (MySecondRequestMiddleware, 'arg1')])
def handleOverwrittenRoute():
pass
def test_middleware_before_returning_none_null(self):
"""
Test before returning none null
:return: void
"""
rule = '/' + self.rand_str(20)
# Overwrite route
@self.app.route(rule, middleware=[(MyThirdRequestMiddleware, 'arg1'), MyRequestMiddleware])
def handle_route():
TestRoute.cache['timeline'].append('handle_route')
return ''
# Call route
with self.app.test_request_context(rule):
self.app.preprocess_request()
rv = self.app.dispatch_request()
response = self.app.make_response(rv)
self.app.process_response(response)
self.assert_equal(1, len(TestRoute.cache['timeline']))
self.assert_equal(MyThirdRequestMiddleware.__name__ + '.before', TestRoute.cache['timeline'][0])
def test_middleware_with_uses(self):
"""
Test middleware with uses
:return: void
"""
rule = '/' + self.rand_str(20)
# Overwrite route
self.app.route(rule, middleware=[(MySecondRequestMiddleware, 'arg1'), MyRequestMiddleware], uses=(MyController, 'get'))
# Call route
with self.app.test_request_context(rule):
self.app.preprocess_request()
rv = self.app.dispatch_request()
response = self.app.make_response(rv)
self.app.process_response(response)
self.assert_equal(5, len(TestRoute.cache['timeline']))
self.assert_equal(MySecondRequestMiddleware.__name__ + '.before', TestRoute.cache['timeline'][0])
self.assert_equal(MyRequestMiddleware.__name__ + '.before', TestRoute.cache['timeline'][1])
self.assert_equal(2, TestRoute.cache['timeline'].index('handle_route'))
self.assert_equal(MyRequestMiddleware.__name__ + '.after', TestRoute.cache['timeline'][3])
self.assert_equal(MySecondRequestMiddleware.__name__ + '.after', TestRoute.cache['timeline'][4])
def test_middleware_with_uses_with_function(self):
"""
Test middleware with uses with function
:return: void
"""
rule = '/' + self.rand_str(20)
# Overwrite route
self.app.route(rule, uses=(MyController, 'get')) \
.middleware(MySecondRequestMiddleware, 'arg1', kwarg1='some value') \
.middleware(MyRequestMiddleware)
# Call route
with self.app.test_request_context(rule):
self.app.preprocess_request()
rv = self.app.dispatch_request()
response = self.app.make_response(rv)
self.app.process_response(response)
self.assert_equal(5, len(TestRoute.cache['timeline']))
self.assert_equal(MySecondRequestMiddleware.__name__ + '.before', TestRoute.cache['timeline'][0])
self.assert_equal(MyRequestMiddleware.__name__ + '.before', TestRoute.cache['timeline'][1])
self.assert_equal(2, TestRoute.cache['timeline'].index('handle_route'))
self.assert_equal(MyRequestMiddleware.__name__ + '.after', TestRoute.cache['timeline'][3])
self.assert_equal(MySecondRequestMiddleware.__name__ + '.after', TestRoute.cache['timeline'][4])
class MyController(Controller):
def initialize(self, **params):
TestRoute.cache['init_params'] = params
super(MyController, self).initialize(**params)
def get(self):
TestRoute.cache['timeline'].append('handle_route')
return ''
def get_with_param(self, param=None):
TestRoute.cache['timeline'].append('handle_route')
TestRoute.cache['param'] = param
return ''
class MyRequestMiddleware(RequestMiddleware):
"""
Request Middleware class
"""
def before(self):
TestRoute.cache['timeline'].append(self.__class__.__name__ + '.before')
return super(MyRequestMiddleware, self).before()
def after(self, response):
assert isinstance(response, Response)
TestRoute.cache['timeline'].append(self.__class__.__name__ + '.after')
return super(MyRequestMiddleware, self).after(response)
class MySecondRequestMiddleware(RequestMiddleware):
"""
Second Request Middleware class
"""
def before(self, arg1, kwarg1=None):
TestRoute.cache['timeline'].append(self.__class__.__name__ + '.before')
return super(MySecondRequestMiddleware, self).before()
def after(self, response, arg1, kwarg1=None):
assert isinstance(response, Response)
TestRoute.cache['timeline'].append(self.__class__.__name__ + '.after')
return super(MySecondRequestMiddleware, self).after(response)
class MyThirdRequestMiddleware(RequestMiddleware):
    """Third request middleware; before() returns a non-None value instead of
    delegating to the base class (presumably to exercise the short-circuit
    path — confirm against the middleware contract)."""

    def before(self, arg1, kwarg1=None):
        entry = '%s.before' % self.__class__.__name__
        TestRoute.cache['timeline'].append(entry)
        return 'Not none'

    def after(self, response, arg1, kwarg1=None):
        assert isinstance(response, Response)
        entry = '%s.after' % self.__class__.__name__
        TestRoute.cache['timeline'].append(entry)
        return super(MyThirdRequestMiddleware, self).after(response)
| 33.093897
| 127
| 0.615761
| 1,472
| 14,098
| 5.665082
| 0.07269
| 0.130951
| 0.166207
| 0.071711
| 0.821441
| 0.799137
| 0.737858
| 0.715553
| 0.702722
| 0.698885
| 0
| 0.008187
| 0.25493
| 14,098
| 425
| 128
| 33.171765
| 0.785701
| 0.063768
| 0
| 0.710407
| 0
| 0
| 0.098629
| 0
| 0
| 0
| 0
| 0
| 0.280543
| 1
| 0.144796
| false
| 0.013575
| 0.0181
| 0
| 0.253394
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c56398258d24284acbae621ab5f1750d7152c056
| 21,017
|
py
|
Python
|
seed/schema.py
|
eubr-bigsea/seed
|
e2be2ecfb6f731f804a19e9b9173cd380fcc4560
|
[
"Apache-2.0"
] | null | null | null |
seed/schema.py
|
eubr-bigsea/seed
|
e2be2ecfb6f731f804a19e9b9173cd380fcc4560
|
[
"Apache-2.0"
] | 10
|
2021-06-01T23:22:47.000Z
|
2021-09-23T17:30:25.000Z
|
seed/schema.py
|
eubr-bigsea/seed
|
e2be2ecfb6f731f804a19e9b9173cd380fcc4560
|
[
"Apache-2.0"
] | 2
|
2019-11-05T13:40:09.000Z
|
2020-11-13T22:02:41.000Z
|
# -*- coding: utf-8 -*-
import datetime
import json
from copy import deepcopy
from marshmallow import Schema, fields, post_load, post_dump, EXCLUDE, INCLUDE
from marshmallow.validate import OneOf
from flask_babel import gettext
from seed.models import *
def partial_schema_factory(schema_cls):
    """Instantiate ``schema_cls`` with ``partial=True`` and propagate partial
    mode into deep copies of its nested fields.

    The copies are needed because nested field instances are shared at the
    class level; mutating them in place would affect every schema instance.
    """
    schema = schema_cls(partial=True)
    for name in list(schema.fields):
        field = schema.fields[name]
        if not isinstance(field, fields.Nested):
            continue
        nested_copy = deepcopy(field)
        nested_copy.schema.partial = True
        schema.fields[name] = nested_copy
    return schema
def translate_validation(validation_errors):
    """Translate validation messages in place using gettext.

    Error values are either nested dicts (translated recursively) or lists
    of message strings. Returns the same, mutated, mapping.
    """
    for field_name, field_errors in list(validation_errors.items()):
        if isinstance(field_errors, dict):
            translated = translate_validation(field_errors)
        else:
            translated = [gettext(message) for message in field_errors]
        validation_errors[field_name] = translated
    return validation_errors
def load_json(str_value):
    """Parse a JSON document, returning None when parsing is not possible.

    The previous ``except BaseException`` also swallowed KeyboardInterrupt
    and SystemExit; only the exceptions ``json.loads`` raises for bad input
    are suppressed now: ValueError (JSONDecodeError is a subclass) for
    malformed JSON and TypeError for non-string input such as None.
    """
    try:
        return json.loads(str_value)
    except (ValueError, TypeError):
        return None
# region Protected
# endregion
class BaseSchema(Schema):
    """Schema base class that drops None-valued keys from dumped output."""

    @post_dump
    def remove_skip_values(self, data, **kwargs):
        # Only None is filtered out: empty lists (and other falsy values)
        # must survive the dump.
        cleaned = {}
        for key, value in data.items():
            if value is not None:
                cleaned[key] = value
        return cleaned
class ClientCreateRequestSchema(BaseSchema):
    """ JSON serialization schema for creating a Client (name/enabled/token). """
    name = fields.String(required=True)
    enabled = fields.Boolean(required=True)
    token = fields.String(required=True)

    # noinspection PyUnresolvedReferences
    @post_load
    def make_object(self, data, **kwargs):
        """ Deserialize data into an instance of Client"""
        return Client(**data)

    class Meta:
        # Preserve declaration order on dump; silently drop unknown keys.
        ordered = True
        unknown = EXCLUDE


class ClientListResponseSchema(BaseSchema):
    """ JSON serialization schema for listing Clients (adds the id field). """
    id = fields.Integer(required=True)
    name = fields.String(required=True)
    enabled = fields.Boolean(required=True)
    token = fields.String(required=True)

    # noinspection PyUnresolvedReferences
    @post_load
    def make_object(self, data, **kwargs):
        """ Deserialize data into an instance of Client"""
        return Client(**data)

    class Meta:
        # Preserve declaration order on dump; silently drop unknown keys.
        ordered = True
        unknown = EXCLUDE


class ClientItemResponseSchema(BaseSchema):
    """ JSON serialization schema for a single Client item. """
    id = fields.Integer(required=True)
    name = fields.String(required=True)
    enabled = fields.Boolean(required=True)
    token = fields.String(required=True)

    # noinspection PyUnresolvedReferences
    @post_load
    def make_object(self, data, **kwargs):
        """ Deserialize data into an instance of Client"""
        return Client(**data)

    class Meta:
        # Preserve declaration order on dump; silently drop unknown keys.
        ordered = True
        unknown = EXCLUDE
class DeploymentCreateRequestSchema(BaseSchema):
    """ JSON serialization schema for creating a Deployment.

    Optional resource fields carry both a load default (``missing``) and a
    dump default (``default``), e.g. 128M memory / 500m CPU requests.
    """
    name = fields.String(required=True)
    version = fields.Integer(required=True)
    internal_name = fields.String(required=False, allow_none=True)
    description = fields.String(required=False, allow_none=True)
    created = fields.DateTime(required=False, allow_none=True)
    updated = fields.DateTime(required=False, allow_none=True)
    command = fields.String(required=False, allow_none=True)
    workflow_name = fields.String(
        required=False,
        allow_none=True,
        missing='',
        default='')
    workflow_id = fields.Integer(required=False, allow_none=True)
    job_id = fields.Integer(required=False, allow_none=True)
    model_id = fields.Integer(required=False, allow_none=True)
    model_name = fields.String(required=True)
    user_id = fields.Integer(required=True)
    user_login = fields.String(required=True)
    user_name = fields.String(required=True)
    enabled = fields.Boolean(
        required=False,
        allow_none=True,
        missing=False,
        default=False)
    # NOTE(review): validating against __dict__.keys() also admits dunder
    # names such as '__module__'; presumably only the status/type constants
    # were intended — verify.
    current_status = fields.String(required=False, allow_none=True, missing=DeploymentStatus.PENDING, default=DeploymentStatus.PENDING,
                                   validate=[OneOf(list(DeploymentStatus.__dict__.keys()))])
    type = fields.String(required=False, allow_none=True, missing=DeploymentType.MODEL, default=DeploymentType.MODEL,
                         validate=[OneOf(list(DeploymentType.__dict__.keys()))])
    attempts = fields.Integer(
        required=False,
        allow_none=True,
        missing=0,
        default=0)
    entry_point = fields.String(required=False, allow_none=True)
    replicas = fields.Integer(
        required=False,
        allow_none=True,
        missing=1,
        default=1)
    request_memory = fields.String(
        required=False,
        allow_none=True,
        missing='128M',
        default='128M')
    limit_memory = fields.String(required=False, allow_none=True)
    request_cpu = fields.String(
        required=False,
        allow_none=True,
        missing='500m',
        default='500m')
    limit_cpu = fields.String(
        required=False,
        allow_none=True,
        missing='1000m',
        default='1000m')
    base_service_url = fields.String(required=False, allow_none=True)
    port = fields.Integer(required=False, allow_none=True)
    extra_parameters = fields.String(required=False, allow_none=True)
    input_spec = fields.String(required=False, allow_none=True)
    output_spec = fields.String(required=False, allow_none=True)
    assets = fields.String(required=False, allow_none=True)
    execution_id = fields.String(required=False, allow_none=True)
    deploy = fields.Boolean(default=False, allow_none=True)
    target_id = fields.Integer(required=True)
    image_id = fields.Integer(required=True)

    # noinspection PyUnresolvedReferences
    @post_load
    def make_object(self, data, **kwargs):
        """ Deserialize data into an instance of Deployment"""
        return Deployment(**data)

    class Meta:
        # Preserve declaration order on dump; silently drop unknown keys.
        ordered = True
        unknown = EXCLUDE
class DeploymentListResponseSchema(BaseSchema):
    """ JSON serialization schema for listing Deployments.

    Besides the flat columns, the response embeds the related target/image
    via nested schemas and synthesizes user/workflow/job sub-objects from
    the denormalized *_id / *_name columns.
    """
    id = fields.Integer(required=True)
    name = fields.String(required=True)
    version = fields.Integer(required=True)
    internal_name = fields.String(required=False, allow_none=True)
    description = fields.String(required=False, allow_none=True)
    created = fields.DateTime(required=False, allow_none=True)
    updated = fields.DateTime(required=False, allow_none=True)
    command = fields.String(required=False, allow_none=True)
    job_id = fields.Integer(required=False, allow_none=True)
    model_id = fields.Integer(required=False, allow_none=True)
    model_name = fields.String(required=True)
    user_id = fields.Integer(required=True)
    user_login = fields.String(required=True)
    user_name = fields.String(required=True)
    enabled = fields.Boolean(
        required=False,
        allow_none=True,
        missing=False,
        default=False)
    # NOTE(review): OneOf over __dict__.keys() also admits dunder names;
    # presumably only the constants were intended — verify.
    current_status = fields.String(required=False, allow_none=True, missing=DeploymentStatus.PENDING, default=DeploymentStatus.PENDING,
                                   validate=[OneOf(list(DeploymentStatus.__dict__.keys()))])
    type = fields.String(required=False, allow_none=True, missing=DeploymentType.MODEL, default=DeploymentType.MODEL,
                         validate=[OneOf(list(DeploymentType.__dict__.keys()))])
    attempts = fields.Integer(
        required=False,
        allow_none=True,
        missing=0,
        default=0)
    log = fields.String(required=False, allow_none=True)
    entry_point = fields.String(required=False, allow_none=True)
    replicas = fields.Integer(
        required=False,
        allow_none=True,
        missing=1,
        default=1)
    request_memory = fields.String(
        required=False,
        allow_none=True,
        missing='128M',
        default='128M')
    limit_memory = fields.String(required=False, allow_none=True)
    request_cpu = fields.String(
        required=False,
        allow_none=True,
        missing='500m',
        default='500m')
    limit_cpu = fields.String(
        required=False,
        allow_none=True,
        missing='1000m',
        default='1000m')
    base_service_url = fields.String(required=False, allow_none=True)
    port = fields.Integer(required=False, allow_none=True)
    extra_parameters = fields.String(required=False, allow_none=True)
    input_spec = fields.String(required=False, allow_none=True)
    output_spec = fields.String(required=False, allow_none=True)
    assets = fields.String(required=False, allow_none=True)
    execution_id = fields.String(required=False, allow_none=True)
    target = fields.Nested(
        'seed.schema.DeploymentTargetListResponseSchema',
        required=True)
    image = fields.Nested(
        'seed.schema.DeploymentImageListResponseSchema',
        required=True)
    # Dump-only computed sub-objects built from the flat columns.
    user = fields.Function(
        lambda x: {
            "id": x.user_id,
            "name": x.user_name,
            "login": x.user_login})
    workflow = fields.Function(
        lambda x: {
            "id": x.workflow_id,
            "name": x.workflow_name})
    job = fields.Function(lambda x: {"id": x.job_id})

    # noinspection PyUnresolvedReferences
    @post_load
    def make_object(self, data, **kwargs):
        """ Deserialize data into an instance of Deployment"""
        return Deployment(**data)

    class Meta:
        # Preserve declaration order on dump; silently drop unknown keys.
        ordered = True
        unknown = EXCLUDE
class DeploymentItemResponseSchema(BaseSchema):
    """ JSON serialization schema for a single Deployment item.

    Mirrors the list schema but nests the *Item* variants of the target and
    image schemas.
    """
    id = fields.Integer(required=True)
    name = fields.String(required=True)
    version = fields.Integer(required=True)
    internal_name = fields.String(required=False, allow_none=True)
    description = fields.String(required=False, allow_none=True)
    created = fields.DateTime(required=False, allow_none=True)
    updated = fields.DateTime(required=False, allow_none=True)
    command = fields.String(required=False, allow_none=True)
    model_id = fields.Integer(required=False, allow_none=True)
    model_name = fields.String(required=True)
    enabled = fields.Boolean(
        required=False,
        allow_none=True,
        missing=False,
        default=False)
    # NOTE(review): OneOf over __dict__.keys() also admits dunder names;
    # presumably only the constants were intended — verify.
    current_status = fields.String(required=False, allow_none=True, missing=DeploymentStatus.PENDING, default=DeploymentStatus.PENDING,
                                   validate=[OneOf(list(DeploymentStatus.__dict__.keys()))])
    type = fields.String(required=False, allow_none=True, missing=DeploymentType.MODEL, default=DeploymentType.MODEL,
                         validate=[OneOf(list(DeploymentType.__dict__.keys()))])
    attempts = fields.Integer(
        required=False,
        allow_none=True,
        missing=0,
        default=0)
    log = fields.String(required=False, allow_none=True)
    entry_point = fields.String(required=False, allow_none=True)
    replicas = fields.Integer(
        required=False,
        allow_none=True,
        missing=1,
        default=1)
    request_memory = fields.String(
        required=False,
        allow_none=True,
        missing='128M',
        default='128M')
    limit_memory = fields.String(required=False, allow_none=True)
    request_cpu = fields.String(
        required=False,
        allow_none=True,
        missing='500m',
        default='500m')
    limit_cpu = fields.String(
        required=False,
        allow_none=True,
        missing='1000m',
        default='1000m')
    base_service_url = fields.String(required=False, allow_none=True)
    port = fields.Integer(required=False, allow_none=True)
    extra_parameters = fields.String(required=False, allow_none=True)
    input_spec = fields.String(required=False, allow_none=True)
    output_spec = fields.String(required=False, allow_none=True)
    assets = fields.String(required=False, allow_none=True)
    execution_id = fields.String(required=False, allow_none=True)
    target = fields.Nested(
        'seed.schema.DeploymentTargetItemResponseSchema',
        required=True)
    image = fields.Nested(
        'seed.schema.DeploymentImageItemResponseSchema',
        required=True)
    # Dump-only computed sub-objects built from the flat columns.
    user = fields.Function(
        lambda x: {
            "id": x.user_id,
            "name": x.user_name,
            "login": x.user_login})
    workflow = fields.Function(
        lambda x: {
            "id": x.workflow_id,
            "name": x.workflow_name})
    job = fields.Function(lambda x: {"id": x.job_id})

    # noinspection PyUnresolvedReferences
    @post_load
    def make_object(self, data, **kwargs):
        """ Deserialize data into an instance of Deployment"""
        return Deployment(**data)

    class Meta:
        # Preserve declaration order on dump; silently drop unknown keys.
        ordered = True
        unknown = EXCLUDE
class DeploymentImageListResponseSchema(BaseSchema):
    """ JSON serialization schema for listing DeploymentImages. """
    id = fields.Integer(required=True)
    description = fields.String(required=True)
    name = fields.String(required=True)
    tag = fields.String(required=True)
    enabled = fields.Boolean(required=True)

    # noinspection PyUnresolvedReferences
    @post_load
    def make_object(self, data, **kwargs):
        """ Deserialize data into an instance of DeploymentImage"""
        return DeploymentImage(**data)

    class Meta:
        # Preserve declaration order on dump; silently drop unknown keys.
        ordered = True
        unknown = EXCLUDE


class DeploymentImageItemResponseSchema(BaseSchema):
    """ JSON serialization schema for a single DeploymentImage item. """
    id = fields.Integer(required=True)
    description = fields.String(required=True)
    name = fields.String(required=True)
    tag = fields.String(required=True)
    enabled = fields.Boolean(required=True)

    # noinspection PyUnresolvedReferences
    @post_load
    def make_object(self, data, **kwargs):
        """ Deserialize data into an instance of DeploymentImage"""
        return DeploymentImage(**data)

    class Meta:
        # Preserve declaration order on dump; silently drop unknown keys.
        ordered = True
        unknown = EXCLUDE


class DeploymentImageCreateRequestSchema(BaseSchema):
    """ JSON serialization schema for creating a DeploymentImage.

    NOTE(review): unlike the other *CreateRequest* schemas, this one requires
    an ``id`` from the client — confirm that is intended.
    """
    id = fields.Integer(required=True)
    description = fields.String(required=True)
    name = fields.String(required=True)
    tag = fields.String(required=True)
    enabled = fields.Boolean(required=True)

    # noinspection PyUnresolvedReferences
    @post_load
    def make_object(self, data, **kwargs):
        """ Deserialize data into an instance of DeploymentImage"""
        return DeploymentImage(**data)

    class Meta:
        # Preserve declaration order on dump; silently drop unknown keys.
        ordered = True
        unknown = EXCLUDE
class DeploymentLogCreateRequestSchema(BaseSchema):
    """ JSON serialization schema for creating a DeploymentLog entry. """
    # utcnow is passed as a callable, so the default is evaluated per load/
    # dump. NOTE(review): it yields a naive datetime — confirm naive UTC is
    # intended.
    date = fields.DateTime(
        required=False,
        allow_none=True,
        missing=datetime.datetime.utcnow,
        default=datetime.datetime.utcnow)
    status = fields.String(required=True,
                           validate=[OneOf(list(DeploymentStatus.__dict__.keys()))])
    log = fields.String(required=True)

    # noinspection PyUnresolvedReferences
    @post_load
    def make_object(self, data, **kwargs):
        """ Deserialize data into an instance of DeploymentLog"""
        return DeploymentLog(**data)

    class Meta:
        # Preserve declaration order on dump; silently drop unknown keys.
        ordered = True
        unknown = EXCLUDE


class DeploymentLogListResponseSchema(BaseSchema):
    """ JSON serialization schema for listing DeploymentLog entries. """
    id = fields.Integer(required=True)
    date = fields.DateTime(
        required=False,
        allow_none=True,
        missing=datetime.datetime.utcnow,
        default=datetime.datetime.utcnow)
    status = fields.String(required=True,
                           validate=[OneOf(list(DeploymentStatus.__dict__.keys()))])
    log = fields.String(required=True)

    # noinspection PyUnresolvedReferences
    @post_load
    def make_object(self, data, **kwargs):
        """ Deserialize data into an instance of DeploymentLog"""
        return DeploymentLog(**data)

    class Meta:
        # Preserve declaration order on dump; silently drop unknown keys.
        ordered = True
        unknown = EXCLUDE


class DeploymentLogItemResponseSchema(BaseSchema):
    """ JSON serialization schema for a single DeploymentLog item. """
    id = fields.Integer(required=True)
    date = fields.DateTime(
        required=False,
        allow_none=True,
        missing=datetime.datetime.utcnow,
        default=datetime.datetime.utcnow)
    status = fields.String(required=True,
                           validate=[OneOf(list(DeploymentStatus.__dict__.keys()))])
    log = fields.String(required=True)

    # noinspection PyUnresolvedReferences
    @post_load
    def make_object(self, data, **kwargs):
        """ Deserialize data into an instance of DeploymentLog"""
        return DeploymentLog(**data)

    class Meta:
        # Preserve declaration order on dump; silently drop unknown keys.
        ordered = True
        unknown = EXCLUDE
class DeploymentMetricCreateRequestSchema(BaseSchema):
    """ JSON serialization schema for creating a DeploymentMetric. """
    name = fields.String(required=True)
    parameters = fields.String(required=True)
    enabled = fields.Boolean(required=True)
    user_id = fields.Integer(required=True)
    user_login = fields.String(required=True)

    # noinspection PyUnresolvedReferences
    @post_load
    def make_object(self, data, **kwargs):
        """ Deserialize data into an instance of DeploymentMetric"""
        return DeploymentMetric(**data)

    class Meta:
        # Preserve declaration order on dump; silently drop unknown keys.
        ordered = True
        unknown = EXCLUDE


class DeploymentMetricListResponseSchema(BaseSchema):
    """ JSON serialization schema for listing DeploymentMetrics. """
    id = fields.Integer(required=True)
    name = fields.String(required=True)
    parameters = fields.String(required=True)
    enabled = fields.Boolean(required=True)
    user_id = fields.Integer(required=True)
    user_login = fields.String(required=True)

    # noinspection PyUnresolvedReferences
    @post_load
    def make_object(self, data, **kwargs):
        """ Deserialize data into an instance of DeploymentMetric"""
        return DeploymentMetric(**data)

    class Meta:
        # Preserve declaration order on dump; silently drop unknown keys.
        ordered = True
        unknown = EXCLUDE


class DeploymentMetricItemResponseSchema(BaseSchema):
    """ JSON serialization schema for a single DeploymentMetric item. """
    id = fields.Integer(required=True)
    name = fields.String(required=True)
    parameters = fields.String(required=True)
    enabled = fields.Boolean(required=True)
    user_id = fields.Integer(required=True)
    user_login = fields.String(required=True)

    # noinspection PyUnresolvedReferences
    @post_load
    def make_object(self, data, **kwargs):
        """ Deserialize data into an instance of DeploymentMetric"""
        return DeploymentMetric(**data)

    class Meta:
        # Preserve declaration order on dump; silently drop unknown keys.
        ordered = True
        unknown = EXCLUDE
class DeploymentTargetCreateRequestSchema(BaseSchema):
    """ JSON serialization schema for creating a DeploymentTarget. """
    name = fields.String(required=True)
    namespace = fields.String(required=True)
    volume_path = fields.String(required=True)
    description = fields.String(required=False, allow_none=True)
    url = fields.String(required=True)
    authentication_info = fields.String(required=False, allow_none=True)
    enabled = fields.Boolean(required=True)
    base_service_url = fields.String(required=True)
    port = fields.Integer(required=True)
    # NOTE(review): OneOf over __dict__.keys() also admits dunder names;
    # presumably only the target-type constants were intended — verify.
    target_type = fields.String(required=True,
                                validate=[OneOf(list(DeploymentTargetType.__dict__.keys()))])
    descriptor = fields.String(required=False, allow_none=True)

    # noinspection PyUnresolvedReferences
    @post_load
    def make_object(self, data, **kwargs):
        """ Deserialize data into an instance of DeploymentTarget"""
        return DeploymentTarget(**data)

    class Meta:
        # Preserve declaration order on dump; silently drop unknown keys.
        ordered = True
        unknown = EXCLUDE


class DeploymentTargetListResponseSchema(BaseSchema):
    """ JSON serialization schema for listing DeploymentTargets.

    Exposes a subset of the create schema (no url/credentials/descriptor).
    """
    id = fields.Integer(required=True)
    name = fields.String(required=True)
    namespace = fields.String(required=True)
    description = fields.String(required=False, allow_none=True)
    enabled = fields.Boolean(required=True)
    base_service_url = fields.String(required=True)
    target_type = fields.String(required=True,
                                validate=[OneOf(list(DeploymentTargetType.__dict__.keys()))])

    # noinspection PyUnresolvedReferences
    @post_load
    def make_object(self, data, **kwargs):
        """ Deserialize data into an instance of DeploymentTarget"""
        return DeploymentTarget(**data)

    class Meta:
        # Preserve declaration order on dump; silently drop unknown keys.
        ordered = True
        unknown = EXCLUDE


class DeploymentTargetItemResponseSchema(BaseSchema):
    """ JSON serialization schema for a single DeploymentTarget item. """
    id = fields.Integer(required=True)
    name = fields.String(required=True)
    namespace = fields.String(required=True)
    description = fields.String(required=False, allow_none=True)
    enabled = fields.Boolean(required=True)
    base_service_url = fields.String(required=True)
    target_type = fields.String(required=True,
                                validate=[OneOf(list(DeploymentTargetType.__dict__.keys()))])

    # noinspection PyUnresolvedReferences
    @post_load
    def make_object(self, data, **kwargs):
        """ Deserialize data into an instance of DeploymentTarget"""
        return DeploymentTarget(**data)

    class Meta:
        # Preserve declaration order on dump; silently drop unknown keys.
        ordered = True
        unknown = EXCLUDE
| 34.681518
| 135
| 0.67374
| 2,251
| 21,017
| 6.166593
| 0.07952
| 0.095094
| 0.15849
| 0.108926
| 0.878251
| 0.870326
| 0.870326
| 0.862258
| 0.857647
| 0.848138
| 0
| 0.004474
| 0.223676
| 21,017
| 605
| 136
| 34.738843
| 0.846286
| 0.100918
| 0
| 0.854077
| 0
| 0
| 0.015973
| 0.009755
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04721
| false
| 0
| 0.015021
| 0.002146
| 0.585837
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3d8404d8466691de3c80854b48ed32df86a581d2
| 2,619
|
py
|
Python
|
games/forms.py
|
alexdeathway/Gecom
|
2a0fc87887d73d15eba183625dc8a429defe851f
|
[
"MIT"
] | 7
|
2021-11-15T06:28:05.000Z
|
2022-02-22T11:36:00.000Z
|
games/forms.py
|
alexdeathway/Gecom
|
2a0fc87887d73d15eba183625dc8a429defe851f
|
[
"MIT"
] | 3
|
2021-11-02T16:10:49.000Z
|
2022-02-01T08:30:38.000Z
|
games/forms.py
|
alexdeathway/Gecom
|
2a0fc87887d73d15eba183625dc8a429defe851f
|
[
"MIT"
] | null | null | null |
import re
from django import forms
from games.models import GamesModel, OrganisationModel
class GameCreationForm(forms.ModelForm):
    """Form for creating a game; the publisher choices are limited to the
    organisations owned by the requesting user."""

    def __init__(self, *args, **kwargs):
        # The view must pass request=...; we need it to scope the queryset.
        request = kwargs.pop("request")
        super(GameCreationForm, self).__init__(*args, **kwargs)
        owned = OrganisationModel.objects.filter(owner=request.user)
        self.fields["publisher"] = forms.ModelChoiceField(queryset=owned)

    class Meta:
        model = GamesModel
        labels = {
            "sale": "Allot for new release sale?",
        }
        fields = [
            "name",
            "category",
            "cover",
            "price",
            "sale",
            "discription",
            "publisher",
        ]
class OrganisationCreationForm(forms.ModelForm):
    """Form for creating an organisation.

    The username must be lowercase alphanumeric; enforced by clean_username.
    """

    class Meta:
        model = OrganisationModel
        labels = {
            "name": "Organisation name",
            "email": "Organisation email",
        }
        fields = [
            "name",
            "username",
            "email",
        ]

    def clean_username(self):
        # Django only invokes clean_<fieldname> hooks whose name matches a
        # declared form field; the previous name (clean_code_name) did not
        # match any field, so this validation never ran.
        username = self.cleaned_data['username']
        if not re.match(r'^[0-9a-zA-Z]*$', username) or username.lower() != username:
            raise forms.ValidationError("Sorry , you can only have lower alphanumeric in username")
        return username
class OrganisationUpdateForm(forms.ModelForm):
    """Form for updating an organisation.

    The username must be alphanumeric; enforced by clean_username.
    """

    class Meta:
        model = OrganisationModel
        labels = {
            "name": "Organisation name",
            "email": "Organisation email",
        }
        fields = [
            "name",
            "username",
            "email",
            "about",
        ]

    def clean_username(self):
        # Django only invokes clean_<fieldname> hooks whose name matches a
        # declared form field; the previous name (clean_code_name) did not
        # match any field, so this validation never ran.
        username = self.cleaned_data['username']
        if not re.match(r'^[0-9a-zA-Z]*$', username):
            raise forms.ValidationError("Sorry , you can only have alphanumeric in username")
        return username
class GameUpdateForm(forms.ModelForm):
    """Form for updating a game; the publisher choices are limited to the
    organisations owned by the requesting user."""

    def __init__(self, *args, **kwargs):
        # The view must pass request=...; we need it to scope the queryset.
        request = kwargs.pop("request")
        super(GameUpdateForm, self).__init__(*args, **kwargs)
        owned = OrganisationModel.objects.filter(owner=request.user)
        self.fields["publisher"] = forms.ModelChoiceField(queryset=owned)

    class Meta:
        model = GamesModel
        labels = {
            "sale": "Allot for new release sale?",
        }
        fields = [
            "name",
            "category",
            "cover",
            "price",
            "sale",
            "discription",
            "publisher",
        ]
| 27.568421
| 104
| 0.53761
| 226
| 2,619
| 6.132743
| 0.318584
| 0.040404
| 0.040404
| 0.030303
| 0.842713
| 0.842713
| 0.78355
| 0.78355
| 0.78355
| 0.714286
| 0
| 0.002347
| 0.34937
| 2,619
| 94
| 105
| 27.861702
| 0.811033
| 0
| 0
| 0.717949
| 0
| 0
| 0.176785
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.051282
| false
| 0
| 0.038462
| 0
| 0.217949
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3da9f02e3f94a197fa8bbf8b33b52cfff7a08d59
| 41,527
|
py
|
Python
|
test/test_metrics.py
|
deb-intel/LPOTtest
|
f7b7524c733e581668d15192b69f9d9a7ca5222d
|
[
"Apache-2.0"
] | null | null | null |
test/test_metrics.py
|
deb-intel/LPOTtest
|
f7b7524c733e581668d15192b69f9d9a7ca5222d
|
[
"Apache-2.0"
] | null | null | null |
test/test_metrics.py
|
deb-intel/LPOTtest
|
f7b7524c733e581668d15192b69f9d9a7ca5222d
|
[
"Apache-2.0"
] | null | null | null |
"""Tests for the metrics module."""
import numpy as np
import unittest
from lpot.metric import METRICS
from lpot.experimental.metric.f1 import evaluate
from lpot.experimental.metric import bleu
class TestMetrics(unittest.TestCase):
def testBLEU(self):
    """BLEU metric: scoring, reset, and length-mismatch error handling."""
    metrics = METRICS('tensorflow')
    # Local renamed so it no longer shadows the imported `bleu` module.
    bleu_metric = metrics['BLEU']()
    predictions = ['Gutach: Mehr Sicherheit für Fußgänger']
    references = ('Gutach: Noch mehr Sicherheit für Fußgänger',)
    bleu_metric.update(predictions, references)
    self.assertAlmostEqual(bleu_metric.result(), 51.1507809)
    bleu_metric.reset()
    predictions = ['Dies wurde auch von Peter Arnold vom Offenburg District Office bestätigt.']
    references = ('Dies bestätigt auch Peter Arnold vom Landratsamt Offenburg.',)
    bleu_metric.update(predictions, references)
    self.assertAlmostEqual(bleu_metric.result(), 16.108992695)
    # Mismatched prediction/reference lengths must raise.
    with self.assertRaises(ValueError):
        bleu_metric.update(['a', 'b'], ('c',))
def test_tensorflow_F1(self):
    """F1 metric for the tensorflow backend."""
    metrics = METRICS('tensorflow')
    f1_metric = metrics['F1']()
    predictions = [1, 1, 1, 1]
    ground_truth = [0, 1, 1, 0]
    f1_metric.update(predictions, ground_truth)
    self.assertEqual(f1_metric.result(), 0.5)
def test_squad_evaluate(self):
    """SQuAD-style evaluate(): an exact answer match scores 100."""
    answer = {'answer_start': 177, 'text': 'Denver Broncos'}
    qa = {
        'answers': [answer, dict(answer), dict(answer)],
        'question': 'Which NFL team represented the AFC at Super Bowl 50?',
        'id': '56be4db0acb8001400a502ec',
    }
    label = [{'paragraphs': [{'qas': [qa]}]}]
    preds = {'56be4db0acb8001400a502ec': 'Denver Broncos'}
    f1 = evaluate(preds, label)
    self.assertEqual(f1, 100.)
def test_pytorch_F1(self):
    """F1 metric for the pytorch backend, including reset before use."""
    metrics = METRICS('pytorch')
    f1_metric = metrics['F1']()
    f1_metric.reset()
    predictions = [1, 1]
    ground_truth = [2, 1, 1]
    f1_metric.update(predictions, ground_truth)
    self.assertEqual(f1_metric.result(), 0.8)
def test_mxnet_F1(self):
    """F1 metric for the mxnet backend."""
    metrics = METRICS('mxnet')
    f1_metric = metrics['F1']()
    predictions = [0, 1, 1, 1, 1, 0]
    ground_truth = [0, 1, 1, 1]
    f1_metric.update(predictions, ground_truth)
    self.assertEqual(f1_metric.result(), 0.8)
def test_onnx_topk(self):
    """Topk metric for the onnxrt_qlinearops backend with one-hot, sparse
    and single labels."""
    metrics = METRICS('onnxrt_qlinearops')
    top1 = metrics['topk']()
    top1.reset()
    # result() on a freshly reset metric is 0, and querying it twice is safe.
    self.assertEqual(top1.result(), 0)
    self.assertEqual(top1.result(), 0)
    top2 = metrics['topk'](k=2)
    top3 = metrics['topk'](k=3)
    predicts = [[0, 0.2, 0.9, 0.3], [0, 0.9, 0.8, 0]]
    single_predict = [0, 0.2, 0.9, 0.3]
    labels = [[0, 1, 0, 0], [0, 0, 1, 0]]
    sparse_labels = [2, 2]
    single_label = 2
    # (predictions, labels, expected top1/top2/top3 after the update)
    cases = [
        (predicts, labels, 0.0, 0.5, 1),              # one-hot labels
        (predicts, sparse_labels, 0.25, 0.75, 1),     # sparse labels
        (single_predict, single_label, 0.4, 0.8, 1),  # single label
    ]
    for preds, gt, expected1, expected2, expected3 in cases:
        top1.update(preds, gt)
        top2.update(preds, gt)
        top3.update(preds, gt)
        self.assertEqual(top1.result(), expected1)
        self.assertEqual(top2.result(), expected2)
        self.assertEqual(top3.result(), expected3)
def test_mxnet_topk(self):
    """Topk metric for the mxnet backend with one-hot, sparse and single
    labels."""
    metrics = METRICS('mxnet')
    top1 = metrics['topk']()
    top1.reset()
    self.assertEqual(top1.result(), 0)
    top2 = metrics['topk'](k=2)
    top3 = metrics['topk'](k=3)
    predicts = [[0, 0.2, 0.9, 0.3], [0, 0.9, 0.8, 0]]
    single_predict = [0, 0.2, 0.9, 0.3]
    labels = [[0, 1, 0, 0], [0, 0, 1, 0]]
    sparse_labels = [2, 2]
    single_label = 2
    # (predictions, labels, expected top1/top2/top3 after the update)
    cases = [
        (predicts, labels, 0.0, 0.5, 1),              # one-hot labels
        (predicts, sparse_labels, 0.25, 0.75, 1),     # sparse labels
        (single_predict, single_label, 0.4, 0.8, 1),  # single label
    ]
    for preds, gt, expected1, expected2, expected3 in cases:
        top1.update(preds, gt)
        top2.update(preds, gt)
        top3.update(preds, gt)
        self.assertEqual(top1.result(), expected1)
        self.assertEqual(top2.result(), expected2)
        self.assertEqual(top3.result(), expected3)
def test_tensorflow_topk(self):
    """Topk metric for the tensorflow backend with one-hot, sparse and
    single labels."""
    metrics = METRICS('tensorflow')
    top1 = metrics['topk']()
    top1.reset()
    self.assertEqual(top1.result(), 0)
    top2 = metrics['topk'](k=2)
    top3 = metrics['topk'](k=3)
    predicts = [[0, 0.2, 0.9, 0.3], [0, 0.9, 0.8, 0]]
    single_predict = [0, 0.2, 0.9, 0.3]
    labels = [[0, 1, 0, 0], [0, 0, 1, 0]]
    sparse_labels = [2, 2]
    single_label = 2
    # (predictions, labels, expected top1/top2/top3 after the update)
    cases = [
        (predicts, labels, 0.0, 0.5, 1),              # one-hot labels
        (predicts, sparse_labels, 0.25, 0.75, 1),     # sparse labels
        (single_predict, single_label, 0.4, 0.8, 1),  # single label
    ]
    for preds, gt, expected1, expected2, expected3 in cases:
        top1.update(preds, gt)
        top2.update(preds, gt)
        top3.update(preds, gt)
        self.assertEqual(top1.result(), expected1)
        self.assertEqual(top2.result(), expected2)
        self.assertEqual(top3.result(), expected3)
def test_tensorflow_mAP(self):
    """mAP metric for the tensorflow backend: label-map loading from an
    annotation file, update() input validation, and incremental results."""
    import json
    import os
    metrics = METRICS('tensorflow')
    # Minimal COCO-style annotation file: one image, one annotation, one
    # category ('dog', id 18).
    fake_dict = {
        'info': {},
        'licenses':{},
        'images':[{
            'file_name': '000000397133.jpg',
            'height': 100,
            'width': 100,
            'id': 397133
        }],
        'annotations':[{
            'category_id': 18,
            'id': 1768,
            'iscrowd': 0,
            'image_id': 397133,
            'bbox': [473.07, 395.93, 38.65, 28.67]
        }],
        'categories':[{
            'supercategory': 'animal',
            'id': 18,
            'name': 'dog'
        }]
    }
    fake_json = json.dumps(fake_dict)
    with open('anno.json', 'w') as f:
        f.write(fake_json)
    mAP = metrics['mAP']('anno.json')
    # 'dog' must be remapped to index 1 in the reversed category map.
    self.assertEqual(mAP.category_map_reverse['dog'], 1)
    detection = [
        np.array([[5]]),
        np.array([[5]]),
        np.array([[[0.16117382, 0.59801614, 0.81511605, 0.7858219 ],
                   [0.5589304 , 0.        , 0.98301625, 0.520178  ],
                   [0.62706745, 0.35748824, 0.6892729 , 0.41513762],
                   [0.40032804, 0.01218696, 0.6924763 , 0.30341768],
                   [0.62706745, 0.35748824, 0.6892729 , 0.41513762]]]),
        np.array([[0.9267181 , 0.8510787 , 0.60418576, 0.35155892, 0.31158054]]),
        np.array([[ 1., 67., 51., 79., 47.]])
    ]
    # Non-numeric class labels ('a', 'b') in the ground truth must be
    # rejected.
    ground_truth = [
        np.array([[[0.5633255 , 0.34003124, 0.69857144, 0.4009531 ],
                   [0.4763466 , 0.7769531 , 0.54334897, 0.9675937 ]]]),
        np.array([['a', 'b']]),
        np.array([[]]),
        np.array([b'000000397133.jpg'])
    ]
    self.assertRaises(ValueError, mAP.update, detection, ground_truth)
    os.remove('anno.json')
    # Second round: mAP without an annotation file.
    mAP = metrics['mAP']()
    detection = [
        np.array([[[0.16117382, 0.59801614, 0.81511605, 0.7858219 ],
                   [0.5589304 , 0.        , 0.98301625, 0.520178  ],
                   [0.62706745, 0.35748824, 0.6892729 , 0.41513762],
                   [0.40032804, 0.01218696, 0.6924763 , 0.30341768],
                   [0.62706745, 0.35748824, 0.6892729 , 0.41513762]]]),
        np.array([[0.9267181 , 0.8510787 , 0.60418576, 0.35155892, 0.31158054]]),
        np.array([[ 1., 67., 51., 79., 47.]])
    ]
    detection_2 = [
        np.array([[8]]),
        np.array([[[0.82776225, 0.5865939 , 0.8927653 , 0.6302338 ],
                   [0.8375764 , 0.6424138 , 0.9055594 , 0.6921875 ],
                   [0.57902956, 0.39394334, 0.8342961 , 0.5577197 ],
                   [0.7949219 , 0.6513021 , 0.8472295 , 0.68427753],
                   [0.809729  , 0.5947042 , 0.8539927 , 0.62916476],
                   [0.7258591 , 0.08907133, 1.        , 0.86224866],
                   [0.43100086, 0.37782395, 0.8384069 , 0.5616918 ],
                   [0.32005906, 0.84334356, 1.        , 1.        ]]]),
        np.array([[0.86698544, 0.7562499 , 0.66414887, 0.64498234,\
                   0.63083494,0.46618757, 0.3914739 , 0.3094324 ]]),
        np.array([[55., 55., 79., 55., 55., 67., 79., 82.]])
    ]
    ground_truth = [
        np.array([[[0.5633255 , 0.34003124, 0.69857144, 0.4009531 ],
                   [0.56262296, 0.0015625 , 1.        , 0.5431719 ],
                   [0.16374707, 0.60728127, 0.813911  , 0.77823436],
                   [0.5841452 , 0.21182813, 0.65156907, 0.24670312],
                   [0.8056206 , 0.048875  , 0.90124124, 0.1553125 ],
                   [0.6729742 , 0.09317187, 0.7696956 , 0.21203125],
                   [0.3848478 , 0.002125  , 0.61522245, 0.303     ],
                   [0.61548007, 0.        , 0.7015925 , 0.097125  ],
                   [0.6381967 , 0.1865625 , 0.7184075 , 0.22534375],
                   [0.6274239 , 0.22104688, 0.71140516, 0.27134374],
                   [0.39566743, 0.24370313, 0.43578455, 0.284375  ],
                   [0.2673302 , 0.245625  , 0.3043794 , 0.27353126],
                   [0.7137705 , 0.15429688, 0.726815  , 0.17114063],
                   [0.6003747 , 0.25942189, 0.6438876 , 0.27320313],
                   [0.68845433, 0.13501562, 0.714637  , 0.17245312],
                   [0.69358313, 0.10959375, 0.7043091 , 0.12409375],
                   [0.493911  , 0.        , 0.72571427, 0.299     ],
                   [0.69576114, 0.15107812, 0.70714283, 0.16332813],
                   [0.4763466 , 0.7769531 , 0.54334897, 0.9675937 ]]]),
        np.array([[]]),
        np.array([[44, 67, 1, 49, 51, 51, 79, 1, 47, 47, 51, 51,\
                   56, 50, 56, 56, 79, 57, 81]]),
        np.array([b'000000397133.jpg'])
    ]
    ground_truth_2 = [
        np.array([[[0.51508695, 0.2911648 , 0.5903478 , 0.31360796],
                   [0.9358696 , 0.07528409, 0.99891305, 0.25      ],
                   [0.8242174 , 0.3309659 , 0.93508697, 0.47301137],
                   [0.77413046, 0.22599432, 0.9858696 , 0.8179261 ],
                   [0.32582608, 0.8575    , 0.98426086, 0.9984659 ],
                   [0.77795655, 0.6268466 , 0.89930433, 0.73434657],
                   [0.5396087 , 0.39053977, 0.8483913 , 0.5615057 ],
                   [0.58473915, 0.75661933, 0.5998261 , 0.83579546],
                   [0.80391306, 0.6129829 , 0.8733478 , 0.66201705],
                   [0.8737391 , 0.6579546 , 0.943     , 0.7053693 ],
                   [0.775     , 0.6549716 , 0.8227391 , 0.6882955 ],
                   [0.8130869 , 0.58292615, 0.90526086, 0.62551135],
                   [0.7844348 , 0.68735796, 0.98182607, 0.83329546],
                   [0.872     , 0.6190057 , 0.9306522 , 0.6591761 ]]]),
        np.array([[]]),
        np.array([[64, 62, 62, 67, 82, 52, 79, 81, 55, 55, 55, 55, 62, 55]]),
        np.array([b'000000037777.jpg'])
    ]
    # Before any update the result is 0; updating with the same image twice
    # must not change the score, a second image does, and reset() restores
    # the single-image score.
    self.assertEqual(mAP.result(), 0)
    mAP.update(detection, ground_truth)
    mAP.update(detection, ground_truth)
    self.assertEqual(format(mAP.result(), '.5f'),
                     '0.18182')
    mAP.update(detection_2, ground_truth_2)
    self.assertEqual(format(mAP.result(), '.5f'),
                     '0.20347')
    mAP.reset()
    mAP.update(detection, ground_truth)
    self.assertEqual(format(mAP.result(), '.5f'),
                     '0.18182')
    # Malformed ground truth: class array nested one level too deep.
    ground_truth_1 = [
        np.array([[[0.51508695, 0.2911648 , 0.5903478 , 0.31360796],
                   [0.872     , 0.6190057 , 0.9306522 , 0.6591761 ]]]),
        np.array([[]]),
        np.array([[[64, 62]]]),
        np.array([b'000000037777.jpg'])
    ]
    self.assertRaises(ValueError, mAP.update, detection, ground_truth_1)
    # Malformed ground truth: class count does not match the box count.
    ground_truth_2 = [
        np.array([[[0.51508695, 0.2911648 , 0.5903478 , 0.31360796],
                   [0.872     , 0.6190057 , 0.9306522 , 0.6591761 ]]]),
        np.array([[]]),
        np.array([[64]]),
        np.array([b'000000037700.jpg'])
    ]
    self.assertRaises(ValueError, mAP.update, detection, ground_truth_2)
    detection_1 = [
        np.array([[[0.16117382, 0.59801614, 0.81511605, 0.7858219 ],
                   [0.5589304 , 0.        , 0.98301625, 0.520178  ]]]),
        np.array([[0.9267181 , 0.8510787 , 0.60418576, 0.35155892, 0.31158054]]),
        np.array([[ 1., 67., 51., 79., 47.]])
    ]
    # Detection with 2 boxes but 5 scores/classes must be rejected.
    ground_truth_1 = [
        np.array([[[0.51508695, 0.2911648 , 0.5903478 , 0.31360796],
                   [0.872     , 0.6190057 , 0.9306522 , 0.6591761 ]]]),
        np.array([[]]),
        np.array([[64, 62]]),
        np.array([b'000000011.jpg'])
    ]
    self.assertRaises(ValueError, mAP.update, detection_1, ground_truth_1)
    ground_truth_2 = [
        np.array([[[0.51508695, 0.2911648 , 0.5903478 , 0.31360796],
                   [0.872     , 0.6190057 , 0.9306522 , 0.6591761 ]]]),
        np.array([[]]),
        np.array([[64, 62]]),
        np.array([b'000000012.jpg'])
    ]
    # Detection whose score array length disagrees with its class array.
    detection_2 = [
        np.array([[[0.16117382, 0.59801614, 0.81511605, 0.7858219 ],
                   [0.5589304 , 0.        , 0.98301625, 0.520178  ]]]),
        np.array([[0.9267181 , 0.8510787]]),
        np.array([[ 1., 67., 51., 79., 47.]])
    ]
    self.assertRaises(ValueError, mAP.update, detection_2, ground_truth_2)
def test_tensorflow_VOCmAP(self):
import json
import os
metrics = METRICS('tensorflow')
fake_dict = {
'info': {},
'licenses':{},
'images':[{
'file_name': '000000397133.jpg',
'height': 100,
'width': 100,
'id': 397133
}],
'annotations':[{
'category_id': 18,
'id': 1768,
'iscrowd': 0,
'image_id': 397133,
'bbox': [473.07, 395.93, 38.65, 28.67]
}],
'categories':[{
'supercategory': 'animal',
'id': 18,
'name': 'dog'
}]
}
fake_json = json.dumps(fake_dict)
with open('anno.json', 'w') as f:
f.write(fake_json)
mAP = metrics['VOCmAP']('anno.json')
self.assertEqual(mAP.iou_thrs, 0.5)
self.assertEqual(mAP.map_points, 0)
self.assertEqual(mAP.category_map_reverse['dog'], 1)
detection = [
np.array([[5]]),
np.array([[5]]),
np.array([[[0.16117382, 0.59801614, 0.81511605, 0.7858219 ],
[0.5589304 , 0. , 0.98301625, 0.520178 ],
[0.62706745, 0.35748824, 0.6892729 , 0.41513762],
[0.40032804, 0.01218696, 0.6924763 , 0.30341768],
[0.62706745, 0.35748824, 0.6892729 , 0.41513762]]]),
np.array([[0.9267181 , 0.8510787 , 0.60418576, 0.35155892, 0.31158054]]),
np.array([[ 1., 67., 51., 79., 47.]])
]
ground_truth = [
np.array([[[0.5633255 , 0.34003124, 0.69857144, 0.4009531 ],
[0.4763466 , 0.7769531 , 0.54334897, 0.9675937 ]]]),
np.array([['a', 'b']]),
np.array([[]]),
np.array([b'000000397133.jpg'])
]
self.assertRaises(ValueError, mAP.update, detection, ground_truth)
os.remove('anno.json')
mAP = metrics['VOCmAP']()
detection = [
np.array([[[0.16117382, 0.59801614, 0.81511605, 0.7858219 ],
[0.5589304 , 0. , 0.98301625, 0.520178 ],
[0.62706745, 0.35748824, 0.6892729 , 0.41513762],
[0.40032804, 0.01218696, 0.6924763 , 0.30341768],
[0.62706745, 0.35748824, 0.6892729 , 0.41513762]]]),
np.array([[0.9267181 , 0.8510787 , 0.60418576, 0.35155892, 0.31158054]]),
np.array([[ 1., 67., 51., 79., 47.]])
]
detection_2 = [
np.array([[8]]),
np.array([[[0.82776225, 0.5865939 , 0.8927653 , 0.6302338 ],
[0.8375764 , 0.6424138 , 0.9055594 , 0.6921875 ],
[0.57902956, 0.39394334, 0.8342961 , 0.5577197 ],
[0.7949219 , 0.6513021 , 0.8472295 , 0.68427753],
[0.809729 , 0.5947042 , 0.8539927 , 0.62916476],
[0.7258591 , 0.08907133, 1. , 0.86224866],
[0.43100086, 0.37782395, 0.8384069 , 0.5616918 ],
[0.32005906, 0.84334356, 1. , 1. ]]]),
np.array([[0.86698544, 0.7562499 , 0.66414887, 0.64498234,\
0.63083494,0.46618757, 0.3914739 , 0.3094324 ]]),
np.array([[55., 55., 79., 55., 55., 67., 79., 82.]])
]
ground_truth = [
np.array([[[0.5633255 , 0.34003124, 0.69857144, 0.4009531 ],
[0.56262296, 0.0015625 , 1. , 0.5431719 ],
[0.16374707, 0.60728127, 0.813911 , 0.77823436],
[0.5841452 , 0.21182813, 0.65156907, 0.24670312],
[0.8056206 , 0.048875 , 0.90124124, 0.1553125 ],
[0.6729742 , 0.09317187, 0.7696956 , 0.21203125],
[0.3848478 , 0.002125 , 0.61522245, 0.303 ],
[0.61548007, 0. , 0.7015925 , 0.097125 ],
[0.6381967 , 0.1865625 , 0.7184075 , 0.22534375],
[0.6274239 , 0.22104688, 0.71140516, 0.27134374],
[0.39566743, 0.24370313, 0.43578455, 0.284375 ],
[0.2673302 , 0.245625 , 0.3043794 , 0.27353126],
[0.7137705 , 0.15429688, 0.726815 , 0.17114063],
[0.6003747 , 0.25942189, 0.6438876 , 0.27320313],
[0.68845433, 0.13501562, 0.714637 , 0.17245312],
[0.69358313, 0.10959375, 0.7043091 , 0.12409375],
[0.493911 , 0. , 0.72571427, 0.299 ],
[0.69576114, 0.15107812, 0.70714283, 0.16332813],
[0.4763466 , 0.7769531 , 0.54334897, 0.9675937 ]]]),
np.array([[]]),
np.array([[44, 67, 1, 49, 51, 51, 79, 1, 47, 47, 51, 51,\
56, 50, 56, 56, 79, 57, 81]]),
np.array([b'000000397133.jpg'])
]
ground_truth_2 = [
np.array([[[0.51508695, 0.2911648 , 0.5903478 , 0.31360796],
[0.9358696 , 0.07528409, 0.99891305, 0.25 ],
[0.8242174 , 0.3309659 , 0.93508697, 0.47301137],
[0.77413046, 0.22599432, 0.9858696 , 0.8179261 ],
[0.32582608, 0.8575 , 0.98426086, 0.9984659 ],
[0.77795655, 0.6268466 , 0.89930433, 0.73434657],
[0.5396087 , 0.39053977, 0.8483913 , 0.5615057 ],
[0.58473915, 0.75661933, 0.5998261 , 0.83579546],
[0.80391306, 0.6129829 , 0.8733478 , 0.66201705],
[0.8737391 , 0.6579546 , 0.943 , 0.7053693 ],
[0.775 , 0.6549716 , 0.8227391 , 0.6882955 ],
[0.8130869 , 0.58292615, 0.90526086, 0.62551135],
[0.7844348 , 0.68735796, 0.98182607, 0.83329546],
[0.872 , 0.6190057 , 0.9306522 , 0.6591761 ]]]),
np.array([[]]),
np.array([[64, 62, 62, 67, 82, 52, 79, 81, 55, 55, 55, 55, 62, 55]]),
np.array([b'000000037777.jpg'])
]
self.assertEqual(mAP.result(), 0)
mAP.update(detection, ground_truth)
mAP.update(detection, ground_truth)
self.assertEqual(format(mAP.result(), '.5f'),
'0.18182')
mAP.update(detection_2, ground_truth_2)
self.assertEqual(format(mAP.result(), '.5f'),
'0.20347')
mAP.reset()
mAP.update(detection, ground_truth)
self.assertEqual(format(mAP.result(), '.5f'),
'0.18182')
ground_truth_1 = [
np.array([[[0.51508695, 0.2911648 , 0.5903478 , 0.31360796],
[0.872 , 0.6190057 , 0.9306522 , 0.6591761 ]]]),
np.array([[]]),
np.array([[[64, 62]]]),
np.array([b'000000037777.jpg'])
]
self.assertRaises(ValueError, mAP.update, detection, ground_truth_1)
ground_truth_2 = [
np.array([[[0.51508695, 0.2911648 , 0.5903478 , 0.31360796],
[0.872 , 0.6190057 , 0.9306522 , 0.6591761 ]]]),
np.array([[]]),
np.array([[64]]),
np.array([b'000000037700.jpg'])
]
self.assertRaises(ValueError, mAP.update, detection, ground_truth_2)
detection_1 = [
np.array([[[0.16117382, 0.59801614, 0.81511605, 0.7858219 ],
[0.5589304 , 0. , 0.98301625, 0.520178 ]]]),
np.array([[0.9267181 , 0.8510787 , 0.60418576, 0.35155892, 0.31158054]]),
np.array([[ 1., 67., 51., 79., 47.]])
]
ground_truth_1 = [
np.array([[[0.51508695, 0.2911648 , 0.5903478 , 0.31360796],
[0.872 , 0.6190057 , 0.9306522 , 0.6591761 ]]]),
np.array([[]]),
np.array([[64, 62]]),
np.array([b'000000011.jpg'])
]
self.assertRaises(ValueError, mAP.update, detection_1, ground_truth_1)
ground_truth_2 = [
np.array([[[0.51508695, 0.2911648 , 0.5903478 , 0.31360796],
[0.872 , 0.6190057 , 0.9306522 , 0.6591761 ]]]),
np.array([[]]),
np.array([[64, 62]]),
np.array([b'000000012.jpg'])
]
detection_2 = [
np.array([[[0.16117382, 0.59801614, 0.81511605, 0.7858219 ],
[0.5589304 , 0. , 0.98301625, 0.520178 ]]]),
np.array([[0.9267181 , 0.8510787]]),
np.array([[ 1., 67., 51., 79., 47.]])
]
self.assertRaises(ValueError, mAP.update, detection_2, ground_truth_2)
def test_tensorflow_COCOmAP(self):
import json
import os
metrics = METRICS('tensorflow')
fake_dict = {
'info': {},
'licenses':{},
'images':[{
'file_name': '000000397133.jpg',
'height': 100,
'width': 100,
'id': 397133
}],
'annotations':[{
'category_id': 18,
'id': 1768,
'iscrowd': 0,
'image_id': 397133,
'bbox': [473.07, 395.93, 38.65, 28.67]
}],
'categories':[{
'supercategory': 'animal',
'id': 18,
'name': 'dog'
}]
}
fake_json = json.dumps(fake_dict)
with open('anno.json', 'w') as f:
f.write(fake_json)
mAP = metrics['COCOmAP']('anno.json')
self.assertEqual(mAP.category_map_reverse['dog'], 1)
detection = [
np.array([[5]]),
np.array([[5]]),
np.array([[[0.16117382, 0.59801614, 0.81511605, 0.7858219 ],
[0.5589304 , 0. , 0.98301625, 0.520178 ],
[0.62706745, 0.35748824, 0.6892729 , 0.41513762],
[0.40032804, 0.01218696, 0.6924763 , 0.30341768],
[0.62706745, 0.35748824, 0.6892729 , 0.41513762]]]),
np.array([[0.9267181 , 0.8510787 , 0.60418576, 0.35155892, 0.31158054]]),
np.array([[ 1., 67., 51., 79., 47.]])
]
ground_truth = [
np.array([[[0.5633255 , 0.34003124, 0.69857144, 0.4009531 ],
[0.4763466 , 0.7769531 , 0.54334897, 0.9675937 ]]]),
np.array([['a', 'b']]),
np.array([[]]),
np.array([b'000000397133.jpg'])
]
self.assertRaises(ValueError, mAP.update, detection, ground_truth)
os.remove('anno.json')
mAP = metrics['COCOmAP']()
detection = [
np.array([[[0.16117382, 0.59801614, 0.81511605, 0.7858219 ],
[0.5589304 , 0. , 0.98301625, 0.520178 ],
[0.62706745, 0.35748824, 0.6892729 , 0.41513762],
[0.40032804, 0.01218696, 0.6924763 , 0.30341768],
[0.62706745, 0.35748824, 0.6892729 , 0.41513762]]]),
np.array([[0.9267181 , 0.8510787 , 0.60418576, 0.35155892, 0.31158054]]),
np.array([[ 1., 67., 51., 79., 47.]])
]
detection_2 = [
np.array([[8]]),
np.array([[[0.82776225, 0.5865939 , 0.8927653 , 0.6302338 ],
[0.8375764 , 0.6424138 , 0.9055594 , 0.6921875 ],
[0.57902956, 0.39394334, 0.8342961 , 0.5577197 ],
[0.7949219 , 0.6513021 , 0.8472295 , 0.68427753],
[0.809729 , 0.5947042 , 0.8539927 , 0.62916476],
[0.7258591 , 0.08907133, 1. , 0.86224866],
[0.43100086, 0.37782395, 0.8384069 , 0.5616918 ],
[0.32005906, 0.84334356, 1. , 1. ]]]),
np.array([[0.86698544, 0.7562499 , 0.66414887, 0.64498234,\
0.63083494,0.46618757, 0.3914739 , 0.3094324 ]]),
np.array([[55., 55., 79., 55., 55., 67., 79., 82.]])
]
ground_truth = [
np.array([[[0.5633255 , 0.34003124, 0.69857144, 0.4009531 ],
[0.56262296, 0.0015625 , 1. , 0.5431719 ],
[0.16374707, 0.60728127, 0.813911 , 0.77823436],
[0.5841452 , 0.21182813, 0.65156907, 0.24670312],
[0.8056206 , 0.048875 , 0.90124124, 0.1553125 ],
[0.6729742 , 0.09317187, 0.7696956 , 0.21203125],
[0.3848478 , 0.002125 , 0.61522245, 0.303 ],
[0.61548007, 0. , 0.7015925 , 0.097125 ],
[0.6381967 , 0.1865625 , 0.7184075 , 0.22534375],
[0.6274239 , 0.22104688, 0.71140516, 0.27134374],
[0.39566743, 0.24370313, 0.43578455, 0.284375 ],
[0.2673302 , 0.245625 , 0.3043794 , 0.27353126],
[0.7137705 , 0.15429688, 0.726815 , 0.17114063],
[0.6003747 , 0.25942189, 0.6438876 , 0.27320313],
[0.68845433, 0.13501562, 0.714637 , 0.17245312],
[0.69358313, 0.10959375, 0.7043091 , 0.12409375],
[0.493911 , 0. , 0.72571427, 0.299 ],
[0.69576114, 0.15107812, 0.70714283, 0.16332813],
[0.4763466 , 0.7769531 , 0.54334897, 0.9675937 ]]]),
np.array([[]]),
np.array([[44, 67, 1, 49, 51, 51, 79, 1, 47, 47, 51, 51,\
56, 50, 56, 56, 79, 57, 81]]),
np.array([b'000000397133.jpg'])
]
ground_truth_2 = [
np.array([[[0.51508695, 0.2911648 , 0.5903478 , 0.31360796],
[0.9358696 , 0.07528409, 0.99891305, 0.25 ],
[0.8242174 , 0.3309659 , 0.93508697, 0.47301137],
[0.77413046, 0.22599432, 0.9858696 , 0.8179261 ],
[0.32582608, 0.8575 , 0.98426086, 0.9984659 ],
[0.77795655, 0.6268466 , 0.89930433, 0.73434657],
[0.5396087 , 0.39053977, 0.8483913 , 0.5615057 ],
[0.58473915, 0.75661933, 0.5998261 , 0.83579546],
[0.80391306, 0.6129829 , 0.8733478 , 0.66201705],
[0.8737391 , 0.6579546 , 0.943 , 0.7053693 ],
[0.775 , 0.6549716 , 0.8227391 , 0.6882955 ],
[0.8130869 , 0.58292615, 0.90526086, 0.62551135],
[0.7844348 , 0.68735796, 0.98182607, 0.83329546],
[0.872 , 0.6190057 , 0.9306522 , 0.6591761 ]]]),
np.array([[]]),
np.array([[64, 62, 62, 67, 82, 52, 79, 81, 55, 55, 55, 55, 62, 55]]),
np.array([b'000000037777.jpg'])
]
self.assertEqual(mAP.result(), 0)
mAP.update(detection, ground_truth)
mAP.update(detection, ground_truth)
self.assertEqual(format(mAP.result(), '.5f'),
'0.14149')
mAP.update(detection_2, ground_truth_2)
self.assertEqual(format(mAP.result(), '.5f'),
'0.13366')
mAP.reset()
mAP.update(detection, ground_truth)
self.assertEqual(format(mAP.result(), '.5f'),
'0.14149')
ground_truth_1 = [
np.array([[[0.51508695, 0.2911648 , 0.5903478 , 0.31360796],
[0.872 , 0.6190057 , 0.9306522 , 0.6591761 ]]]),
np.array([[]]),
np.array([[[64, 62]]]),
np.array([b'000000037777.jpg'])
]
self.assertRaises(ValueError, mAP.update, detection, ground_truth_1)
ground_truth_2 = [
np.array([[[0.51508695, 0.2911648 , 0.5903478 , 0.31360796],
[0.872 , 0.6190057 , 0.9306522 , 0.6591761 ]]]),
np.array([[]]),
np.array([[64]]),
np.array([b'000000037700.jpg'])
]
self.assertRaises(ValueError, mAP.update, detection, ground_truth_2)
detection_1 = [
np.array([[[0.16117382, 0.59801614, 0.81511605, 0.7858219 ],
[0.5589304 , 0. , 0.98301625, 0.520178 ]]]),
np.array([[0.9267181 , 0.8510787 , 0.60418576, 0.35155892, 0.31158054]]),
np.array([[ 1., 67., 51., 79., 47.]])
]
ground_truth_1 = [
np.array([[[0.51508695, 0.2911648 , 0.5903478 , 0.31360796],
[0.872 , 0.6190057 , 0.9306522 , 0.6591761 ]]]),
np.array([[]]),
np.array([[64, 62]]),
np.array([b'000000011.jpg'])
]
self.assertRaises(ValueError, mAP.update, detection_1, ground_truth_1)
ground_truth_2 = [
np.array([[[0.51508695, 0.2911648 , 0.5903478 , 0.31360796],
[0.872 , 0.6190057 , 0.9306522 , 0.6591761 ]]]),
np.array([[]]),
np.array([[64, 62]]),
np.array([b'000000012.jpg'])
]
detection_2 = [
np.array([[[0.16117382, 0.59801614, 0.81511605, 0.7858219 ],
[0.5589304 , 0. , 0.98301625, 0.520178 ]]]),
np.array([[0.9267181 , 0.8510787]]),
np.array([[ 1., 67., 51., 79., 47.]])
]
self.assertRaises(ValueError, mAP.update, detection_2, ground_truth_2)
def test__accuracy(self):
predicts1 = [1, 0, 1, 1]
labels1 = [0, 1, 1, 1]
predicts2 = [[0, 0], [0, 0]]
labels2 = [[0, 1], [1, 1]]
predicts3 = [[[0, 1], [0, 0], [0, 1]], [[0, 1], [0, 1], [0, 1]]]
labels3 = [[[0, 1], [0, 1], [1, 0]], [[1, 0], [1, 0], [1, 0]]]
predicts4 = [[0.2, 0.8], [0.1, 0.9], [0.3, 0.7], [0.4, 0.6]] #1,1,1,1
labels4 = [0, 1, 0, 0]
metrics = METRICS('pytorch')
acc = metrics['Accuracy']()
acc.update(predicts1, labels1)
acc_result = acc.result()
self.assertEqual(acc_result, 0.5)
acc.reset()
acc.update(predicts2, labels2)
self.assertEqual(acc.result(), 0.25)
acc.reset()
acc.update(predicts3, labels3)
self.assertEqual(acc.result(), 0.25)
acc.reset()
acc.update(predicts4, labels4)
self.assertEqual(acc.result(), 0.25)
metrics = METRICS('mxnet')
acc = metrics['Accuracy']()
acc.update(predicts1, labels1)
acc_result = acc.result()
self.assertEqual(acc_result, 0.5)
acc.reset()
acc.update(predicts2, labels2)
self.assertEqual(acc.result(), 0.25)
acc.reset()
acc.update(predicts3, labels3)
self.assertEqual(acc.result(), 0.25)
acc.reset()
acc.update(predicts4, labels4)
self.assertEqual(acc.result(), 0.25)
metrics = METRICS('onnxrt_qlinearops')
acc = metrics['Accuracy']()
acc.update(predicts1, labels1)
acc_result = acc.result()
self.assertEqual(acc_result, 0.5)
acc.reset()
acc.update(predicts2, labels2)
self.assertEqual(acc.result(), 0.25)
acc.reset()
acc.update(predicts3, labels3)
self.assertEqual(acc.result(), 0.25)
acc.reset()
acc.update(predicts4, labels4)
self.assertEqual(acc.result(), 0.25)
acc.reset()
acc.update(1, 1)
self.assertEqual(acc.result(), 1.0)
wrong_predictions = [1, 0, 0]
wrong_labels = [[0, 1, 1]]
self.assertRaises(ValueError, acc.update, wrong_predictions, wrong_labels)
def test_mxnet_accuracy(self):
metrics = METRICS('mxnet')
acc = metrics['Accuracy']()
predicts = [1, 0, 1, 1]
labels = [0, 1, 1, 1]
acc.update(predicts, labels)
acc_result = acc.result()
self.assertEqual(acc_result, 0.5)
def test_mse(self):
predicts1 = [1, 0, 0, 1]
labels1 = [0, 1, 0, 0]
predicts2 = [1, 1, 1, 1]
labels2 = [0, 1, 1, 0]
metrics = METRICS('onnxrt_qlinearops')
mse = metrics['MSE'](compare_label=False)
mse.update(predicts1, labels1)
mse_result = mse.result()
self.assertEqual(mse_result, 0.75)
mse.update(predicts2, labels2)
mse_result = mse.result()
self.assertEqual(mse_result, 0.625)
metrics = METRICS('tensorflow')
mse = metrics['MSE'](compare_label=False)
mse.update(predicts1, labels1)
mse_result = mse.result()
self.assertEqual(mse_result, 0.75)
mse.update(predicts2, labels2)
mse_result = mse.result()
self.assertEqual(mse_result, 0.625)
metrics = METRICS('mxnet')
mse = metrics['MSE']()
mse.update(predicts1, labels1)
mse_result = mse.result()
self.assertEqual(mse_result, 0.75)
mse.update(predicts2, labels2)
mse_result = mse.result()
self.assertEqual(mse_result, 0.625)
metrics = METRICS('pytorch')
mse = metrics['MSE']()
mse.update(predicts1, labels1)
mse_result = mse.result()
self.assertEqual(mse_result, 0.75)
mse.update(predicts2, labels2)
mse_result = mse.result()
self.assertEqual(mse_result, 0.625)
def test_mae(self):
predicts1 = [1, 0, 0, 1]
labels1 = [0, 1, 0, 0]
predicts2 = [1, 1, 1, 1]
labels2 = [1, 1, 1, 0]
metrics = METRICS('tensorflow')
mae = metrics['MAE']()
mae.update(predicts1, labels1)
mae_result = mae.result()
self.assertEqual(mae_result, 0.75)
mae.update(0, 1)
mae_result = mae.result()
self.assertEqual(mae_result, 0.8)
mae.reset()
mae.update(predicts2, labels2)
mae_result = mae.result()
self.assertEqual(mae_result, 0.25)
metrics = METRICS('pytorch')
mae = metrics['MAE']()
mae.update(predicts1, labels1)
mae_result = mae.result()
self.assertEqual(mae_result, 0.75)
mae.update(predicts2, labels2)
mae_result = mae.result()
self.assertEqual(mae_result, 0.5)
metrics = METRICS('mxnet')
mae = metrics['MAE']()
mae.update(predicts1, labels1)
mae_result = mae.result()
self.assertEqual(mae_result, 0.75)
mae.update(predicts2, labels2)
mae_result = mae.result()
self.assertEqual(mae_result, 0.5)
metrics = METRICS('onnxrt_qlinearops')
mae = metrics['MAE']()
mae.update(predicts1, labels1)
mae_result = mae.result()
self.assertEqual(mae_result, 0.75)
mae.update(predicts2, labels2)
mae_result = mae.result()
self.assertEqual(mae_result, 0.5)
def test_rmse(self):
predicts1 = [1, 0, 0, 1]
labels1 = [1, 0, 0, 0]
predicts2 = [1, 1, 1, 1]
labels2 = [1, 0, 0, 0]
metrics = METRICS('tensorflow')
rmse = metrics['RMSE']()
rmse.update(predicts1, labels1)
rmse_result = rmse.result()
self.assertEqual(rmse_result, 0.5)
rmse.reset()
rmse.update(predicts2, labels2)
rmse_result = rmse.result()
self.assertAlmostEqual(rmse_result, np.sqrt(0.75))
metrics = METRICS('pytorch')
rmse = metrics['RMSE']()
rmse.update(predicts1, labels1)
rmse_result = rmse.result()
self.assertEqual(rmse_result, 0.5)
rmse.update(predicts2, labels2)
rmse_result = rmse.result()
self.assertAlmostEqual(rmse_result, np.sqrt(0.5))
metrics = METRICS('mxnet')
rmse = metrics['RMSE']()
rmse.update(predicts1, labels1)
rmse_result = rmse.result()
self.assertEqual(rmse_result, 0.5)
rmse.update(predicts2, labels2)
rmse_result = rmse.result()
self.assertAlmostEqual(rmse_result, np.sqrt(0.5))
metrics = METRICS('onnxrt_qlinearops')
rmse = metrics['RMSE']()
rmse.update(predicts1, labels1)
rmse_result = rmse.result()
self.assertEqual(rmse_result, 0.5)
rmse.update(predicts2, labels2)
rmse_result = rmse.result()
self.assertAlmostEqual(rmse_result, np.sqrt(0.5))
def test_loss(self):
metrics = METRICS('pytorch')
loss = metrics['Loss']()
predicts = [1, 0, 0, 1]
labels = [0, 1, 0, 0]
loss.update(predicts, labels)
loss_result = loss.result()
self.assertEqual(loss_result, 0.5)
predicts = [1, 1, 0, 1]
labels = [0, 1, 0, 0]
loss.update(predicts, labels)
loss_result = loss.result()
self.assertEqual(loss_result, 0.625)
loss.reset()
predicts = [1, 0, 0, 1]
labels = [0, 1, 0, 0]
loss.update(predicts, labels)
self.assertEqual(loss.result(), 0.5)
metrics = METRICS('onnxrt_qlinearops')
loss = metrics['Loss']()
predicts = [1, 0, 0, 1]
labels = [0, 1, 0, 0]
loss.update(predicts, labels)
loss_result = loss.result()
self.assertEqual(loss_result, 0.5)
predicts = [1, 1, 0, 1]
labels = [0, 1, 0, 0]
loss.update(predicts, labels)
loss_result = loss.result()
self.assertEqual(loss_result, 0.625)
loss.reset()
predicts = [1, 0, 0, 1]
labels = [0, 1, 0, 0]
loss.update(predicts, labels)
self.assertEqual(loss.result(), 0.5)
if __name__ == "__main__":
unittest.main()
| 42.202236
| 93
| 0.489079
| 4,686
| 41,527
| 4.274861
| 0.08408
| 0.048223
| 0.020367
| 0.014677
| 0.918281
| 0.911991
| 0.907997
| 0.903704
| 0.898512
| 0.893021
| 0
| 0.318097
| 0.356226
| 41,527
| 983
| 94
| 42.245168
| 0.431211
| 0.008549
| 0
| 0.86036
| 0
| 0
| 0.043759
| 0.001166
| 0
| 0
| 0
| 0
| 0.130631
| 1
| 0.019144
| false
| 0
| 0.012387
| 0
| 0.032658
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
3dcf8ec871bd2f7af941dba5c1ddcaa1660aa959
| 42
|
py
|
Python
|
python/hello-world/hello_world.py
|
mmphego/exercism.io
|
026e8790d3302130cdb4e2efda8cd9e89e1c6055
|
[
"MIT"
] | null | null | null |
python/hello-world/hello_world.py
|
mmphego/exercism.io
|
026e8790d3302130cdb4e2efda8cd9e89e1c6055
|
[
"MIT"
] | null | null | null |
python/hello-world/hello_world.py
|
mmphego/exercism.io
|
026e8790d3302130cdb4e2efda8cd9e89e1c6055
|
[
"MIT"
] | null | null | null |
def hello():
return ("Hello, World!")
| 14
| 28
| 0.571429
| 5
| 42
| 4.8
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.214286
| 42
| 2
| 29
| 21
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0.309524
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
9a86d48e07e0a57e5d95ec4b4b7ac5c3aee37399
| 19,053
|
py
|
Python
|
bif_with_color/timescale/compare2.py
|
helene-todd/M2_thesis_code
|
f844d6652229c6abe09bd40aa43f5002faa9e5ba
|
[
"MIT"
] | null | null | null |
bif_with_color/timescale/compare2.py
|
helene-todd/M2_thesis_code
|
f844d6652229c6abe09bd40aa43f5002faa9e5ba
|
[
"MIT"
] | null | null | null |
bif_with_color/timescale/compare2.py
|
helene-todd/M2_thesis_code
|
f844d6652229c6abe09bd40aa43f5002faa9e5ba
|
[
"MIT"
] | null | null | null |
from matplotlib import cm, rcParams
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib as matplotlib
import numpy as np
import math as math
import random as rand
import os, sys
import csv
import argparse
#plt.rcParams['axes.xmargin'] = 0
#plt.rcParams['axes.facecolor'] = 'black'
#matplotlib.pyplot.xkcd(scale=.4, length=100, randomness=2)
c = ['#aa3863', '#3b7d86']
s = ['-', '--']
del_line = 'k'
fig, ax = plt.subplots(2, 3, figsize=(16,8), sharey='row', sharex='col')
min_val = 10**0
max_val = 40
def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100):
new_cmap = colors.LinearSegmentedColormap.from_list(
'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval),
cmap(np.linspace(minval, maxval, n)))
return new_cmap
def row_count(filename):
with open(filename) as in_file:
return sum(1 for _ in in_file)
""" beta = 0.1, gamma = 0.1 """
I = [[]]
phi = [[]]
stability = []
for filename in ['beta=0.1/gamma = 0.1/gamma_0.1.dat', 'beta=0.1/gamma = 0.1/stable1.dat', 'beta=0.1/gamma = 0.1/stable2.dat'] :
with open(filename, newline='') as file:
datareader = csv.reader(file, delimiter=' ')
last_line_nb = row_count(filename)
last_I = -999
last_phi = -999
last_stability = 0
# seperate into sublists by checking if two consecutive values are duplicates
for row in datareader:
# the 2nd condition avoids a list with one value when two consecutive values are duplicates
if last_I == float(row[0]) and len(I[-1]) > 1 :
if last_stability != int(row[3]):
I[-1].append(last_I)
phi[-1].append(last_phi)
I.append([])
phi.append([])
if last_stability != 0 :
stability.append(last_stability)
if last_I != -999 :
I[-1].append(last_I)
phi[-1].append(last_phi)
if last_stability != int(row[3]) and len(I[-1]) > 1:
I.append([])
phi.append([])
if last_stability != 0 :
stability.append(last_stability)
# if at last line, then stop checking for consecutive values and just add the remaining data
if last_line_nb == datareader.line_num:
I[-1].append(float(row[0]))
phi[-1].append(float(row[1]))
stability.append(int(row[3]))
last_I = float(row[0])
last_phi = float(row[1])
last_stability = int(row[3])
Imin, Imax = 2, 0
for l in range(len(I)) :
for k in range(len(I[l])) :
if phi[l][k] not in [0, 1, 0.5] and I[l][k] > Imax :
Imax = I[l][k]
if phi[l][k] == 0.5 and stability[l] == 1 and I[l][k] < Imin :
Imin = I[l][k]
data = np.load('beta=0.1/gamma = 0.1/mesh_cycles.npz')
legend_stable, legend_unstable = False, False
for k in range(len(I)) :
if stability[k] == 1 and legend_stable == False :
ax[0,0].plot(I[k], phi[k], color='black', linewidth=2, linestyle=s[stability[k]-1], label='stable')
legend_stable = True
if stability[k] == 1 and legend_stable == True :
ax[0,0].plot(I[k], phi[k], color='black', linewidth=2, linestyle=s[stability[k]-1])
if stability[k] == 2 and legend_unstable == False :
ax[0,0].plot(I[k], phi[k], color='black', linewidth=2, linestyle=s[stability[k]-1], label='unstable')
legend_unstable = True
if stability[k] == 2 and legend_unstable == True :
ax[0,0].plot(I[k], phi[k], color='black', linewidth=2, linestyle=s[stability[k]-1])
ax[0,0].legend(loc='upper right', bbox_to_anchor=(1, 0.95), fontsize=10)
# regime delimiter to make things more visual
ax[0,0].set_ylim(-0.05, 1.05)
ax[0,0].set_xlim(1, 2)
ax[0,0].set_title('$\gamma$=0.1, $\\beta$=0.1', fontsize=13)
ax[0,0].set_ylabel('Phase Difference $\phi$', size=12)
ax[0,0].pcolormesh(data['I'], data['phi'], data['cycles'], cmap='Spectral', shading='smooth', edgecolors=None, vmin=min_val, vmax=max_val)
# the delimiter line
#data = np.load('beta=0.1/gamma = 0.1/line.npz')
#ax[0,0].plot(data['I'], data['phi'], color=del_line, linestyle='-.', alpha=0.4)
#ax[0,0].plot([min(data['I']), min(data['I'])], [0, 1], color=del_line, linestyle='-.', alpha=0.5)
""" beta = 0.1, gamma = 0.2 """
I = [[]]
phi = [[]]
stability = []
for filename in ['beta=0.1/gamma = 0.2/gamma_0.2.dat', 'beta=0.1/gamma = 0.2/stable1.dat', 'beta=0.1/gamma = 0.2/stable2.dat']:
with open(filename, newline='') as file:
datareader = csv.reader(file, delimiter=' ')
last_line_nb = row_count(filename)
last_I = -999
last_phi = -999
last_stability = 0
# seperate into sublists by checking if two consecutive values are duplicates
for row in datareader:
# the 2nd condition avoids a list with one value when two consecutive values are duplicates
if last_I == float(row[0]) and len(I[-1]) > 1 :
if last_stability != int(row[3]):
I[-1].append(last_I)
phi[-1].append(last_phi)
I.append([])
phi.append([])
if last_stability != 0 :
stability.append(last_stability)
if last_I != -999 :
I[-1].append(last_I)
phi[-1].append(last_phi)
if last_stability != int(row[3]) and len(I[-1]) > 1:
I.append([])
phi.append([])
if last_stability != 0 :
stability.append(last_stability)
# if at last line, then stop checking for consecutive values and just add the remaining data
if last_line_nb == datareader.line_num:
I[-1].append(float(row[0]))
phi[-1].append(float(row[1]))
stability.append(int(row[3]))
last_I = float(row[0])
last_phi = float(row[1])
last_stability = int(row[3])
Imin, Imax = 2, 0
for l in range(len(I)) :
for k in range(len(I[l])) :
if phi[l][k] not in [0, 1, 0.5] and I[l][k] > Imax :
Imax = I[l][k]
if phi[l][k] == 0.5 and stability[l] == 1 and I[l][k] < Imin :
Imin = I[l][k]
data = np.load('beta=0.1/gamma = 0.2/mesh_cycles.npz')
for k in range(len(I)) :
if stability[k] == 1 :
ax[0,1].plot(I[k], phi[k], color='black', linewidth=2, linestyle=s[stability[k]-1], label='stable')
if stability[k] == 2 :
ax[0,1].plot(I[k], phi[k], color='black', linewidth=2, linestyle=s[stability[k]-1], label='unstable')
# regime delimiter to make things more visual
ax[0,1].set_ylim(-0.05, 1.05)
ax[0,1].set_xlim(1, 2)
ax[0,1].set_title('$\gamma$=0.2, $\\beta$=0.1', fontsize=13)
ax[0,1].pcolormesh(data['I'], data['phi'], data['cycles'], cmap='Spectral', shading='smooth', edgecolors=None, vmin=min_val, vmax=max_val)
""" beta = 0.1, gamma = 0.4 """
I = [[]]
phi = [[]]
stability = []
for filename in ['beta=0.1/gamma = 0.4/gamma_0.4.dat', 'beta=0.1/gamma = 0.4/stable1.dat', 'beta=0.1/gamma = 0.4/stable2.dat']:
with open(filename, newline='') as file:
datareader = csv.reader(file, delimiter=' ')
last_line_nb = row_count(filename)
last_I = -999
last_phi = -999
last_stability = 0
# seperate into sublists by checking if two consecutive values are duplicates
for row in datareader:
# the 2nd condition avoids a list with one value when two consecutive values are duplicates
if last_I == float(row[0]) and len(I[-1]) > 1 :
if last_stability != int(row[3]):
I[-1].append(last_I)
phi[-1].append(last_phi)
I.append([])
phi.append([])
if last_stability != 0 :
stability.append(last_stability)
if last_I != -999 :
I[-1].append(last_I)
phi[-1].append(last_phi)
if last_stability != int(row[3]) and len(I[-1]) > 1:
I.append([])
phi.append([])
if last_stability != 0 :
stability.append(last_stability)
# if at last line, then stop checking for consecutive values and just add the remaining data
if last_line_nb == datareader.line_num:
I[-1].append(float(row[0]))
phi[-1].append(float(row[1]))
stability.append(int(row[3]))
last_I = float(row[0])
last_phi = float(row[1])
last_stability = int(row[3])
Imin, Imax = 2, 0
for l in range(len(I)) :
for k in range(len(I[l])) :
if phi[l][k] not in [0, 1, 0.5] and I[l][k] > Imax :
Imax = I[l][k]
if phi[l][k] == 0.5 and stability[l] == 1 and I[l][k] < Imin :
Imin = I[l][k]
data = np.load('beta=0.1/gamma = 0.4/mesh_cycles.npz')
for k in range(len(I)) :
if stability[k] == 1 :
ax[0,2].plot(I[k], phi[k], color='black', linewidth=2, linestyle=s[stability[k]-1], label='stable')
if stability[k] == 2 :
ax[0,2].plot(I[k], phi[k], color='black', linewidth=2, linestyle=s[stability[k]-1], label='unstable')
# regime delimiter to make things more visual
ax[0,2].set_ylim(-0.05, 1.05)
ax[0,2].set_xlim(1, 2)
ax[0,2].set_title('$\gamma$=0.4, $\\beta$=0.1', fontsize=13)
ax[0,2].pcolormesh(data['I'], data['phi'], data['cycles'], cmap='Spectral', shading='smooth', edgecolors=None, vmin=min_val, vmax=max_val)
# the delimiter line
#data = np.load('beta=0.1/gamma = 0.4/line.npz')
#ax[0,1].plot(data['I'], data['phi'], color=del_line, linestyle='-.', alpha=0.4)
#ax[0,1].plot([min(data['I']), min(data['I'])], [0, 1], color=del_line, linestyle='-.', alpha=0.5)
""" beta = 0.2, gamma = 0.1 """
I = [[]]
phi = [[]]
stability = []
for filename in ['beta=0.2/gamma = 0.1/gamma_0.1.dat', 'beta=0.2/gamma = 0.1/stable1.dat', 'beta=0.2/gamma = 0.1/stable2.dat'] :
with open(filename, newline='') as file:
datareader = csv.reader(file, delimiter=' ')
last_line_nb = row_count(filename)
last_I = -999
last_phi = -999
last_stability = 0
# seperate into sublists by checking if two consecutive values are duplicates
for row in datareader:
# the 2nd condition avoids a list with one value when two consecutive values are duplicates
if last_I == float(row[0]) and len(I[-1]) > 1 :
if last_stability != int(row[3]):
I[-1].append(last_I)
phi[-1].append(last_phi)
I.append([])
phi.append([])
if last_stability != 0 :
stability.append(last_stability)
if last_I != -999 :
I[-1].append(last_I)
phi[-1].append(last_phi)
if last_stability != int(row[3]) and len(I[-1]) > 1:
I.append([])
phi.append([])
if last_stability != 0 :
stability.append(last_stability)
# if at last line, then stop checking for consecutive values and just add the remaining data
if last_line_nb == datareader.line_num:
I[-1].append(float(row[0]))
phi[-1].append(float(row[1]))
stability.append(int(row[3]))
last_I = float(row[0])
last_phi = float(row[1])
last_stability = int(row[3])
# --- Post-processing for the gamma = 0.1 data parsed above ---
# Scan every (I, phi) point to find:
#   Imax: largest current on a branch that is not one of the trivial
#         phase lines phi = 0, 0.5 or 1
#   Imin: smallest current on the stable (stability == 1) phi = 0.5 branch
# NOTE(review): Imin/Imax are recomputed for each panel but never used in
# the visible plotting code — possibly leftovers from an earlier version.
Imin, Imax = 2, 0
for l in range(len(I)) :
    for k in range(len(I[l])) :
        if phi[l][k] not in [0, 1, 0.5] and I[l][k] > Imax :
            Imax = I[l][k]
        if phi[l][k] == 0.5 and stability[l] == 1 and I[l][k] < Imin :
            Imin = I[l][k]
# Convergence-speed mesh for this parameter pair (panel background).
data = np.load('beta=0.2/gamma = 0.1/mesh_cycles.npz')
# Plot each branch segment: stability 1 -> 'stable', 2 -> 'unstable';
# the line style comes from the table `s` defined earlier in the file.
for k in range(len(I)) :
    if stability[k] == 1 :
        ax[1,0].plot(I[k], phi[k], color='black', linewidth=2, linestyle=s[stability[k]-1], label='stable')
    if stability[k] == 2 :
        ax[1,0].plot(I[k], phi[k], color='black', linewidth=2, linestyle=s[stability[k]-1], label='unstable')
# regime delimiter to make things more visual
ax[1,0].set_ylim(-0.05, 1.05)
ax[1,0].set_xlim(1, 2)
# NOTE(review): '$\gamma$' and '$\phi$' contain invalid escapes ('\g',
# '\p'); raw strings (r'$\gamma$...') would silence the DeprecationWarning.
ax[1,0].set_title('$\gamma$=0.1, $\\beta$=0.2', fontsize=13)
ax[1,0].set_xlabel('Current $I$', size=12)
ax[1,0].set_ylabel('Phase Difference $\phi$', size=12)
# NOTE(review): 'smooth' is not a documented pcolormesh shading value
# ('flat', 'nearest', 'gouraud', 'auto') — confirm it works with the
# matplotlib version in use.
ax[1,0].pcolormesh(data['I'], data['phi'], data['cycles'], cmap='Spectral', shading='smooth', edgecolors=None, vmin=min_val, vmax=max_val)
# the delimiter line
#data = np.load('beta=0.2/gamma = 0.1/line.npz')
#ax[1,0].plot(data['I'], data['phi'], color=del_line, linestyle='-.', alpha=0.4)
#ax[1,0].plot([min(data['I']), min(data['I'])], [0, 1], color=del_line, linestyle='-.', alpha=0.5)
""" beta = 0.2, gamma = 0.2 """
I = [[]]
phi = [[]]
stability = []
for filename in ['beta=0.2/gamma = 0.2/gamma_0.2.dat', 'beta=0.2/gamma = 0.2/stable1.dat', 'beta=0.2/gamma = 0.2/stable2.dat']:
with open(filename, newline='') as file:
datareader = csv.reader(file, delimiter=' ')
last_line_nb = row_count(filename)
last_I = -999
last_phi = -999
last_stability = 0
# seperate into sublists by checking if two consecutive values are duplicates
for row in datareader:
# the 2nd condition avoids a list with one value when two consecutive values are duplicates
if last_I == float(row[0]) and len(I[-1]) > 1 :
if last_stability != int(row[3]):
I[-1].append(last_I)
phi[-1].append(last_phi)
I.append([])
phi.append([])
if last_stability != 0 :
stability.append(last_stability)
if last_I != -999 :
I[-1].append(last_I)
phi[-1].append(last_phi)
if last_stability != int(row[3]) and len(I[-1]) > 1:
I.append([])
phi.append([])
if last_stability != 0 :
stability.append(last_stability)
# if at last line, then stop checking for consecutive values and just add the remaining data
if last_line_nb == datareader.line_num:
I[-1].append(float(row[0]))
phi[-1].append(float(row[1]))
stability.append(int(row[3]))
last_I = float(row[0])
last_phi = float(row[1])
last_stability = int(row[3])
Imin, Imax = 2, 0
for l in range(len(I)) :
for k in range(len(I[l])) :
if phi[l][k] not in [0, 1, 0.5] and I[l][k] > Imax :
Imax = I[l][k]
if phi[l][k] == 0.5 and stability[l] == 1 and I[l][k] < Imin :
Imin = I[l][k]
data = np.load('beta=0.2/gamma = 0.2/mesh_cycles.npz')
for k in range(len(I)) :
if stability[k] == 1 :
ax[1,1].plot(I[k], phi[k], color='black', linewidth=2, linestyle=s[stability[k]-1], label='stable')
if stability[k] == 2 :
ax[1,1].plot(I[k], phi[k], color='black', linewidth=2, linestyle=s[stability[k]-1], label='unstable')
# regime delimiter to make things more visual
ax[1,1].set_ylim(-0.05, 1.05)
ax[1,1].set_xlim(1, 2)
ax[1,1].set_title('$\gamma$=0.2, $\\beta$=0.2', fontsize=13)
ax[1,1].set_xlabel('Current $I$', size=12)
ax[1,1].pcolormesh(data['I'], data['phi'], data['cycles'], cmap='Spectral', shading='smooth', edgecolors=None, vmin=min_val, vmax=max_val)
""" beta = 0.2, gamma = 0.4 """
I = [[]]
phi = [[]]
stability = []
for filename in ['beta=0.2/gamma = 0.4/gamma_0.4.dat', 'beta=0.2/gamma = 0.4/stable1.dat', 'beta=0.2/gamma = 0.4/stable2.dat']:
with open(filename, newline='') as file:
datareader = csv.reader(file, delimiter=' ')
last_line_nb = row_count(filename)
last_I = -999
last_phi = -999
last_stability = 0
# seperate into sublists by checking if two consecutive values are duplicates
for row in datareader:
# the 2nd condition avoids a list with one value when two consecutive values are duplicates
if last_I == float(row[0]) and len(I[-1]) > 1 :
if last_stability != int(row[3]):
I[-1].append(last_I)
phi[-1].append(last_phi)
I.append([])
phi.append([])
if last_stability != 0 :
stability.append(last_stability)
if last_I != -999 :
I[-1].append(last_I)
phi[-1].append(last_phi)
if last_stability != int(row[3]) and len(I[-1]) > 1:
I.append([])
phi.append([])
if last_stability != 0 :
stability.append(last_stability)
# if at last line, then stop checking for consecutive values and just add the remaining data
if last_line_nb == datareader.line_num:
I[-1].append(float(row[0]))
phi[-1].append(float(row[1]))
stability.append(int(row[3]))
last_I = float(row[0])
last_phi = float(row[1])
last_stability = int(row[3])
Imin, Imax = 2, 0
for l in range(len(I)) :
for k in range(len(I[l])) :
if phi[l][k] not in [0, 1, 0.5] and I[l][k] > Imax :
Imax = I[l][k]
if phi[l][k] == 0.5 and stability[l] == 1 and I[l][k] < Imin :
Imin = I[l][k]
data = np.load('beta=0.2/gamma = 0.4/mesh_cycles.npz')
for k in range(len(I)) :
if stability[k] == 1 :
ax[1,2].plot(I[k], phi[k], color='black', linewidth=2, linestyle=s[stability[k]-1], label='stable')
if stability[k] == 2 :
ax[1,2].plot(I[k], phi[k], color='black', linewidth=2, linestyle=s[stability[k]-1], label='unstable')
# regime delimiter to make things more visual
ax[1,2].set_ylim(-0.05, 1.05)
ax[1,2].set_xlim(1, 2)
ax[1,2].set_title('$\gamma$=0.4, $\\beta$=0.2', fontsize=13)
ax[1,2].set_xlabel('Current $I$', size=12)
im = ax[1,2].pcolormesh(data['I'], data['phi'], data['cycles'], cmap='Spectral', shading='smooth', edgecolors=None, vmin=min_val, vmax=max_val)
# the delimiter line
#data = np.load('beta=0.2/gamma = 0.4/line.npz')
#ax[1,1].plot(data['I'], data['phi'], color=del_line, linestyle='-.', alpha=0.4)
#ax[1,1].plot([min(data['I']), min(data['I'])], [0, 1], color=del_line, linestyle='-.', alpha=0.5)
""" General Settings """
plt.suptitle('Bifurcation diagrams for moderately coupled neurons, with convergence speed', size=18)
fig.tight_layout()
right = 0.95
fig.subplots_adjust(right=right)
cbar = fig.colorbar(im, ax=ax[:])
print(cbar.ax.get_yticklabels()[:-1])
cbar.ax.set_yticklabels(['5', '10', '15', '20','25', '30', '35', '>40'])
cbar.set_label('Time (in $10^{-2}$ s) to converge towards synchrony', labelpad=20, fontsize=15)
plt.savefig('comparing_bifs_cv_speed6.png', dpi=600)
plt.show()
| 35.949057
| 143
| 0.555661
| 2,956
| 19,053
| 3.504398
| 0.074425
| 0.010426
| 0.034752
| 0.014673
| 0.895164
| 0.894777
| 0.879429
| 0.847476
| 0.820446
| 0.799788
| 0
| 0.053285
| 0.269144
| 19,053
| 529
| 144
| 36.017013
| 0.690628
| 0.152417
| 0
| 0.730878
| 0
| 0
| 0.099019
| 0.003397
| 0
| 0
| 0
| 0
| 0
| 1
| 0.005666
| false
| 0
| 0.028329
| 0
| 0.03966
| 0.002833
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9a8b0c10d58795e636f590f366b0cf0bec1a6156
| 190
|
py
|
Python
|
src/commandblock_py/commands/recipe.py
|
skandabhairava/Datapack_generator
|
47341151dbf94f1ccf5461e8824fbcec8c3951ba
|
[
"MIT"
] | 5
|
2021-08-03T07:02:22.000Z
|
2022-03-19T05:48:56.000Z
|
src/commandblock_py/commands/recipe.py
|
skandabhairava/Datapack_generator
|
47341151dbf94f1ccf5461e8824fbcec8c3951ba
|
[
"MIT"
] | 1
|
2021-08-03T06:31:24.000Z
|
2021-08-03T20:25:56.000Z
|
src/commandblock_py/commands/recipe.py
|
skandabhairava/Datapack_generator
|
47341151dbf94f1ccf5461e8824fbcec8c3951ba
|
[
"MIT"
] | 2
|
2021-08-03T21:01:13.000Z
|
2021-09-04T00:50:25.000Z
|
def give(selector:str = '@s', recipe:str = '*'):
    """Build a Minecraft ``/recipe give`` command string.

    ``selector`` is the target selector (default ``@s``); ``recipe`` is a
    recipe id or ``*`` for all recipes.
    """
    return ' '.join(('recipe', 'give', selector, recipe))
def take(selector:str = '@s', recipe:str = '*'):
    """Build a Minecraft ``/recipe take`` command string.

    ``selector`` is the target selector (default ``@s``); ``recipe`` is a
    recipe id or ``*`` for all recipes.
    """
    return ' '.join(('recipe', 'take', selector, recipe))
| 38
| 48
| 0.621053
| 26
| 190
| 4.538462
| 0.346154
| 0.20339
| 0.20339
| 0.305085
| 0.576271
| 0.576271
| 0.576271
| 0.576271
| 0
| 0
| 0
| 0
| 0.168421
| 190
| 5
| 49
| 38
| 0.746835
| 0
| 0
| 0
| 0
| 0
| 0.356021
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 8
|
9abeabfa54a439eb1ec37afd2f4dbcac9ad2f54e
| 4,551
|
py
|
Python
|
nasspython/nass_api.py
|
jackheinemann/nass_python
|
1cce7862c4ce671aef6336c2b91d172ea41f1cd1
|
[
"MIT"
] | 1
|
2022-03-03T19:47:52.000Z
|
2022-03-03T19:47:52.000Z
|
nasspython/nass_api.py
|
jackheinemann/nass_python
|
1cce7862c4ce671aef6336c2b91d172ea41f1cd1
|
[
"MIT"
] | null | null | null |
nasspython/nass_api.py
|
jackheinemann/nass_python
|
1cce7862c4ce671aef6336c2b91d172ea41f1cd1
|
[
"MIT"
] | 1
|
2021-12-08T14:45:52.000Z
|
2021-12-08T14:45:52.000Z
|
import requests
nass_url = "http://quickstats.nass.usda.gov/api/"
def nass_count(api_key, source_desc=None, sector_desc=None, group_desc=None, commodity_desc=None, short_desc=None, domain_desc=None, agg_level_desc=None, domaincat_desc=None, statisticcat_desc=None, state_name=None, asd_desc=None, county_name=None, region_desc=None, zip_5=None, watershed_desc=None, year=None, freq_desc=None, reference_period_desc=None):
    """Query the NASS Quick Stats ``get_counts`` endpoint.

    Builds the request URL from every non-None keyword parameter and
    returns the record count on HTTP 2xx, otherwise an error string of
    the form ``'Response code <status>: <message>'``.
    """
    # vars() must stay the very first statement: it snapshots the current
    # locals, i.e. exactly the keyword parameters above.
    inputs = vars()
    inputs.pop('api_key')  # api_key is hardcoded into the base url below
    # set up the url with api key
    base_url = nass_url + 'get_counts/get?key=' + api_key
    # keep only the parameters the caller actually supplied
    for item in list(inputs):
        if inputs[item] is None:
            inputs.pop(item)
        else:
            # desc-style inputs must be uppercase strings
            # NOTE(review): this endpoint has no 'numeric_vals' parameter,
            # so this guard is always true here (copied from nass_data).
            if item != 'numeric_vals':
                inputs[item] = str(inputs[item]).upper()
            # quote() encodes unsafe / reserved chars in the user input
            # (such as in ANIMALS & PRODUCTS)
            base_url += '&' + item + '=' + requests.utils.quote(inputs[item])
    r = requests.get(base_url)
    # validate the response
    status = r.status_code
    if 200 <= status < 300:
        return r.json()['count']
    return 'Response code ' + str(status) + ': ' + r.json()['error'][0]
def nass_data(api_key, source_desc=None, sector_desc=None, group_desc=None, commodity_desc=None, short_desc=None, domain_desc=None, agg_level_desc=None, domaincat_desc=None, statisticcat_desc=None, state_name=None, asd_desc=None, county_name=None, region_desc=None, zip_5=None, watershed_desc=None, year=None, freq_desc=None, reference_period_desc=None, format=None, numeric_vals=None):
    """Query the NASS Quick Stats ``api_GET`` (data) endpoint.

    Builds the request URL from every non-None keyword parameter and
    returns the parsed JSON payload on HTTP 2xx, otherwise an error
    string of the form ``'Response code <status>: <message>'``.

    NOTE(review): the parameter name ``format`` shadows the builtin; it
    cannot be renamed without breaking keyword callers.
    """
    # vars() must stay the very first statement: it snapshots the current
    # locals, i.e. exactly the keyword parameters above.
    inputs = vars()
    inputs.pop('api_key')  # api_key is hardcoded into the base url below
    # set up the url with api key
    base_url = nass_url + 'api_GET?key=' + api_key
    # keep only the parameters the caller actually supplied
    for item in list(inputs):
        if inputs[item] is None:
            inputs.pop(item)
        else:
            # desc-style inputs must be uppercase strings; numeric_vals is
            # passed through unchanged
            if item != 'numeric_vals':
                inputs[item] = str(inputs[item]).upper()
            # quote() encodes unsafe / reserved chars in the user input
            # (such as in ANIMALS & PRODUCTS)
            base_url += '&' + item + '=' + requests.utils.quote(inputs[item])
    r = requests.get(base_url)
    # validate the response
    status = r.status_code
    if 200 <= status < 300:
        return r.json()
    return 'Response code ' + str(status) + ': ' + r.json()['error'][0]
def nass_param(api_key, param=None, source_desc=None, sector_desc=None, group_desc=None, commodity_desc=None, short_desc=None, domain_desc=None, agg_level_desc=None, domaincat_desc=None, statisticcat_desc=None, state_name=None, asd_desc=None, county_name=None, region_desc=None, zip_5=None, watershed_desc=None, year=None, freq_desc=None, reference_period_desc=None):
    """Query the NASS Quick Stats ``get_param_values`` endpoint.

    ``param`` names the parameter whose valid values are requested; the
    remaining non-None keywords filter the result. Returns the parsed
    JSON payload on HTTP 2xx, otherwise an error string of the form
    ``'Response code <status>: <message>'``.
    """
    # vars() must stay the very first statement: it snapshots the current
    # locals, i.e. exactly the keyword parameters above.
    inputs = vars()
    inputs.pop('api_key')  # api_key is hardcoded into the base url below
    # set up the url with api key
    base_url = nass_url + 'get_param_values/' + 'get?key=' + api_key
    # keep only the parameters the caller actually supplied
    for item in list(inputs):
        if inputs[item] is None:
            inputs.pop(item)
        else:
            # desc-style inputs are uppercased; the 'param' name itself
            # must be lowercase in the query string
            if item != 'param':
                inputs[item] = str(inputs[item]).upper()
            else:
                inputs[item] = str(inputs[item]).lower()
            # quote() encodes unsafe / reserved chars in the user input
            # (such as in ANIMALS & PRODUCTS)
            base_url += '&' + item + '=' + requests.utils.quote(inputs[item])
    r = requests.get(base_url)
    # validate the response
    status = r.status_code
    if 200 <= status < 300:
        return r.json()
    return 'Response code ' + str(status) + ': ' + r.json()['error'][0]
| 41.752294
| 386
| 0.641397
| 651
| 4,551
| 4.337942
| 0.167435
| 0.11898
| 0.018414
| 0.026912
| 0.948654
| 0.94051
| 0.930595
| 0.930595
| 0.930595
| 0.930595
| 0
| 0.007042
| 0.251154
| 4,551
| 108
| 387
| 42.138889
| 0.821596
| 0.264118
| 0
| 0.8
| 0
| 0
| 0.065119
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054545
| false
| 0
| 0.018182
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b180a5ecb01ed1446901b20159351480df83996c
| 17,190
|
py
|
Python
|
rlkit/torch/vae/vq_vae_trainer.py
|
Asap7772/railrl_evalsawyer
|
baba8ce634d32a48c7dfe4dc03b123e18e96e0a3
|
[
"MIT"
] | 1
|
2020-10-23T14:40:09.000Z
|
2020-10-23T14:40:09.000Z
|
rlkit/torch/vae/vq_vae_trainer.py
|
Asap7772/railrl_evalsawyer
|
baba8ce634d32a48c7dfe4dc03b123e18e96e0a3
|
[
"MIT"
] | null | null | null |
rlkit/torch/vae/vq_vae_trainer.py
|
Asap7772/railrl_evalsawyer
|
baba8ce634d32a48c7dfe4dc03b123e18e96e0a3
|
[
"MIT"
] | 1
|
2021-05-27T20:38:45.000Z
|
2021-05-27T20:38:45.000Z
|
from collections import OrderedDict
import os
from os import path as osp
import numpy as np
import torch
from rlkit.core.loss import LossFunction
from rlkit.torch.vae.vae_trainer import ConvVAETrainer
from torch import optim
from torch.distributions import Normal
from torch.utils.data import DataLoader
from torch.nn import functional as F
from torchvision.utils import save_image
from rlkit.data_management.images import normalize_image
from rlkit.core import logger
import rlkit.core.util as util
from rlkit.misc.eval_util import create_stats_ordered_dict
from rlkit.misc.ml_util import ConstantSchedule
from rlkit.torch import pytorch_util as ptu
from rlkit.torch.data import (
ImageDataset, InfiniteWeightedRandomSampler,
InfiniteRandomSampler,
)
from rlkit.torch.core import np_to_pytorch_batch
import collections
import time
class VQ_VAETrainer(ConvVAETrainer, LossFunction):
    """Trainer for a VQ-VAE model.

    Construction and bookkeeping come from ConvVAETrainer (rlkit, outside
    this file): self.model, self.optimizer, self.beta_schedule,
    self.eval_statistics, self.eval_data, self.batch_size, self.log_dir
    and self.key_to_reconstruct are all set up there.

    NOTE(review): the pasted source had lost its indentation; the nesting
    below was reconstructed during review — confirm against the original.
    """

    def train_batch(self, epoch, batch):
        """Run one gradient step on `batch`."""
        self.model.train()
        self.optimizer.zero_grad()
        loss = self.compute_loss(batch, epoch, False)
        # NOTE(review): zero_grad() is called twice per step; the second
        # call (after compute_loss) is the operative one.
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

    def test_batch(
        self,
        epoch,
        batch,
    ):
        """Evaluate `batch`; compute_loss records eval statistics as a side effect."""
        self.model.eval()
        loss = self.compute_loss(batch, epoch, True)

    # Earlier obs-only version, kept for reference:
    # def encode_dataset(self, dataset):
    #     encoding_list = []
    #     save_dir = osp.join(self.log_dir, 'dataset_latents.npy')
    #     for i in range(len(dataset)):
    #         obs = dataset.random_batch(self.batch_size)["x_t"]
    #         encodings = self.model.encode(obs, cont=False)
    #         encoding_list.append(encodings)
    #     encodings = ptu.get_numpy(torch.cat(encoding_list))
    #     np.save(save_dir, encodings)
    def encode_dataset(self, dataset):
        """Encode random batches covering the dataset (obs + conditioning)
        and save the concatenated latents to <log_dir>/dataset_latents.npy."""
        encoding_list = []
        save_dir = osp.join(self.log_dir, 'dataset_latents.npy')
        for i in range(len(dataset)):
            batch = dataset.random_batch(self.batch_size)
            obs, cond = batch["x_t"], batch["env"]
            z_delta = self.model.encode(obs, cont=False)
            z_cond = self.model.encode(cond, cont=False)
            encodings = torch.cat([z_delta, z_cond], dim=1)
            encoding_list.append(encodings)
        encodings = ptu.get_numpy(torch.cat(encoding_list))
        np.save(save_dir, encodings)

    def train_epoch(self, epoch, dataset, batches=100):
        """Train on `batches` random batches and record the epoch duration."""
        # if epoch % 100 == 0 and epoch > 0:
        #     self.encode_dataset(dataset)
        start_time = time.time()
        for b in range(batches):
            batch = dataset.random_batch(self.batch_size)
            self.train_batch(epoch, batch)
        self.eval_statistics["train/epoch_duration"].append(time.time() - start_time)

    def test_epoch(self, epoch, dataset, batches=10):
        """Evaluate on `batches` random batches and record the epoch duration."""
        start_time = time.time()
        for b in range(batches):
            self.test_batch(epoch, dataset.random_batch(self.batch_size))
        self.eval_statistics["test/epoch_duration"].append(time.time() - start_time)

    def compute_loss(self, batch, epoch=-1, test=False):
        """Return total loss (VQ loss + reconstruction error) and log statistics."""
        prefix = "test/" if test else "train/"
        # NOTE(review): beta is computed but unused in this subclass.
        beta = float(self.beta_schedule.get_value(epoch))
        obs = batch[self.key_to_reconstruct]
        vq_loss, data_recon, perplexity, recon_error = self.model.compute_loss(obs)
        loss = vq_loss + recon_error
        self.eval_statistics['epoch'] = epoch
        self.eval_statistics[prefix + "losses"].append(loss.item())
        self.eval_statistics[prefix + "Recon Error"].append(recon_error.item())
        self.eval_statistics[prefix + "VQ Loss"].append(vq_loss.item())
        self.eval_statistics[prefix + "Perplexity"].append(perplexity.item())
        # detach so the cached reconstruction does not keep the graph alive
        self.eval_data[prefix + "last_batch"] = (obs, data_recon.detach())
        return loss

    def dump_samples(self, epoch):
        """Disabled: the early `return` makes everything below dead code —
        apparently an on/off switch; confirm before removing."""
        return
        self.model.eval()
        z = self.model.sample_prior(64)
        sample = self.model.decode(z, quantized=False).cpu()
        save_dir = osp.join(self.log_dir, 's%d.png' % epoch)
        save_image(
            sample.data.transpose(2, 3),
            save_dir
        )
class CVQVAETrainer(VQ_VAETrainer):
    """Trainer for a conditional VQ-VAE: the model encodes an observation
    ``x_t`` conditioned on a context image ``env``.

    NOTE(review): the pasted source had lost its indentation; the nesting
    below was reconstructed during review — confirm against the original.
    """

    def encode_dataset(self, dataset):
        """Encode random batches (obs conditioned on env) and save the
        latents to <log_dir>/dataset_latents.npy."""
        encoding_list = []
        save_dir = osp.join(self.log_dir, 'dataset_latents.npy')
        for i in range(len(dataset)):
            batch = dataset.random_batch(self.batch_size)
            encodings = self.model.encode(batch["x_t"], batch["env"], cont=False)
            encoding_list.append(encodings)
        encodings = ptu.get_numpy(torch.cat(encoding_list))
        np.save(save_dir, encodings)

    def test_epoch(self, epoch, dataset, batches=10):
        """Evaluate on `batches` random batches and record the epoch duration.

        NOTE(review): identical to the parent implementation — redundant
        override.
        """
        start_time = time.time()
        for b in range(batches):
            self.test_batch(epoch, dataset.random_batch(self.batch_size))
        self.eval_statistics["test/epoch_duration"].append(time.time() - start_time)

    def compute_loss(self, batch, epoch=-1, test=False):
        """Return total loss (reconstruction error + VQ loss) for a
        conditional batch and log statistics; caches (batch, recon)."""
        prefix = "test/" if test else "train/"
        # NOTE(review): beta is computed but unused in this subclass.
        beta = float(self.beta_schedule.get_value(epoch))
        vq_loss, quantized, recon, perplexity, error = self.model.compute_loss(batch["x_t"], batch["env"])
        #vq_loss, perplexity, recon, error = self.model.compute_loss(batch["x_t"], batch["env"])
        loss = error + vq_loss
        #loss = sum(errors) + beta * kle
        self.eval_statistics['epoch'] = epoch
        #self.eval_statistics['beta'] = beta
        self.eval_statistics[prefix + "losses"].append(loss.item())
        #self.eval_statistics[prefix + "kle"].append(kle.item())
        self.eval_statistics[prefix + "Obs Recon Error"].append(error.item())
        # self.eval_statistics[prefix + "Cond Obs Recon Error"].append(errors[1].item())
        self.eval_statistics[prefix + "VQ Loss"].append(vq_loss.item())
        self.eval_statistics[prefix + "Perplexity"].append(perplexity.item())
        # self.eval_statistics[prefix + "Cond VQ Loss"].append(vq_losses[1].item())
        # self.eval_statistics[prefix + "Cond Perplexity"].append(perplexities[1].item())
        self.eval_data[prefix + "last_batch"] = (batch, recon)
        #self.eval_data[prefix + "last_batch"] = (batch, recons[0], recons[1])
        return loss

    def dump_mixed_latents(self, epoch):
        """Save an n x n grid crossing the first latent chunk (rows of
        x_t) with the second chunk (rows of env) to mixed_latents_<epoch>.png.
        Row/column 0 show the source images themselves."""
        n = 8
        batch, reconstructions = self.eval_data["test/last_batch"]
        x_t, env = batch["x_t"][:n], batch["env"][:n]
        z_comb = self.model.encode(x_t, env)
        # split the combined latent at the model's declared boundary
        z_pos = z_comb[:, :self.model.latent_sizes[0]]
        z_obj = z_comb[:, self.model.latent_sizes[0]:]
        grid = []
        for i in range(n):
            for j in range(n):
                if i + j == 0:
                    # top-left corner: blank tile
                    grid.append(ptu.zeros(1, self.input_channels, self.imsize, self.imsize))
                elif i == 0:
                    #grid.append(self.model.decode(torch.cat([z_pos[j], z_obj[i]], dim=1)))
                    grid.append(x_t[j].reshape(1, self.input_channels, self.imsize, self.imsize))
                elif j == 0:
                    #grid.append(self.model.decode(torch.cat([z_pos[j], z_obj[i]], dim=1)))
                    grid.append(env[i].reshape(1, self.input_channels, self.imsize, self.imsize))
                else:
                    z, z_c = z_pos[j].reshape(1, -1), z_obj[i].reshape(1, -1)
                    grid.append(self.model.decode(torch.cat([z, z_c], dim=1)))
        samples = torch.cat(grid)
        save_dir = osp.join(self.log_dir, 'mixed_latents_%d.png' % epoch)
        save_image(samples.data.cpu().transpose(2, 3), save_dir, nrow=n)

    def dump_samples(self, epoch):
        """Disabled (no-op); the previous implementation is kept commented
        out below for reference."""
        return
    # def dump_samples(self, epoch):
    #     self.model.eval()
    #     batch, reconstructions, env_reconstructions = self.eval_data["test/last_batch"]
    #     #self.dump_distances(batch, epoch)
    #     env = batch["env"]
    #     n = min(env.size(0), 8)
    #     all_imgs = [
    #         env[:n].narrow(start=0, length=self.imlength, dim=1)
    #             .contiguous().view(
    #             -1,
    #             self.input_channels,
    #             self.imsize,
    #             self.imsize
    #         ).transpose(2, 3)]
    #     for i in range(7):
    #         latent = self.model.sample_prior(self.batch_size, env)
    #         samples = self.model.decode(latent)
    #         all_imgs.extend([
    #             samples.view(
    #                 self.batch_size,
    #                 self.input_channels,
    #                 self.imsize,
    #                 self.imsize,
    #             )[:n].transpose(2, 3)])
    #     comparison = torch.cat(all_imgs)
    #     save_dir = osp.join(self.log_dir, 's%d.png' % epoch)
    #     save_image(comparison.data.cpu(), save_dir, nrow=8)

    def dump_reconstructions(self, epoch):
        """Save mixed-latent grid plus an obs/reconstruction comparison
        strip to r<epoch>.png."""
        self.dump_mixed_latents(epoch)
        batch, reconstructions = self.eval_data["test/last_batch"]
        obs = batch["x_t"]
        # NOTE(review): `env` is unused below, but the lookup raises
        # KeyError if "env" is missing — kept as-is.
        env = batch["env"]
        n = min(obs.size(0), 8)
        comparison = torch.cat([
            obs[:n].narrow(start=0, length=self.imlength, dim=1)
            .contiguous().view(
                -1,
                3,
                self.imsize,
                self.imsize
            ).transpose(2, 3),
            reconstructions.view(
                self.batch_size,
                3,
                self.imsize,
                self.imsize,
            )[:n].transpose(2, 3),
        ])
        save_dir = osp.join(self.log_dir, 'r%d.png' % epoch)
        save_image(comparison.data.cpu(), save_dir, nrow=n)
    # Earlier 3-way (env / obs / recon / env-recon) version, kept for reference:
    # def dump_reconstructions(self, epoch):
    #     self.dump_mixed_latents(epoch)
    #     batch, reconstructions, env_reconstructions = self.eval_data["test/last_batch"]
    #     obs = batch["x_t"]
    #     env = batch["env"]
    #     n = min(obs.size(0), 8)
    #     comparison = torch.cat([
    #         env[:n].narrow(start=0, length=self.imlength, dim=1)
    #             .contiguous().view(
    #             -1,
    #             3,
    #             self.imsize,
    #             self.imsize
    #         ).transpose(2, 3),
    #         obs[:n].narrow(start=0, length=self.imlength, dim=1)
    #             .contiguous().view(
    #             -1,
    #             3,
    #             self.imsize,
    #             self.imsize
    #         ).transpose(2, 3),
    #         reconstructions.view(
    #             self.batch_size,
    #             3,
    #             self.imsize,
    #             self.imsize,
    #         )[:n].transpose(2, 3),
    #         env_reconstructions.view(
    #             self.batch_size,
    #             3,
    #             self.imsize,
    #             self.imsize,
    #         )[:n].transpose(2, 3)
    #     ])
    #     save_dir = osp.join(self.log_dir, 'r%d.png' % epoch)
    #     save_image(comparison.data.cpu(), save_dir, nrow=n)
class CVAETrainer(VQ_VAETrainer):
    """Trainer for a conditional (non-quantized) VAE: reconstructs both
    the observation ``x_t`` and the conditioning image ``env``.

    NOTE(review): the pasted source had lost its indentation; the nesting
    below was reconstructed during review — confirm against the original.
    """

    def compute_loss(self, batch, epoch=-1, test=False):
        """Return x-recon + cond-recon + beta * KL and log statistics;
        caches (batch, recon) under eval_data."""
        prefix = "test/" if test else "train/"
        beta = float(self.beta_schedule.get_value(epoch))
        recon, x_recon_error, c_recon_error, kle = self.model.compute_loss(batch["x_t"], batch["env"])
        loss = x_recon_error + c_recon_error + beta * kle
        self.eval_statistics['epoch'] = epoch
        self.eval_statistics['beta'] = beta
        self.eval_statistics[prefix + "losses"].append(loss.item())
        self.eval_statistics[prefix + "kle"].append(kle.item())
        self.eval_statistics[prefix + "Obs Recon Error"].append(x_recon_error.item())
        self.eval_statistics[prefix + "Cond Obs Recon Error"].append(c_recon_error.item())
        self.eval_data[prefix + "last_batch"] = (batch, recon)
        return loss

    def dump_mixed_latents(self, epoch):
        """Save an n x n grid crossing latent chunks of x_t and env.

        NOTE(review): compute_loss above stores a 2-tuple in eval_data,
        so the 3-way unpack below raises ValueError if this is ever
        called (it is not called from dump_reconstructions here).
        """
        n = 8
        batch, reconstructions, env_reconstructions = self.eval_data["test/last_batch"]
        x_t, env = batch["x_t"][:n], batch["env"][:n]
        z_comb = self.model.encode(x_t, env)
        # split the combined latent at the model's declared boundary
        z_pos = z_comb[:, :self.model.latent_sizes[0]]
        z_obj = z_comb[:, self.model.latent_sizes[0]:]
        grid = []
        for i in range(n):
            for j in range(n):
                if i + j == 0:
                    # top-left corner: blank tile
                    grid.append(ptu.zeros(1, self.input_channels, self.imsize, self.imsize))
                elif i == 0:
                    #grid.append(self.model.decode(torch.cat([z_pos[j], z_obj[i]], dim=1)))
                    grid.append(x_t[j].reshape(1, self.input_channels, self.imsize, self.imsize))
                elif j == 0:
                    #grid.append(self.model.decode(torch.cat([z_pos[j], z_obj[i]], dim=1)))
                    grid.append(env[i].reshape(1, self.input_channels, self.imsize, self.imsize))
                else:
                    z, z_c = z_pos[j].reshape(1, -1), z_obj[i].reshape(1, -1)
                    grid.append(self.model.decode(torch.cat([z, z_c], dim=1)))
        samples = torch.cat(grid)
        save_dir = osp.join(self.log_dir, 'mixed_latents_%d.png' % epoch)
        save_image(samples.data.cpu().transpose(2, 3), save_dir, nrow=n)

    def dump_samples(self, epoch):
        """Save the conditioning images plus 7 rows of prior samples
        (each conditioned on `env`) to s<epoch>.png."""
        self.model.eval()
        batch, reconstructions = self.eval_data["test/last_batch"]
        #self.dump_distances(batch, epoch)
        env = batch["env"]
        n = min(env.size(0), 8)
        all_imgs = [
            env[:n].narrow(start=0, length=self.imlength, dim=1)
            .contiguous().view(
                -1,
                self.input_channels,
                self.imsize,
                self.imsize
            ).transpose(2, 3)]
        for i in range(7):
            latent = self.model.sample_prior(self.batch_size, env)
            samples = self.model.decode(latent)
            all_imgs.extend([
                samples.view(
                    self.batch_size,
                    self.input_channels,
                    self.imsize,
                    self.imsize,
                )[:n].transpose(2, 3)])
        comparison = torch.cat(all_imgs)
        save_dir = osp.join(self.log_dir, 's%d.png' % epoch)
        save_image(comparison.data.cpu(), save_dir, nrow=8)

    def dump_reconstructions(self, epoch):
        """Save an obs/reconstruction comparison strip to r<epoch>.png."""
        #self.dump_mixed_latents(epoch)
        batch, reconstructions = self.eval_data["test/last_batch"]
        obs = batch["x_t"]
        # NOTE(review): `env` is unused below, but the lookup raises
        # KeyError if "env" is missing — kept as-is.
        env = batch["env"]
        n = min(obs.size(0), 8)
        comparison = torch.cat([
            # env[:n].narrow(start=0, length=self.imlength, dim=1)
            #     .contiguous().view(
            #     -1,
            #     3,
            #     self.imsize,
            #     self.imsize
            # ).transpose(2, 3),
            obs[:n].narrow(start=0, length=self.imlength, dim=1)
            .contiguous().view(
                -1,
                3,
                self.imsize,
                self.imsize
            ).transpose(2, 3),
            reconstructions.view(
                self.batch_size,
                3,
                self.imsize,
                self.imsize,
            )[:n].transpose(2, 3),
            # env_reconstructions.view(
            #     self.batch_size,
            #     3,
            #     self.imsize,
            #     self.imsize,
            # )[:n].transpose(2, 3)
        ])
        save_dir = osp.join(self.log_dir, 'r%d.png' % epoch)
        save_image(comparison.data.cpu(), save_dir, nrow=n)
class VAETrainer(VQ_VAETrainer):
    """Trainer for a plain (non-quantized, unconditional) VAE.

    Fix: the original class defined ``dump_samples`` twice; the later
    no-op definition shadowed the earlier sampling implementation, so
    sampling never ran (flake8 F811). The dead first definition has been
    removed and its body preserved in a comment; the effective behavior
    (no-op) is unchanged.
    """

    def compute_loss(self, batch, epoch=-1, test=False):
        """Return reconstruction error + beta * KL and log statistics;
        caches (batch, recon) under eval_data for the dump_* helpers."""
        prefix = "test/" if test else "train/"
        beta = float(self.beta_schedule.get_value(epoch))
        recon, error, kle = self.model.compute_loss(batch["x_t"])
        loss = error + beta * kle
        self.eval_statistics['epoch'] = epoch
        self.eval_statistics['beta'] = beta
        self.eval_statistics[prefix + "losses"].append(loss.item())
        self.eval_statistics[prefix + "kle"].append(kle.item())
        self.eval_statistics[prefix + "Obs Recon Error"].append(error.item())
        self.eval_data[prefix + "last_batch"] = (batch, recon)
        return loss

    def dump_samples(self, epoch):
        """Disabled (no-op). The shadowed sampling implementation is kept
        below for reference; re-enable by restoring it."""
        return
        # self.model.eval()
        # sample = ptu.randn(64, self.representation_size)
        # sample = self.model.decode(sample).cpu()
        # save_dir = osp.join(self.log_dir, 's%d.png' % epoch)
        # save_image(
        #     sample.data.transpose(2, 3),
        #     save_dir
        # )

    def dump_reconstructions(self, epoch):
        """Save an obs/reconstruction comparison strip to r<epoch>.png."""
        #self.dump_mixed_latents(epoch)
        batch, reconstructions = self.eval_data["test/last_batch"]
        obs = batch["x_t"]
        # NOTE(review): `env` is unused below, but the lookup raises
        # KeyError if "env" is missing — kept to preserve behavior.
        env = batch["env"]
        n = min(obs.size(0), 8)
        comparison = torch.cat([
            obs[:n].narrow(start=0, length=self.imlength, dim=1)
            .contiguous().view(
                -1,
                3,
                self.imsize,
                self.imsize
            ).transpose(2, 3),
            reconstructions.view(
                self.batch_size,
                3,
                self.imsize,
                self.imsize,
            )[:n].transpose(2, 3),
        ])
        save_dir = osp.join(self.log_dir, 'r%d.png' % epoch)
        save_image(comparison.data.cpu(), save_dir, nrow=n)
| 39.157175
| 106
| 0.564282
| 2,131
| 17,190
| 4.395589
| 0.083998
| 0.046973
| 0.055728
| 0.046973
| 0.846803
| 0.835166
| 0.805381
| 0.78467
| 0.774741
| 0.766628
| 0
| 0.012511
| 0.30256
| 17,190
| 438
| 107
| 39.246575
| 0.768788
| 0.212333
| 0
| 0.72069
| 0
| 0
| 0.043168
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.072414
| false
| 0
| 0.075862
| 0.006897
| 0.186207
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b18ac27d870845ef507788bef0af2023e7d13bff
| 29,019
|
py
|
Python
|
nova/tests/unit/objects/test_numa.py
|
bopopescu/nova-token
|
ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/unit/objects/test_numa.py
|
bopopescu/nova-token
|
ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/unit/objects/test_numa.py
|
bopopescu/nova-token
|
ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2
|
[
"Apache-2.0"
] | 2
|
2017-07-20T17:31:34.000Z
|
2020-07-24T02:42:19.000Z
|
begin_unit
comment|'# Licensed under the Apache License, Version 2.0 (the "License"); you may'
nl|'\n'
comment|'# not use this file except in compliance with the License. You may obtain'
nl|'\n'
comment|'# a copy of the License at'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# http://www.apache.org/licenses/LICENSE-2.0'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Unless required by applicable law or agreed to in writing, software'
nl|'\n'
comment|'# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT'
nl|'\n'
comment|'# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the'
nl|'\n'
comment|'# License for the specific language governing permissions and limitations'
nl|'\n'
comment|'# under the License.'
nl|'\n'
nl|'\n'
name|'from'
name|'nova'
name|'import'
name|'exception'
newline|'\n'
name|'from'
name|'nova'
name|'import'
name|'objects'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'tests'
op|'.'
name|'unit'
op|'.'
name|'objects'
name|'import'
name|'test_objects'
newline|'\n'
nl|'\n'
DECL|variable|fake_obj_numa
name|'fake_obj_numa'
op|'='
name|'objects'
op|'.'
name|'NUMATopology'
op|'('
nl|'\n'
DECL|variable|cells
name|'cells'
op|'='
op|'['
nl|'\n'
name|'objects'
op|'.'
name|'NUMACell'
op|'('
nl|'\n'
name|'id'
op|'='
number|'0'
op|','
name|'cpuset'
op|'='
name|'set'
op|'('
op|'['
number|'1'
op|','
number|'2'
op|']'
op|')'
op|','
name|'memory'
op|'='
number|'512'
op|','
nl|'\n'
name|'cpu_usage'
op|'='
number|'2'
op|','
name|'memory_usage'
op|'='
number|'256'
op|','
nl|'\n'
name|'mempages'
op|'='
op|'['
op|']'
op|','
name|'pinned_cpus'
op|'='
name|'set'
op|'('
op|'['
op|']'
op|')'
op|','
nl|'\n'
DECL|variable|siblings
name|'siblings'
op|'='
op|'['
op|']'
op|')'
op|','
nl|'\n'
name|'objects'
op|'.'
name|'NUMACell'
op|'('
nl|'\n'
name|'id'
op|'='
number|'1'
op|','
name|'cpuset'
op|'='
name|'set'
op|'('
op|'['
number|'3'
op|','
number|'4'
op|']'
op|')'
op|','
name|'memory'
op|'='
number|'512'
op|','
nl|'\n'
name|'cpu_usage'
op|'='
number|'1'
op|','
name|'memory_usage'
op|'='
number|'128'
op|','
nl|'\n'
name|'mempages'
op|'='
op|'['
op|']'
op|','
name|'pinned_cpus'
op|'='
name|'set'
op|'('
op|'['
op|']'
op|')'
op|','
nl|'\n'
DECL|variable|siblings
name|'siblings'
op|'='
op|'['
op|']'
op|')'
op|']'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|class|_TestNUMA
name|'class'
name|'_TestNUMA'
op|'('
name|'object'
op|')'
op|':'
newline|'\n'
nl|'\n'
DECL|member|test_convert_wipe
indent|' '
name|'def'
name|'test_convert_wipe'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'d1'
op|'='
name|'fake_obj_numa'
op|'.'
name|'_to_dict'
op|'('
op|')'
newline|'\n'
name|'d2'
op|'='
name|'objects'
op|'.'
name|'NUMATopology'
op|'.'
name|'obj_from_primitive'
op|'('
name|'d1'
op|')'
op|'.'
name|'_to_dict'
op|'('
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'d1'
op|','
name|'d2'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_from_legacy_limits
dedent|''
name|'def'
name|'test_from_legacy_limits'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'old_style'
op|'='
op|'{'
string|'"cells"'
op|':'
op|'['
nl|'\n'
op|'{'
string|'"mem"'
op|':'
op|'{'
nl|'\n'
string|'"total"'
op|':'
number|'1024'
op|','
nl|'\n'
string|'"limit"'
op|':'
number|'2048'
op|'}'
op|','
nl|'\n'
string|'"cpu_limit"'
op|':'
number|'96.0'
op|','
nl|'\n'
string|'"cpus"'
op|':'
string|'"0,1,2,3,4,5"'
op|','
nl|'\n'
string|'"id"'
op|':'
number|'0'
op|'}'
op|']'
op|'}'
newline|'\n'
nl|'\n'
name|'limits'
op|'='
name|'objects'
op|'.'
name|'NUMATopologyLimits'
op|'.'
name|'obj_from_db_obj'
op|'('
name|'old_style'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'16.0'
op|','
name|'limits'
op|'.'
name|'cpu_allocation_ratio'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'2.0'
op|','
name|'limits'
op|'.'
name|'ram_allocation_ratio'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_to_legacy_limits
dedent|''
name|'def'
name|'test_to_legacy_limits'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'limits'
op|'='
name|'objects'
op|'.'
name|'NUMATopologyLimits'
op|'('
nl|'\n'
name|'cpu_allocation_ratio'
op|'='
number|'16'
op|','
nl|'\n'
name|'ram_allocation_ratio'
op|'='
number|'2'
op|')'
newline|'\n'
name|'host_topo'
op|'='
name|'objects'
op|'.'
name|'NUMATopology'
op|'('
name|'cells'
op|'='
op|'['
nl|'\n'
name|'objects'
op|'.'
name|'NUMACell'
op|'('
name|'id'
op|'='
number|'0'
op|','
name|'cpuset'
op|'='
name|'set'
op|'('
op|'['
number|'1'
op|','
number|'2'
op|']'
op|')'
op|','
name|'memory'
op|'='
number|'1024'
op|')'
nl|'\n'
op|']'
op|')'
newline|'\n'
nl|'\n'
name|'old_style'
op|'='
op|'{'
string|"'cells'"
op|':'
op|'['
nl|'\n'
op|'{'
string|"'mem'"
op|':'
op|'{'
string|"'total'"
op|':'
number|'1024'
op|','
nl|'\n'
string|"'limit'"
op|':'
number|'2048.0'
op|'}'
op|','
nl|'\n'
string|"'id'"
op|':'
number|'0'
op|','
nl|'\n'
string|"'cpus'"
op|':'
string|"'1,2'"
op|','
nl|'\n'
string|"'cpu_limit'"
op|':'
number|'32.0'
op|'}'
op|']'
op|'}'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'old_style'
op|','
name|'limits'
op|'.'
name|'to_dict_legacy'
op|'('
name|'host_topo'
op|')'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_free_cpus
dedent|''
name|'def'
name|'test_free_cpus'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'obj'
op|'='
name|'objects'
op|'.'
name|'NUMATopology'
op|'('
name|'cells'
op|'='
op|'['
nl|'\n'
name|'objects'
op|'.'
name|'NUMACell'
op|'('
nl|'\n'
name|'id'
op|'='
number|'0'
op|','
name|'cpuset'
op|'='
name|'set'
op|'('
op|'['
number|'1'
op|','
number|'2'
op|']'
op|')'
op|','
name|'memory'
op|'='
number|'512'
op|','
nl|'\n'
name|'cpu_usage'
op|'='
number|'2'
op|','
name|'memory_usage'
op|'='
number|'256'
op|','
nl|'\n'
name|'pinned_cpus'
op|'='
name|'set'
op|'('
op|'['
number|'1'
op|']'
op|')'
op|','
name|'siblings'
op|'='
op|'['
op|']'
op|','
nl|'\n'
name|'mempages'
op|'='
op|'['
op|']'
op|')'
op|','
nl|'\n'
name|'objects'
op|'.'
name|'NUMACell'
op|'('
nl|'\n'
name|'id'
op|'='
number|'1'
op|','
name|'cpuset'
op|'='
name|'set'
op|'('
op|'['
number|'3'
op|','
number|'4'
op|']'
op|')'
op|','
name|'memory'
op|'='
number|'512'
op|','
nl|'\n'
name|'cpu_usage'
op|'='
number|'1'
op|','
name|'memory_usage'
op|'='
number|'128'
op|','
nl|'\n'
name|'pinned_cpus'
op|'='
name|'set'
op|'('
op|'['
op|']'
op|')'
op|','
name|'siblings'
op|'='
op|'['
op|']'
op|','
nl|'\n'
name|'mempages'
op|'='
op|'['
op|']'
op|')'
nl|'\n'
op|']'
nl|'\n'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'set'
op|'('
op|'['
number|'2'
op|']'
op|')'
op|','
name|'obj'
op|'.'
name|'cells'
op|'['
number|'0'
op|']'
op|'.'
name|'free_cpus'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'set'
op|'('
op|'['
number|'3'
op|','
number|'4'
op|']'
op|')'
op|','
name|'obj'
op|'.'
name|'cells'
op|'['
number|'1'
op|']'
op|'.'
name|'free_cpus'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_pinning_logic
dedent|''
name|'def'
name|'test_pinning_logic'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'numacell'
op|'='
name|'objects'
op|'.'
name|'NUMACell'
op|'('
name|'id'
op|'='
number|'0'
op|','
name|'cpuset'
op|'='
name|'set'
op|'('
op|'['
number|'1'
op|','
number|'2'
op|','
number|'3'
op|','
number|'4'
op|']'
op|')'
op|','
name|'memory'
op|'='
number|'512'
op|','
nl|'\n'
name|'cpu_usage'
op|'='
number|'2'
op|','
name|'memory_usage'
op|'='
number|'256'
op|','
nl|'\n'
name|'pinned_cpus'
op|'='
name|'set'
op|'('
op|'['
number|'1'
op|']'
op|')'
op|','
name|'siblings'
op|'='
op|'['
op|']'
op|','
nl|'\n'
name|'mempages'
op|'='
op|'['
op|']'
op|')'
newline|'\n'
name|'numacell'
op|'.'
name|'pin_cpus'
op|'('
name|'set'
op|'('
op|'['
number|'2'
op|','
number|'3'
op|']'
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'set'
op|'('
op|'['
number|'4'
op|']'
op|')'
op|','
name|'numacell'
op|'.'
name|'free_cpus'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'exception'
op|'.'
name|'CPUPinningUnknown'
op|','
nl|'\n'
name|'numacell'
op|'.'
name|'pin_cpus'
op|','
name|'set'
op|'('
op|'['
number|'1'
op|','
number|'55'
op|']'
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'exception'
op|'.'
name|'CPUPinningInvalid'
op|','
nl|'\n'
name|'numacell'
op|'.'
name|'pin_cpus'
op|','
name|'set'
op|'('
op|'['
number|'1'
op|','
number|'4'
op|']'
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'exception'
op|'.'
name|'CPUPinningUnknown'
op|','
nl|'\n'
name|'numacell'
op|'.'
name|'unpin_cpus'
op|','
name|'set'
op|'('
op|'['
number|'1'
op|','
number|'55'
op|']'
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'exception'
op|'.'
name|'CPUPinningInvalid'
op|','
nl|'\n'
name|'numacell'
op|'.'
name|'unpin_cpus'
op|','
name|'set'
op|'('
op|'['
number|'1'
op|','
number|'4'
op|']'
op|')'
op|')'
newline|'\n'
name|'numacell'
op|'.'
name|'unpin_cpus'
op|'('
name|'set'
op|'('
op|'['
number|'1'
op|','
number|'2'
op|','
number|'3'
op|']'
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'set'
op|'('
op|'['
number|'1'
op|','
number|'2'
op|','
number|'3'
op|','
number|'4'
op|']'
op|')'
op|','
name|'numacell'
op|'.'
name|'free_cpus'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_pinning_with_siblings
dedent|''
name|'def'
name|'test_pinning_with_siblings'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'numacell'
op|'='
name|'objects'
op|'.'
name|'NUMACell'
op|'('
name|'id'
op|'='
number|'0'
op|','
name|'cpuset'
op|'='
name|'set'
op|'('
op|'['
number|'1'
op|','
number|'2'
op|','
number|'3'
op|','
number|'4'
op|']'
op|')'
op|','
name|'memory'
op|'='
number|'512'
op|','
nl|'\n'
name|'cpu_usage'
op|'='
number|'2'
op|','
name|'memory_usage'
op|'='
number|'256'
op|','
nl|'\n'
name|'pinned_cpus'
op|'='
name|'set'
op|'('
op|'['
op|']'
op|')'
op|','
nl|'\n'
name|'siblings'
op|'='
op|'['
name|'set'
op|'('
op|'['
number|'1'
op|','
number|'3'
op|']'
op|')'
op|','
name|'set'
op|'('
op|'['
number|'2'
op|','
number|'4'
op|']'
op|')'
op|']'
op|','
nl|'\n'
name|'mempages'
op|'='
op|'['
op|']'
op|')'
newline|'\n'
nl|'\n'
name|'numacell'
op|'.'
name|'pin_cpus_with_siblings'
op|'('
name|'set'
op|'('
op|'['
number|'1'
op|','
number|'2'
op|']'
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'set'
op|'('
op|')'
op|','
name|'numacell'
op|'.'
name|'free_cpus'
op|')'
newline|'\n'
name|'numacell'
op|'.'
name|'unpin_cpus_with_siblings'
op|'('
name|'set'
op|'('
op|'['
number|'1'
op|']'
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'set'
op|'('
op|'['
number|'1'
op|','
number|'3'
op|']'
op|')'
op|','
name|'numacell'
op|'.'
name|'free_cpus'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'exception'
op|'.'
name|'CPUPinningInvalid'
op|','
nl|'\n'
name|'numacell'
op|'.'
name|'unpin_cpus_with_siblings'
op|','
nl|'\n'
name|'set'
op|'('
op|'['
number|'3'
op|']'
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'exception'
op|'.'
name|'CPUPinningInvalid'
op|','
nl|'\n'
name|'numacell'
op|'.'
name|'pin_cpus_with_siblings'
op|','
nl|'\n'
name|'set'
op|'('
op|'['
number|'4'
op|']'
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'exception'
op|'.'
name|'CPUPinningInvalid'
op|','
nl|'\n'
name|'numacell'
op|'.'
name|'unpin_cpus_with_siblings'
op|','
nl|'\n'
name|'set'
op|'('
op|'['
number|'3'
op|','
number|'4'
op|']'
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'set'
op|'('
op|'['
number|'1'
op|','
number|'3'
op|']'
op|')'
op|','
name|'numacell'
op|'.'
name|'free_cpus'
op|')'
newline|'\n'
name|'numacell'
op|'.'
name|'unpin_cpus_with_siblings'
op|'('
name|'set'
op|'('
op|'['
number|'4'
op|']'
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'set'
op|'('
op|'['
number|'1'
op|','
number|'2'
op|','
number|'3'
op|','
number|'4'
op|']'
op|')'
op|','
name|'numacell'
op|'.'
name|'free_cpus'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_pages_topology_wipe
dedent|''
name|'def'
name|'test_pages_topology_wipe'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'pages_topology'
op|'='
name|'objects'
op|'.'
name|'NUMAPagesTopology'
op|'('
nl|'\n'
name|'size_kb'
op|'='
number|'2048'
op|','
name|'total'
op|'='
number|'1024'
op|','
name|'used'
op|'='
number|'512'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'2048'
op|','
name|'pages_topology'
op|'.'
name|'size_kb'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'1024'
op|','
name|'pages_topology'
op|'.'
name|'total'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'512'
op|','
name|'pages_topology'
op|'.'
name|'used'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'512'
op|','
name|'pages_topology'
op|'.'
name|'free'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'1048576'
op|','
name|'pages_topology'
op|'.'
name|'free_kb'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_can_fit_hugepages
dedent|''
name|'def'
name|'test_can_fit_hugepages'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'cell'
op|'='
name|'objects'
op|'.'
name|'NUMACell'
op|'('
nl|'\n'
name|'id'
op|'='
number|'0'
op|','
name|'cpuset'
op|'='
name|'set'
op|'('
op|'['
number|'1'
op|','
number|'2'
op|']'
op|')'
op|','
name|'memory'
op|'='
number|'1024'
op|','
nl|'\n'
name|'siblings'
op|'='
op|'['
op|']'
op|','
name|'pinned_cpus'
op|'='
name|'set'
op|'('
op|'['
op|']'
op|')'
op|','
nl|'\n'
name|'mempages'
op|'='
op|'['
nl|'\n'
name|'objects'
op|'.'
name|'NUMAPagesTopology'
op|'('
nl|'\n'
name|'size_kb'
op|'='
number|'4'
op|','
name|'total'
op|'='
number|'1548736'
op|','
name|'used'
op|'='
number|'0'
op|')'
op|','
nl|'\n'
name|'objects'
op|'.'
name|'NUMAPagesTopology'
op|'('
nl|'\n'
name|'size_kb'
op|'='
number|'2048'
op|','
name|'total'
op|'='
number|'513'
op|','
name|'used'
op|'='
number|'0'
op|')'
op|','
nl|'\n'
name|'objects'
op|'.'
name|'NUMAPagesTopology'
op|'('
nl|'\n'
name|'size_kb'
op|'='
number|'1048576'
op|','
name|'total'
op|'='
number|'4'
op|','
name|'used'
op|'='
number|'1'
op|','
name|'reserved'
op|'='
number|'1'
op|')'
op|']'
op|')'
newline|'\n'
nl|'\n'
name|'pagesize'
op|'='
number|'2048'
newline|'\n'
name|'self'
op|'.'
name|'assertTrue'
op|'('
name|'cell'
op|'.'
name|'can_fit_hugepages'
op|'('
name|'pagesize'
op|','
number|'2'
op|'**'
number|'20'
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertFalse'
op|'('
name|'cell'
op|'.'
name|'can_fit_hugepages'
op|'('
name|'pagesize'
op|','
number|'2'
op|'**'
number|'21'
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertFalse'
op|'('
name|'cell'
op|'.'
name|'can_fit_hugepages'
op|'('
name|'pagesize'
op|','
number|'2'
op|'**'
number|'19'
op|'+'
number|'1'
op|')'
op|')'
newline|'\n'
nl|'\n'
name|'pagesize'
op|'='
number|'1048576'
newline|'\n'
name|'self'
op|'.'
name|'assertTrue'
op|'('
name|'cell'
op|'.'
name|'can_fit_hugepages'
op|'('
name|'pagesize'
op|','
number|'2'
op|'**'
number|'20'
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertTrue'
op|'('
name|'cell'
op|'.'
name|'can_fit_hugepages'
op|'('
name|'pagesize'
op|','
number|'2'
op|'**'
number|'20'
op|'*'
number|'2'
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertFalse'
op|'('
name|'cell'
op|'.'
name|'can_fit_hugepages'
op|'('
name|'pagesize'
op|','
number|'2'
op|'**'
number|'20'
op|'*'
number|'3'
op|')'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
nl|'\n'
name|'exception'
op|'.'
name|'MemoryPageSizeNotSupported'
op|','
nl|'\n'
name|'cell'
op|'.'
name|'can_fit_hugepages'
op|','
number|'12345'
op|','
number|'2'
op|'**'
number|'20'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_default_behavior
dedent|''
name|'def'
name|'test_default_behavior'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'inst_cell'
op|'='
name|'objects'
op|'.'
name|'NUMACell'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'0'
op|','
name|'len'
op|'('
name|'inst_cell'
op|'.'
name|'obj_get_changes'
op|'('
op|')'
op|')'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_numa_pages_equivalent
dedent|''
name|'def'
name|'test_numa_pages_equivalent'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'pt1'
op|'='
name|'objects'
op|'.'
name|'NUMAPagesTopology'
op|'('
name|'size_kb'
op|'='
number|'1024'
op|','
name|'total'
op|'='
number|'32'
op|','
name|'used'
op|'='
number|'0'
op|')'
newline|'\n'
name|'pt2'
op|'='
name|'objects'
op|'.'
name|'NUMAPagesTopology'
op|'('
name|'size_kb'
op|'='
number|'1024'
op|','
name|'total'
op|'='
number|'32'
op|','
name|'used'
op|'='
number|'0'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'pt1'
op|','
name|'pt2'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_numa_pages_not_equivalent
dedent|''
name|'def'
name|'test_numa_pages_not_equivalent'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'pt1'
op|'='
name|'objects'
op|'.'
name|'NUMAPagesTopology'
op|'('
name|'size_kb'
op|'='
number|'1024'
op|','
name|'total'
op|'='
number|'32'
op|','
name|'used'
op|'='
number|'0'
op|')'
newline|'\n'
name|'pt2'
op|'='
name|'objects'
op|'.'
name|'NUMAPagesTopology'
op|'('
name|'size_kb'
op|'='
number|'1024'
op|','
name|'total'
op|'='
number|'33'
op|','
name|'used'
op|'='
number|'0'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertNotEqual'
op|'('
name|'pt1'
op|','
name|'pt2'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_numa_pages_not_equivalent_missing_a
dedent|''
name|'def'
name|'test_numa_pages_not_equivalent_missing_a'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'pt1'
op|'='
name|'objects'
op|'.'
name|'NUMAPagesTopology'
op|'('
name|'size_kb'
op|'='
number|'1024'
op|','
name|'used'
op|'='
number|'0'
op|')'
newline|'\n'
name|'pt2'
op|'='
name|'objects'
op|'.'
name|'NUMAPagesTopology'
op|'('
name|'size_kb'
op|'='
number|'1024'
op|','
name|'total'
op|'='
number|'32'
op|','
name|'used'
op|'='
number|'0'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertNotEqual'
op|'('
name|'pt1'
op|','
name|'pt2'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_numa_pages_not_equivalent_missing_b
dedent|''
name|'def'
name|'test_numa_pages_not_equivalent_missing_b'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'pt1'
op|'='
name|'objects'
op|'.'
name|'NUMAPagesTopology'
op|'('
name|'size_kb'
op|'='
number|'1024'
op|','
name|'total'
op|'='
number|'32'
op|','
name|'used'
op|'='
number|'0'
op|')'
newline|'\n'
name|'pt2'
op|'='
name|'objects'
op|'.'
name|'NUMAPagesTopology'
op|'('
name|'size_kb'
op|'='
number|'1024'
op|','
name|'used'
op|'='
number|'0'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertNotEqual'
op|'('
name|'pt1'
op|','
name|'pt2'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_numa_cell_equivalent
dedent|''
name|'def'
name|'test_numa_cell_equivalent'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'cell1'
op|'='
name|'objects'
op|'.'
name|'NUMACell'
op|'('
name|'id'
op|'='
number|'1'
op|','
name|'cpuset'
op|'='
name|'set'
op|'('
op|'['
number|'1'
op|','
number|'2'
op|']'
op|')'
op|','
name|'memory'
op|'='
number|'32'
op|','
nl|'\n'
name|'cpu_usage'
op|'='
number|'10'
op|','
name|'pinned_cpus'
op|'='
name|'set'
op|'('
op|'['
number|'3'
op|','
number|'4'
op|']'
op|')'
op|','
nl|'\n'
name|'siblings'
op|'='
op|'['
name|'set'
op|'('
op|'['
number|'5'
op|','
number|'6'
op|']'
op|')'
op|']'
op|')'
newline|'\n'
name|'cell2'
op|'='
name|'objects'
op|'.'
name|'NUMACell'
op|'('
name|'id'
op|'='
number|'1'
op|','
name|'cpuset'
op|'='
name|'set'
op|'('
op|'['
number|'1'
op|','
number|'2'
op|']'
op|')'
op|','
name|'memory'
op|'='
number|'32'
op|','
nl|'\n'
name|'cpu_usage'
op|'='
number|'10'
op|','
name|'pinned_cpus'
op|'='
name|'set'
op|'('
op|'['
number|'3'
op|','
number|'4'
op|']'
op|')'
op|','
nl|'\n'
name|'siblings'
op|'='
op|'['
name|'set'
op|'('
op|'['
number|'5'
op|','
number|'6'
op|']'
op|')'
op|']'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'cell1'
op|','
name|'cell2'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_numa_cell_not_equivalent
dedent|''
name|'def'
name|'test_numa_cell_not_equivalent'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'cell1'
op|'='
name|'objects'
op|'.'
name|'NUMACell'
op|'('
name|'id'
op|'='
number|'1'
op|','
name|'cpuset'
op|'='
name|'set'
op|'('
op|'['
number|'1'
op|','
number|'2'
op|']'
op|')'
op|','
name|'memory'
op|'='
number|'32'
op|','
nl|'\n'
name|'cpu_usage'
op|'='
number|'10'
op|','
name|'pinned_cpus'
op|'='
name|'set'
op|'('
op|'['
number|'3'
op|','
number|'4'
op|']'
op|')'
op|','
nl|'\n'
name|'siblings'
op|'='
op|'['
name|'set'
op|'('
op|'['
number|'5'
op|','
number|'6'
op|']'
op|')'
op|']'
op|')'
newline|'\n'
name|'cell2'
op|'='
name|'objects'
op|'.'
name|'NUMACell'
op|'('
name|'id'
op|'='
number|'2'
op|','
name|'cpuset'
op|'='
name|'set'
op|'('
op|'['
number|'1'
op|','
number|'2'
op|']'
op|')'
op|','
name|'memory'
op|'='
number|'32'
op|','
nl|'\n'
name|'cpu_usage'
op|'='
number|'10'
op|','
name|'pinned_cpus'
op|'='
name|'set'
op|'('
op|'['
number|'3'
op|','
number|'4'
op|']'
op|')'
op|','
nl|'\n'
name|'siblings'
op|'='
op|'['
name|'set'
op|'('
op|'['
number|'5'
op|','
number|'6'
op|']'
op|')'
op|']'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertNotEqual'
op|'('
name|'cell1'
op|','
name|'cell2'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_numa_cell_not_equivalent_missing_a
dedent|''
name|'def'
name|'test_numa_cell_not_equivalent_missing_a'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'cell1'
op|'='
name|'objects'
op|'.'
name|'NUMACell'
op|'('
name|'id'
op|'='
number|'1'
op|','
name|'cpuset'
op|'='
name|'set'
op|'('
op|'['
number|'1'
op|','
number|'2'
op|']'
op|')'
op|','
name|'memory'
op|'='
number|'32'
op|','
nl|'\n'
name|'pinned_cpus'
op|'='
name|'set'
op|'('
op|'['
number|'3'
op|','
number|'4'
op|']'
op|')'
op|','
nl|'\n'
name|'siblings'
op|'='
op|'['
name|'set'
op|'('
op|'['
number|'5'
op|','
number|'6'
op|']'
op|')'
op|']'
op|')'
newline|'\n'
name|'cell2'
op|'='
name|'objects'
op|'.'
name|'NUMACell'
op|'('
name|'id'
op|'='
number|'2'
op|','
name|'cpuset'
op|'='
name|'set'
op|'('
op|'['
number|'1'
op|','
number|'2'
op|']'
op|')'
op|','
name|'memory'
op|'='
number|'32'
op|','
nl|'\n'
name|'cpu_usage'
op|'='
number|'10'
op|','
name|'pinned_cpus'
op|'='
name|'set'
op|'('
op|'['
number|'3'
op|','
number|'4'
op|']'
op|')'
op|','
nl|'\n'
name|'siblings'
op|'='
op|'['
name|'set'
op|'('
op|'['
number|'5'
op|','
number|'6'
op|']'
op|')'
op|']'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertNotEqual'
op|'('
name|'cell1'
op|','
name|'cell2'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_numa_cell_not_equivalent_missing_b
dedent|''
name|'def'
name|'test_numa_cell_not_equivalent_missing_b'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'cell1'
op|'='
name|'objects'
op|'.'
name|'NUMACell'
op|'('
name|'id'
op|'='
number|'1'
op|','
name|'cpuset'
op|'='
name|'set'
op|'('
op|'['
number|'1'
op|','
number|'2'
op|']'
op|')'
op|','
name|'memory'
op|'='
number|'32'
op|','
nl|'\n'
name|'cpu_usage'
op|'='
number|'10'
op|','
name|'pinned_cpus'
op|'='
name|'set'
op|'('
op|'['
number|'3'
op|','
number|'4'
op|']'
op|')'
op|','
nl|'\n'
name|'siblings'
op|'='
op|'['
name|'set'
op|'('
op|'['
number|'5'
op|','
number|'6'
op|']'
op|')'
op|']'
op|')'
newline|'\n'
name|'cell2'
op|'='
name|'objects'
op|'.'
name|'NUMACell'
op|'('
name|'id'
op|'='
number|'2'
op|','
name|'cpuset'
op|'='
name|'set'
op|'('
op|'['
number|'1'
op|','
number|'2'
op|']'
op|')'
op|','
name|'memory'
op|'='
number|'32'
op|','
nl|'\n'
name|'pinned_cpus'
op|'='
name|'set'
op|'('
op|'['
number|'3'
op|','
number|'4'
op|']'
op|')'
op|','
nl|'\n'
name|'siblings'
op|'='
op|'['
name|'set'
op|'('
op|'['
number|'5'
op|','
number|'6'
op|']'
op|')'
op|']'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertNotEqual'
op|'('
name|'cell1'
op|','
name|'cell2'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_numa_cell_equivalent_different_pages
dedent|''
name|'def'
name|'test_numa_cell_equivalent_different_pages'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'pt1'
op|'='
name|'objects'
op|'.'
name|'NUMAPagesTopology'
op|'('
name|'size_kb'
op|'='
number|'1024'
op|','
name|'total'
op|'='
number|'32'
op|','
name|'used'
op|'='
number|'0'
op|')'
newline|'\n'
name|'pt2'
op|'='
name|'objects'
op|'.'
name|'NUMAPagesTopology'
op|'('
name|'size_kb'
op|'='
number|'1024'
op|','
name|'total'
op|'='
number|'32'
op|','
name|'used'
op|'='
number|'0'
op|')'
newline|'\n'
name|'cell1'
op|'='
name|'objects'
op|'.'
name|'NUMACell'
op|'('
name|'id'
op|'='
number|'1'
op|','
name|'cpuset'
op|'='
name|'set'
op|'('
op|'['
number|'1'
op|','
number|'2'
op|']'
op|')'
op|','
name|'memory'
op|'='
number|'32'
op|','
nl|'\n'
name|'cpu_usage'
op|'='
number|'10'
op|','
name|'pinned_cpus'
op|'='
name|'set'
op|'('
op|'['
number|'3'
op|','
number|'4'
op|']'
op|')'
op|','
nl|'\n'
name|'siblings'
op|'='
op|'['
name|'set'
op|'('
op|'['
number|'5'
op|','
number|'6'
op|']'
op|')'
op|']'
op|','
nl|'\n'
name|'mempages'
op|'='
op|'['
name|'pt1'
op|']'
op|')'
newline|'\n'
name|'cell2'
op|'='
name|'objects'
op|'.'
name|'NUMACell'
op|'('
name|'id'
op|'='
number|'1'
op|','
name|'cpuset'
op|'='
name|'set'
op|'('
op|'['
number|'1'
op|','
number|'2'
op|']'
op|')'
op|','
name|'memory'
op|'='
number|'32'
op|','
nl|'\n'
name|'cpu_usage'
op|'='
number|'10'
op|','
name|'pinned_cpus'
op|'='
name|'set'
op|'('
op|'['
number|'3'
op|','
number|'4'
op|']'
op|')'
op|','
nl|'\n'
name|'siblings'
op|'='
op|'['
name|'set'
op|'('
op|'['
number|'5'
op|','
number|'6'
op|']'
op|')'
op|']'
op|','
nl|'\n'
name|'mempages'
op|'='
op|'['
name|'pt2'
op|']'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'cell1'
op|','
name|'cell2'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_numa_cell_not_equivalent_different_pages
dedent|''
name|'def'
name|'test_numa_cell_not_equivalent_different_pages'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'pt1'
op|'='
name|'objects'
op|'.'
name|'NUMAPagesTopology'
op|'('
name|'size_kb'
op|'='
number|'1024'
op|','
name|'total'
op|'='
number|'32'
op|','
name|'used'
op|'='
number|'0'
op|')'
newline|'\n'
name|'pt2'
op|'='
name|'objects'
op|'.'
name|'NUMAPagesTopology'
op|'('
name|'size_kb'
op|'='
number|'1024'
op|','
name|'total'
op|'='
number|'32'
op|','
name|'used'
op|'='
number|'1'
op|')'
newline|'\n'
name|'cell1'
op|'='
name|'objects'
op|'.'
name|'NUMACell'
op|'('
name|'id'
op|'='
number|'1'
op|','
name|'cpuset'
op|'='
name|'set'
op|'('
op|'['
number|'1'
op|','
number|'2'
op|']'
op|')'
op|','
name|'memory'
op|'='
number|'32'
op|','
nl|'\n'
name|'cpu_usage'
op|'='
number|'10'
op|','
name|'pinned_cpus'
op|'='
name|'set'
op|'('
op|'['
number|'3'
op|','
number|'4'
op|']'
op|')'
op|','
nl|'\n'
name|'siblings'
op|'='
op|'['
name|'set'
op|'('
op|'['
number|'5'
op|','
number|'6'
op|']'
op|')'
op|']'
op|','
nl|'\n'
name|'mempages'
op|'='
op|'['
name|'pt1'
op|']'
op|')'
newline|'\n'
name|'cell2'
op|'='
name|'objects'
op|'.'
name|'NUMACell'
op|'('
name|'id'
op|'='
number|'1'
op|','
name|'cpuset'
op|'='
name|'set'
op|'('
op|'['
number|'1'
op|','
number|'2'
op|']'
op|')'
op|','
name|'memory'
op|'='
number|'32'
op|','
nl|'\n'
name|'cpu_usage'
op|'='
number|'10'
op|','
name|'pinned_cpus'
op|'='
name|'set'
op|'('
op|'['
number|'3'
op|','
number|'4'
op|']'
op|')'
op|','
nl|'\n'
name|'siblings'
op|'='
op|'['
name|'set'
op|'('
op|'['
number|'5'
op|','
number|'6'
op|']'
op|')'
op|']'
op|','
nl|'\n'
name|'mempages'
op|'='
op|'['
name|'pt2'
op|']'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertNotEqual'
op|'('
name|'cell1'
op|','
name|'cell2'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_reserved_property_not_set
dedent|''
name|'def'
name|'test_reserved_property_not_set'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'p'
op|'='
name|'objects'
op|'.'
name|'NUMAPagesTopology'
op|'('
nl|'\n'
comment|'# To have reserved not set is similar than to have receive'
nl|'\n'
comment|'# a NUMAPageTopology version 1.0'
nl|'\n'
name|'size_kb'
op|'='
number|'1024'
op|','
name|'total'
op|'='
number|'64'
op|','
name|'used'
op|'='
number|'32'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'32'
op|','
name|'p'
op|'.'
name|'free'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
dedent|''
dedent|''
name|'class'
name|'TestNUMA'
op|'('
name|'test_objects'
op|'.'
name|'_LocalTest'
op|','
nl|'\n'
DECL|class|TestNUMA
name|'_TestNUMA'
op|')'
op|':'
newline|'\n'
indent|' '
name|'pass'
newline|'\n'
nl|'\n'
nl|'\n'
dedent|''
name|'class'
name|'TestNUMARemote'
op|'('
name|'test_objects'
op|'.'
name|'_RemoteTest'
op|','
nl|'\n'
DECL|class|TestNUMARemote
name|'_TestNUMA'
op|')'
op|':'
newline|'\n'
indent|' '
name|'pass'
newline|'\n'
dedent|''
endmarker|''
end_unit
| 10.823946
| 88
| 0.5637
| 4,417
| 29,019
| 3.632782
| 0.044827
| 0.173875
| 0.048236
| 0.043749
| 0.922784
| 0.891375
| 0.867007
| 0.851178
| 0.816341
| 0.78786
| 0
| 0.020851
| 0.102588
| 29,019
| 2,680
| 89
| 10.827985
| 0.595308
| 0
| 0
| 0.961194
| 0
| 0
| 0.298046
| 0.023674
| 0
| 0
| 0
| 0
| 0.016045
| 0
| null | null | 0.000746
| 0.001119
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
49095330d38438dbd6e613b65cfae75e4e99e1cd
| 291
|
py
|
Python
|
sqlakeyset/serial/__init__.py
|
interjektio/sqlakeyset
|
93bc7da469d56c840836de31341ccb9863c31f63
|
[
"Unlicense"
] | 233
|
2016-08-26T10:53:54.000Z
|
2022-03-05T19:26:36.000Z
|
sqlakeyset/serial/__init__.py
|
interjektio/sqlakeyset
|
93bc7da469d56c840836de31341ccb9863c31f63
|
[
"Unlicense"
] | 48
|
2016-08-27T01:47:21.000Z
|
2022-01-13T14:55:11.000Z
|
sqlakeyset/serial/__init__.py
|
interjektio/sqlakeyset
|
93bc7da469d56c840836de31341ccb9863c31f63
|
[
"Unlicense"
] | 35
|
2016-08-26T18:46:20.000Z
|
2022-02-11T23:33:38.000Z
|
from .serial import (
BadBookmark,
ConfigurationError,
InvalidPage,
PageSerializationError,
Serial,
UnregisteredType,
)
__all__ = [
"BadBookmark",
"ConfigurationError",
"InvalidPage",
"PageSerializationError",
"Serial",
"UnregisteredType",
]
| 16.166667
| 29
| 0.656357
| 16
| 291
| 11.6875
| 0.5625
| 0.31016
| 0.427807
| 0.663102
| 0.898396
| 0.898396
| 0
| 0
| 0
| 0
| 0
| 0
| 0.24055
| 291
| 17
| 30
| 17.117647
| 0.846154
| 0
| 0
| 0
| 0
| 0
| 0.28866
| 0.075601
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.0625
| 0
| 0.0625
| 0
| 1
| 0
| 1
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
494afb7440efd09decda7e6d3645956a63d9d894
| 309,311
|
py
|
Python
|
tests/asp/AllAnswerSets/aggregates/solitaire7.test.py
|
bernardocuteri/wasp
|
05c8f961776dbdbf7afbf905ee00fc262eba51ad
|
[
"Apache-2.0"
] | 19
|
2015-12-03T08:53:45.000Z
|
2022-03-31T02:09:43.000Z
|
tests/asp/AllAnswerSets/aggregates/solitaire7.test.py
|
bernardocuteri/wasp
|
05c8f961776dbdbf7afbf905ee00fc262eba51ad
|
[
"Apache-2.0"
] | 80
|
2017-11-25T07:57:32.000Z
|
2018-06-10T19:03:30.000Z
|
tests/asp/AllAnswerSets/aggregates/solitaire7.test.py
|
bernardocuteri/wasp
|
05c8f961776dbdbf7afbf905ee00fc262eba51ad
|
[
"Apache-2.0"
] | 6
|
2015-01-15T07:51:48.000Z
|
2020-06-18T14:47:48.000Z
|
input = """
1 2 0 0
1 3 0 0
1 4 0 0
1 5 0 0
1 6 0 0
1 7 0 0
1 8 0 0
1 9 0 0
1 10 0 0
1 11 0 0
1 12 0 0
1 13 0 0
1 14 0 0
1 15 0 0
1 16 0 0
1 17 0 0
1 18 0 0
1 19 0 0
1 20 0 0
1 21 0 0
1 22 0 0
1 23 0 0
1 24 0 0
1 25 0 0
1 26 0 0
1 27 0 0
1 28 0 0
1 29 0 0
1 30 0 0
1 31 0 0
1 32 0 0
1 33 0 0
1 34 0 0
1 35 0 0
1 36 0 0
1 37 0 0
1 38 0 0
1 39 0 0
1 40 0 0
1 41 0 0
1 42 0 0
1 43 0 0
1 44 0 0
1 45 0 0
1 46 0 0
1 47 0 0
1 48 0 0
1 49 0 0
1 50 0 0
1 51 0 0
1 52 0 0
1 53 0 0
1 54 0 0
1 55 0 0
1 56 0 0
1 57 0 0
1 58 0 0
1 59 0 0
1 60 0 0
1 61 0 0
1 62 0 0
1 63 0 0
1 64 0 0
1 65 0 0
1 66 0 0
1 67 0 0
1 68 0 0
1 69 0 0
1 70 0 0
1 71 0 0
1 72 0 0
1 73 0 0
1 74 0 0
1 75 0 0
1 76 0 0
1 77 0 0
1 78 0 0
1 79 0 0
1 80 0 0
1 81 0 0
1 82 0 0
1 83 0 0
3 132 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 1 0 216
2 217 132 0 1 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215
1 1 2 1 217 216
2 218 132 0 2 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215
1 1 2 0 218 216
1 216 0 0
3 132 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 1 0 351
2 352 132 0 1 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350
1 1 2 1 352 351
2 353 132 0 2 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350
1 1 2 0 353 351
1 351 0 0
3 132 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 1 0 486
2 487 132 0 1 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485
1 1 2 1 487 486
2 488 132 0 2 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485
1 1 2 0 488 486
1 486 0 0
1 489 1 0 215
1 490 1 0 214
1 491 1 0 213
1 492 1 0 212
1 493 1 0 211
1 494 1 0 210
1 495 1 0 209
1 496 1 0 208
1 497 1 0 207
1 498 1 0 206
1 499 1 0 205
1 500 1 0 204
1 501 1 0 203
1 502 1 0 202
1 503 1 0 201
1 504 1 0 200
1 505 1 0 199
1 506 1 0 198
1 507 1 0 197
1 508 1 0 196
1 509 1 0 195
1 510 1 0 194
1 511 1 0 193
1 512 1 0 192
1 513 1 0 191
1 514 1 0 190
1 515 1 0 189
1 516 1 0 188
1 517 1 0 187
1 518 1 0 186
1 519 1 0 185
1 520 1 0 184
1 521 1 0 183
1 522 1 0 350
1 523 1 0 349
1 524 1 0 348
1 525 1 0 347
1 526 1 0 346
1 527 1 0 345
1 528 1 0 344
1 529 1 0 343
1 530 1 0 342
1 531 1 0 341
1 532 1 0 340
1 533 1 0 339
1 534 1 0 338
1 535 1 0 337
1 536 1 0 336
1 537 1 0 335
1 538 1 0 334
1 539 1 0 333
1 540 1 0 332
1 541 1 0 331
1 542 1 0 330
1 543 1 0 329
1 544 1 0 328
1 545 1 0 327
1 546 1 0 326
1 547 1 0 325
1 548 1 0 324
1 549 1 0 323
1 550 1 0 322
1 551 1 0 321
1 552 1 0 320
1 553 1 0 319
1 554 1 0 318
1 555 1 0 485
1 556 1 0 484
1 557 1 0 483
1 558 1 0 482
1 559 1 0 481
1 560 1 0 480
1 561 1 0 479
1 562 1 0 478
1 563 1 0 477
1 564 1 0 476
1 565 1 0 475
1 566 1 0 474
1 567 1 0 473
1 568 1 0 472
1 569 1 0 471
1 570 1 0 470
1 571 1 0 469
1 572 1 0 468
1 573 1 0 467
1 574 1 0 466
1 575 1 0 465
1 576 1 0 464
1 577 1 0 463
1 578 1 0 462
1 579 1 0 461
1 580 1 0 460
1 581 1 0 459
1 582 1 0 458
1 583 1 0 457
1 584 1 0 456
1 585 1 0 455
1 586 1 0 454
1 587 1 0 453
1 490 1 0 215
1 491 1 0 214
1 493 1 0 212
1 494 1 0 211
1 496 1 0 209
1 497 1 0 208
1 498 1 0 207
1 499 1 0 206
1 500 1 0 205
1 501 1 0 204
1 503 1 0 202
1 504 1 0 201
1 505 1 0 200
1 506 1 0 199
1 507 1 0 198
1 508 1 0 197
1 510 1 0 195
1 511 1 0 194
1 512 1 0 193
1 513 1 0 192
1 514 1 0 191
1 515 1 0 190
1 517 1 0 188
1 518 1 0 187
1 520 1 0 185
1 521 1 0 184
1 523 1 0 350
1 524 1 0 349
1 526 1 0 347
1 527 1 0 346
1 529 1 0 344
1 530 1 0 343
1 531 1 0 342
1 532 1 0 341
1 533 1 0 340
1 534 1 0 339
1 536 1 0 337
1 537 1 0 336
1 538 1 0 335
1 539 1 0 334
1 540 1 0 333
1 541 1 0 332
1 543 1 0 330
1 544 1 0 329
1 545 1 0 328
1 546 1 0 327
1 547 1 0 326
1 548 1 0 325
1 550 1 0 323
1 551 1 0 322
1 553 1 0 320
1 554 1 0 319
1 556 1 0 485
1 557 1 0 484
1 559 1 0 482
1 560 1 0 481
1 562 1 0 479
1 563 1 0 478
1 564 1 0 477
1 565 1 0 476
1 566 1 0 475
1 567 1 0 474
1 569 1 0 472
1 570 1 0 471
1 571 1 0 470
1 572 1 0 469
1 573 1 0 468
1 574 1 0 467
1 576 1 0 465
1 577 1 0 464
1 578 1 0 463
1 579 1 0 462
1 580 1 0 461
1 581 1 0 460
1 583 1 0 458
1 584 1 0 457
1 586 1 0 455
1 587 1 0 454
1 588 1 0 215
1 589 1 0 212
1 590 1 0 209
1 591 1 0 208
1 592 1 0 207
1 593 1 0 206
1 594 1 0 205
1 595 1 0 202
1 596 1 0 201
1 597 1 0 200
1 598 1 0 199
1 599 1 0 198
1 600 1 0 195
1 601 1 0 194
1 602 1 0 193
1 603 1 0 192
1 604 1 0 191
1 605 1 0 188
1 606 1 0 185
1 607 1 0 350
1 608 1 0 347
1 609 1 0 344
1 610 1 0 343
1 611 1 0 342
1 612 1 0 341
1 613 1 0 340
1 614 1 0 337
1 615 1 0 336
1 616 1 0 335
1 617 1 0 334
1 618 1 0 333
1 619 1 0 330
1 620 1 0 329
1 621 1 0 328
1 622 1 0 327
1 623 1 0 326
1 624 1 0 323
1 625 1 0 320
1 626 1 0 485
1 627 1 0 482
1 628 1 0 479
1 629 1 0 478
1 630 1 0 477
1 631 1 0 476
1 632 1 0 475
1 633 1 0 472
1 634 1 0 471
1 635 1 0 470
1 636 1 0 469
1 637 1 0 468
1 638 1 0 465
1 639 1 0 464
1 640 1 0 463
1 641 1 0 462
1 642 1 0 461
1 643 1 0 458
1 644 1 0 455
1 489 1 0 182
1 490 1 0 181
1 491 1 0 180
1 492 1 0 179
1 493 1 0 178
1 494 1 0 177
1 495 1 0 176
1 496 1 0 175
1 497 1 0 174
1 498 1 0 173
1 499 1 0 172
1 500 1 0 171
1 501 1 0 170
1 502 1 0 169
1 503 1 0 168
1 504 1 0 167
1 505 1 0 166
1 506 1 0 165
1 507 1 0 164
1 508 1 0 163
1 509 1 0 162
1 510 1 0 161
1 511 1 0 160
1 512 1 0 159
1 513 1 0 158
1 514 1 0 157
1 515 1 0 156
1 516 1 0 155
1 517 1 0 154
1 518 1 0 153
1 519 1 0 152
1 520 1 0 151
1 521 1 0 150
1 522 1 0 317
1 523 1 0 316
1 524 1 0 315
1 525 1 0 314
1 526 1 0 313
1 527 1 0 312
1 528 1 0 311
1 529 1 0 310
1 530 1 0 309
1 531 1 0 308
1 532 1 0 307
1 533 1 0 306
1 534 1 0 305
1 535 1 0 304
1 536 1 0 303
1 537 1 0 302
1 538 1 0 301
1 539 1 0 300
1 540 1 0 299
1 541 1 0 298
1 542 1 0 297
1 543 1 0 296
1 544 1 0 295
1 545 1 0 294
1 546 1 0 293
1 547 1 0 292
1 548 1 0 291
1 549 1 0 290
1 550 1 0 289
1 551 1 0 288
1 552 1 0 287
1 553 1 0 286
1 554 1 0 285
1 555 1 0 452
1 556 1 0 451
1 557 1 0 450
1 558 1 0 449
1 559 1 0 448
1 560 1 0 447
1 561 1 0 446
1 562 1 0 445
1 563 1 0 444
1 564 1 0 443
1 565 1 0 442
1 566 1 0 441
1 567 1 0 440
1 568 1 0 439
1 569 1 0 438
1 570 1 0 437
1 571 1 0 436
1 572 1 0 435
1 573 1 0 434
1 574 1 0 433
1 575 1 0 432
1 576 1 0 431
1 577 1 0 430
1 578 1 0 429
1 579 1 0 428
1 580 1 0 427
1 581 1 0 426
1 582 1 0 425
1 583 1 0 424
1 584 1 0 423
1 585 1 0 422
1 586 1 0 421
1 587 1 0 420
1 489 1 0 181
1 490 1 0 180
1 492 1 0 178
1 493 1 0 177
1 495 1 0 175
1 496 1 0 174
1 497 1 0 173
1 498 1 0 172
1 499 1 0 171
1 500 1 0 170
1 502 1 0 168
1 503 1 0 167
1 504 1 0 166
1 505 1 0 165
1 506 1 0 164
1 507 1 0 163
1 509 1 0 161
1 510 1 0 160
1 511 1 0 159
1 512 1 0 158
1 513 1 0 157
1 514 1 0 156
1 516 1 0 154
1 517 1 0 153
1 519 1 0 151
1 520 1 0 150
1 522 1 0 316
1 523 1 0 315
1 525 1 0 313
1 526 1 0 312
1 528 1 0 310
1 529 1 0 309
1 530 1 0 308
1 531 1 0 307
1 532 1 0 306
1 533 1 0 305
1 535 1 0 303
1 536 1 0 302
1 537 1 0 301
1 538 1 0 300
1 539 1 0 299
1 540 1 0 298
1 542 1 0 296
1 543 1 0 295
1 544 1 0 294
1 545 1 0 293
1 546 1 0 292
1 547 1 0 291
1 549 1 0 289
1 550 1 0 288
1 552 1 0 286
1 553 1 0 285
1 555 1 0 451
1 556 1 0 450
1 558 1 0 448
1 559 1 0 447
1 561 1 0 445
1 562 1 0 444
1 563 1 0 443
1 564 1 0 442
1 565 1 0 441
1 566 1 0 440
1 568 1 0 438
1 569 1 0 437
1 570 1 0 436
1 571 1 0 435
1 572 1 0 434
1 573 1 0 433
1 575 1 0 431
1 576 1 0 430
1 577 1 0 429
1 578 1 0 428
1 579 1 0 427
1 580 1 0 426
1 582 1 0 424
1 583 1 0 423
1 585 1 0 421
1 586 1 0 420
1 645 1 0 180
1 646 1 0 177
1 647 1 0 174
1 648 1 0 173
1 590 1 0 172
1 591 1 0 171
1 592 1 0 170
1 649 1 0 167
1 650 1 0 166
1 595 1 0 165
1 596 1 0 164
1 597 1 0 163
1 651 1 0 160
1 652 1 0 159
1 600 1 0 158
1 601 1 0 157
1 602 1 0 156
1 653 1 0 153
1 654 1 0 150
1 655 1 0 315
1 656 1 0 312
1 657 1 0 309
1 658 1 0 308
1 609 1 0 307
1 610 1 0 306
1 611 1 0 305
1 659 1 0 302
1 660 1 0 301
1 614 1 0 300
1 615 1 0 299
1 616 1 0 298
1 661 1 0 295
1 662 1 0 294
1 619 1 0 293
1 620 1 0 292
1 621 1 0 291
1 663 1 0 288
1 664 1 0 285
1 665 1 0 450
1 666 1 0 447
1 667 1 0 444
1 668 1 0 443
1 628 1 0 442
1 629 1 0 441
1 630 1 0 440
1 669 1 0 437
1 670 1 0 436
1 633 1 0 435
1 634 1 0 434
1 635 1 0 433
1 671 1 0 430
1 672 1 0 429
1 638 1 0 428
1 639 1 0 427
1 640 1 0 426
1 673 1 0 423
1 674 1 0 420
1 489 1 0 149
1 490 1 0 148
1 491 1 0 147
1 492 1 0 146
1 493 1 0 145
1 494 1 0 144
1 495 1 0 143
1 496 1 0 142
1 497 1 0 141
1 498 1 0 140
1 499 1 0 139
1 500 1 0 138
1 501 1 0 137
1 502 1 0 136
1 503 1 0 135
1 504 1 0 134
1 505 1 0 133
1 506 1 0 132
1 507 1 0 131
1 508 1 0 130
1 509 1 0 129
1 510 1 0 128
1 511 1 0 127
1 512 1 0 126
1 513 1 0 125
1 514 1 0 124
1 515 1 0 123
1 516 1 0 122
1 517 1 0 121
1 518 1 0 120
1 519 1 0 119
1 520 1 0 118
1 521 1 0 117
1 522 1 0 284
1 523 1 0 283
1 524 1 0 282
1 525 1 0 281
1 526 1 0 280
1 527 1 0 279
1 528 1 0 278
1 529 1 0 277
1 530 1 0 276
1 531 1 0 275
1 532 1 0 274
1 533 1 0 273
1 534 1 0 272
1 535 1 0 271
1 536 1 0 270
1 537 1 0 269
1 538 1 0 268
1 539 1 0 267
1 540 1 0 266
1 541 1 0 265
1 542 1 0 264
1 543 1 0 263
1 544 1 0 262
1 545 1 0 261
1 546 1 0 260
1 547 1 0 259
1 548 1 0 258
1 549 1 0 257
1 550 1 0 256
1 551 1 0 255
1 552 1 0 254
1 553 1 0 253
1 554 1 0 252
1 555 1 0 419
1 556 1 0 418
1 557 1 0 417
1 558 1 0 416
1 559 1 0 415
1 560 1 0 414
1 561 1 0 413
1 562 1 0 412
1 563 1 0 411
1 564 1 0 410
1 565 1 0 409
1 566 1 0 408
1 567 1 0 407
1 568 1 0 406
1 569 1 0 405
1 570 1 0 404
1 571 1 0 403
1 572 1 0 402
1 573 1 0 401
1 574 1 0 400
1 575 1 0 399
1 576 1 0 398
1 577 1 0 397
1 578 1 0 396
1 579 1 0 395
1 580 1 0 394
1 581 1 0 393
1 582 1 0 392
1 583 1 0 391
1 584 1 0 390
1 585 1 0 389
1 586 1 0 388
1 587 1 0 387
1 489 1 0 146
1 490 1 0 145
1 491 1 0 144
1 502 1 0 143
1 503 1 0 142
1 504 1 0 141
1 505 1 0 140
1 506 1 0 139
1 507 1 0 138
1 508 1 0 137
1 509 1 0 136
1 510 1 0 135
1 511 1 0 134
1 512 1 0 133
1 513 1 0 132
1 514 1 0 131
1 515 1 0 130
1 492 1 0 127
1 493 1 0 126
1 494 1 0 125
1 497 1 0 122
1 498 1 0 121
1 499 1 0 120
1 516 1 0 119
1 517 1 0 118
1 518 1 0 117
1 522 1 0 281
1 523 1 0 280
1 524 1 0 279
1 535 1 0 278
1 536 1 0 277
1 537 1 0 276
1 538 1 0 275
1 539 1 0 274
1 540 1 0 273
1 541 1 0 272
1 542 1 0 271
1 543 1 0 270
1 544 1 0 269
1 545 1 0 268
1 546 1 0 267
1 547 1 0 266
1 548 1 0 265
1 525 1 0 262
1 526 1 0 261
1 527 1 0 260
1 530 1 0 257
1 531 1 0 256
1 532 1 0 255
1 549 1 0 254
1 550 1 0 253
1 551 1 0 252
1 555 1 0 416
1 556 1 0 415
1 557 1 0 414
1 568 1 0 413
1 569 1 0 412
1 570 1 0 411
1 571 1 0 410
1 572 1 0 409
1 573 1 0 408
1 574 1 0 407
1 575 1 0 406
1 576 1 0 405
1 577 1 0 404
1 578 1 0 403
1 579 1 0 402
1 580 1 0 401
1 581 1 0 400
1 558 1 0 397
1 559 1 0 396
1 560 1 0 395
1 563 1 0 392
1 564 1 0 391
1 565 1 0 390
1 582 1 0 389
1 583 1 0 388
1 584 1 0 387
1 651 1 0 143
1 652 1 0 142
1 600 1 0 141
1 601 1 0 140
1 602 1 0 139
1 603 1 0 138
1 604 1 0 137
1 646 1 0 134
1 675 1 0 133
1 589 1 0 132
1 645 1 0 127
1 676 1 0 126
1 588 1 0 125
1 595 1 0 122
1 596 1 0 121
1 597 1 0 120
1 590 1 0 119
1 591 1 0 118
1 592 1 0 117
1 661 1 0 278
1 662 1 0 277
1 619 1 0 276
1 620 1 0 275
1 621 1 0 274
1 622 1 0 273
1 623 1 0 272
1 656 1 0 269
1 677 1 0 268
1 608 1 0 267
1 655 1 0 262
1 678 1 0 261
1 607 1 0 260
1 614 1 0 257
1 615 1 0 256
1 616 1 0 255
1 609 1 0 254
1 610 1 0 253
1 611 1 0 252
1 671 1 0 413
1 672 1 0 412
1 638 1 0 411
1 639 1 0 410
1 640 1 0 409
1 641 1 0 408
1 642 1 0 407
1 666 1 0 404
1 679 1 0 403
1 627 1 0 402
1 665 1 0 397
1 680 1 0 396
1 626 1 0 395
1 633 1 0 392
1 634 1 0 391
1 635 1 0 390
1 628 1 0 389
1 629 1 0 388
1 630 1 0 387
1 489 1 0 116
1 490 1 0 115
1 491 1 0 114
1 492 1 0 113
1 493 1 0 112
1 494 1 0 111
1 495 1 0 110
1 496 1 0 109
1 497 1 0 108
1 498 1 0 107
1 499 1 0 106
1 500 1 0 105
1 501 1 0 104
1 502 1 0 103
1 503 1 0 102
1 504 1 0 101
1 505 1 0 100
1 506 1 0 99
1 507 1 0 98
1 508 1 0 97
1 509 1 0 96
1 510 1 0 95
1 511 1 0 94
1 512 1 0 93
1 513 1 0 92
1 514 1 0 91
1 515 1 0 90
1 516 1 0 89
1 517 1 0 88
1 518 1 0 87
1 519 1 0 86
1 520 1 0 85
1 521 1 0 84
1 522 1 0 251
1 523 1 0 250
1 524 1 0 249
1 525 1 0 248
1 526 1 0 247
1 527 1 0 246
1 528 1 0 245
1 529 1 0 244
1 530 1 0 243
1 531 1 0 242
1 532 1 0 241
1 533 1 0 240
1 534 1 0 239
1 535 1 0 238
1 536 1 0 237
1 537 1 0 236
1 538 1 0 235
1 539 1 0 234
1 540 1 0 233
1 541 1 0 232
1 542 1 0 231
1 543 1 0 230
1 544 1 0 229
1 545 1 0 228
1 546 1 0 227
1 547 1 0 226
1 548 1 0 225
1 549 1 0 224
1 550 1 0 223
1 551 1 0 222
1 552 1 0 221
1 553 1 0 220
1 554 1 0 219
1 555 1 0 386
1 556 1 0 385
1 557 1 0 384
1 558 1 0 383
1 559 1 0 382
1 560 1 0 381
1 561 1 0 380
1 562 1 0 379
1 563 1 0 378
1 564 1 0 377
1 565 1 0 376
1 566 1 0 375
1 567 1 0 374
1 568 1 0 373
1 569 1 0 372
1 570 1 0 371
1 571 1 0 370
1 572 1 0 369
1 573 1 0 368
1 574 1 0 367
1 575 1 0 366
1 576 1 0 365
1 577 1 0 364
1 578 1 0 363
1 579 1 0 362
1 580 1 0 361
1 581 1 0 360
1 582 1 0 359
1 583 1 0 358
1 584 1 0 357
1 585 1 0 356
1 586 1 0 355
1 587 1 0 354
1 492 1 0 116
1 493 1 0 115
1 494 1 0 114
1 511 1 0 113
1 512 1 0 112
1 513 1 0 111
1 516 1 0 108
1 517 1 0 107
1 518 1 0 106
1 495 1 0 103
1 496 1 0 102
1 497 1 0 101
1 498 1 0 100
1 499 1 0 99
1 500 1 0 98
1 501 1 0 97
1 502 1 0 96
1 503 1 0 95
1 504 1 0 94
1 505 1 0 93
1 506 1 0 92
1 507 1 0 91
1 508 1 0 90
1 519 1 0 89
1 520 1 0 88
1 521 1 0 87
1 525 1 0 251
1 526 1 0 250
1 527 1 0 249
1 544 1 0 248
1 545 1 0 247
1 546 1 0 246
1 549 1 0 243
1 550 1 0 242
1 551 1 0 241
1 528 1 0 238
1 529 1 0 237
1 530 1 0 236
1 531 1 0 235
1 532 1 0 234
1 533 1 0 233
1 534 1 0 232
1 535 1 0 231
1 536 1 0 230
1 537 1 0 229
1 538 1 0 228
1 539 1 0 227
1 540 1 0 226
1 541 1 0 225
1 552 1 0 224
1 553 1 0 223
1 554 1 0 222
1 558 1 0 386
1 559 1 0 385
1 560 1 0 384
1 577 1 0 383
1 578 1 0 382
1 579 1 0 381
1 582 1 0 378
1 583 1 0 377
1 584 1 0 376
1 561 1 0 373
1 562 1 0 372
1 563 1 0 371
1 564 1 0 370
1 565 1 0 369
1 566 1 0 368
1 567 1 0 367
1 568 1 0 366
1 569 1 0 365
1 570 1 0 364
1 571 1 0 363
1 572 1 0 362
1 573 1 0 361
1 574 1 0 360
1 585 1 0 359
1 586 1 0 358
1 587 1 0 357
1 600 1 0 116
1 601 1 0 115
1 602 1 0 114
1 595 1 0 113
1 596 1 0 112
1 597 1 0 111
1 654 1 0 108
1 681 1 0 107
1 606 1 0 106
1 653 1 0 101
1 682 1 0 100
1 605 1 0 99
1 647 1 0 96
1 648 1 0 95
1 590 1 0 94
1 591 1 0 93
1 592 1 0 92
1 593 1 0 91
1 594 1 0 90
1 619 1 0 251
1 620 1 0 250
1 621 1 0 249
1 614 1 0 248
1 615 1 0 247
1 616 1 0 246
1 664 1 0 243
1 683 1 0 242
1 625 1 0 241
1 663 1 0 236
1 684 1 0 235
1 624 1 0 234
1 657 1 0 231
1 658 1 0 230
1 609 1 0 229
1 610 1 0 228
1 611 1 0 227
1 612 1 0 226
1 613 1 0 225
1 638 1 0 386
1 639 1 0 385
1 640 1 0 384
1 633 1 0 383
1 634 1 0 382
1 635 1 0 381
1 674 1 0 378
1 685 1 0 377
1 644 1 0 376
1 673 1 0 371
1 686 1 0 370
1 643 1 0 369
1 667 1 0 366
1 668 1 0 365
1 628 1 0 364
1 629 1 0 363
1 630 1 0 362
1 631 1 0 361
1 632 1 0 360
1 687 1 0 215
1 688 1 0 214
1 689 1 0 213
1 690 1 0 212
1 691 1 0 211
1 692 1 0 210
1 693 1 0 209
1 694 1 0 208
1 695 1 0 207
1 696 1 0 206
1 697 1 0 205
1 698 1 0 204
1 699 1 0 203
1 700 1 0 202
1 701 1 0 201
1 702 1 0 200
1 703 1 0 199
1 704 1 0 198
1 705 1 0 197
1 706 1 0 196
1 707 1 0 195
1 708 1 0 194
1 709 1 0 193
1 710 1 0 192
1 711 1 0 191
1 712 1 0 190
1 713 1 0 189
1 714 1 0 188
1 715 1 0 187
1 716 1 0 186
1 717 1 0 185
1 718 1 0 184
1 719 1 0 183
1 720 1 0 350
1 721 1 0 349
1 722 1 0 348
1 723 1 0 347
1 724 1 0 346
1 725 1 0 345
1 726 1 0 344
1 727 1 0 343
1 728 1 0 342
1 729 1 0 341
1 730 1 0 340
1 731 1 0 339
1 732 1 0 338
1 733 1 0 337
1 734 1 0 336
1 735 1 0 335
1 736 1 0 334
1 737 1 0 333
1 738 1 0 332
1 739 1 0 331
1 740 1 0 330
1 741 1 0 329
1 742 1 0 328
1 743 1 0 327
1 744 1 0 326
1 745 1 0 325
1 746 1 0 324
1 747 1 0 323
1 748 1 0 322
1 749 1 0 321
1 750 1 0 320
1 751 1 0 319
1 752 1 0 318
1 753 1 0 485
1 754 1 0 484
1 755 1 0 483
1 756 1 0 482
1 757 1 0 481
1 758 1 0 480
1 759 1 0 479
1 760 1 0 478
1 761 1 0 477
1 762 1 0 476
1 763 1 0 475
1 764 1 0 474
1 765 1 0 473
1 766 1 0 472
1 767 1 0 471
1 768 1 0 470
1 769 1 0 469
1 770 1 0 468
1 771 1 0 467
1 772 1 0 466
1 773 1 0 465
1 774 1 0 464
1 775 1 0 463
1 776 1 0 462
1 777 1 0 461
1 778 1 0 460
1 779 1 0 459
1 780 1 0 458
1 781 1 0 457
1 782 1 0 456
1 783 1 0 455
1 784 1 0 454
1 785 1 0 453
1 688 1 0 215
1 689 1 0 214
1 691 1 0 212
1 692 1 0 211
1 694 1 0 209
1 695 1 0 208
1 696 1 0 207
1 697 1 0 206
1 698 1 0 205
1 699 1 0 204
1 701 1 0 202
1 702 1 0 201
1 703 1 0 200
1 704 1 0 199
1 705 1 0 198
1 706 1 0 197
1 708 1 0 195
1 709 1 0 194
1 710 1 0 193
1 711 1 0 192
1 712 1 0 191
1 713 1 0 190
1 715 1 0 188
1 716 1 0 187
1 718 1 0 185
1 719 1 0 184
1 721 1 0 350
1 722 1 0 349
1 724 1 0 347
1 725 1 0 346
1 727 1 0 344
1 728 1 0 343
1 729 1 0 342
1 730 1 0 341
1 731 1 0 340
1 732 1 0 339
1 734 1 0 337
1 735 1 0 336
1 736 1 0 335
1 737 1 0 334
1 738 1 0 333
1 739 1 0 332
1 741 1 0 330
1 742 1 0 329
1 743 1 0 328
1 744 1 0 327
1 745 1 0 326
1 746 1 0 325
1 748 1 0 323
1 749 1 0 322
1 751 1 0 320
1 752 1 0 319
1 754 1 0 485
1 755 1 0 484
1 757 1 0 482
1 758 1 0 481
1 760 1 0 479
1 761 1 0 478
1 762 1 0 477
1 763 1 0 476
1 764 1 0 475
1 765 1 0 474
1 767 1 0 472
1 768 1 0 471
1 769 1 0 470
1 770 1 0 469
1 771 1 0 468
1 772 1 0 467
1 774 1 0 465
1 775 1 0 464
1 776 1 0 463
1 777 1 0 462
1 778 1 0 461
1 779 1 0 460
1 781 1 0 458
1 782 1 0 457
1 784 1 0 455
1 785 1 0 454
1 689 1 0 215
1 692 1 0 212
1 695 1 0 209
1 696 1 0 208
1 697 1 0 207
1 698 1 0 206
1 699 1 0 205
1 702 1 0 202
1 703 1 0 201
1 704 1 0 200
1 705 1 0 199
1 706 1 0 198
1 709 1 0 195
1 710 1 0 194
1 711 1 0 193
1 712 1 0 192
1 713 1 0 191
1 716 1 0 188
1 719 1 0 185
1 722 1 0 350
1 725 1 0 347
1 728 1 0 344
1 729 1 0 343
1 730 1 0 342
1 731 1 0 341
1 732 1 0 340
1 735 1 0 337
1 736 1 0 336
1 737 1 0 335
1 738 1 0 334
1 739 1 0 333
1 742 1 0 330
1 743 1 0 329
1 744 1 0 328
1 745 1 0 327
1 746 1 0 326
1 749 1 0 323
1 752 1 0 320
1 755 1 0 485
1 758 1 0 482
1 761 1 0 479
1 762 1 0 478
1 763 1 0 477
1 764 1 0 476
1 765 1 0 475
1 768 1 0 472
1 769 1 0 471
1 770 1 0 470
1 771 1 0 469
1 772 1 0 468
1 775 1 0 465
1 776 1 0 464
1 777 1 0 463
1 778 1 0 462
1 779 1 0 461
1 782 1 0 458
1 785 1 0 455
1 687 1 0 182
1 688 1 0 181
1 689 1 0 180
1 690 1 0 179
1 691 1 0 178
1 692 1 0 177
1 693 1 0 176
1 694 1 0 175
1 695 1 0 174
1 696 1 0 173
1 697 1 0 172
1 698 1 0 171
1 699 1 0 170
1 700 1 0 169
1 701 1 0 168
1 702 1 0 167
1 703 1 0 166
1 704 1 0 165
1 705 1 0 164
1 706 1 0 163
1 707 1 0 162
1 708 1 0 161
1 709 1 0 160
1 710 1 0 159
1 711 1 0 158
1 712 1 0 157
1 713 1 0 156
1 714 1 0 155
1 715 1 0 154
1 716 1 0 153
1 717 1 0 152
1 718 1 0 151
1 719 1 0 150
1 720 1 0 317
1 721 1 0 316
1 722 1 0 315
1 723 1 0 314
1 724 1 0 313
1 725 1 0 312
1 726 1 0 311
1 727 1 0 310
1 728 1 0 309
1 729 1 0 308
1 730 1 0 307
1 731 1 0 306
1 732 1 0 305
1 733 1 0 304
1 734 1 0 303
1 735 1 0 302
1 736 1 0 301
1 737 1 0 300
1 738 1 0 299
1 739 1 0 298
1 740 1 0 297
1 741 1 0 296
1 742 1 0 295
1 743 1 0 294
1 744 1 0 293
1 745 1 0 292
1 746 1 0 291
1 747 1 0 290
1 748 1 0 289
1 749 1 0 288
1 750 1 0 287
1 751 1 0 286
1 752 1 0 285
1 753 1 0 452
1 754 1 0 451
1 755 1 0 450
1 756 1 0 449
1 757 1 0 448
1 758 1 0 447
1 759 1 0 446
1 760 1 0 445
1 761 1 0 444
1 762 1 0 443
1 763 1 0 442
1 764 1 0 441
1 765 1 0 440
1 766 1 0 439
1 767 1 0 438
1 768 1 0 437
1 769 1 0 436
1 770 1 0 435
1 771 1 0 434
1 772 1 0 433
1 773 1 0 432
1 774 1 0 431
1 775 1 0 430
1 776 1 0 429
1 777 1 0 428
1 778 1 0 427
1 779 1 0 426
1 780 1 0 425
1 781 1 0 424
1 782 1 0 423
1 783 1 0 422
1 784 1 0 421
1 785 1 0 420
1 687 1 0 181
1 688 1 0 180
1 690 1 0 178
1 691 1 0 177
1 693 1 0 175
1 694 1 0 174
1 695 1 0 173
1 696 1 0 172
1 697 1 0 171
1 698 1 0 170
1 700 1 0 168
1 701 1 0 167
1 702 1 0 166
1 703 1 0 165
1 704 1 0 164
1 705 1 0 163
1 707 1 0 161
1 708 1 0 160
1 709 1 0 159
1 710 1 0 158
1 711 1 0 157
1 712 1 0 156
1 714 1 0 154
1 715 1 0 153
1 717 1 0 151
1 718 1 0 150
1 720 1 0 316
1 721 1 0 315
1 723 1 0 313
1 724 1 0 312
1 726 1 0 310
1 727 1 0 309
1 728 1 0 308
1 729 1 0 307
1 730 1 0 306
1 731 1 0 305
1 733 1 0 303
1 734 1 0 302
1 735 1 0 301
1 736 1 0 300
1 737 1 0 299
1 738 1 0 298
1 740 1 0 296
1 741 1 0 295
1 742 1 0 294
1 743 1 0 293
1 744 1 0 292
1 745 1 0 291
1 747 1 0 289
1 748 1 0 288
1 750 1 0 286
1 751 1 0 285
1 753 1 0 451
1 754 1 0 450
1 756 1 0 448
1 757 1 0 447
1 759 1 0 445
1 760 1 0 444
1 761 1 0 443
1 762 1 0 442
1 763 1 0 441
1 764 1 0 440
1 766 1 0 438
1 767 1 0 437
1 768 1 0 436
1 769 1 0 435
1 770 1 0 434
1 771 1 0 433
1 773 1 0 431
1 774 1 0 430
1 775 1 0 429
1 776 1 0 428
1 777 1 0 427
1 778 1 0 426
1 780 1 0 424
1 781 1 0 423
1 783 1 0 421
1 784 1 0 420
1 687 1 0 180
1 690 1 0 177
1 693 1 0 174
1 694 1 0 173
1 695 1 0 172
1 696 1 0 171
1 697 1 0 170
1 700 1 0 167
1 701 1 0 166
1 702 1 0 165
1 703 1 0 164
1 704 1 0 163
1 707 1 0 160
1 708 1 0 159
1 709 1 0 158
1 710 1 0 157
1 711 1 0 156
1 714 1 0 153
1 717 1 0 150
1 720 1 0 315
1 723 1 0 312
1 726 1 0 309
1 727 1 0 308
1 728 1 0 307
1 729 1 0 306
1 730 1 0 305
1 733 1 0 302
1 734 1 0 301
1 735 1 0 300
1 736 1 0 299
1 737 1 0 298
1 740 1 0 295
1 741 1 0 294
1 742 1 0 293
1 743 1 0 292
1 744 1 0 291
1 747 1 0 288
1 750 1 0 285
1 753 1 0 450
1 756 1 0 447
1 759 1 0 444
1 760 1 0 443
1 761 1 0 442
1 762 1 0 441
1 763 1 0 440
1 766 1 0 437
1 767 1 0 436
1 768 1 0 435
1 769 1 0 434
1 770 1 0 433
1 773 1 0 430
1 774 1 0 429
1 775 1 0 428
1 776 1 0 427
1 777 1 0 426
1 780 1 0 423
1 783 1 0 420
1 687 1 0 149
1 688 1 0 148
1 689 1 0 147
1 690 1 0 146
1 691 1 0 145
1 692 1 0 144
1 693 1 0 143
1 694 1 0 142
1 695 1 0 141
1 696 1 0 140
1 697 1 0 139
1 698 1 0 138
1 699 1 0 137
1 700 1 0 136
1 701 1 0 135
1 702 1 0 134
1 703 1 0 133
1 704 1 0 132
1 705 1 0 131
1 706 1 0 130
1 707 1 0 129
1 708 1 0 128
1 709 1 0 127
1 710 1 0 126
1 711 1 0 125
1 712 1 0 124
1 713 1 0 123
1 714 1 0 122
1 715 1 0 121
1 716 1 0 120
1 717 1 0 119
1 718 1 0 118
1 719 1 0 117
1 720 1 0 284
1 721 1 0 283
1 722 1 0 282
1 723 1 0 281
1 724 1 0 280
1 725 1 0 279
1 726 1 0 278
1 727 1 0 277
1 728 1 0 276
1 729 1 0 275
1 730 1 0 274
1 731 1 0 273
1 732 1 0 272
1 733 1 0 271
1 734 1 0 270
1 735 1 0 269
1 736 1 0 268
1 737 1 0 267
1 738 1 0 266
1 739 1 0 265
1 740 1 0 264
1 741 1 0 263
1 742 1 0 262
1 743 1 0 261
1 744 1 0 260
1 745 1 0 259
1 746 1 0 258
1 747 1 0 257
1 748 1 0 256
1 749 1 0 255
1 750 1 0 254
1 751 1 0 253
1 752 1 0 252
1 753 1 0 419
1 754 1 0 418
1 755 1 0 417
1 756 1 0 416
1 757 1 0 415
1 758 1 0 414
1 759 1 0 413
1 760 1 0 412
1 761 1 0 411
1 762 1 0 410
1 763 1 0 409
1 764 1 0 408
1 765 1 0 407
1 766 1 0 406
1 767 1 0 405
1 768 1 0 404
1 769 1 0 403
1 770 1 0 402
1 771 1 0 401
1 772 1 0 400
1 773 1 0 399
1 774 1 0 398
1 775 1 0 397
1 776 1 0 396
1 777 1 0 395
1 778 1 0 394
1 779 1 0 393
1 780 1 0 392
1 781 1 0 391
1 782 1 0 390
1 783 1 0 389
1 784 1 0 388
1 785 1 0 387
1 687 1 0 146
1 688 1 0 145
1 689 1 0 144
1 700 1 0 143
1 701 1 0 142
1 702 1 0 141
1 703 1 0 140
1 704 1 0 139
1 705 1 0 138
1 706 1 0 137
1 707 1 0 136
1 708 1 0 135
1 709 1 0 134
1 710 1 0 133
1 711 1 0 132
1 712 1 0 131
1 713 1 0 130
1 690 1 0 127
1 691 1 0 126
1 692 1 0 125
1 695 1 0 122
1 696 1 0 121
1 697 1 0 120
1 714 1 0 119
1 715 1 0 118
1 716 1 0 117
1 720 1 0 281
1 721 1 0 280
1 722 1 0 279
1 733 1 0 278
1 734 1 0 277
1 735 1 0 276
1 736 1 0 275
1 737 1 0 274
1 738 1 0 273
1 739 1 0 272
1 740 1 0 271
1 741 1 0 270
1 742 1 0 269
1 743 1 0 268
1 744 1 0 267
1 745 1 0 266
1 746 1 0 265
1 723 1 0 262
1 724 1 0 261
1 725 1 0 260
1 728 1 0 257
1 729 1 0 256
1 730 1 0 255
1 747 1 0 254
1 748 1 0 253
1 749 1 0 252
1 753 1 0 416
1 754 1 0 415
1 755 1 0 414
1 766 1 0 413
1 767 1 0 412
1 768 1 0 411
1 769 1 0 410
1 770 1 0 409
1 771 1 0 408
1 772 1 0 407
1 773 1 0 406
1 774 1 0 405
1 775 1 0 404
1 776 1 0 403
1 777 1 0 402
1 778 1 0 401
1 779 1 0 400
1 756 1 0 397
1 757 1 0 396
1 758 1 0 395
1 761 1 0 392
1 762 1 0 391
1 763 1 0 390
1 780 1 0 389
1 781 1 0 388
1 782 1 0 387
1 707 1 0 143
1 708 1 0 142
1 709 1 0 141
1 710 1 0 140
1 711 1 0 139
1 712 1 0 138
1 713 1 0 137
1 690 1 0 134
1 691 1 0 133
1 692 1 0 132
1 687 1 0 127
1 688 1 0 126
1 689 1 0 125
1 702 1 0 122
1 703 1 0 121
1 704 1 0 120
1 695 1 0 119
1 696 1 0 118
1 697 1 0 117
1 740 1 0 278
1 741 1 0 277
1 742 1 0 276
1 743 1 0 275
1 744 1 0 274
1 745 1 0 273
1 746 1 0 272
1 723 1 0 269
1 724 1 0 268
1 725 1 0 267
1 720 1 0 262
1 721 1 0 261
1 722 1 0 260
1 735 1 0 257
1 736 1 0 256
1 737 1 0 255
1 728 1 0 254
1 729 1 0 253
1 730 1 0 252
1 773 1 0 413
1 774 1 0 412
1 775 1 0 411
1 776 1 0 410
1 777 1 0 409
1 778 1 0 408
1 779 1 0 407
1 756 1 0 404
1 757 1 0 403
1 758 1 0 402
1 753 1 0 397
1 754 1 0 396
1 755 1 0 395
1 768 1 0 392
1 769 1 0 391
1 770 1 0 390
1 761 1 0 389
1 762 1 0 388
1 763 1 0 387
1 687 1 0 116
1 688 1 0 115
1 689 1 0 114
1 690 1 0 113
1 691 1 0 112
1 692 1 0 111
1 693 1 0 110
1 694 1 0 109
1 695 1 0 108
1 696 1 0 107
1 697 1 0 106
1 698 1 0 105
1 699 1 0 104
1 700 1 0 103
1 701 1 0 102
1 702 1 0 101
1 703 1 0 100
1 704 1 0 99
1 705 1 0 98
1 706 1 0 97
1 707 1 0 96
1 708 1 0 95
1 709 1 0 94
1 710 1 0 93
1 711 1 0 92
1 712 1 0 91
1 713 1 0 90
1 714 1 0 89
1 715 1 0 88
1 716 1 0 87
1 717 1 0 86
1 718 1 0 85
1 719 1 0 84
1 720 1 0 251
1 721 1 0 250
1 722 1 0 249
1 723 1 0 248
1 724 1 0 247
1 725 1 0 246
1 726 1 0 245
1 727 1 0 244
1 728 1 0 243
1 729 1 0 242
1 730 1 0 241
1 731 1 0 240
1 732 1 0 239
1 733 1 0 238
1 734 1 0 237
1 735 1 0 236
1 736 1 0 235
1 737 1 0 234
1 738 1 0 233
1 739 1 0 232
1 740 1 0 231
1 741 1 0 230
1 742 1 0 229
1 743 1 0 228
1 744 1 0 227
1 745 1 0 226
1 746 1 0 225
1 747 1 0 224
1 748 1 0 223
1 749 1 0 222
1 750 1 0 221
1 751 1 0 220
1 752 1 0 219
1 753 1 0 386
1 754 1 0 385
1 755 1 0 384
1 756 1 0 383
1 757 1 0 382
1 758 1 0 381
1 759 1 0 380
1 760 1 0 379
1 761 1 0 378
1 762 1 0 377
1 763 1 0 376
1 764 1 0 375
1 765 1 0 374
1 766 1 0 373
1 767 1 0 372
1 768 1 0 371
1 769 1 0 370
1 770 1 0 369
1 771 1 0 368
1 772 1 0 367
1 773 1 0 366
1 774 1 0 365
1 775 1 0 364
1 776 1 0 363
1 777 1 0 362
1 778 1 0 361
1 779 1 0 360
1 780 1 0 359
1 781 1 0 358
1 782 1 0 357
1 783 1 0 356
1 784 1 0 355
1 785 1 0 354
1 690 1 0 116
1 691 1 0 115
1 692 1 0 114
1 709 1 0 113
1 710 1 0 112
1 711 1 0 111
1 714 1 0 108
1 715 1 0 107
1 716 1 0 106
1 693 1 0 103
1 694 1 0 102
1 695 1 0 101
1 696 1 0 100
1 697 1 0 99
1 698 1 0 98
1 699 1 0 97
1 700 1 0 96
1 701 1 0 95
1 702 1 0 94
1 703 1 0 93
1 704 1 0 92
1 705 1 0 91
1 706 1 0 90
1 717 1 0 89
1 718 1 0 88
1 719 1 0 87
1 723 1 0 251
1 724 1 0 250
1 725 1 0 249
1 742 1 0 248
1 743 1 0 247
1 744 1 0 246
1 747 1 0 243
1 748 1 0 242
1 749 1 0 241
1 726 1 0 238
1 727 1 0 237
1 728 1 0 236
1 729 1 0 235
1 730 1 0 234
1 731 1 0 233
1 732 1 0 232
1 733 1 0 231
1 734 1 0 230
1 735 1 0 229
1 736 1 0 228
1 737 1 0 227
1 738 1 0 226
1 739 1 0 225
1 750 1 0 224
1 751 1 0 223
1 752 1 0 222
1 756 1 0 386
1 757 1 0 385
1 758 1 0 384
1 775 1 0 383
1 776 1 0 382
1 777 1 0 381
1 780 1 0 378
1 781 1 0 377
1 782 1 0 376
1 759 1 0 373
1 760 1 0 372
1 761 1 0 371
1 762 1 0 370
1 763 1 0 369
1 764 1 0 368
1 765 1 0 367
1 766 1 0 366
1 767 1 0 365
1 768 1 0 364
1 769 1 0 363
1 770 1 0 362
1 771 1 0 361
1 772 1 0 360
1 783 1 0 359
1 784 1 0 358
1 785 1 0 357
1 709 1 0 116
1 710 1 0 115
1 711 1 0 114
1 702 1 0 113
1 703 1 0 112
1 704 1 0 111
1 717 1 0 108
1 718 1 0 107
1 719 1 0 106
1 714 1 0 101
1 715 1 0 100
1 716 1 0 99
1 693 1 0 96
1 694 1 0 95
1 695 1 0 94
1 696 1 0 93
1 697 1 0 92
1 698 1 0 91
1 699 1 0 90
1 742 1 0 251
1 743 1 0 250
1 744 1 0 249
1 735 1 0 248
1 736 1 0 247
1 737 1 0 246
1 750 1 0 243
1 751 1 0 242
1 752 1 0 241
1 747 1 0 236
1 748 1 0 235
1 749 1 0 234
1 726 1 0 231
1 727 1 0 230
1 728 1 0 229
1 729 1 0 228
1 730 1 0 227
1 731 1 0 226
1 732 1 0 225
1 775 1 0 386
1 776 1 0 385
1 777 1 0 384
1 768 1 0 383
1 769 1 0 382
1 770 1 0 381
1 783 1 0 378
1 784 1 0 377
1 785 1 0 376
1 780 1 0 371
1 781 1 0 370
1 782 1 0 369
1 759 1 0 366
1 760 1 0 365
1 761 1 0 364
1 762 1 0 363
1 763 1 0 362
1 764 1 0 361
1 765 1 0 360
1 786 0 0
1 787 0 0
1 788 0 0
1 789 0 0
1 790 0 0
1 791 0 0
1 792 0 0
1 793 0 0
1 794 0 0
1 795 0 0
1 796 0 0
1 797 0 0
1 798 0 0
1 799 0 0
1 800 0 0
1 801 0 0
1 802 0 0
1 803 0 0
1 804 0 0
1 805 0 0
1 806 0 0
1 807 0 0
1 808 0 0
1 809 0 0
1 810 0 0
1 811 0 0
1 812 0 0
1 813 0 0
1 814 0 0
1 815 0 0
1 816 0 0
1 817 0 0
1 818 0 0
1 521 2 1 719 554
1 520 2 1 718 553
1 519 2 1 717 552
1 518 2 1 716 551
1 517 2 1 715 550
1 516 2 1 714 549
1 515 2 1 713 548
1 514 2 1 712 547
1 513 2 1 711 546
1 512 2 1 710 545
1 511 2 1 709 544
1 510 2 1 708 543
1 509 2 1 707 542
1 508 2 1 706 541
1 507 2 1 705 540
1 506 2 1 704 539
1 505 2 1 703 538
1 504 2 1 702 537
1 503 2 1 701 536
1 502 2 1 700 535
1 501 2 1 699 534
1 500 2 1 698 533
1 499 2 1 697 532
1 498 2 1 696 531
1 497 2 1 695 530
1 496 2 1 694 529
1 495 2 1 693 528
1 494 2 1 692 527
1 493 2 1 691 526
1 492 2 1 690 525
1 491 2 1 689 524
1 490 2 1 688 523
1 489 2 1 687 522
1 554 2 1 752 587
1 553 2 1 751 586
1 552 2 1 750 585
1 551 2 1 749 584
1 550 2 1 748 583
1 549 2 1 747 582
1 548 2 1 746 581
1 547 2 1 745 580
1 546 2 1 744 579
1 545 2 1 743 578
1 544 2 1 742 577
1 543 2 1 741 576
1 542 2 1 740 575
1 541 2 1 739 574
1 540 2 1 738 573
1 539 2 1 737 572
1 538 2 1 736 571
1 537 2 1 735 570
1 536 2 1 734 569
1 535 2 1 733 568
1 534 2 1 732 567
1 533 2 1 731 566
1 532 2 1 730 565
1 531 2 1 729 564
1 530 2 1 728 563
1 529 2 1 727 562
1 528 2 1 726 561
1 527 2 1 725 560
1 526 2 1 724 559
1 525 2 1 723 558
1 524 2 1 722 557
1 523 2 1 721 556
1 522 2 1 720 555
1 571 1 1 769
1 682 2 1 715 684
1 681 2 1 718 683
1 676 2 1 688 678
1 675 2 1 691 677
1 654 2 1 717 664
1 653 2 1 714 663
1 652 2 1 708 662
1 651 2 1 707 661
1 650 2 1 701 660
1 649 2 1 700 659
1 648 2 1 694 658
1 647 2 1 693 657
1 646 2 1 690 656
1 645 2 1 687 655
1 606 2 1 719 625
1 605 2 1 716 624
1 604 2 1 713 623
1 603 2 1 712 622
1 602 2 1 711 621
1 601 2 1 710 620
1 600 2 1 709 619
1 599 2 1 706 618
1 598 2 1 705 617
1 597 2 1 704 616
1 596 2 1 703 615
1 595 2 1 702 614
1 594 2 1 699 613
1 593 2 1 698 612
1 592 2 1 697 611
1 591 2 1 696 610
1 590 2 1 695 609
1 589 2 1 692 608
1 588 2 1 689 607
1 684 2 1 748 686
1 683 2 1 751 685
1 678 2 1 721 680
1 677 2 1 724 679
1 664 2 1 750 674
1 663 2 1 747 673
1 662 2 1 741 672
1 661 2 1 740 671
1 660 2 1 734 670
1 659 2 1 733 669
1 658 2 1 727 668
1 657 2 1 726 667
1 656 2 1 723 666
1 655 2 1 720 665
1 625 2 1 752 644
1 624 2 1 749 643
1 623 2 1 746 642
1 622 2 1 745 641
1 621 2 1 744 640
1 620 2 1 743 639
1 619 2 1 742 638
1 618 2 1 739 637
1 617 2 1 738 636
1 616 2 1 737 635
1 615 2 1 736 634
1 614 2 1 735 633
1 613 2 1 732 632
1 612 2 1 731 631
1 611 2 1 730 630
1 610 2 1 729 629
1 609 2 1 728 628
1 608 2 1 725 627
1 607 2 1 722 626
1 642 1 1 779
1 637 1 1 772
1 632 1 1 765
1 641 1 1 778
1 636 1 1 771
1 631 1 1 764
1 626 1 1 755
1 627 1 1 758
1 640 1 1 777
1 635 1 1 770
1 630 1 1 763
1 643 1 1 782
1 644 1 1 785
1 680 1 1 754
1 679 1 1 757
1 639 1 1 776
1 629 1 1 762
1 686 1 1 781
1 685 1 1 784
1 665 1 1 753
1 666 1 1 756
1 638 1 1 775
1 633 1 1 768
1 628 1 1 761
1 673 1 1 780
1 674 1 1 783
1 672 1 1 774
1 670 1 1 767
1 668 1 1 760
1 671 1 1 773
1 669 1 1 766
1 667 1 1 759
1 819 3 0 664 683 554
1 820 3 0 663 684 551
1 821 3 0 621 622 548
1 822 3 0 620 621 547
1 823 3 0 619 620 546
1 824 3 0 662 619 545
1 825 3 0 661 662 544
1 826 3 0 616 617 541
1 827 3 0 615 616 540
1 828 3 0 614 615 539
1 829 3 0 660 614 538
1 830 3 0 659 660 537
1 831 3 0 611 612 534
1 832 3 0 610 611 533
1 833 3 0 609 610 532
1 834 3 0 658 609 531
1 835 3 0 657 658 530
1 836 3 0 656 677 527
1 837 3 0 655 678 524
1 838 3 0 674 685 587
1 839 3 0 673 686 584
1 840 3 0 640 641 581
1 841 3 0 639 640 580
1 842 3 0 638 639 579
1 843 3 0 672 638 578
1 844 3 0 671 672 577
1 845 3 0 635 636 574
1 846 3 0 634 635 573
1 847 3 0 633 634 572
1 848 3 0 670 633 571
1 849 3 0 669 670 570
1 850 3 0 630 631 567
1 851 3 0 629 630 566
1 852 3 0 628 629 565
1 853 3 0 668 628 564
1 854 3 0 667 668 563
1 855 3 0 666 679 560
1 856 3 0 665 680 557
1 857 0 0
1 858 3 0 625 683 552
1 859 3 0 624 684 549
1 860 3 0 623 622 546
1 861 3 0 622 621 545
1 862 3 0 621 620 544
1 863 3 0 620 619 543
1 864 3 0 619 662 542
1 865 3 0 618 617 539
1 866 3 0 617 616 538
1 867 3 0 616 615 537
1 868 3 0 615 614 536
1 869 3 0 614 660 535
1 870 3 0 613 612 532
1 871 3 0 612 611 531
1 872 3 0 611 610 530
1 873 3 0 610 609 529
1 874 3 0 609 658 528
1 875 3 0 608 677 525
1 876 3 0 607 678 522
1 877 3 0 644 685 585
1 878 3 0 643 686 582
1 879 3 0 642 641 579
1 880 3 0 641 640 578
1 881 3 0 640 639 577
1 882 3 0 639 638 576
1 883 3 0 638 672 575
1 884 3 0 637 636 572
1 885 3 0 636 635 571
1 886 3 0 635 634 570
1 887 3 0 634 633 569
1 888 3 0 633 670 568
1 889 3 0 632 631 565
1 890 3 0 631 630 564
1 891 3 0 630 629 563
1 892 3 0 629 628 562
1 893 3 0 628 668 561
1 894 3 0 627 679 558
1 895 3 0 626 680 555
1 896 0 0
1 897 3 0 613 618 548
1 898 3 0 612 617 547
1 899 3 0 611 616 546
1 900 3 0 610 615 545
1 901 3 0 609 614 544
1 902 3 0 658 660 543
1 903 3 0 657 659 542
1 904 3 0 624 611 539
1 905 3 0 684 610 538
1 906 3 0 663 609 537
1 907 3 0 625 624 532
1 908 3 0 683 684 531
1 909 3 0 664 663 530
1 910 3 0 616 621 527
1 911 3 0 615 620 526
1 912 3 0 614 619 525
1 913 3 0 621 608 524
1 914 3 0 620 677 523
1 915 3 0 619 656 522
1 916 3 0 632 637 581
1 917 3 0 631 636 580
1 918 3 0 630 635 579
1 919 3 0 629 634 578
1 920 3 0 628 633 577
1 921 3 0 668 670 576
1 922 3 0 667 669 575
1 923 3 0 643 630 572
1 924 3 0 686 629 571
1 925 3 0 673 628 570
1 926 3 0 644 643 565
1 927 3 0 685 686 564
1 928 3 0 674 673 563
1 929 3 0 635 640 560
1 930 3 0 634 639 559
1 931 3 0 633 638 558
1 932 3 0 640 627 557
1 933 3 0 639 679 556
1 934 3 0 638 666 555
1 935 0 0
1 936 3 0 611 624 554
1 937 3 0 610 684 553
1 938 3 0 609 663 552
1 939 3 0 616 611 551
1 940 3 0 615 610 550
1 941 3 0 614 609 549
1 942 3 0 607 608 546
1 943 3 0 678 677 545
1 944 3 0 655 656 544
1 945 3 0 608 621 539
1 946 3 0 677 620 538
1 947 3 0 656 619 537
1 948 3 0 623 618 534
1 949 3 0 622 617 533
1 950 3 0 621 616 532
1 951 3 0 620 615 531
1 952 3 0 619 614 530
1 953 3 0 662 660 529
1 954 3 0 661 659 528
1 955 3 0 630 643 587
1 956 3 0 629 686 586
1 957 3 0 628 673 585
1 958 3 0 635 630 584
1 959 3 0 634 629 583
1 960 3 0 633 628 582
1 961 3 0 626 627 579
1 962 3 0 680 679 578
1 963 3 0 665 666 577
1 964 3 0 627 640 572
1 965 3 0 679 639 571
1 966 3 0 666 638 570
1 967 3 0 642 637 567
1 968 3 0 641 636 566
1 969 3 0 640 635 565
1 970 3 0 639 634 564
1 971 3 0 638 633 563
1 972 3 0 672 670 562
1 973 3 0 671 669 561
1 974 0 0
1 1 2 1 944 116
1 1 2 1 943 115
1 1 2 1 942 114
1 1 2 1 947 113
1 1 2 1 946 112
1 1 2 1 945 111
1 1 1 0 110
1 1 1 0 109
1 1 2 1 938 108
1 1 2 1 937 107
1 1 2 1 936 106
1 1 1 0 105
1 1 1 0 104
1 1 1 0 103
1 1 1 0 102
1 1 2 1 941 101
1 1 2 1 940 100
1 1 2 1 939 99
1 1 1 0 98
1 1 1 0 97
1 1 2 1 954 96
1 1 2 1 953 95
1 1 2 1 952 94
1 1 2 1 951 93
1 1 2 1 950 92
1 1 2 1 949 91
1 1 2 1 948 90
1 1 1 0 89
1 1 1 0 88
1 1 1 0 87
1 1 1 0 86
1 1 1 0 85
1 1 1 0 84
1 1 1 0 149
1 1 1 0 148
1 1 1 0 147
1 1 1 0 146
1 1 1 0 145
1 1 1 0 144
1 1 2 1 903 143
1 1 2 1 902 142
1 1 2 1 901 141
1 1 2 1 900 140
1 1 2 1 899 139
1 1 2 1 898 138
1 1 2 1 897 137
1 1 1 0 136
1 1 1 0 135
1 1 2 1 912 134
1 1 2 1 911 133
1 1 2 1 910 132
1 1 1 0 131
1 1 1 0 130
1 1 1 0 129
1 1 1 0 128
1 1 2 1 915 127
1 1 2 1 914 126
1 1 2 1 913 125
1 1 1 0 124
1 1 1 0 123
1 1 2 1 906 122
1 1 2 1 905 121
1 1 2 1 904 120
1 1 2 1 909 119
1 1 2 1 908 118
1 1 2 1 907 117
1 1 1 0 182
1 1 1 0 181
1 1 2 1 876 180
1 1 1 0 179
1 1 1 0 178
1 1 2 1 875 177
1 1 1 0 176
1 1 1 0 175
1 1 2 1 874 174
1 1 2 1 873 173
1 1 2 1 872 172
1 1 2 1 871 171
1 1 2 1 870 170
1 1 1 0 169
1 1 1 0 168
1 1 2 1 869 167
1 1 2 1 868 166
1 1 2 1 867 165
1 1 2 1 866 164
1 1 2 1 865 163
1 1 1 0 162
1 1 1 0 161
1 1 2 1 864 160
1 1 2 1 863 159
1 1 2 1 862 158
1 1 2 1 861 157
1 1 2 1 860 156
1 1 1 0 155
1 1 1 0 154
1 1 2 1 859 153
1 1 1 0 152
1 1 1 0 151
1 1 2 1 858 150
1 1 2 1 837 215
1 1 1 0 214
1 1 1 0 213
1 1 2 1 836 212
1 1 1 0 211
1 1 1 0 210
1 1 2 1 835 209
1 1 2 1 834 208
1 1 2 1 833 207
1 1 2 1 832 206
1 1 2 1 831 205
1 1 1 0 204
1 1 1 0 203
1 1 2 1 830 202
1 1 2 1 829 201
1 1 2 1 828 200
1 1 2 1 827 199
1 1 2 1 826 198
1 1 1 0 197
1 1 1 0 196
1 1 2 1 825 195
1 1 2 1 824 194
1 1 2 1 823 193
1 1 2 1 822 192
1 1 2 1 821 191
1 1 1 0 190
1 1 1 0 189
1 1 2 1 820 188
1 1 1 0 187
1 1 1 0 186
1 1 2 1 819 185
1 1 1 0 184
1 1 1 0 183
1 1 2 1 963 251
1 1 2 1 962 250
1 1 2 1 961 249
1 1 2 1 966 248
1 1 2 1 965 247
1 1 2 1 964 246
1 1 1 0 245
1 1 1 0 244
1 1 2 1 957 243
1 1 2 1 956 242
1 1 2 1 955 241
1 1 1 0 240
1 1 1 0 239
1 1 1 0 238
1 1 1 0 237
1 1 2 1 960 236
1 1 2 1 959 235
1 1 2 1 958 234
1 1 1 0 233
1 1 1 0 232
1 1 2 1 973 231
1 1 2 1 972 230
1 1 2 1 971 229
1 1 2 1 970 228
1 1 2 1 969 227
1 1 2 1 968 226
1 1 2 1 967 225
1 1 1 0 224
1 1 1 0 223
1 1 1 0 222
1 1 1 0 221
1 1 1 0 220
1 1 1 0 219
1 1 1 0 284
1 1 1 0 283
1 1 1 0 282
1 1 1 0 281
1 1 1 0 280
1 1 1 0 279
1 1 2 1 922 278
1 1 2 1 921 277
1 1 2 1 920 276
1 1 2 1 919 275
1 1 2 1 918 274
1 1 2 1 917 273
1 1 2 1 916 272
1 1 1 0 271
1 1 1 0 270
1 1 2 1 931 269
1 1 2 1 930 268
1 1 2 1 929 267
1 1 1 0 266
1 1 1 0 265
1 1 1 0 264
1 1 1 0 263
1 1 2 1 934 262
1 1 2 1 933 261
1 1 2 1 932 260
1 1 1 0 259
1 1 1 0 258
1 1 2 1 925 257
1 1 2 1 924 256
1 1 2 1 923 255
1 1 2 1 928 254
1 1 2 1 927 253
1 1 2 1 926 252
1 1 1 0 317
1 1 1 0 316
1 1 2 1 895 315
1 1 1 0 314
1 1 1 0 313
1 1 2 1 894 312
1 1 1 0 311
1 1 1 0 310
1 1 2 1 893 309
1 1 2 1 892 308
1 1 2 1 891 307
1 1 2 1 890 306
1 1 2 1 889 305
1 1 1 0 304
1 1 1 0 303
1 1 2 1 888 302
1 1 2 1 887 301
1 1 2 1 886 300
1 1 2 1 885 299
1 1 2 1 884 298
1 1 1 0 297
1 1 1 0 296
1 1 2 1 883 295
1 1 2 1 882 294
1 1 2 1 881 293
1 1 2 1 880 292
1 1 2 1 879 291
1 1 1 0 290
1 1 1 0 289
1 1 2 1 878 288
1 1 1 0 287
1 1 1 0 286
1 1 2 1 877 285
1 1 2 1 856 350
1 1 1 0 349
1 1 1 0 348
1 1 2 1 855 347
1 1 1 0 346
1 1 1 0 345
1 1 2 1 854 344
1 1 2 1 853 343
1 1 2 1 852 342
1 1 2 1 851 341
1 1 2 1 850 340
1 1 1 0 339
1 1 1 0 338
1 1 2 1 849 337
1 1 2 1 848 336
1 1 2 1 847 335
1 1 2 1 846 334
1 1 2 1 845 333
1 1 1 0 332
1 1 1 0 331
1 1 2 1 844 330
1 1 2 1 843 329
1 1 2 1 842 328
1 1 2 1 841 327
1 1 2 1 840 326
1 1 1 0 325
1 1 1 0 324
1 1 2 1 839 323
1 1 1 0 322
1 1 1 0 321
1 1 2 1 838 320
1 1 1 0 319
1 1 1 0 318
1 1 1 0 386
1 1 1 0 385
1 1 1 0 384
1 1 1 0 383
1 1 1 0 381
1 1 1 0 380
1 1 1 0 379
1 1 1 0 378
1 1 1 0 377
1 1 1 0 376
1 1 1 0 375
1 1 1 0 374
1 1 1 0 373
1 1 1 0 372
1 1 1 0 371
1 1 1 0 370
1 1 1 0 369
1 1 1 0 368
1 1 1 0 367
1 1 1 0 366
1 1 1 0 365
1 1 1 0 364
1 1 1 0 363
1 1 1 0 362
1 1 1 0 361
1 1 1 0 360
1 1 1 0 359
1 1 1 0 358
1 1 1 0 357
1 1 1 0 356
1 1 1 0 355
1 1 1 0 354
1 1 1 0 419
1 1 1 0 418
1 1 1 0 417
1 1 1 0 416
1 1 1 0 415
1 1 1 0 414
1 1 1 0 413
1 1 1 0 412
1 1 1 0 411
1 1 1 0 410
1 1 1 0 409
1 1 1 0 408
1 1 1 0 407
1 1 1 0 406
1 1 1 0 405
1 1 1 0 404
1 1 1 0 403
1 1 1 0 402
1 1 1 0 401
1 1 1 0 400
1 1 1 0 399
1 1 1 0 398
1 1 1 0 397
1 1 1 0 396
1 1 1 0 395
1 1 1 0 394
1 1 1 0 393
1 1 1 0 392
1 1 1 0 390
1 1 1 0 389
1 1 1 0 388
1 1 1 0 387
1 1 1 0 452
1 1 1 0 451
1 1 1 0 450
1 1 1 0 449
1 1 1 0 448
1 1 1 0 447
1 1 1 0 446
1 1 1 0 445
1 1 1 0 444
1 1 1 0 443
1 1 1 0 442
1 1 1 0 441
1 1 1 0 440
1 1 1 0 439
1 1 1 0 438
1 1 1 0 437
1 1 1 0 436
1 1 1 0 435
1 1 1 0 433
1 1 1 0 432
1 1 1 0 431
1 1 1 0 430
1 1 1 0 429
1 1 1 0 428
1 1 1 0 427
1 1 1 0 426
1 1 1 0 425
1 1 1 0 424
1 1 1 0 423
1 1 1 0 422
1 1 1 0 421
1 1 1 0 420
1 1 1 0 485
1 1 1 0 484
1 1 1 0 483
1 1 1 0 482
1 1 1 0 481
1 1 1 0 480
1 1 1 0 479
1 1 1 0 478
1 1 1 0 477
1 1 1 0 476
1 1 1 0 475
1 1 1 0 474
1 1 1 0 473
1 1 1 0 472
1 1 1 0 470
1 1 1 0 469
1 1 1 0 468
1 1 1 0 467
1 1 1 0 466
1 1 1 0 465
1 1 1 0 464
1 1 1 0 463
1 1 1 0 462
1 1 1 0 461
1 1 1 0 460
1 1 1 0 459
1 1 1 0 458
1 1 1 0 457
1 1 1 0 456
1 1 1 0 455
1 1 1 0 454
1 1 1 0 453
0
2 range(1)
42 range(2)
43 range(3)
44 range(4)
45 range(5)
46 range(6)
47 range(7)
84 move(3,right,7,3)
85 move(3,right,7,4)
86 move(3,right,7,5)
87 move(3,right,6,3)
88 move(3,right,6,4)
89 move(3,right,6,5)
90 move(3,right,3,1)
91 move(3,right,3,2)
92 move(3,right,3,3)
93 move(3,right,3,4)
94 move(3,right,3,5)
95 move(3,right,3,6)
96 move(3,right,3,7)
97 move(3,right,4,1)
98 move(3,right,4,2)
99 move(3,right,4,3)
100 move(3,right,4,4)
101 move(3,right,4,5)
102 move(3,right,4,6)
103 move(3,right,4,7)
104 move(3,right,5,1)
105 move(3,right,5,2)
106 move(3,right,5,3)
107 move(3,right,5,4)
108 move(3,right,5,5)
109 move(3,right,5,6)
110 move(3,right,5,7)
111 move(3,right,2,3)
112 move(3,right,2,4)
113 move(3,right,2,5)
114 move(3,right,1,3)
115 move(3,right,1,4)
116 move(3,right,1,5)
117 move(3,left,7,3)
118 move(3,left,7,4)
119 move(3,left,7,5)
120 move(3,left,6,3)
121 move(3,left,6,4)
122 move(3,left,6,5)
123 move(3,left,3,1)
124 move(3,left,3,2)
125 move(3,left,3,3)
126 move(3,left,3,4)
127 move(3,left,3,5)
128 move(3,left,3,6)
129 move(3,left,3,7)
130 move(3,left,4,1)
131 move(3,left,4,2)
132 move(3,left,4,3)
133 move(3,left,4,4)
134 move(3,left,4,5)
135 move(3,left,4,6)
136 move(3,left,4,7)
137 move(3,left,5,1)
138 move(3,left,5,2)
139 move(3,left,5,3)
140 move(3,left,5,4)
141 move(3,left,5,5)
142 move(3,left,5,6)
143 move(3,left,5,7)
144 move(3,left,2,3)
145 move(3,left,2,4)
146 move(3,left,2,5)
147 move(3,left,1,3)
148 move(3,left,1,4)
149 move(3,left,1,5)
150 move(3,down,7,3)
151 move(3,down,7,4)
152 move(3,down,7,5)
153 move(3,down,6,3)
154 move(3,down,6,4)
155 move(3,down,6,5)
156 move(3,down,3,1)
157 move(3,down,3,2)
158 move(3,down,3,3)
159 move(3,down,3,4)
160 move(3,down,3,5)
161 move(3,down,3,6)
162 move(3,down,3,7)
163 move(3,down,4,1)
164 move(3,down,4,2)
165 move(3,down,4,3)
166 move(3,down,4,4)
167 move(3,down,4,5)
168 move(3,down,4,6)
169 move(3,down,4,7)
170 move(3,down,5,1)
171 move(3,down,5,2)
172 move(3,down,5,3)
173 move(3,down,5,4)
174 move(3,down,5,5)
175 move(3,down,5,6)
176 move(3,down,5,7)
177 move(3,down,2,3)
178 move(3,down,2,4)
179 move(3,down,2,5)
180 move(3,down,1,3)
181 move(3,down,1,4)
182 move(3,down,1,5)
183 move(3,up,7,3)
184 move(3,up,7,4)
185 move(3,up,7,5)
186 move(3,up,6,3)
187 move(3,up,6,4)
188 move(3,up,6,5)
189 move(3,up,3,1)
190 move(3,up,3,2)
191 move(3,up,3,3)
192 move(3,up,3,4)
193 move(3,up,3,5)
194 move(3,up,3,6)
195 move(3,up,3,7)
196 move(3,up,4,1)
197 move(3,up,4,2)
198 move(3,up,4,3)
199 move(3,up,4,4)
200 move(3,up,4,5)
201 move(3,up,4,6)
202 move(3,up,4,7)
203 move(3,up,5,1)
204 move(3,up,5,2)
205 move(3,up,5,3)
206 move(3,up,5,4)
207 move(3,up,5,5)
208 move(3,up,5,6)
209 move(3,up,5,7)
210 move(3,up,2,3)
211 move(3,up,2,4)
212 move(3,up,2,5)
213 move(3,up,1,3)
214 move(3,up,1,4)
215 move(3,up,1,5)
219 move(2,right,7,3)
220 move(2,right,7,4)
221 move(2,right,7,5)
222 move(2,right,6,3)
223 move(2,right,6,4)
224 move(2,right,6,5)
225 move(2,right,3,1)
226 move(2,right,3,2)
227 move(2,right,3,3)
228 move(2,right,3,4)
229 move(2,right,3,5)
230 move(2,right,3,6)
231 move(2,right,3,7)
232 move(2,right,4,1)
233 move(2,right,4,2)
234 move(2,right,4,3)
235 move(2,right,4,4)
236 move(2,right,4,5)
237 move(2,right,4,6)
238 move(2,right,4,7)
239 move(2,right,5,1)
240 move(2,right,5,2)
241 move(2,right,5,3)
242 move(2,right,5,4)
243 move(2,right,5,5)
244 move(2,right,5,6)
245 move(2,right,5,7)
246 move(2,right,2,3)
247 move(2,right,2,4)
248 move(2,right,2,5)
249 move(2,right,1,3)
250 move(2,right,1,4)
251 move(2,right,1,5)
252 move(2,left,7,3)
253 move(2,left,7,4)
254 move(2,left,7,5)
255 move(2,left,6,3)
256 move(2,left,6,4)
257 move(2,left,6,5)
258 move(2,left,3,1)
259 move(2,left,3,2)
260 move(2,left,3,3)
261 move(2,left,3,4)
262 move(2,left,3,5)
263 move(2,left,3,6)
264 move(2,left,3,7)
265 move(2,left,4,1)
266 move(2,left,4,2)
267 move(2,left,4,3)
268 move(2,left,4,4)
269 move(2,left,4,5)
270 move(2,left,4,6)
271 move(2,left,4,7)
272 move(2,left,5,1)
273 move(2,left,5,2)
274 move(2,left,5,3)
275 move(2,left,5,4)
276 move(2,left,5,5)
277 move(2,left,5,6)
278 move(2,left,5,7)
279 move(2,left,2,3)
280 move(2,left,2,4)
281 move(2,left,2,5)
282 move(2,left,1,3)
283 move(2,left,1,4)
284 move(2,left,1,5)
285 move(2,down,7,3)
286 move(2,down,7,4)
287 move(2,down,7,5)
288 move(2,down,6,3)
289 move(2,down,6,4)
290 move(2,down,6,5)
291 move(2,down,3,1)
292 move(2,down,3,2)
293 move(2,down,3,3)
294 move(2,down,3,4)
295 move(2,down,3,5)
296 move(2,down,3,6)
297 move(2,down,3,7)
298 move(2,down,4,1)
299 move(2,down,4,2)
300 move(2,down,4,3)
301 move(2,down,4,4)
302 move(2,down,4,5)
303 move(2,down,4,6)
304 move(2,down,4,7)
305 move(2,down,5,1)
306 move(2,down,5,2)
307 move(2,down,5,3)
308 move(2,down,5,4)
309 move(2,down,5,5)
310 move(2,down,5,6)
311 move(2,down,5,7)
312 move(2,down,2,3)
313 move(2,down,2,4)
314 move(2,down,2,5)
315 move(2,down,1,3)
316 move(2,down,1,4)
317 move(2,down,1,5)
318 move(2,up,7,3)
319 move(2,up,7,4)
320 move(2,up,7,5)
321 move(2,up,6,3)
322 move(2,up,6,4)
323 move(2,up,6,5)
324 move(2,up,3,1)
325 move(2,up,3,2)
326 move(2,up,3,3)
327 move(2,up,3,4)
328 move(2,up,3,5)
329 move(2,up,3,6)
330 move(2,up,3,7)
331 move(2,up,4,1)
332 move(2,up,4,2)
333 move(2,up,4,3)
334 move(2,up,4,4)
335 move(2,up,4,5)
336 move(2,up,4,6)
337 move(2,up,4,7)
338 move(2,up,5,1)
339 move(2,up,5,2)
340 move(2,up,5,3)
341 move(2,up,5,4)
342 move(2,up,5,5)
343 move(2,up,5,6)
344 move(2,up,5,7)
345 move(2,up,2,3)
346 move(2,up,2,4)
347 move(2,up,2,5)
348 move(2,up,1,3)
349 move(2,up,1,4)
350 move(2,up,1,5)
354 move(1,right,7,3)
355 move(1,right,7,4)
356 move(1,right,7,5)
357 move(1,right,6,3)
358 move(1,right,6,4)
359 move(1,right,6,5)
360 move(1,right,3,1)
361 move(1,right,3,2)
362 move(1,right,3,3)
363 move(1,right,3,4)
364 move(1,right,3,5)
365 move(1,right,3,6)
366 move(1,right,3,7)
367 move(1,right,4,1)
368 move(1,right,4,2)
369 move(1,right,4,3)
370 move(1,right,4,4)
371 move(1,right,4,5)
372 move(1,right,4,6)
373 move(1,right,4,7)
374 move(1,right,5,1)
375 move(1,right,5,2)
376 move(1,right,5,3)
377 move(1,right,5,4)
378 move(1,right,5,5)
379 move(1,right,5,6)
380 move(1,right,5,7)
381 move(1,right,2,3)
382 move(1,right,2,4)
383 move(1,right,2,5)
384 move(1,right,1,3)
385 move(1,right,1,4)
386 move(1,right,1,5)
387 move(1,left,7,3)
388 move(1,left,7,4)
389 move(1,left,7,5)
390 move(1,left,6,3)
391 move(1,left,6,4)
392 move(1,left,6,5)
393 move(1,left,3,1)
394 move(1,left,3,2)
395 move(1,left,3,3)
396 move(1,left,3,4)
397 move(1,left,3,5)
398 move(1,left,3,6)
399 move(1,left,3,7)
400 move(1,left,4,1)
401 move(1,left,4,2)
402 move(1,left,4,3)
403 move(1,left,4,4)
404 move(1,left,4,5)
405 move(1,left,4,6)
406 move(1,left,4,7)
407 move(1,left,5,1)
408 move(1,left,5,2)
409 move(1,left,5,3)
410 move(1,left,5,4)
411 move(1,left,5,5)
412 move(1,left,5,6)
413 move(1,left,5,7)
414 move(1,left,2,3)
415 move(1,left,2,4)
416 move(1,left,2,5)
417 move(1,left,1,3)
418 move(1,left,1,4)
419 move(1,left,1,5)
420 move(1,down,7,3)
421 move(1,down,7,4)
422 move(1,down,7,5)
423 move(1,down,6,3)
424 move(1,down,6,4)
425 move(1,down,6,5)
426 move(1,down,3,1)
427 move(1,down,3,2)
428 move(1,down,3,3)
429 move(1,down,3,4)
430 move(1,down,3,5)
431 move(1,down,3,6)
432 move(1,down,3,7)
433 move(1,down,4,1)
434 move(1,down,4,2)
435 move(1,down,4,3)
436 move(1,down,4,4)
437 move(1,down,4,5)
438 move(1,down,4,6)
439 move(1,down,4,7)
440 move(1,down,5,1)
441 move(1,down,5,2)
442 move(1,down,5,3)
443 move(1,down,5,4)
444 move(1,down,5,5)
445 move(1,down,5,6)
446 move(1,down,5,7)
447 move(1,down,2,3)
448 move(1,down,2,4)
449 move(1,down,2,5)
450 move(1,down,1,3)
451 move(1,down,1,4)
452 move(1,down,1,5)
453 move(1,up,7,3)
454 move(1,up,7,4)
455 move(1,up,7,5)
456 move(1,up,6,3)
457 move(1,up,6,4)
458 move(1,up,6,5)
459 move(1,up,3,1)
460 move(1,up,3,2)
461 move(1,up,3,3)
462 move(1,up,3,4)
463 move(1,up,3,5)
464 move(1,up,3,6)
465 move(1,up,3,7)
466 move(1,up,4,1)
467 move(1,up,4,2)
468 move(1,up,4,3)
469 move(1,up,4,4)
470 move(1,up,4,5)
471 move(1,up,4,6)
472 move(1,up,4,7)
473 move(1,up,5,1)
474 move(1,up,5,2)
475 move(1,up,5,3)
476 move(1,up,5,4)
477 move(1,up,5,5)
478 move(1,up,5,6)
479 move(1,up,5,7)
480 move(1,up,2,3)
481 move(1,up,2,4)
482 move(1,up,2,5)
483 move(1,up,1,3)
484 move(1,up,1,4)
485 move(1,up,1,5)
3 direction(up)
4 direction(down)
5 direction(left)
6 direction(right)
9 full(3,1)
10 full(4,1)
11 full(5,1)
12 full(3,2)
13 full(4,2)
14 full(5,2)
15 full(1,3)
16 full(2,3)
17 full(3,3)
18 full(4,3)
19 full(5,3)
20 full(6,3)
21 full(7,3)
22 full(1,4)
23 full(2,4)
24 full(3,4)
26 full(5,4)
27 full(6,4)
28 full(7,4)
29 full(1,5)
30 full(2,5)
31 full(3,5)
32 full(4,5)
33 full(5,5)
34 full(6,5)
35 full(7,5)
36 full(3,6)
37 full(4,6)
38 full(5,6)
39 full(3,7)
40 full(4,7)
41 full(5,7)
489 state(4,empty,1,5)
490 state(4,empty,1,4)
491 state(4,empty,1,3)
492 state(4,empty,2,5)
493 state(4,empty,2,4)
494 state(4,empty,2,3)
495 state(4,empty,5,7)
496 state(4,empty,5,6)
497 state(4,empty,5,5)
498 state(4,empty,5,4)
499 state(4,empty,5,3)
500 state(4,empty,5,2)
501 state(4,empty,5,1)
502 state(4,empty,4,7)
503 state(4,empty,4,6)
504 state(4,empty,4,5)
505 state(4,empty,4,4)
506 state(4,empty,4,3)
507 state(4,empty,4,2)
508 state(4,empty,4,1)
509 state(4,empty,3,7)
510 state(4,empty,3,6)
511 state(4,empty,3,5)
512 state(4,empty,3,4)
513 state(4,empty,3,3)
514 state(4,empty,3,2)
515 state(4,empty,3,1)
516 state(4,empty,6,5)
517 state(4,empty,6,4)
518 state(4,empty,6,3)
519 state(4,empty,7,5)
520 state(4,empty,7,4)
521 state(4,empty,7,3)
522 state(3,empty,1,5)
523 state(3,empty,1,4)
524 state(3,empty,1,3)
525 state(3,empty,2,5)
526 state(3,empty,2,4)
527 state(3,empty,2,3)
528 state(3,empty,5,7)
529 state(3,empty,5,6)
530 state(3,empty,5,5)
531 state(3,empty,5,4)
532 state(3,empty,5,3)
533 state(3,empty,5,2)
534 state(3,empty,5,1)
535 state(3,empty,4,7)
536 state(3,empty,4,6)
537 state(3,empty,4,5)
538 state(3,empty,4,4)
539 state(3,empty,4,3)
540 state(3,empty,4,2)
541 state(3,empty,4,1)
542 state(3,empty,3,7)
543 state(3,empty,3,6)
544 state(3,empty,3,5)
545 state(3,empty,3,4)
546 state(3,empty,3,3)
547 state(3,empty,3,2)
548 state(3,empty,3,1)
549 state(3,empty,6,5)
550 state(3,empty,6,4)
551 state(3,empty,6,3)
552 state(3,empty,7,5)
553 state(3,empty,7,4)
554 state(3,empty,7,3)
555 state(2,empty,1,5)
556 state(2,empty,1,4)
557 state(2,empty,1,3)
558 state(2,empty,2,5)
559 state(2,empty,2,4)
560 state(2,empty,2,3)
561 state(2,empty,5,7)
562 state(2,empty,5,6)
563 state(2,empty,5,5)
564 state(2,empty,5,4)
565 state(2,empty,5,3)
566 state(2,empty,5,2)
567 state(2,empty,5,1)
568 state(2,empty,4,7)
569 state(2,empty,4,6)
570 state(2,empty,4,5)
571 state(2,empty,4,4)
572 state(2,empty,4,3)
573 state(2,empty,4,2)
574 state(2,empty,4,1)
575 state(2,empty,3,7)
576 state(2,empty,3,6)
577 state(2,empty,3,5)
578 state(2,empty,3,4)
579 state(2,empty,3,3)
580 state(2,empty,3,2)
581 state(2,empty,3,1)
582 state(2,empty,6,5)
583 state(2,empty,6,4)
584 state(2,empty,6,3)
585 state(2,empty,7,5)
586 state(2,empty,7,4)
587 state(2,empty,7,3)
588 state(4,full,1,3)
589 state(4,full,2,3)
590 state(4,full,5,5)
591 state(4,full,5,4)
592 state(4,full,5,3)
593 state(4,full,5,2)
594 state(4,full,5,1)
595 state(4,full,4,5)
596 state(4,full,4,4)
597 state(4,full,4,3)
598 state(4,full,4,2)
599 state(4,full,4,1)
600 state(4,full,3,5)
601 state(4,full,3,4)
602 state(4,full,3,3)
603 state(4,full,3,2)
604 state(4,full,3,1)
605 state(4,full,6,3)
606 state(4,full,7,3)
607 state(3,full,1,3)
608 state(3,full,2,3)
609 state(3,full,5,5)
610 state(3,full,5,4)
611 state(3,full,5,3)
612 state(3,full,5,2)
613 state(3,full,5,1)
614 state(3,full,4,5)
615 state(3,full,4,4)
616 state(3,full,4,3)
617 state(3,full,4,2)
618 state(3,full,4,1)
619 state(3,full,3,5)
620 state(3,full,3,4)
621 state(3,full,3,3)
622 state(3,full,3,2)
623 state(3,full,3,1)
624 state(3,full,6,3)
625 state(3,full,7,3)
626 state(2,full,1,3)
627 state(2,full,2,3)
628 state(2,full,5,5)
629 state(2,full,5,4)
630 state(2,full,5,3)
631 state(2,full,5,2)
632 state(2,full,5,1)
633 state(2,full,4,5)
634 state(2,full,4,4)
635 state(2,full,4,3)
636 state(2,full,4,2)
637 state(2,full,4,1)
638 state(2,full,3,5)
639 state(2,full,3,4)
640 state(2,full,3,3)
641 state(2,full,3,2)
642 state(2,full,3,1)
643 state(2,full,6,3)
644 state(2,full,7,3)
645 state(4,full,1,5)
646 state(4,full,2,5)
647 state(4,full,5,7)
648 state(4,full,5,6)
649 state(4,full,4,7)
650 state(4,full,4,6)
651 state(4,full,3,7)
652 state(4,full,3,6)
653 state(4,full,6,5)
654 state(4,full,7,5)
655 state(3,full,1,5)
656 state(3,full,2,5)
657 state(3,full,5,7)
658 state(3,full,5,6)
659 state(3,full,4,7)
660 state(3,full,4,6)
661 state(3,full,3,7)
662 state(3,full,3,6)
663 state(3,full,6,5)
664 state(3,full,7,5)
665 state(2,full,1,5)
666 state(2,full,2,5)
667 state(2,full,5,7)
668 state(2,full,5,6)
669 state(2,full,4,7)
670 state(2,full,4,6)
671 state(2,full,3,7)
672 state(2,full,3,6)
673 state(2,full,6,5)
674 state(2,full,7,5)
675 state(4,full,2,4)
676 state(4,full,1,4)
677 state(3,full,2,4)
678 state(3,full,1,4)
679 state(2,full,2,4)
680 state(2,full,1,4)
681 state(4,full,7,4)
682 state(4,full,6,4)
683 state(3,full,7,4)
684 state(3,full,6,4)
685 state(2,full,7,4)
686 state(2,full,6,4)
786 state(1,full,5,7)
787 state(1,full,4,7)
788 state(1,full,3,7)
789 state(1,full,5,6)
790 state(1,full,4,6)
791 state(1,full,3,6)
792 state(1,full,7,5)
793 state(1,full,6,5)
794 state(1,full,5,5)
795 state(1,full,4,5)
796 state(1,full,3,5)
797 state(1,full,2,5)
798 state(1,full,1,5)
799 state(1,full,7,4)
800 state(1,full,6,4)
801 state(1,full,5,4)
802 state(1,full,3,4)
803 state(1,full,2,4)
804 state(1,full,1,4)
805 state(1,full,7,3)
806 state(1,full,6,3)
807 state(1,full,5,3)
808 state(1,full,4,3)
809 state(1,full,3,3)
810 state(1,full,2,3)
811 state(1,full,1,3)
812 state(1,full,5,2)
813 state(1,full,4,2)
814 state(1,full,3,2)
815 state(1,full,5,1)
816 state(1,full,4,1)
817 state(1,full,3,1)
818 state(1,empty,4,4)
7 status(full)
8 status(empty)
687 changed(4,1,5)
688 changed(4,1,4)
689 changed(4,1,3)
690 changed(4,2,5)
691 changed(4,2,4)
692 changed(4,2,3)
693 changed(4,5,7)
694 changed(4,5,6)
695 changed(4,5,5)
696 changed(4,5,4)
697 changed(4,5,3)
698 changed(4,5,2)
699 changed(4,5,1)
700 changed(4,4,7)
701 changed(4,4,6)
702 changed(4,4,5)
703 changed(4,4,4)
704 changed(4,4,3)
705 changed(4,4,2)
706 changed(4,4,1)
707 changed(4,3,7)
708 changed(4,3,6)
709 changed(4,3,5)
710 changed(4,3,4)
711 changed(4,3,3)
712 changed(4,3,2)
713 changed(4,3,1)
714 changed(4,6,5)
715 changed(4,6,4)
716 changed(4,6,3)
717 changed(4,7,5)
718 changed(4,7,4)
719 changed(4,7,3)
720 changed(3,1,5)
721 changed(3,1,4)
722 changed(3,1,3)
723 changed(3,2,5)
724 changed(3,2,4)
725 changed(3,2,3)
726 changed(3,5,7)
727 changed(3,5,6)
728 changed(3,5,5)
729 changed(3,5,4)
730 changed(3,5,3)
731 changed(3,5,2)
732 changed(3,5,1)
733 changed(3,4,7)
734 changed(3,4,6)
735 changed(3,4,5)
736 changed(3,4,4)
737 changed(3,4,3)
738 changed(3,4,2)
739 changed(3,4,1)
740 changed(3,3,7)
741 changed(3,3,6)
742 changed(3,3,5)
743 changed(3,3,4)
744 changed(3,3,3)
745 changed(3,3,2)
746 changed(3,3,1)
747 changed(3,6,5)
748 changed(3,6,4)
749 changed(3,6,3)
750 changed(3,7,5)
751 changed(3,7,4)
752 changed(3,7,3)
753 changed(2,1,5)
754 changed(2,1,4)
755 changed(2,1,3)
756 changed(2,2,5)
757 changed(2,2,4)
758 changed(2,2,3)
759 changed(2,5,7)
760 changed(2,5,6)
761 changed(2,5,5)
762 changed(2,5,4)
763 changed(2,5,3)
764 changed(2,5,2)
765 changed(2,5,1)
766 changed(2,4,7)
767 changed(2,4,6)
768 changed(2,4,5)
769 changed(2,4,4)
770 changed(2,4,3)
771 changed(2,4,2)
772 changed(2,4,1)
773 changed(2,3,7)
774 changed(2,3,6)
775 changed(2,3,5)
776 changed(2,3,4)
777 changed(2,3,3)
778 changed(2,3,2)
779 changed(2,3,1)
780 changed(2,6,5)
781 changed(2,6,4)
782 changed(2,6,3)
783 changed(2,7,5)
784 changed(2,7,4)
785 changed(2,7,3)
81 time(1)
82 time(2)
83 time(3)
25 empty(4,4)
819 possibleMove(3,up,7,5)
820 possibleMove(3,up,6,5)
821 possibleMove(3,up,3,3)
822 possibleMove(3,up,3,4)
823 possibleMove(3,up,3,5)
824 possibleMove(3,up,3,6)
825 possibleMove(3,up,3,7)
826 possibleMove(3,up,4,3)
827 possibleMove(3,up,4,4)
828 possibleMove(3,up,4,5)
829 possibleMove(3,up,4,6)
830 possibleMove(3,up,4,7)
831 possibleMove(3,up,5,3)
832 possibleMove(3,up,5,4)
833 possibleMove(3,up,5,5)
834 possibleMove(3,up,5,6)
835 possibleMove(3,up,5,7)
836 possibleMove(3,up,2,5)
837 possibleMove(3,up,1,5)
838 possibleMove(2,up,7,5)
839 possibleMove(2,up,6,5)
840 possibleMove(2,up,3,3)
841 possibleMove(2,up,3,4)
842 possibleMove(2,up,3,5)
843 possibleMove(2,up,3,6)
844 possibleMove(2,up,3,7)
845 possibleMove(2,up,4,3)
846 possibleMove(2,up,4,4)
847 possibleMove(2,up,4,5)
848 possibleMove(2,up,4,6)
849 possibleMove(2,up,4,7)
850 possibleMove(2,up,5,3)
851 possibleMove(2,up,5,4)
852 possibleMove(2,up,5,5)
853 possibleMove(2,up,5,6)
854 possibleMove(2,up,5,7)
855 possibleMove(2,up,2,5)
856 possibleMove(2,up,1,5)
857 possibleMove(1,up,4,6)
858 possibleMove(3,down,7,3)
859 possibleMove(3,down,6,3)
860 possibleMove(3,down,3,1)
861 possibleMove(3,down,3,2)
862 possibleMove(3,down,3,3)
863 possibleMove(3,down,3,4)
864 possibleMove(3,down,3,5)
865 possibleMove(3,down,4,1)
866 possibleMove(3,down,4,2)
867 possibleMove(3,down,4,3)
868 possibleMove(3,down,4,4)
869 possibleMove(3,down,4,5)
870 possibleMove(3,down,5,1)
871 possibleMove(3,down,5,2)
872 possibleMove(3,down,5,3)
873 possibleMove(3,down,5,4)
874 possibleMove(3,down,5,5)
875 possibleMove(3,down,2,3)
876 possibleMove(3,down,1,3)
877 possibleMove(2,down,7,3)
878 possibleMove(2,down,6,3)
879 possibleMove(2,down,3,1)
880 possibleMove(2,down,3,2)
881 possibleMove(2,down,3,3)
882 possibleMove(2,down,3,4)
883 possibleMove(2,down,3,5)
884 possibleMove(2,down,4,1)
885 possibleMove(2,down,4,2)
886 possibleMove(2,down,4,3)
887 possibleMove(2,down,4,4)
888 possibleMove(2,down,4,5)
889 possibleMove(2,down,5,1)
890 possibleMove(2,down,5,2)
891 possibleMove(2,down,5,3)
892 possibleMove(2,down,5,4)
893 possibleMove(2,down,5,5)
894 possibleMove(2,down,2,3)
895 possibleMove(2,down,1,3)
896 possibleMove(1,down,4,2)
897 possibleMove(3,left,5,1)
898 possibleMove(3,left,5,2)
899 possibleMove(3,left,5,3)
900 possibleMove(3,left,5,4)
901 possibleMove(3,left,5,5)
902 possibleMove(3,left,5,6)
903 possibleMove(3,left,5,7)
904 possibleMove(3,left,6,3)
905 possibleMove(3,left,6,4)
906 possibleMove(3,left,6,5)
907 possibleMove(3,left,7,3)
908 possibleMove(3,left,7,4)
909 possibleMove(3,left,7,5)
910 possibleMove(3,left,4,3)
911 possibleMove(3,left,4,4)
912 possibleMove(3,left,4,5)
913 possibleMove(3,left,3,3)
914 possibleMove(3,left,3,4)
915 possibleMove(3,left,3,5)
916 possibleMove(2,left,5,1)
917 possibleMove(2,left,5,2)
918 possibleMove(2,left,5,3)
919 possibleMove(2,left,5,4)
920 possibleMove(2,left,5,5)
921 possibleMove(2,left,5,6)
922 possibleMove(2,left,5,7)
923 possibleMove(2,left,6,3)
924 possibleMove(2,left,6,4)
925 possibleMove(2,left,6,5)
926 possibleMove(2,left,7,3)
927 possibleMove(2,left,7,4)
928 possibleMove(2,left,7,5)
929 possibleMove(2,left,4,3)
930 possibleMove(2,left,4,4)
931 possibleMove(2,left,4,5)
932 possibleMove(2,left,3,3)
933 possibleMove(2,left,3,4)
934 possibleMove(2,left,3,5)
935 possibleMove(1,left,6,4)
936 possibleMove(3,right,5,3)
937 possibleMove(3,right,5,4)
938 possibleMove(3,right,5,5)
939 possibleMove(3,right,4,3)
940 possibleMove(3,right,4,4)
941 possibleMove(3,right,4,5)
942 possibleMove(3,right,1,3)
943 possibleMove(3,right,1,4)
944 possibleMove(3,right,1,5)
945 possibleMove(3,right,2,3)
946 possibleMove(3,right,2,4)
947 possibleMove(3,right,2,5)
948 possibleMove(3,right,3,1)
949 possibleMove(3,right,3,2)
950 possibleMove(3,right,3,3)
951 possibleMove(3,right,3,4)
952 possibleMove(3,right,3,5)
953 possibleMove(3,right,3,6)
954 possibleMove(3,right,3,7)
955 possibleMove(2,right,5,3)
956 possibleMove(2,right,5,4)
957 possibleMove(2,right,5,5)
958 possibleMove(2,right,4,3)
959 possibleMove(2,right,4,4)
960 possibleMove(2,right,4,5)
961 possibleMove(2,right,1,3)
962 possibleMove(2,right,1,4)
963 possibleMove(2,right,1,5)
964 possibleMove(2,right,2,3)
965 possibleMove(2,right,2,4)
966 possibleMove(2,right,2,5)
967 possibleMove(2,right,3,1)
968 possibleMove(2,right,3,2)
969 possibleMove(2,right,3,3)
970 possibleMove(2,right,3,4)
971 possibleMove(2,right,3,5)
972 possibleMove(2,right,3,6)
973 possibleMove(2,right,3,7)
974 possibleMove(1,right,2,4)
48 location(1,5)
49 location(1,4)
50 location(1,3)
51 location(2,5)
52 location(2,4)
53 location(2,3)
54 location(5,7)
55 location(5,6)
56 location(5,5)
57 location(5,4)
58 location(5,3)
59 location(5,2)
60 location(5,1)
61 location(4,7)
62 location(4,6)
63 location(4,5)
64 location(4,4)
65 location(4,3)
66 location(4,2)
67 location(4,1)
68 location(3,7)
69 location(3,6)
70 location(3,5)
71 location(3,4)
72 location(3,3)
73 location(3,2)
74 location(3,1)
75 location(6,5)
76 location(6,4)
77 location(6,3)
78 location(7,5)
79 location(7,4)
80 location(7,3)
0
B+
0
B-
1
0
1
"""
output = """
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,right,1,3), move(2,down,3,2), move(1,right,2,4), state(4,empty,1,3), state(4,empty,2,4), state(4,empty,2,3), state(4,empty,3,2), state(3,empty,2,4), state(3,empty,3,3), state(3,empty,3,2), state(2,empty,2,4), state(2,empty,3,4), state(4,full,5,5), state(4,full,5,4), state(4,full,5,3), state(4,full,5,2), state(4,full,5,1), state(4,full,4,5), state(4,full,4,4), state(4,full,4,3), state(4,full,4,2), state(4,full,4,1), state(4,full,3,5), state(4,full,3,4), state(4,full,3,3), state(4,full,3,1), state(4,full,6,3), state(4,full,7,3), state(3,full,1,3), state(3,full,2,3), state(3,full,5,5), state(3,full,5,4), state(3,full,5,3), state(3,full,5,2), state(3,full,5,1), state(3,full,4,5), state(3,full,4,4), state(3,full,4,3), state(3,full,4,2), state(3,full,4,1), state(3,full,3,5), state(3,full,3,4), state(3,full,3,1), state(3,full,6,3), state(3,full,7,3), state(2,full,1,3), state(2,full,2,3), state(2,full,5,5), state(2,full,5,4), 
state(2,full,5,3), state(2,full,5,2), state(2,full,5,1), state(2,full,4,5), state(2,full,4,4), state(2,full,4,3), state(2,full,4,2), state(2,full,4,1), state(2,full,3,5), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,2,5), state(4,full,5,7), state(4,full,5,6), state(4,full,4,7), state(4,full,4,6), state(4,full,3,7), state(4,full,3,6), state(4,full,6,5), state(4,full,7,5), state(3,full,1,5), state(3,full,2,5), state(3,full,5,7), state(3,full,5,6), state(3,full,4,7), state(3,full,4,6), state(3,full,3,7), state(3,full,3,6), state(3,full,6,5), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,4,6), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,1,4), state(3,full,1,4), state(2,full,1,4), state(4,full,7,4), state(4,full,6,4), state(3,full,7,4), state(3,full,6,4), state(2,full,7,4), state(2,full,6,4), changed(4,1,3), changed(4,2,3), changed(4,3,3), changed(3,3,4), changed(3,3,3), changed(3,3,2), changed(2,2,4), changed(2,4,4), changed(2,3,4), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(3,up,3,5), possibleMove(2,up,3,6), possibleMove(1,up,4,6), possibleMove(2,down,3,2), possibleMove(1,down,4,2), possibleMove(3,left,5,2), possibleMove(3,left,5,3), possibleMove(3,left,4,4), 
possibleMove(2,left,5,4), possibleMove(1,left,6,4), possibleMove(3,right,1,3), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,left,4,4), move(2,down,3,2), move(1,right,2,4), state(4,empty,4,4), state(4,empty,3,4), state(4,empty,3,3), state(4,empty,3,2), state(3,empty,2,4), state(3,empty,3,3), state(3,empty,3,2), state(2,empty,2,4), state(2,empty,3,4), state(4,full,1,3), state(4,full,2,3), state(4,full,5,5), state(4,full,5,4), state(4,full,5,3), state(4,full,5,2), state(4,full,5,1), state(4,full,4,5), state(4,full,4,3), state(4,full,4,2), state(4,full,4,1), state(4,full,3,5), state(4,full,3,1), state(4,full,6,3), state(4,full,7,3), state(3,full,1,3), state(3,full,2,3), state(3,full,5,5), state(3,full,5,4), state(3,full,5,3), state(3,full,5,2), state(3,full,5,1), state(3,full,4,5), state(3,full,4,4), state(3,full,4,3), state(3,full,4,2), state(3,full,4,1), state(3,full,3,5), state(3,full,3,4), state(3,full,3,1), state(3,full,6,3), state(3,full,7,3), state(2,full,1,3), state(2,full,2,3), state(2,full,5,5), state(2,full,5,4), state(2,full,5,3), 
state(2,full,5,2), state(2,full,5,1), state(2,full,4,5), state(2,full,4,4), state(2,full,4,3), state(2,full,4,2), state(2,full,4,1), state(2,full,3,5), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,2,5), state(4,full,5,7), state(4,full,5,6), state(4,full,4,7), state(4,full,4,6), state(4,full,3,7), state(4,full,3,6), state(4,full,6,5), state(4,full,7,5), state(3,full,1,5), state(3,full,2,5), state(3,full,5,7), state(3,full,5,6), state(3,full,4,7), state(3,full,4,6), state(3,full,3,7), state(3,full,3,6), state(3,full,6,5), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,4,6), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,2,4), state(4,full,1,4), state(3,full,1,4), state(2,full,1,4), state(4,full,7,4), state(4,full,6,4), state(3,full,7,4), state(3,full,6,4), state(2,full,7,4), state(2,full,6,4), changed(4,2,4), changed(4,4,4), changed(4,3,4), changed(3,3,4), changed(3,3,3), changed(3,3,2), changed(2,2,4), changed(2,4,4), changed(2,3,4), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(3,up,3,5), possibleMove(2,up,3,6), possibleMove(1,up,4,6), possibleMove(2,down,3,2), possibleMove(1,down,4,2), possibleMove(3,left,5,2), possibleMove(3,left,5,3), possibleMove(3,left,4,4), 
possibleMove(2,left,5,4), possibleMove(1,left,6,4), possibleMove(3,right,1,3), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,left,5,2), move(2,down,3,2), move(1,right,2,4), state(4,empty,2,4), state(4,empty,5,2), state(4,empty,4,2), state(4,empty,3,3), state(3,empty,2,4), state(3,empty,3,3), state(3,empty,3,2), state(2,empty,2,4), state(2,empty,3,4), state(4,full,1,3), state(4,full,2,3), state(4,full,5,5), state(4,full,5,4), state(4,full,5,3), state(4,full,5,1), state(4,full,4,5), state(4,full,4,4), state(4,full,4,3), state(4,full,4,1), state(4,full,3,5), state(4,full,3,4), state(4,full,3,2), state(4,full,3,1), state(4,full,6,3), state(4,full,7,3), state(3,full,1,3), state(3,full,2,3), state(3,full,5,5), state(3,full,5,4), state(3,full,5,3), state(3,full,5,2), state(3,full,5,1), state(3,full,4,5), state(3,full,4,4), state(3,full,4,3), state(3,full,4,2), state(3,full,4,1), state(3,full,3,5), state(3,full,3,4), state(3,full,3,1), state(3,full,6,3), state(3,full,7,3), state(2,full,1,3), state(2,full,2,3), state(2,full,5,5), state(2,full,5,4), 
state(2,full,5,3), state(2,full,5,2), state(2,full,5,1), state(2,full,4,5), state(2,full,4,4), state(2,full,4,3), state(2,full,4,2), state(2,full,4,1), state(2,full,3,5), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,2,5), state(4,full,5,7), state(4,full,5,6), state(4,full,4,7), state(4,full,4,6), state(4,full,3,7), state(4,full,3,6), state(4,full,6,5), state(4,full,7,5), state(3,full,1,5), state(3,full,2,5), state(3,full,5,7), state(3,full,5,6), state(3,full,4,7), state(3,full,4,6), state(3,full,3,7), state(3,full,3,6), state(3,full,6,5), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,4,6), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,1,4), state(3,full,1,4), state(2,full,1,4), state(4,full,7,4), state(4,full,6,4), state(3,full,7,4), state(3,full,6,4), state(2,full,7,4), state(2,full,6,4), changed(4,5,2), changed(4,4,2), changed(4,3,2), changed(3,3,4), changed(3,3,3), changed(3,3,2), changed(2,2,4), changed(2,4,4), changed(2,3,4), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(3,up,3,5), possibleMove(2,up,3,6), possibleMove(1,up,4,6), possibleMove(2,down,3,2), possibleMove(1,down,4,2), possibleMove(3,left,5,2), possibleMove(3,left,5,3), possibleMove(3,left,4,4), 
possibleMove(2,left,5,4), possibleMove(1,left,6,4), possibleMove(3,right,1,3), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,left,5,3), move(2,down,3,2), move(1,right,2,4), state(4,empty,2,4), state(4,empty,5,3), state(4,empty,4,3), state(4,empty,3,2), state(3,empty,2,4), state(3,empty,3,3), state(3,empty,3,2), state(2,empty,2,4), state(2,empty,3,4), state(4,full,1,3), state(4,full,2,3), state(4,full,5,5), state(4,full,5,4), state(4,full,5,2), state(4,full,5,1), state(4,full,4,5), state(4,full,4,4), state(4,full,4,2), state(4,full,4,1), state(4,full,3,5), state(4,full,3,4), state(4,full,3,3), state(4,full,3,1), state(4,full,6,3), state(4,full,7,3), state(3,full,1,3), state(3,full,2,3), state(3,full,5,5), state(3,full,5,4), state(3,full,5,3), state(3,full,5,2), state(3,full,5,1), state(3,full,4,5), state(3,full,4,4), state(3,full,4,3), state(3,full,4,2), state(3,full,4,1), state(3,full,3,5), state(3,full,3,4), state(3,full,3,1), state(3,full,6,3), state(3,full,7,3), state(2,full,1,3), state(2,full,2,3), state(2,full,5,5), state(2,full,5,4), 
state(2,full,5,3), state(2,full,5,2), state(2,full,5,1), state(2,full,4,5), state(2,full,4,4), state(2,full,4,3), state(2,full,4,2), state(2,full,4,1), state(2,full,3,5), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,2,5), state(4,full,5,7), state(4,full,5,6), state(4,full,4,7), state(4,full,4,6), state(4,full,3,7), state(4,full,3,6), state(4,full,6,5), state(4,full,7,5), state(3,full,1,5), state(3,full,2,5), state(3,full,5,7), state(3,full,5,6), state(3,full,4,7), state(3,full,4,6), state(3,full,3,7), state(3,full,3,6), state(3,full,6,5), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,4,6), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,1,4), state(3,full,1,4), state(2,full,1,4), state(4,full,7,4), state(4,full,6,4), state(3,full,7,4), state(3,full,6,4), state(2,full,7,4), state(2,full,6,4), changed(4,5,3), changed(4,4,3), changed(4,3,3), changed(3,3,4), changed(3,3,3), changed(3,3,2), changed(2,2,4), changed(2,4,4), changed(2,3,4), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(3,up,3,5), possibleMove(2,up,3,6), possibleMove(1,up,4,6), possibleMove(2,down,3,2), possibleMove(1,down,4,2), possibleMove(3,left,5,2), possibleMove(3,left,5,3), possibleMove(3,left,4,4), 
possibleMove(2,left,5,4), possibleMove(1,left,6,4), possibleMove(3,right,1,3), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,up,3,5), move(2,down,3,2), move(1,right,2,4), state(4,empty,2,4), state(4,empty,3,5), state(4,empty,3,4), state(4,empty,3,2), state(3,empty,2,4), state(3,empty,3,3), state(3,empty,3,2), state(2,empty,2,4), state(2,empty,3,4), state(4,full,1,3), state(4,full,2,3), state(4,full,5,5), state(4,full,5,4), state(4,full,5,3), state(4,full,5,2), state(4,full,5,1), state(4,full,4,5), state(4,full,4,4), state(4,full,4,3), state(4,full,4,2), state(4,full,4,1), state(4,full,3,3), state(4,full,3,1), state(4,full,6,3), state(4,full,7,3), state(3,full,1,3), state(3,full,2,3), state(3,full,5,5), state(3,full,5,4), state(3,full,5,3), state(3,full,5,2), state(3,full,5,1), state(3,full,4,5), state(3,full,4,4), state(3,full,4,3), state(3,full,4,2), state(3,full,4,1), state(3,full,3,5), state(3,full,3,4), state(3,full,3,1), state(3,full,6,3), state(3,full,7,3), state(2,full,1,3), state(2,full,2,3), state(2,full,5,5), state(2,full,5,4), 
state(2,full,5,3), state(2,full,5,2), state(2,full,5,1), state(2,full,4,5), state(2,full,4,4), state(2,full,4,3), state(2,full,4,2), state(2,full,4,1), state(2,full,3,5), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,2,5), state(4,full,5,7), state(4,full,5,6), state(4,full,4,7), state(4,full,4,6), state(4,full,3,7), state(4,full,3,6), state(4,full,6,5), state(4,full,7,5), state(3,full,1,5), state(3,full,2,5), state(3,full,5,7), state(3,full,5,6), state(3,full,4,7), state(3,full,4,6), state(3,full,3,7), state(3,full,3,6), state(3,full,6,5), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,4,6), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,1,4), state(3,full,1,4), state(2,full,1,4), state(4,full,7,4), state(4,full,6,4), state(3,full,7,4), state(3,full,6,4), state(2,full,7,4), state(2,full,6,4), changed(4,3,5), changed(4,3,4), changed(4,3,3), changed(3,3,4), changed(3,3,3), changed(3,3,2), changed(2,2,4), changed(2,4,4), changed(2,3,4), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(3,up,3,5), possibleMove(2,up,3,6), possibleMove(1,up,4,6), possibleMove(2,down,3,2), possibleMove(1,down,4,2), possibleMove(3,left,5,2), possibleMove(3,left,5,3), possibleMove(3,left,4,4), 
possibleMove(2,left,5,4), possibleMove(1,left,6,4), possibleMove(3,right,1,3), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,right,1,5), move(2,up,3,6), move(1,right,2,4), state(4,empty,1,5), state(4,empty,2,5), state(4,empty,2,4), state(4,empty,3,6), state(3,empty,2,4), state(3,empty,3,6), state(3,empty,3,5), state(2,empty,2,4), state(2,empty,3,4), state(4,full,1,3), state(4,full,2,3), state(4,full,5,5), state(4,full,5,4), state(4,full,5,3), state(4,full,5,2), state(4,full,5,1), state(4,full,4,5), state(4,full,4,4), state(4,full,4,3), state(4,full,4,2), state(4,full,4,1), state(4,full,3,5), state(4,full,3,4), state(4,full,3,3), state(4,full,3,2), state(4,full,3,1), state(4,full,6,3), state(4,full,7,3), state(3,full,1,3), state(3,full,2,3), state(3,full,5,5), state(3,full,5,4), state(3,full,5,3), state(3,full,5,2), state(3,full,5,1), state(3,full,4,5), state(3,full,4,4), state(3,full,4,3), state(3,full,4,2), state(3,full,4,1), state(3,full,3,4), state(3,full,3,3), state(3,full,3,2), state(3,full,3,1), state(3,full,6,3), state(3,full,7,3), 
state(2,full,1,3), state(2,full,2,3), state(2,full,5,5), state(2,full,5,4), state(2,full,5,3), state(2,full,5,2), state(2,full,5,1), state(2,full,4,5), state(2,full,4,4), state(2,full,4,3), state(2,full,4,2), state(2,full,4,1), state(2,full,3,5), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,5,7), state(4,full,5,6), state(4,full,4,7), state(4,full,4,6), state(4,full,3,7), state(4,full,6,5), state(4,full,7,5), state(3,full,1,5), state(3,full,2,5), state(3,full,5,7), state(3,full,5,6), state(3,full,4,7), state(3,full,4,6), state(3,full,3,7), state(3,full,6,5), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,4,6), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,1,4), state(3,full,1,4), state(2,full,1,4), state(4,full,7,4), state(4,full,6,4), state(3,full,7,4), state(3,full,6,4), state(2,full,7,4), state(2,full,6,4), changed(4,1,5), changed(4,2,5), changed(4,3,5), changed(3,3,6), changed(3,3,5), changed(3,3,4), changed(2,2,4), changed(2,4,4), changed(2,3,4), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(2,up,3,6), possibleMove(1,up,4,6), possibleMove(3,down,3,3), possibleMove(2,down,3,2), possibleMove(1,down,4,2), possibleMove(3,left,5,5), possibleMove(3,left,5,6), possibleMove(3,left,4,4), 
possibleMove(2,left,5,4), possibleMove(1,left,6,4), possibleMove(3,right,1,5), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,left,5,6), move(2,up,3,6), move(1,right,2,4), state(4,empty,2,4), state(4,empty,5,6), state(4,empty,4,6), state(4,empty,3,5), state(3,empty,2,4), state(3,empty,3,6), state(3,empty,3,5), state(2,empty,2,4), state(2,empty,3,4), state(4,full,1,3), state(4,full,2,3), state(4,full,5,5), state(4,full,5,4), state(4,full,5,3), state(4,full,5,2), state(4,full,5,1), state(4,full,4,5), state(4,full,4,4), state(4,full,4,3), state(4,full,4,2), state(4,full,4,1), state(4,full,3,4), state(4,full,3,3), state(4,full,3,2), state(4,full,3,1), state(4,full,6,3), state(4,full,7,3), state(3,full,1,3), state(3,full,2,3), state(3,full,5,5), state(3,full,5,4), state(3,full,5,3), state(3,full,5,2), state(3,full,5,1), state(3,full,4,5), state(3,full,4,4), state(3,full,4,3), state(3,full,4,2), state(3,full,4,1), state(3,full,3,4), state(3,full,3,3), state(3,full,3,2), state(3,full,3,1), state(3,full,6,3), state(3,full,7,3), state(2,full,1,3), 
state(2,full,2,3), state(2,full,5,5), state(2,full,5,4), state(2,full,5,3), state(2,full,5,2), state(2,full,5,1), state(2,full,4,5), state(2,full,4,4), state(2,full,4,3), state(2,full,4,2), state(2,full,4,1), state(2,full,3,5), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,2,5), state(4,full,5,7), state(4,full,4,7), state(4,full,3,7), state(4,full,3,6), state(4,full,6,5), state(4,full,7,5), state(3,full,1,5), state(3,full,2,5), state(3,full,5,7), state(3,full,5,6), state(3,full,4,7), state(3,full,4,6), state(3,full,3,7), state(3,full,6,5), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,4,6), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,1,4), state(3,full,1,4), state(2,full,1,4), state(4,full,7,4), state(4,full,6,4), state(3,full,7,4), state(3,full,6,4), state(2,full,7,4), state(2,full,6,4), changed(4,5,6), changed(4,4,6), changed(4,3,6), changed(3,3,6), changed(3,3,5), changed(3,3,4), changed(2,2,4), changed(2,4,4), changed(2,3,4), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(2,up,3,6), possibleMove(1,up,4,6), possibleMove(3,down,3,3), possibleMove(2,down,3,2), possibleMove(1,down,4,2), possibleMove(3,left,5,5), possibleMove(3,left,5,6), possibleMove(3,left,4,4), 
possibleMove(2,left,5,4), possibleMove(1,left,6,4), possibleMove(3,right,1,5), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,left,5,5), move(2,up,3,6), move(1,right,2,4), state(4,empty,2,4), state(4,empty,5,5), state(4,empty,4,5), state(4,empty,3,6), state(3,empty,2,4), state(3,empty,3,6), state(3,empty,3,5), state(2,empty,2,4), state(2,empty,3,4), state(4,full,1,3), state(4,full,2,3), state(4,full,5,4), state(4,full,5,3), state(4,full,5,2), state(4,full,5,1), state(4,full,4,4), state(4,full,4,3), state(4,full,4,2), state(4,full,4,1), state(4,full,3,5), state(4,full,3,4), state(4,full,3,3), state(4,full,3,2), state(4,full,3,1), state(4,full,6,3), state(4,full,7,3), state(3,full,1,3), state(3,full,2,3), state(3,full,5,5), state(3,full,5,4), state(3,full,5,3), state(3,full,5,2), state(3,full,5,1), state(3,full,4,5), state(3,full,4,4), state(3,full,4,3), state(3,full,4,2), state(3,full,4,1), state(3,full,3,4), state(3,full,3,3), state(3,full,3,2), state(3,full,3,1), state(3,full,6,3), state(3,full,7,3), state(2,full,1,3), state(2,full,2,3), 
state(2,full,5,5), state(2,full,5,4), state(2,full,5,3), state(2,full,5,2), state(2,full,5,1), state(2,full,4,5), state(2,full,4,4), state(2,full,4,3), state(2,full,4,2), state(2,full,4,1), state(2,full,3,5), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,2,5), state(4,full,5,7), state(4,full,5,6), state(4,full,4,7), state(4,full,4,6), state(4,full,3,7), state(4,full,6,5), state(4,full,7,5), state(3,full,1,5), state(3,full,2,5), state(3,full,5,7), state(3,full,5,6), state(3,full,4,7), state(3,full,4,6), state(3,full,3,7), state(3,full,6,5), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,4,6), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,1,4), state(3,full,1,4), state(2,full,1,4), state(4,full,7,4), state(4,full,6,4), state(3,full,7,4), state(3,full,6,4), state(2,full,7,4), state(2,full,6,4), changed(4,5,5), changed(4,4,5), changed(4,3,5), changed(3,3,6), changed(3,3,5), changed(3,3,4), changed(2,2,4), changed(2,4,4), changed(2,3,4), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(2,up,3,6), possibleMove(1,up,4,6), possibleMove(3,down,3,3), possibleMove(2,down,3,2), possibleMove(1,down,4,2), possibleMove(3,left,5,5), possibleMove(3,left,5,6), possibleMove(3,left,4,4), 
possibleMove(2,left,5,4), possibleMove(1,left,6,4), possibleMove(3,right,1,5), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,left,4,4), move(2,up,3,6), move(1,right,2,4), state(4,empty,4,4), state(4,empty,3,6), state(4,empty,3,5), state(4,empty,3,4), state(3,empty,2,4), state(3,empty,3,6), state(3,empty,3,5), state(2,empty,2,4), state(2,empty,3,4), state(4,full,1,3), state(4,full,2,3), state(4,full,5,5), state(4,full,5,4), state(4,full,5,3), state(4,full,5,2), state(4,full,5,1), state(4,full,4,5), state(4,full,4,3), state(4,full,4,2), state(4,full,4,1), state(4,full,3,3), state(4,full,3,2), state(4,full,3,1), state(4,full,6,3), state(4,full,7,3), state(3,full,1,3), state(3,full,2,3), state(3,full,5,5), state(3,full,5,4), state(3,full,5,3), state(3,full,5,2), state(3,full,5,1), state(3,full,4,5), state(3,full,4,4), state(3,full,4,3), state(3,full,4,2), state(3,full,4,1), state(3,full,3,4), state(3,full,3,3), state(3,full,3,2), state(3,full,3,1), state(3,full,6,3), state(3,full,7,3), state(2,full,1,3), state(2,full,2,3), state(2,full,5,5), 
state(2,full,5,4), state(2,full,5,3), state(2,full,5,2), state(2,full,5,1), state(2,full,4,5), state(2,full,4,4), state(2,full,4,3), state(2,full,4,2), state(2,full,4,1), state(2,full,3,5), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,2,5), state(4,full,5,7), state(4,full,5,6), state(4,full,4,7), state(4,full,4,6), state(4,full,3,7), state(4,full,6,5), state(4,full,7,5), state(3,full,1,5), state(3,full,2,5), state(3,full,5,7), state(3,full,5,6), state(3,full,4,7), state(3,full,4,6), state(3,full,3,7), state(3,full,6,5), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,4,6), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,2,4), state(4,full,1,4), state(3,full,1,4), state(2,full,1,4), state(4,full,7,4), state(4,full,6,4), state(3,full,7,4), state(3,full,6,4), state(2,full,7,4), state(2,full,6,4), changed(4,2,4), changed(4,4,4), changed(4,3,4), changed(3,3,6), changed(3,3,5), changed(3,3,4), changed(2,2,4), changed(2,4,4), changed(2,3,4), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(2,up,3,6), possibleMove(1,up,4,6), possibleMove(3,down,3,3), possibleMove(2,down,3,2), possibleMove(1,down,4,2), possibleMove(3,left,5,5), possibleMove(3,left,5,6), possibleMove(3,left,4,4), 
possibleMove(2,left,5,4), possibleMove(1,left,6,4), possibleMove(3,right,1,5), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,down,3,3), move(2,up,3,6), move(1,right,2,4), state(4,empty,2,4), state(4,empty,3,6), state(4,empty,3,4), state(4,empty,3,3), state(3,empty,2,4), state(3,empty,3,6), state(3,empty,3,5), state(2,empty,2,4), state(2,empty,3,4), state(4,full,1,3), state(4,full,2,3), state(4,full,5,5), state(4,full,5,4), state(4,full,5,3), state(4,full,5,2), state(4,full,5,1), state(4,full,4,5), state(4,full,4,4), state(4,full,4,3), state(4,full,4,2), state(4,full,4,1), state(4,full,3,5), state(4,full,3,2), state(4,full,3,1), state(4,full,6,3), state(4,full,7,3), state(3,full,1,3), state(3,full,2,3), state(3,full,5,5), state(3,full,5,4), state(3,full,5,3), state(3,full,5,2), state(3,full,5,1), state(3,full,4,5), state(3,full,4,4), state(3,full,4,3), state(3,full,4,2), state(3,full,4,1), state(3,full,3,4), state(3,full,3,3), state(3,full,3,2), state(3,full,3,1), state(3,full,6,3), state(3,full,7,3), state(2,full,1,3), state(2,full,2,3), 
state(2,full,5,5), state(2,full,5,4), state(2,full,5,3), state(2,full,5,2), state(2,full,5,1), state(2,full,4,5), state(2,full,4,4), state(2,full,4,3), state(2,full,4,2), state(2,full,4,1), state(2,full,3,5), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,2,5), state(4,full,5,7), state(4,full,5,6), state(4,full,4,7), state(4,full,4,6), state(4,full,3,7), state(4,full,6,5), state(4,full,7,5), state(3,full,1,5), state(3,full,2,5), state(3,full,5,7), state(3,full,5,6), state(3,full,4,7), state(3,full,4,6), state(3,full,3,7), state(3,full,6,5), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,4,6), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,1,4), state(3,full,1,4), state(2,full,1,4), state(4,full,7,4), state(4,full,6,4), state(3,full,7,4), state(3,full,6,4), state(2,full,7,4), state(2,full,6,4), changed(4,3,5), changed(4,3,4), changed(4,3,3), changed(3,3,6), changed(3,3,5), changed(3,3,4), changed(2,2,4), changed(2,4,4), changed(2,3,4), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(2,up,3,6), possibleMove(1,up,4,6), possibleMove(3,down,3,3), possibleMove(2,down,3,2), possibleMove(1,down,4,2), possibleMove(3,left,5,5), possibleMove(3,left,5,6), possibleMove(3,left,4,4), 
possibleMove(2,left,5,4), possibleMove(1,left,6,4), possibleMove(3,right,1,5), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,left,7,4), move(2,left,5,4), move(1,right,2,4), state(4,empty,2,4), state(4,empty,4,4), state(4,empty,6,4), state(4,empty,7,4), state(3,empty,2,4), state(3,empty,5,4), state(3,empty,4,4), state(2,empty,2,4), state(2,empty,3,4), state(4,full,1,3), state(4,full,2,3), state(4,full,5,5), state(4,full,5,4), state(4,full,5,3), state(4,full,5,2), state(4,full,5,1), state(4,full,4,5), state(4,full,4,3), state(4,full,4,2), state(4,full,4,1), state(4,full,3,5), state(4,full,3,4), state(4,full,3,3), state(4,full,3,2), state(4,full,3,1), state(4,full,6,3), state(4,full,7,3), state(3,full,1,3), state(3,full,2,3), state(3,full,5,5), state(3,full,5,3), state(3,full,5,2), state(3,full,5,1), state(3,full,4,5), state(3,full,4,3), state(3,full,4,2), state(3,full,4,1), state(3,full,3,5), state(3,full,3,4), state(3,full,3,3), state(3,full,3,2), state(3,full,3,1), state(3,full,6,3), state(3,full,7,3), state(2,full,1,3), state(2,full,2,3), 
state(2,full,5,5), state(2,full,5,4), state(2,full,5,3), state(2,full,5,2), state(2,full,5,1), state(2,full,4,5), state(2,full,4,4), state(2,full,4,3), state(2,full,4,2), state(2,full,4,1), state(2,full,3,5), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,2,5), state(4,full,5,7), state(4,full,5,6), state(4,full,4,7), state(4,full,4,6), state(4,full,3,7), state(4,full,3,6), state(4,full,6,5), state(4,full,7,5), state(3,full,1,5), state(3,full,2,5), state(3,full,5,7), state(3,full,5,6), state(3,full,4,7), state(3,full,4,6), state(3,full,3,7), state(3,full,3,6), state(3,full,6,5), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,4,6), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,1,4), state(3,full,1,4), state(2,full,1,4), state(3,full,7,4), state(3,full,6,4), state(2,full,7,4), state(2,full,6,4), changed(4,5,4), changed(4,6,4), changed(4,7,4), changed(3,5,4), changed(3,4,4), changed(3,3,4), changed(2,2,4), changed(2,4,4), changed(2,3,4), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(3,up,4,6), possibleMove(3,up,5,6), possibleMove(2,up,3,6), possibleMove(1,up,4,6), possibleMove(3,down,4,2), possibleMove(3,down,5,2), possibleMove(2,down,3,2), possibleMove(1,down,4,2), 
possibleMove(3,left,7,4), possibleMove(2,left,5,4), possibleMove(1,left,6,4), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,down,4,2), move(2,left,5,4), move(1,right,2,4), state(4,empty,2,4), state(4,empty,5,4), state(4,empty,4,3), state(4,empty,4,2), state(3,empty,2,4), state(3,empty,5,4), state(3,empty,4,4), state(2,empty,2,4), state(2,empty,3,4), state(4,full,1,3), state(4,full,2,3), state(4,full,5,5), state(4,full,5,3), state(4,full,5,2), state(4,full,5,1), state(4,full,4,5), state(4,full,4,4), state(4,full,4,1), state(4,full,3,5), state(4,full,3,4), state(4,full,3,3), state(4,full,3,2), state(4,full,3,1), state(4,full,6,3), state(4,full,7,3), state(3,full,1,3), state(3,full,2,3), state(3,full,5,5), state(3,full,5,3), state(3,full,5,2), state(3,full,5,1), state(3,full,4,5), state(3,full,4,3), state(3,full,4,2), state(3,full,4,1), state(3,full,3,5), state(3,full,3,4), state(3,full,3,3), state(3,full,3,2), state(3,full,3,1), state(3,full,6,3), state(3,full,7,3), state(2,full,1,3), state(2,full,2,3), state(2,full,5,5), state(2,full,5,4), 
state(2,full,5,3), state(2,full,5,2), state(2,full,5,1), state(2,full,4,5), state(2,full,4,4), state(2,full,4,3), state(2,full,4,2), state(2,full,4,1), state(2,full,3,5), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,2,5), state(4,full,5,7), state(4,full,5,6), state(4,full,4,7), state(4,full,4,6), state(4,full,3,7), state(4,full,3,6), state(4,full,6,5), state(4,full,7,5), state(3,full,1,5), state(3,full,2,5), state(3,full,5,7), state(3,full,5,6), state(3,full,4,7), state(3,full,4,6), state(3,full,3,7), state(3,full,3,6), state(3,full,6,5), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,4,6), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,1,4), state(3,full,1,4), state(2,full,1,4), state(4,full,7,4), state(4,full,6,4), state(3,full,7,4), state(3,full,6,4), state(2,full,7,4), state(2,full,6,4), changed(4,4,4), changed(4,4,3), changed(4,4,2), changed(3,5,4), changed(3,4,4), changed(3,3,4), changed(2,2,4), changed(2,4,4), changed(2,3,4), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(3,up,4,6), possibleMove(3,up,5,6), possibleMove(2,up,3,6), possibleMove(1,up,4,6), possibleMove(3,down,4,2), possibleMove(3,down,5,2), possibleMove(2,down,3,2), possibleMove(1,down,4,2), 
possibleMove(3,left,7,4), possibleMove(2,left,5,4), possibleMove(1,left,6,4), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,down,5,2), move(2,left,5,4), move(1,right,2,4), state(4,empty,2,4), state(4,empty,5,3), state(4,empty,5,2), state(4,empty,4,4), state(3,empty,2,4), state(3,empty,5,4), state(3,empty,4,4), state(2,empty,2,4), state(2,empty,3,4), state(4,full,1,3), state(4,full,2,3), state(4,full,5,5), state(4,full,5,4), state(4,full,5,1), state(4,full,4,5), state(4,full,4,3), state(4,full,4,2), state(4,full,4,1), state(4,full,3,5), state(4,full,3,4), state(4,full,3,3), state(4,full,3,2), state(4,full,3,1), state(4,full,6,3), state(4,full,7,3), state(3,full,1,3), state(3,full,2,3), state(3,full,5,5), state(3,full,5,3), state(3,full,5,2), state(3,full,5,1), state(3,full,4,5), state(3,full,4,3), state(3,full,4,2), state(3,full,4,1), state(3,full,3,5), state(3,full,3,4), state(3,full,3,3), state(3,full,3,2), state(3,full,3,1), state(3,full,6,3), state(3,full,7,3), state(2,full,1,3), state(2,full,2,3), state(2,full,5,5), state(2,full,5,4), 
state(2,full,5,3), state(2,full,5,2), state(2,full,5,1), state(2,full,4,5), state(2,full,4,4), state(2,full,4,3), state(2,full,4,2), state(2,full,4,1), state(2,full,3,5), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,2,5), state(4,full,5,7), state(4,full,5,6), state(4,full,4,7), state(4,full,4,6), state(4,full,3,7), state(4,full,3,6), state(4,full,6,5), state(4,full,7,5), state(3,full,1,5), state(3,full,2,5), state(3,full,5,7), state(3,full,5,6), state(3,full,4,7), state(3,full,4,6), state(3,full,3,7), state(3,full,3,6), state(3,full,6,5), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,4,6), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,1,4), state(3,full,1,4), state(2,full,1,4), state(4,full,7,4), state(4,full,6,4), state(3,full,7,4), state(3,full,6,4), state(2,full,7,4), state(2,full,6,4), changed(4,5,4), changed(4,5,3), changed(4,5,2), changed(3,5,4), changed(3,4,4), changed(3,3,4), changed(2,2,4), changed(2,4,4), changed(2,3,4), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(3,up,4,6), possibleMove(3,up,5,6), possibleMove(2,up,3,6), possibleMove(1,up,4,6), possibleMove(3,down,4,2), possibleMove(3,down,5,2), possibleMove(2,down,3,2), possibleMove(1,down,4,2), 
possibleMove(3,left,7,4), possibleMove(2,left,5,4), possibleMove(1,left,6,4), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,up,4,6), move(2,left,5,4), move(1,right,2,4), state(4,empty,2,4), state(4,empty,5,4), state(4,empty,4,6), state(4,empty,4,5), state(3,empty,2,4), state(3,empty,5,4), state(3,empty,4,4), state(2,empty,2,4), state(2,empty,3,4), state(4,full,1,3), state(4,full,2,3), state(4,full,5,5), state(4,full,5,3), state(4,full,5,2), state(4,full,5,1), state(4,full,4,4), state(4,full,4,3), state(4,full,4,2), state(4,full,4,1), state(4,full,3,5), state(4,full,3,4), state(4,full,3,3), state(4,full,3,2), state(4,full,3,1), state(4,full,6,3), state(4,full,7,3), state(3,full,1,3), state(3,full,2,3), state(3,full,5,5), state(3,full,5,3), state(3,full,5,2), state(3,full,5,1), state(3,full,4,5), state(3,full,4,3), state(3,full,4,2), state(3,full,4,1), state(3,full,3,5), state(3,full,3,4), state(3,full,3,3), state(3,full,3,2), state(3,full,3,1), state(3,full,6,3), state(3,full,7,3), state(2,full,1,3), state(2,full,2,3), state(2,full,5,5), 
state(2,full,5,4), state(2,full,5,3), state(2,full,5,2), state(2,full,5,1), state(2,full,4,5), state(2,full,4,4), state(2,full,4,3), state(2,full,4,2), state(2,full,4,1), state(2,full,3,5), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,2,5), state(4,full,5,7), state(4,full,5,6), state(4,full,4,7), state(4,full,3,7), state(4,full,3,6), state(4,full,6,5), state(4,full,7,5), state(3,full,1,5), state(3,full,2,5), state(3,full,5,7), state(3,full,5,6), state(3,full,4,7), state(3,full,4,6), state(3,full,3,7), state(3,full,3,6), state(3,full,6,5), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,4,6), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,1,4), state(3,full,1,4), state(2,full,1,4), state(4,full,7,4), state(4,full,6,4), state(3,full,7,4), state(3,full,6,4), state(2,full,7,4), state(2,full,6,4), changed(4,4,6), changed(4,4,5), changed(4,4,4), changed(3,5,4), changed(3,4,4), changed(3,3,4), changed(2,2,4), changed(2,4,4), changed(2,3,4), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(3,up,4,6), possibleMove(3,up,5,6), possibleMove(2,up,3,6), possibleMove(1,up,4,6), possibleMove(3,down,4,2), possibleMove(3,down,5,2), possibleMove(2,down,3,2), possibleMove(1,down,4,2), 
possibleMove(3,left,7,4), possibleMove(2,left,5,4), possibleMove(1,left,6,4), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,up,5,6), move(2,left,5,4), move(1,right,2,4), state(4,empty,2,4), state(4,empty,5,6), state(4,empty,5,5), state(4,empty,4,4), state(3,empty,2,4), state(3,empty,5,4), state(3,empty,4,4), state(2,empty,2,4), state(2,empty,3,4), state(4,full,1,3), state(4,full,2,3), state(4,full,5,4), state(4,full,5,3), state(4,full,5,2), state(4,full,5,1), state(4,full,4,5), state(4,full,4,3), state(4,full,4,2), state(4,full,4,1), state(4,full,3,5), state(4,full,3,4), state(4,full,3,3), state(4,full,3,2), state(4,full,3,1), state(4,full,6,3), state(4,full,7,3), state(3,full,1,3), state(3,full,2,3), state(3,full,5,5), state(3,full,5,3), state(3,full,5,2), state(3,full,5,1), state(3,full,4,5), state(3,full,4,3), state(3,full,4,2), state(3,full,4,1), state(3,full,3,5), state(3,full,3,4), state(3,full,3,3), state(3,full,3,2), state(3,full,3,1), state(3,full,6,3), state(3,full,7,3), state(2,full,1,3), state(2,full,2,3), state(2,full,5,5), 
state(2,full,5,4), state(2,full,5,3), state(2,full,5,2), state(2,full,5,1), state(2,full,4,5), state(2,full,4,4), state(2,full,4,3), state(2,full,4,2), state(2,full,4,1), state(2,full,3,5), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,2,5), state(4,full,5,7), state(4,full,4,7), state(4,full,4,6), state(4,full,3,7), state(4,full,3,6), state(4,full,6,5), state(4,full,7,5), state(3,full,1,5), state(3,full,2,5), state(3,full,5,7), state(3,full,5,6), state(3,full,4,7), state(3,full,4,6), state(3,full,3,7), state(3,full,3,6), state(3,full,6,5), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,4,6), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,1,4), state(3,full,1,4), state(2,full,1,4), state(4,full,7,4), state(4,full,6,4), state(3,full,7,4), state(3,full,6,4), state(2,full,7,4), state(2,full,6,4), changed(4,5,6), changed(4,5,5), changed(4,5,4), changed(3,5,4), changed(3,4,4), changed(3,3,4), changed(2,2,4), changed(2,4,4), changed(2,3,4), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(3,up,4,6), possibleMove(3,up,5,6), possibleMove(2,up,3,6), possibleMove(1,up,4,6), possibleMove(3,down,4,2), possibleMove(3,down,5,2), possibleMove(2,down,3,2), possibleMove(1,down,4,2), 
possibleMove(3,left,7,4), possibleMove(2,left,5,4), possibleMove(1,left,6,4), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,up,3,6), move(2,right,3,4), move(1,left,6,4), state(4,empty,4,4), state(4,empty,3,6), state(4,empty,3,5), state(4,empty,6,4), state(3,empty,4,4), state(3,empty,3,4), state(3,empty,6,4), state(2,empty,5,4), state(2,empty,6,4), state(4,full,1,3), state(4,full,2,3), state(4,full,5,5), state(4,full,5,4), state(4,full,5,3), state(4,full,5,2), state(4,full,5,1), state(4,full,4,5), state(4,full,4,3), state(4,full,4,2), state(4,full,4,1), state(4,full,3,4), state(4,full,3,3), state(4,full,3,2), state(4,full,3,1), state(4,full,6,3), state(4,full,7,3), state(3,full,1,3), state(3,full,2,3), state(3,full,5,5), state(3,full,5,4), state(3,full,5,3), state(3,full,5,2), state(3,full,5,1), state(3,full,4,5), state(3,full,4,3), state(3,full,4,2), state(3,full,4,1), state(3,full,3,5), state(3,full,3,3), state(3,full,3,2), state(3,full,3,1), state(3,full,6,3), state(3,full,7,3), state(2,full,1,3), state(2,full,2,3), state(2,full,5,5), 
state(2,full,5,3), state(2,full,5,2), state(2,full,5,1), state(2,full,4,5), state(2,full,4,4), state(2,full,4,3), state(2,full,4,2), state(2,full,4,1), state(2,full,3,5), state(2,full,3,4), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,2,5), state(4,full,5,7), state(4,full,5,6), state(4,full,4,7), state(4,full,4,6), state(4,full,3,7), state(4,full,6,5), state(4,full,7,5), state(3,full,1,5), state(3,full,2,5), state(3,full,5,7), state(3,full,5,6), state(3,full,4,7), state(3,full,4,6), state(3,full,3,7), state(3,full,3,6), state(3,full,6,5), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,4,6), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,2,4), state(4,full,1,4), state(3,full,2,4), state(3,full,1,4), state(2,full,2,4), state(2,full,1,4), state(4,full,7,4), state(3,full,7,4), state(2,full,7,4), changed(4,3,6), changed(4,3,5), changed(4,3,4), changed(3,5,4), changed(3,4,4), changed(3,3,4), changed(2,5,4), changed(2,4,4), changed(2,6,4), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(3,up,3,6), possibleMove(3,up,4,6), possibleMove(2,up,5,6), possibleMove(1,up,4,6), possibleMove(3,down,3,2), possibleMove(3,down,4,2), possibleMove(2,down,5,2), possibleMove(1,down,4,2), 
possibleMove(1,left,6,4), possibleMove(3,right,1,4), possibleMove(2,right,3,4), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,up,3,5), move(2,right,2,3), move(1,down,4,2), state(4,empty,2,3), state(4,empty,4,2), state(4,empty,3,5), state(4,empty,3,4), state(3,empty,2,3), state(3,empty,4,2), state(3,empty,3,3), state(2,empty,4,3), state(2,empty,4,2), state(4,full,1,3), state(4,full,5,5), state(4,full,5,4), state(4,full,5,3), state(4,full,5,2), state(4,full,5,1), state(4,full,4,5), state(4,full,4,4), state(4,full,4,3), state(4,full,4,1), state(4,full,3,3), state(4,full,3,2), state(4,full,3,1), state(4,full,6,3), state(4,full,7,3), state(3,full,1,3), state(3,full,5,5), state(3,full,5,4), state(3,full,5,3), state(3,full,5,2), state(3,full,5,1), state(3,full,4,5), state(3,full,4,4), state(3,full,4,3), state(3,full,4,1), state(3,full,3,5), state(3,full,3,4), state(3,full,3,2), state(3,full,3,1), state(3,full,6,3), state(3,full,7,3), state(2,full,1,3), state(2,full,2,3), state(2,full,5,5), state(2,full,5,4), state(2,full,5,3), state(2,full,5,2), 
state(2,full,5,1), state(2,full,4,5), state(2,full,4,4), state(2,full,4,1), state(2,full,3,5), state(2,full,3,4), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,2,5), state(4,full,5,7), state(4,full,5,6), state(4,full,4,7), state(4,full,4,6), state(4,full,3,7), state(4,full,3,6), state(4,full,6,5), state(4,full,7,5), state(3,full,1,5), state(3,full,2,5), state(3,full,5,7), state(3,full,5,6), state(3,full,4,7), state(3,full,4,6), state(3,full,3,7), state(3,full,3,6), state(3,full,6,5), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,4,6), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,2,4), state(4,full,1,4), state(3,full,2,4), state(3,full,1,4), state(2,full,2,4), state(2,full,1,4), state(4,full,7,4), state(4,full,6,4), state(3,full,7,4), state(3,full,6,4), state(2,full,7,4), state(2,full,6,4), changed(4,3,5), changed(4,3,4), changed(4,3,3), changed(3,2,3), changed(3,4,3), changed(3,3,3), changed(2,4,4), changed(2,4,3), changed(2,4,2), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(3,up,3,5), possibleMove(3,up,4,4), possibleMove(3,up,2,5), possibleMove(2,up,4,5), possibleMove(1,up,4,6), possibleMove(3,down,3,1), possibleMove(1,down,4,2), possibleMove(3,left,5,3), 
possibleMove(2,left,6,3), possibleMove(1,left,6,4), possibleMove(2,right,2,3), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,down,3,3), move(2,right,2,5), move(1,up,4,6), state(4,empty,2,5), state(4,empty,4,6), state(4,empty,3,4), state(4,empty,3,3), state(3,empty,2,5), state(3,empty,4,6), state(3,empty,3,5), state(2,empty,4,6), state(2,empty,4,5), state(4,full,1,3), state(4,full,2,3), state(4,full,5,5), state(4,full,5,4), state(4,full,5,3), state(4,full,5,2), state(4,full,5,1), state(4,full,4,5), state(4,full,4,4), state(4,full,4,3), state(4,full,4,2), state(4,full,4,1), state(4,full,3,5), state(4,full,3,2), state(4,full,3,1), state(4,full,6,3), state(4,full,7,3), state(3,full,1,3), state(3,full,2,3), state(3,full,5,5), state(3,full,5,4), state(3,full,5,3), state(3,full,5,2), state(3,full,5,1), state(3,full,4,5), state(3,full,4,4), state(3,full,4,3), state(3,full,4,2), state(3,full,4,1), state(3,full,3,4), state(3,full,3,3), state(3,full,3,2), state(3,full,3,1), state(3,full,6,3), state(3,full,7,3), state(2,full,1,3), state(2,full,2,3), 
state(2,full,5,5), state(2,full,5,4), state(2,full,5,3), state(2,full,5,2), state(2,full,5,1), state(2,full,4,4), state(2,full,4,3), state(2,full,4,2), state(2,full,4,1), state(2,full,3,5), state(2,full,3,4), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,5,7), state(4,full,5,6), state(4,full,4,7), state(4,full,3,7), state(4,full,3,6), state(4,full,6,5), state(4,full,7,5), state(3,full,1,5), state(3,full,5,7), state(3,full,5,6), state(3,full,4,7), state(3,full,3,7), state(3,full,3,6), state(3,full,6,5), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,2,4), state(4,full,1,4), state(3,full,2,4), state(3,full,1,4), state(2,full,2,4), state(2,full,1,4), state(4,full,7,4), state(4,full,6,4), state(3,full,7,4), state(3,full,6,4), state(2,full,7,4), state(2,full,6,4), changed(4,3,5), changed(4,3,4), changed(4,3,3), changed(3,2,5), changed(3,4,5), changed(3,3,5), changed(2,4,6), changed(2,4,5), changed(2,4,4), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(3,up,3,7), possibleMove(1,up,4,6), possibleMove(3,down,3,3), possibleMove(3,down,4,4), possibleMove(3,down,2,3), possibleMove(2,down,4,3), possibleMove(1,down,4,2), possibleMove(3,left,5,5), 
possibleMove(2,left,6,5), possibleMove(1,left,6,4), possibleMove(2,right,2,5), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,right,2,4), move(2,down,4,3), move(1,up,4,6), state(4,empty,2,4), state(4,empty,4,6), state(4,empty,4,3), state(4,empty,3,4), state(3,empty,4,6), state(3,empty,4,4), state(3,empty,4,3), state(2,empty,4,6), state(2,empty,4,5), state(4,full,1,3), state(4,full,2,3), state(4,full,5,5), state(4,full,5,4), state(4,full,5,3), state(4,full,5,2), state(4,full,5,1), state(4,full,4,5), state(4,full,4,4), state(4,full,4,2), state(4,full,4,1), state(4,full,3,5), state(4,full,3,3), state(4,full,3,2), state(4,full,3,1), state(4,full,6,3), state(4,full,7,3), state(3,full,1,3), state(3,full,2,3), state(3,full,5,5), state(3,full,5,4), state(3,full,5,3), state(3,full,5,2), state(3,full,5,1), state(3,full,4,5), state(3,full,4,2), state(3,full,4,1), state(3,full,3,5), state(3,full,3,4), state(3,full,3,3), state(3,full,3,2), state(3,full,3,1), state(3,full,6,3), state(3,full,7,3), state(2,full,1,3), state(2,full,2,3), state(2,full,5,5), 
state(2,full,5,4), state(2,full,5,3), state(2,full,5,2), state(2,full,5,1), state(2,full,4,4), state(2,full,4,3), state(2,full,4,2), state(2,full,4,1), state(2,full,3,5), state(2,full,3,4), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,2,5), state(4,full,5,7), state(4,full,5,6), state(4,full,4,7), state(4,full,3,7), state(4,full,3,6), state(4,full,6,5), state(4,full,7,5), state(3,full,1,5), state(3,full,2,5), state(3,full,5,7), state(3,full,5,6), state(3,full,4,7), state(3,full,3,7), state(3,full,3,6), state(3,full,6,5), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,1,4), state(3,full,2,4), state(3,full,1,4), state(2,full,2,4), state(2,full,1,4), state(4,full,7,4), state(4,full,6,4), state(3,full,7,4), state(3,full,6,4), state(2,full,7,4), state(2,full,6,4), changed(4,2,4), changed(4,4,4), changed(4,3,4), changed(3,4,5), changed(3,4,4), changed(3,4,3), changed(2,4,6), changed(2,4,5), changed(2,4,4), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(1,up,4,6), possibleMove(3,down,4,1), possibleMove(2,down,4,3), possibleMove(1,down,4,2), possibleMove(3,left,6,3), possibleMove(3,left,6,4), possibleMove(2,left,6,5), possibleMove(1,left,6,4), 
possibleMove(3,right,2,3), possibleMove(3,right,2,4), possibleMove(2,right,2,5), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,down,6,3), move(2,left,6,5), move(1,up,4,6), state(4,empty,5,5), state(4,empty,4,6), state(4,empty,6,4), state(4,empty,6,3), state(3,empty,5,5), state(3,empty,4,6), state(3,empty,6,5), state(2,empty,4,6), state(2,empty,4,5), state(4,full,1,3), state(4,full,2,3), state(4,full,5,4), state(4,full,5,3), state(4,full,5,2), state(4,full,5,1), state(4,full,4,5), state(4,full,4,4), state(4,full,4,3), state(4,full,4,2), state(4,full,4,1), state(4,full,3,5), state(4,full,3,4), state(4,full,3,3), state(4,full,3,2), state(4,full,3,1), state(4,full,7,3), state(3,full,1,3), state(3,full,2,3), state(3,full,5,4), state(3,full,5,3), state(3,full,5,2), state(3,full,5,1), state(3,full,4,5), state(3,full,4,4), state(3,full,4,3), state(3,full,4,2), state(3,full,4,1), state(3,full,3,5), state(3,full,3,4), state(3,full,3,3), state(3,full,3,2), state(3,full,3,1), state(3,full,6,3), state(3,full,7,3), state(2,full,1,3), state(2,full,2,3), 
state(2,full,5,5), state(2,full,5,4), state(2,full,5,3), state(2,full,5,2), state(2,full,5,1), state(2,full,4,4), state(2,full,4,3), state(2,full,4,2), state(2,full,4,1), state(2,full,3,5), state(2,full,3,4), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,2,5), state(4,full,5,7), state(4,full,5,6), state(4,full,4,7), state(4,full,3,7), state(4,full,3,6), state(4,full,6,5), state(4,full,7,5), state(3,full,1,5), state(3,full,2,5), state(3,full,5,7), state(3,full,5,6), state(3,full,4,7), state(3,full,3,7), state(3,full,3,6), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,2,4), state(4,full,1,4), state(3,full,2,4), state(3,full,1,4), state(2,full,2,4), state(2,full,1,4), state(4,full,7,4), state(3,full,7,4), state(3,full,6,4), state(2,full,7,4), state(2,full,6,4), changed(4,6,5), changed(4,6,4), changed(4,6,3), changed(3,5,5), changed(3,4,5), changed(3,6,5), changed(2,4,6), changed(2,4,5), changed(2,4,4), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(3,up,5,7), possibleMove(1,up,4,6), possibleMove(3,down,6,3), possibleMove(3,down,4,4), possibleMove(3,down,5,3), possibleMove(2,down,4,3), possibleMove(1,down,4,2), possibleMove(2,left,6,5), 
possibleMove(1,left,6,4), possibleMove(3,right,3,5), possibleMove(2,right,2,5), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,down,4,4), move(2,left,6,5), move(1,up,4,6), state(4,empty,5,5), state(4,empty,4,5), state(4,empty,4,4), state(4,empty,6,5), state(3,empty,5,5), state(3,empty,4,6), state(3,empty,6,5), state(2,empty,4,6), state(2,empty,4,5), state(4,full,1,3), state(4,full,2,3), state(4,full,5,4), state(4,full,5,3), state(4,full,5,2), state(4,full,5,1), state(4,full,4,3), state(4,full,4,2), state(4,full,4,1), state(4,full,3,5), state(4,full,3,4), state(4,full,3,3), state(4,full,3,2), state(4,full,3,1), state(4,full,6,3), state(4,full,7,3), state(3,full,1,3), state(3,full,2,3), state(3,full,5,4), state(3,full,5,3), state(3,full,5,2), state(3,full,5,1), state(3,full,4,5), state(3,full,4,4), state(3,full,4,3), state(3,full,4,2), state(3,full,4,1), state(3,full,3,5), state(3,full,3,4), state(3,full,3,3), state(3,full,3,2), state(3,full,3,1), state(3,full,6,3), state(3,full,7,3), state(2,full,1,3), state(2,full,2,3), state(2,full,5,5), 
state(2,full,5,4), state(2,full,5,3), state(2,full,5,2), state(2,full,5,1), state(2,full,4,4), state(2,full,4,3), state(2,full,4,2), state(2,full,4,1), state(2,full,3,5), state(2,full,3,4), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,2,5), state(4,full,5,7), state(4,full,5,6), state(4,full,4,7), state(4,full,4,6), state(4,full,3,7), state(4,full,3,6), state(4,full,7,5), state(3,full,1,5), state(3,full,2,5), state(3,full,5,7), state(3,full,5,6), state(3,full,4,7), state(3,full,3,7), state(3,full,3,6), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,2,4), state(4,full,1,4), state(3,full,2,4), state(3,full,1,4), state(2,full,2,4), state(2,full,1,4), state(4,full,7,4), state(4,full,6,4), state(3,full,7,4), state(3,full,6,4), state(2,full,7,4), state(2,full,6,4), changed(4,4,6), changed(4,4,5), changed(4,4,4), changed(3,5,5), changed(3,4,5), changed(3,6,5), changed(2,4,6), changed(2,4,5), changed(2,4,4), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(3,up,5,7), possibleMove(1,up,4,6), possibleMove(3,down,6,3), possibleMove(3,down,4,4), possibleMove(3,down,5,3), possibleMove(2,down,4,3), possibleMove(1,down,4,2), possibleMove(2,left,6,5), 
possibleMove(1,left,6,4), possibleMove(3,right,3,5), possibleMove(2,right,2,5), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,down,5,3), move(2,left,6,5), move(1,up,4,6), state(4,empty,5,4), state(4,empty,5,3), state(4,empty,4,6), state(4,empty,6,5), state(3,empty,5,5), state(3,empty,4,6), state(3,empty,6,5), state(2,empty,4,6), state(2,empty,4,5), state(4,full,1,3), state(4,full,2,3), state(4,full,5,5), state(4,full,5,2), state(4,full,5,1), state(4,full,4,5), state(4,full,4,4), state(4,full,4,3), state(4,full,4,2), state(4,full,4,1), state(4,full,3,5), state(4,full,3,4), state(4,full,3,3), state(4,full,3,2), state(4,full,3,1), state(4,full,6,3), state(4,full,7,3), state(3,full,1,3), state(3,full,2,3), state(3,full,5,4), state(3,full,5,3), state(3,full,5,2), state(3,full,5,1), state(3,full,4,5), state(3,full,4,4), state(3,full,4,3), state(3,full,4,2), state(3,full,4,1), state(3,full,3,5), state(3,full,3,4), state(3,full,3,3), state(3,full,3,2), state(3,full,3,1), state(3,full,6,3), state(3,full,7,3), state(2,full,1,3), state(2,full,2,3), 
state(2,full,5,5), state(2,full,5,4), state(2,full,5,3), state(2,full,5,2), state(2,full,5,1), state(2,full,4,4), state(2,full,4,3), state(2,full,4,2), state(2,full,4,1), state(2,full,3,5), state(2,full,3,4), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,2,5), state(4,full,5,7), state(4,full,5,6), state(4,full,4,7), state(4,full,3,7), state(4,full,3,6), state(4,full,7,5), state(3,full,1,5), state(3,full,2,5), state(3,full,5,7), state(3,full,5,6), state(3,full,4,7), state(3,full,3,7), state(3,full,3,6), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,2,4), state(4,full,1,4), state(3,full,2,4), state(3,full,1,4), state(2,full,2,4), state(2,full,1,4), state(4,full,7,4), state(4,full,6,4), state(3,full,7,4), state(3,full,6,4), state(2,full,7,4), state(2,full,6,4), changed(4,5,5), changed(4,5,4), changed(4,5,3), changed(3,5,5), changed(3,4,5), changed(3,6,5), changed(2,4,6), changed(2,4,5), changed(2,4,4), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(3,up,5,7), possibleMove(1,up,4,6), possibleMove(3,down,6,3), possibleMove(3,down,4,4), possibleMove(3,down,5,3), possibleMove(2,down,4,3), possibleMove(1,down,4,2), possibleMove(2,left,6,5), 
possibleMove(1,left,6,4), possibleMove(3,right,3,5), possibleMove(2,right,2,5), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,up,5,7), move(2,left,6,5), move(1,up,4,6), state(4,empty,5,7), state(4,empty,5,6), state(4,empty,4,6), state(4,empty,6,5), state(3,empty,5,5), state(3,empty,4,6), state(3,empty,6,5), state(2,empty,4,6), state(2,empty,4,5), state(4,full,1,3), state(4,full,2,3), state(4,full,5,5), state(4,full,5,4), state(4,full,5,3), state(4,full,5,2), state(4,full,5,1), state(4,full,4,5), state(4,full,4,4), state(4,full,4,3), state(4,full,4,2), state(4,full,4,1), state(4,full,3,5), state(4,full,3,4), state(4,full,3,3), state(4,full,3,2), state(4,full,3,1), state(4,full,6,3), state(4,full,7,3), state(3,full,1,3), state(3,full,2,3), state(3,full,5,4), state(3,full,5,3), state(3,full,5,2), state(3,full,5,1), state(3,full,4,5), state(3,full,4,4), state(3,full,4,3), state(3,full,4,2), state(3,full,4,1), state(3,full,3,5), state(3,full,3,4), state(3,full,3,3), state(3,full,3,2), state(3,full,3,1), state(3,full,6,3), state(3,full,7,3), 
state(2,full,1,3), state(2,full,2,3), state(2,full,5,5), state(2,full,5,4), state(2,full,5,3), state(2,full,5,2), state(2,full,5,1), state(2,full,4,4), state(2,full,4,3), state(2,full,4,2), state(2,full,4,1), state(2,full,3,5), state(2,full,3,4), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,2,5), state(4,full,4,7), state(4,full,3,7), state(4,full,3,6), state(4,full,7,5), state(3,full,1,5), state(3,full,2,5), state(3,full,5,7), state(3,full,5,6), state(3,full,4,7), state(3,full,3,7), state(3,full,3,6), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,2,4), state(4,full,1,4), state(3,full,2,4), state(3,full,1,4), state(2,full,2,4), state(2,full,1,4), state(4,full,7,4), state(4,full,6,4), state(3,full,7,4), state(3,full,6,4), state(2,full,7,4), state(2,full,6,4), changed(4,5,7), changed(4,5,6), changed(4,5,5), changed(3,5,5), changed(3,4,5), changed(3,6,5), changed(2,4,6), changed(2,4,5), changed(2,4,4), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(3,up,5,7), possibleMove(1,up,4,6), possibleMove(3,down,6,3), possibleMove(3,down,4,4), possibleMove(3,down,5,3), possibleMove(2,down,4,3), possibleMove(1,down,4,2), possibleMove(2,left,6,5), 
possibleMove(1,left,6,4), possibleMove(3,right,3,5), possibleMove(2,right,2,5), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,right,2,3), move(2,down,4,3), move(1,up,4,6), state(4,empty,2,3), state(4,empty,4,6), state(4,empty,4,4), state(4,empty,3,3), state(3,empty,4,6), state(3,empty,4,4), state(3,empty,4,3), state(2,empty,4,6), state(2,empty,4,5), state(4,full,1,3), state(4,full,5,5), state(4,full,5,4), state(4,full,5,3), state(4,full,5,2), state(4,full,5,1), state(4,full,4,5), state(4,full,4,3), state(4,full,4,2), state(4,full,4,1), state(4,full,3,5), state(4,full,3,4), state(4,full,3,2), state(4,full,3,1), state(4,full,6,3), state(4,full,7,3), state(3,full,1,3), state(3,full,2,3), state(3,full,5,5), state(3,full,5,4), state(3,full,5,3), state(3,full,5,2), state(3,full,5,1), state(3,full,4,5), state(3,full,4,2), state(3,full,4,1), state(3,full,3,5), state(3,full,3,4), state(3,full,3,3), state(3,full,3,2), state(3,full,3,1), state(3,full,6,3), state(3,full,7,3), state(2,full,1,3), state(2,full,2,3), state(2,full,5,5), state(2,full,5,4), 
state(2,full,5,3), state(2,full,5,2), state(2,full,5,1), state(2,full,4,4), state(2,full,4,3), state(2,full,4,2), state(2,full,4,1), state(2,full,3,5), state(2,full,3,4), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,2,5), state(4,full,5,7), state(4,full,5,6), state(4,full,4,7), state(4,full,3,7), state(4,full,3,6), state(4,full,6,5), state(4,full,7,5), state(3,full,1,5), state(3,full,2,5), state(3,full,5,7), state(3,full,5,6), state(3,full,4,7), state(3,full,3,7), state(3,full,3,6), state(3,full,6,5), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,2,4), state(4,full,1,4), state(3,full,2,4), state(3,full,1,4), state(2,full,2,4), state(2,full,1,4), state(4,full,7,4), state(4,full,6,4), state(3,full,7,4), state(3,full,6,4), state(2,full,7,4), state(2,full,6,4), changed(4,2,3), changed(4,4,3), changed(4,3,3), changed(3,4,5), changed(3,4,4), changed(3,4,3), changed(2,4,6), changed(2,4,5), changed(2,4,4), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(1,up,4,6), possibleMove(3,down,4,1), possibleMove(2,down,4,3), possibleMove(1,down,4,2), possibleMove(3,left,6,3), possibleMove(3,left,6,4), possibleMove(2,left,6,5), possibleMove(1,left,6,4), 
possibleMove(3,right,2,3), possibleMove(3,right,2,4), possibleMove(2,right,2,5), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,left,6,3), move(2,down,4,3), move(1,up,4,6), state(4,empty,5,3), state(4,empty,4,6), state(4,empty,4,4), state(4,empty,6,3), state(3,empty,4,6), state(3,empty,4,4), state(3,empty,4,3), state(2,empty,4,6), state(2,empty,4,5), state(4,full,1,3), state(4,full,2,3), state(4,full,5,5), state(4,full,5,4), state(4,full,5,2), state(4,full,5,1), state(4,full,4,5), state(4,full,4,3), state(4,full,4,2), state(4,full,4,1), state(4,full,3,5), state(4,full,3,4), state(4,full,3,3), state(4,full,3,2), state(4,full,3,1), state(4,full,7,3), state(3,full,1,3), state(3,full,2,3), state(3,full,5,5), state(3,full,5,4), state(3,full,5,3), state(3,full,5,2), state(3,full,5,1), state(3,full,4,5), state(3,full,4,2), state(3,full,4,1), state(3,full,3,5), state(3,full,3,4), state(3,full,3,3), state(3,full,3,2), state(3,full,3,1), state(3,full,6,3), state(3,full,7,3), state(2,full,1,3), state(2,full,2,3), state(2,full,5,5), state(2,full,5,4), 
state(2,full,5,3), state(2,full,5,2), state(2,full,5,1), state(2,full,4,4), state(2,full,4,3), state(2,full,4,2), state(2,full,4,1), state(2,full,3,5), state(2,full,3,4), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,2,5), state(4,full,5,7), state(4,full,5,6), state(4,full,4,7), state(4,full,3,7), state(4,full,3,6), state(4,full,6,5), state(4,full,7,5), state(3,full,1,5), state(3,full,2,5), state(3,full,5,7), state(3,full,5,6), state(3,full,4,7), state(3,full,3,7), state(3,full,3,6), state(3,full,6,5), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,2,4), state(4,full,1,4), state(3,full,2,4), state(3,full,1,4), state(2,full,2,4), state(2,full,1,4), state(4,full,7,4), state(4,full,6,4), state(3,full,7,4), state(3,full,6,4), state(2,full,7,4), state(2,full,6,4), changed(4,5,3), changed(4,4,3), changed(4,6,3), changed(3,4,5), changed(3,4,4), changed(3,4,3), changed(2,4,6), changed(2,4,5), changed(2,4,4), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(1,up,4,6), possibleMove(3,down,4,1), possibleMove(2,down,4,3), possibleMove(1,down,4,2), possibleMove(3,left,6,3), possibleMove(3,left,6,4), possibleMove(2,left,6,5), possibleMove(1,left,6,4), 
possibleMove(3,right,2,3), possibleMove(3,right,2,4), possibleMove(2,right,2,5), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,left,6,4), move(2,down,4,3), move(1,up,4,6), state(4,empty,5,4), state(4,empty,4,6), state(4,empty,4,3), state(4,empty,6,4), state(3,empty,4,6), state(3,empty,4,4), state(3,empty,4,3), state(2,empty,4,6), state(2,empty,4,5), state(4,full,1,3), state(4,full,2,3), state(4,full,5,5), state(4,full,5,3), state(4,full,5,2), state(4,full,5,1), state(4,full,4,5), state(4,full,4,4), state(4,full,4,2), state(4,full,4,1), state(4,full,3,5), state(4,full,3,4), state(4,full,3,3), state(4,full,3,2), state(4,full,3,1), state(4,full,6,3), state(4,full,7,3), state(3,full,1,3), state(3,full,2,3), state(3,full,5,5), state(3,full,5,4), state(3,full,5,3), state(3,full,5,2), state(3,full,5,1), state(3,full,4,5), state(3,full,4,2), state(3,full,4,1), state(3,full,3,5), state(3,full,3,4), state(3,full,3,3), state(3,full,3,2), state(3,full,3,1), state(3,full,6,3), state(3,full,7,3), state(2,full,1,3), state(2,full,2,3), state(2,full,5,5), 
state(2,full,5,4), state(2,full,5,3), state(2,full,5,2), state(2,full,5,1), state(2,full,4,4), state(2,full,4,3), state(2,full,4,2), state(2,full,4,1), state(2,full,3,5), state(2,full,3,4), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,2,5), state(4,full,5,7), state(4,full,5,6), state(4,full,4,7), state(4,full,3,7), state(4,full,3,6), state(4,full,6,5), state(4,full,7,5), state(3,full,1,5), state(3,full,2,5), state(3,full,5,7), state(3,full,5,6), state(3,full,4,7), state(3,full,3,7), state(3,full,3,6), state(3,full,6,5), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,2,4), state(4,full,1,4), state(3,full,2,4), state(3,full,1,4), state(2,full,2,4), state(2,full,1,4), state(4,full,7,4), state(3,full,7,4), state(3,full,6,4), state(2,full,7,4), state(2,full,6,4), changed(4,5,4), changed(4,4,4), changed(4,6,4), changed(3,4,5), changed(3,4,4), changed(3,4,3), changed(2,4,6), changed(2,4,5), changed(2,4,4), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(1,up,4,6), possibleMove(3,down,4,1), possibleMove(2,down,4,3), possibleMove(1,down,4,2), possibleMove(3,left,6,3), possibleMove(3,left,6,4), possibleMove(2,left,6,5), possibleMove(1,left,6,4), 
possibleMove(3,right,2,3), possibleMove(3,right,2,4), possibleMove(2,right,2,5), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,down,4,1), move(2,down,4,3), move(1,up,4,6), state(4,empty,4,6), state(4,empty,4,4), state(4,empty,4,2), state(4,empty,4,1), state(3,empty,4,6), state(3,empty,4,4), state(3,empty,4,3), state(2,empty,4,6), state(2,empty,4,5), state(4,full,1,3), state(4,full,2,3), state(4,full,5,5), state(4,full,5,4), state(4,full,5,3), state(4,full,5,2), state(4,full,5,1), state(4,full,4,5), state(4,full,4,3), state(4,full,3,5), state(4,full,3,4), state(4,full,3,3), state(4,full,3,2), state(4,full,3,1), state(4,full,6,3), state(4,full,7,3), state(3,full,1,3), state(3,full,2,3), state(3,full,5,5), state(3,full,5,4), state(3,full,5,3), state(3,full,5,2), state(3,full,5,1), state(3,full,4,5), state(3,full,4,2), state(3,full,4,1), state(3,full,3,5), state(3,full,3,4), state(3,full,3,3), state(3,full,3,2), state(3,full,3,1), state(3,full,6,3), state(3,full,7,3), state(2,full,1,3), state(2,full,2,3), state(2,full,5,5), state(2,full,5,4), 
state(2,full,5,3), state(2,full,5,2), state(2,full,5,1), state(2,full,4,4), state(2,full,4,3), state(2,full,4,2), state(2,full,4,1), state(2,full,3,5), state(2,full,3,4), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,2,5), state(4,full,5,7), state(4,full,5,6), state(4,full,4,7), state(4,full,3,7), state(4,full,3,6), state(4,full,6,5), state(4,full,7,5), state(3,full,1,5), state(3,full,2,5), state(3,full,5,7), state(3,full,5,6), state(3,full,4,7), state(3,full,3,7), state(3,full,3,6), state(3,full,6,5), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,2,4), state(4,full,1,4), state(3,full,2,4), state(3,full,1,4), state(2,full,2,4), state(2,full,1,4), state(4,full,7,4), state(4,full,6,4), state(3,full,7,4), state(3,full,6,4), state(2,full,7,4), state(2,full,6,4), changed(4,4,3), changed(4,4,2), changed(4,4,1), changed(3,4,5), changed(3,4,4), changed(3,4,3), changed(2,4,6), changed(2,4,5), changed(2,4,4), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(1,up,4,6), possibleMove(3,down,4,1), possibleMove(2,down,4,3), possibleMove(1,down,4,2), possibleMove(3,left,6,3), possibleMove(3,left,6,4), possibleMove(2,left,6,5), possibleMove(1,left,6,4), 
possibleMove(3,right,2,3), possibleMove(3,right,2,4), possibleMove(2,right,2,5), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,down,4,4), move(2,right,2,5), move(1,up,4,6), state(4,empty,2,5), state(4,empty,4,5), state(4,empty,4,4), state(4,empty,3,5), state(3,empty,2,5), state(3,empty,4,6), state(3,empty,3,5), state(2,empty,4,6), state(2,empty,4,5), state(4,full,1,3), state(4,full,2,3), state(4,full,5,5), state(4,full,5,4), state(4,full,5,3), state(4,full,5,2), state(4,full,5,1), state(4,full,4,3), state(4,full,4,2), state(4,full,4,1), state(4,full,3,4), state(4,full,3,3), state(4,full,3,2), state(4,full,3,1), state(4,full,6,3), state(4,full,7,3), state(3,full,1,3), state(3,full,2,3), state(3,full,5,5), state(3,full,5,4), state(3,full,5,3), state(3,full,5,2), state(3,full,5,1), state(3,full,4,5), state(3,full,4,4), state(3,full,4,3), state(3,full,4,2), state(3,full,4,1), state(3,full,3,4), state(3,full,3,3), state(3,full,3,2), state(3,full,3,1), state(3,full,6,3), state(3,full,7,3), state(2,full,1,3), state(2,full,2,3), state(2,full,5,5), 
state(2,full,5,4), state(2,full,5,3), state(2,full,5,2), state(2,full,5,1), state(2,full,4,4), state(2,full,4,3), state(2,full,4,2), state(2,full,4,1), state(2,full,3,5), state(2,full,3,4), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,5,7), state(4,full,5,6), state(4,full,4,7), state(4,full,4,6), state(4,full,3,7), state(4,full,3,6), state(4,full,6,5), state(4,full,7,5), state(3,full,1,5), state(3,full,5,7), state(3,full,5,6), state(3,full,4,7), state(3,full,3,7), state(3,full,3,6), state(3,full,6,5), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,2,4), state(4,full,1,4), state(3,full,2,4), state(3,full,1,4), state(2,full,2,4), state(2,full,1,4), state(4,full,7,4), state(4,full,6,4), state(3,full,7,4), state(3,full,6,4), state(2,full,7,4), state(2,full,6,4), changed(4,4,6), changed(4,4,5), changed(4,4,4), changed(3,2,5), changed(3,4,5), changed(3,3,5), changed(2,4,6), changed(2,4,5), changed(2,4,4), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(3,up,3,7), possibleMove(1,up,4,6), possibleMove(3,down,3,3), possibleMove(3,down,4,4), possibleMove(3,down,2,3), possibleMove(2,down,4,3), possibleMove(1,down,4,2), possibleMove(3,left,5,5), 
possibleMove(2,left,6,5), possibleMove(1,left,6,4), possibleMove(2,right,2,5), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,down,2,3), move(2,right,2,5), move(1,up,4,6), state(4,empty,2,4), state(4,empty,2,3), state(4,empty,4,6), state(4,empty,3,5), state(3,empty,2,5), state(3,empty,4,6), state(3,empty,3,5), state(2,empty,4,6), state(2,empty,4,5), state(4,full,1,3), state(4,full,5,5), state(4,full,5,4), state(4,full,5,3), state(4,full,5,2), state(4,full,5,1), state(4,full,4,5), state(4,full,4,4), state(4,full,4,3), state(4,full,4,2), state(4,full,4,1), state(4,full,3,4), state(4,full,3,3), state(4,full,3,2), state(4,full,3,1), state(4,full,6,3), state(4,full,7,3), state(3,full,1,3), state(3,full,2,3), state(3,full,5,5), state(3,full,5,4), state(3,full,5,3), state(3,full,5,2), state(3,full,5,1), state(3,full,4,5), state(3,full,4,4), state(3,full,4,3), state(3,full,4,2), state(3,full,4,1), state(3,full,3,4), state(3,full,3,3), state(3,full,3,2), state(3,full,3,1), state(3,full,6,3), state(3,full,7,3), state(2,full,1,3), state(2,full,2,3), 
state(2,full,5,5), state(2,full,5,4), state(2,full,5,3), state(2,full,5,2), state(2,full,5,1), state(2,full,4,4), state(2,full,4,3), state(2,full,4,2), state(2,full,4,1), state(2,full,3,5), state(2,full,3,4), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,2,5), state(4,full,5,7), state(4,full,5,6), state(4,full,4,7), state(4,full,3,7), state(4,full,3,6), state(4,full,6,5), state(4,full,7,5), state(3,full,1,5), state(3,full,5,7), state(3,full,5,6), state(3,full,4,7), state(3,full,3,7), state(3,full,3,6), state(3,full,6,5), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,1,4), state(3,full,2,4), state(3,full,1,4), state(2,full,2,4), state(2,full,1,4), state(4,full,7,4), state(4,full,6,4), state(3,full,7,4), state(3,full,6,4), state(2,full,7,4), state(2,full,6,4), changed(4,2,5), changed(4,2,4), changed(4,2,3), changed(3,2,5), changed(3,4,5), changed(3,3,5), changed(2,4,6), changed(2,4,5), changed(2,4,4), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(3,up,3,7), possibleMove(1,up,4,6), possibleMove(3,down,3,3), possibleMove(3,down,4,4), possibleMove(3,down,2,3), possibleMove(2,down,4,3), possibleMove(1,down,4,2), possibleMove(3,left,5,5), 
possibleMove(2,left,6,5), possibleMove(1,left,6,4), possibleMove(2,right,2,5), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,right,2,4), move(2,up,4,5), move(1,down,4,2), state(4,empty,2,4), state(4,empty,4,5), state(4,empty,4,2), state(4,empty,3,4), state(3,empty,4,5), state(3,empty,4,4), state(3,empty,4,2), state(2,empty,4,3), state(2,empty,4,2), state(4,full,1,3), state(4,full,2,3), state(4,full,5,5), state(4,full,5,4), state(4,full,5,3), state(4,full,5,2), state(4,full,5,1), state(4,full,4,4), state(4,full,4,3), state(4,full,4,1), state(4,full,3,5), state(4,full,3,3), state(4,full,3,2), state(4,full,3,1), state(4,full,6,3), state(4,full,7,3), state(3,full,1,3), state(3,full,2,3), state(3,full,5,5), state(3,full,5,4), state(3,full,5,3), state(3,full,5,2), state(3,full,5,1), state(3,full,4,3), state(3,full,4,1), state(3,full,3,5), state(3,full,3,4), state(3,full,3,3), state(3,full,3,2), state(3,full,3,1), state(3,full,6,3), state(3,full,7,3), state(2,full,1,3), state(2,full,2,3), state(2,full,5,5), state(2,full,5,4), state(2,full,5,3), 
state(2,full,5,2), state(2,full,5,1), state(2,full,4,5), state(2,full,4,4), state(2,full,4,1), state(2,full,3,5), state(2,full,3,4), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,2,5), state(4,full,5,7), state(4,full,5,6), state(4,full,4,7), state(4,full,4,6), state(4,full,3,7), state(4,full,3,6), state(4,full,6,5), state(4,full,7,5), state(3,full,1,5), state(3,full,2,5), state(3,full,5,7), state(3,full,5,6), state(3,full,4,7), state(3,full,4,6), state(3,full,3,7), state(3,full,3,6), state(3,full,6,5), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,4,6), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,1,4), state(3,full,2,4), state(3,full,1,4), state(2,full,2,4), state(2,full,1,4), state(4,full,7,4), state(4,full,6,4), state(3,full,7,4), state(3,full,6,4), state(2,full,7,4), state(2,full,6,4), changed(4,2,4), changed(4,4,4), changed(4,3,4), changed(3,4,5), changed(3,4,4), changed(3,4,3), changed(2,4,4), changed(2,4,3), changed(2,4,2), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(3,up,4,7), possibleMove(2,up,4,5), possibleMove(1,up,4,6), possibleMove(1,down,4,2), possibleMove(3,left,6,4), possibleMove(3,left,6,5), possibleMove(2,left,6,3), possibleMove(1,left,6,4), 
possibleMove(3,right,2,4), possibleMove(3,right,2,5), possibleMove(2,right,2,3), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,left,6,4), move(2,up,4,5), move(1,down,4,2), state(4,empty,5,4), state(4,empty,4,5), state(4,empty,4,2), state(4,empty,6,4), state(3,empty,4,5), state(3,empty,4,4), state(3,empty,4,2), state(2,empty,4,3), state(2,empty,4,2), state(4,full,1,3), state(4,full,2,3), state(4,full,5,5), state(4,full,5,3), state(4,full,5,2), state(4,full,5,1), state(4,full,4,4), state(4,full,4,3), state(4,full,4,1), state(4,full,3,5), state(4,full,3,4), state(4,full,3,3), state(4,full,3,2), state(4,full,3,1), state(4,full,6,3), state(4,full,7,3), state(3,full,1,3), state(3,full,2,3), state(3,full,5,5), state(3,full,5,4), state(3,full,5,3), state(3,full,5,2), state(3,full,5,1), state(3,full,4,3), state(3,full,4,1), state(3,full,3,5), state(3,full,3,4), state(3,full,3,3), state(3,full,3,2), state(3,full,3,1), state(3,full,6,3), state(3,full,7,3), state(2,full,1,3), state(2,full,2,3), state(2,full,5,5), state(2,full,5,4), state(2,full,5,3), 
state(2,full,5,2), state(2,full,5,1), state(2,full,4,5), state(2,full,4,4), state(2,full,4,1), state(2,full,3,5), state(2,full,3,4), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,2,5), state(4,full,5,7), state(4,full,5,6), state(4,full,4,7), state(4,full,4,6), state(4,full,3,7), state(4,full,3,6), state(4,full,6,5), state(4,full,7,5), state(3,full,1,5), state(3,full,2,5), state(3,full,5,7), state(3,full,5,6), state(3,full,4,7), state(3,full,4,6), state(3,full,3,7), state(3,full,3,6), state(3,full,6,5), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,4,6), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,2,4), state(4,full,1,4), state(3,full,2,4), state(3,full,1,4), state(2,full,2,4), state(2,full,1,4), state(4,full,7,4), state(3,full,7,4), state(3,full,6,4), state(2,full,7,4), state(2,full,6,4), changed(4,5,4), changed(4,4,4), changed(4,6,4), changed(3,4,5), changed(3,4,4), changed(3,4,3), changed(2,4,4), changed(2,4,3), changed(2,4,2), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(3,up,4,7), possibleMove(2,up,4,5), possibleMove(1,up,4,6), possibleMove(1,down,4,2), possibleMove(3,left,6,4), possibleMove(3,left,6,5), possibleMove(2,left,6,3), possibleMove(1,left,6,4), 
possibleMove(3,right,2,4), possibleMove(3,right,2,5), possibleMove(2,right,2,3), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,left,6,5), move(2,up,4,5), move(1,down,4,2), state(4,empty,5,5), state(4,empty,4,4), state(4,empty,4,2), state(4,empty,6,5), state(3,empty,4,5), state(3,empty,4,4), state(3,empty,4,2), state(2,empty,4,3), state(2,empty,4,2), state(4,full,1,3), state(4,full,2,3), state(4,full,5,4), state(4,full,5,3), state(4,full,5,2), state(4,full,5,1), state(4,full,4,5), state(4,full,4,3), state(4,full,4,1), state(4,full,3,5), state(4,full,3,4), state(4,full,3,3), state(4,full,3,2), state(4,full,3,1), state(4,full,6,3), state(4,full,7,3), state(3,full,1,3), state(3,full,2,3), state(3,full,5,5), state(3,full,5,4), state(3,full,5,3), state(3,full,5,2), state(3,full,5,1), state(3,full,4,3), state(3,full,4,1), state(3,full,3,5), state(3,full,3,4), state(3,full,3,3), state(3,full,3,2), state(3,full,3,1), state(3,full,6,3), state(3,full,7,3), state(2,full,1,3), state(2,full,2,3), state(2,full,5,5), state(2,full,5,4), state(2,full,5,3), 
state(2,full,5,2), state(2,full,5,1), state(2,full,4,5), state(2,full,4,4), state(2,full,4,1), state(2,full,3,5), state(2,full,3,4), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,2,5), state(4,full,5,7), state(4,full,5,6), state(4,full,4,7), state(4,full,4,6), state(4,full,3,7), state(4,full,3,6), state(4,full,7,5), state(3,full,1,5), state(3,full,2,5), state(3,full,5,7), state(3,full,5,6), state(3,full,4,7), state(3,full,4,6), state(3,full,3,7), state(3,full,3,6), state(3,full,6,5), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,4,6), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,2,4), state(4,full,1,4), state(3,full,2,4), state(3,full,1,4), state(2,full,2,4), state(2,full,1,4), state(4,full,7,4), state(4,full,6,4), state(3,full,7,4), state(3,full,6,4), state(2,full,7,4), state(2,full,6,4), changed(4,5,5), changed(4,4,5), changed(4,6,5), changed(3,4,5), changed(3,4,4), changed(3,4,3), changed(2,4,4), changed(2,4,3), changed(2,4,2), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(3,up,4,7), possibleMove(2,up,4,5), possibleMove(1,up,4,6), possibleMove(1,down,4,2), possibleMove(3,left,6,4), possibleMove(3,left,6,5), possibleMove(2,left,6,3), possibleMove(1,left,6,4), 
possibleMove(3,right,2,4), possibleMove(3,right,2,5), possibleMove(2,right,2,3), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,up,4,7), move(2,up,4,5), move(1,down,4,2), state(4,empty,4,7), state(4,empty,4,6), state(4,empty,4,4), state(4,empty,4,2), state(3,empty,4,5), state(3,empty,4,4), state(3,empty,4,2), state(2,empty,4,3), state(2,empty,4,2), state(4,full,1,3), state(4,full,2,3), state(4,full,5,5), state(4,full,5,4), state(4,full,5,3), state(4,full,5,2), state(4,full,5,1), state(4,full,4,5), state(4,full,4,3), state(4,full,4,1), state(4,full,3,5), state(4,full,3,4), state(4,full,3,3), state(4,full,3,2), state(4,full,3,1), state(4,full,6,3), state(4,full,7,3), state(3,full,1,3), state(3,full,2,3), state(3,full,5,5), state(3,full,5,4), state(3,full,5,3), state(3,full,5,2), state(3,full,5,1), state(3,full,4,3), state(3,full,4,1), state(3,full,3,5), state(3,full,3,4), state(3,full,3,3), state(3,full,3,2), state(3,full,3,1), state(3,full,6,3), state(3,full,7,3), state(2,full,1,3), state(2,full,2,3), state(2,full,5,5), state(2,full,5,4), 
state(2,full,5,3), state(2,full,5,2), state(2,full,5,1), state(2,full,4,5), state(2,full,4,4), state(2,full,4,1), state(2,full,3,5), state(2,full,3,4), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,2,5), state(4,full,5,7), state(4,full,5,6), state(4,full,3,7), state(4,full,3,6), state(4,full,6,5), state(4,full,7,5), state(3,full,1,5), state(3,full,2,5), state(3,full,5,7), state(3,full,5,6), state(3,full,4,7), state(3,full,4,6), state(3,full,3,7), state(3,full,3,6), state(3,full,6,5), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,4,6), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,2,4), state(4,full,1,4), state(3,full,2,4), state(3,full,1,4), state(2,full,2,4), state(2,full,1,4), state(4,full,7,4), state(4,full,6,4), state(3,full,7,4), state(3,full,6,4), state(2,full,7,4), state(2,full,6,4), changed(4,4,7), changed(4,4,6), changed(4,4,5), changed(3,4,5), changed(3,4,4), changed(3,4,3), changed(2,4,4), changed(2,4,3), changed(2,4,2), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(3,up,4,7), possibleMove(2,up,4,5), possibleMove(1,up,4,6), possibleMove(1,down,4,2), possibleMove(3,left,6,4), possibleMove(3,left,6,5), possibleMove(2,left,6,3), possibleMove(1,left,6,4), 
possibleMove(3,right,2,4), possibleMove(3,right,2,5), possibleMove(2,right,2,3), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,left,5,3), move(2,right,2,3), move(1,down,4,2), state(4,empty,2,3), state(4,empty,5,3), state(4,empty,4,3), state(4,empty,4,2), state(3,empty,2,3), state(3,empty,4,2), state(3,empty,3,3), state(2,empty,4,3), state(2,empty,4,2), state(4,full,1,3), state(4,full,5,5), state(4,full,5,4), state(4,full,5,2), state(4,full,5,1), state(4,full,4,5), state(4,full,4,4), state(4,full,4,1), state(4,full,3,5), state(4,full,3,4), state(4,full,3,3), state(4,full,3,2), state(4,full,3,1), state(4,full,6,3), state(4,full,7,3), state(3,full,1,3), state(3,full,5,5), state(3,full,5,4), state(3,full,5,3), state(3,full,5,2), state(3,full,5,1), state(3,full,4,5), state(3,full,4,4), state(3,full,4,3), state(3,full,4,1), state(3,full,3,5), state(3,full,3,4), state(3,full,3,2), state(3,full,3,1), state(3,full,6,3), state(3,full,7,3), state(2,full,1,3), state(2,full,2,3), state(2,full,5,5), state(2,full,5,4), state(2,full,5,3), state(2,full,5,2), 
state(2,full,5,1), state(2,full,4,5), state(2,full,4,4), state(2,full,4,1), state(2,full,3,5), state(2,full,3,4), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,2,5), state(4,full,5,7), state(4,full,5,6), state(4,full,4,7), state(4,full,4,6), state(4,full,3,7), state(4,full,3,6), state(4,full,6,5), state(4,full,7,5), state(3,full,1,5), state(3,full,2,5), state(3,full,5,7), state(3,full,5,6), state(3,full,4,7), state(3,full,4,6), state(3,full,3,7), state(3,full,3,6), state(3,full,6,5), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,4,6), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,2,4), state(4,full,1,4), state(3,full,2,4), state(3,full,1,4), state(2,full,2,4), state(2,full,1,4), state(4,full,7,4), state(4,full,6,4), state(3,full,7,4), state(3,full,6,4), state(2,full,7,4), state(2,full,6,4), changed(4,5,3), changed(4,4,3), changed(4,3,3), changed(3,2,3), changed(3,4,3), changed(3,3,3), changed(2,4,4), changed(2,4,3), changed(2,4,2), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(3,up,3,5), possibleMove(3,up,4,4), possibleMove(3,up,2,5), possibleMove(2,up,4,5), possibleMove(1,up,4,6), possibleMove(3,down,3,1), possibleMove(1,down,4,2), possibleMove(3,left,5,3), 
possibleMove(2,left,6,3), possibleMove(1,left,6,4), possibleMove(2,right,2,3), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,down,3,1), move(2,right,2,3), move(1,down,4,2), state(4,empty,2,3), state(4,empty,4,2), state(4,empty,3,2), state(4,empty,3,1), state(3,empty,2,3), state(3,empty,4,2), state(3,empty,3,3), state(2,empty,4,3), state(2,empty,4,2), state(4,full,1,3), state(4,full,5,5), state(4,full,5,4), state(4,full,5,3), state(4,full,5,2), state(4,full,5,1), state(4,full,4,5), state(4,full,4,4), state(4,full,4,3), state(4,full,4,1), state(4,full,3,5), state(4,full,3,4), state(4,full,3,3), state(4,full,6,3), state(4,full,7,3), state(3,full,1,3), state(3,full,5,5), state(3,full,5,4), state(3,full,5,3), state(3,full,5,2), state(3,full,5,1), state(3,full,4,5), state(3,full,4,4), state(3,full,4,3), state(3,full,4,1), state(3,full,3,5), state(3,full,3,4), state(3,full,3,2), state(3,full,3,1), state(3,full,6,3), state(3,full,7,3), state(2,full,1,3), state(2,full,2,3), state(2,full,5,5), state(2,full,5,4), state(2,full,5,3), state(2,full,5,2), 
state(2,full,5,1), state(2,full,4,5), state(2,full,4,4), state(2,full,4,1), state(2,full,3,5), state(2,full,3,4), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,2,5), state(4,full,5,7), state(4,full,5,6), state(4,full,4,7), state(4,full,4,6), state(4,full,3,7), state(4,full,3,6), state(4,full,6,5), state(4,full,7,5), state(3,full,1,5), state(3,full,2,5), state(3,full,5,7), state(3,full,5,6), state(3,full,4,7), state(3,full,4,6), state(3,full,3,7), state(3,full,3,6), state(3,full,6,5), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,4,6), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,2,4), state(4,full,1,4), state(3,full,2,4), state(3,full,1,4), state(2,full,2,4), state(2,full,1,4), state(4,full,7,4), state(4,full,6,4), state(3,full,7,4), state(3,full,6,4), state(2,full,7,4), state(2,full,6,4), changed(4,3,3), changed(4,3,2), changed(4,3,1), changed(3,2,3), changed(3,4,3), changed(3,3,3), changed(2,4,4), changed(2,4,3), changed(2,4,2), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(3,up,3,5), possibleMove(3,up,4,4), possibleMove(3,up,2,5), possibleMove(2,up,4,5), possibleMove(1,up,4,6), possibleMove(3,down,3,1), possibleMove(1,down,4,2), possibleMove(3,left,5,3), 
possibleMove(2,left,6,3), possibleMove(1,left,6,4), possibleMove(2,right,2,3), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,up,4,4), move(2,right,2,3), move(1,down,4,2), state(4,empty,2,3), state(4,empty,4,4), state(4,empty,4,3), state(4,empty,3,3), state(3,empty,2,3), state(3,empty,4,2), state(3,empty,3,3), state(2,empty,4,3), state(2,empty,4,2), state(4,full,1,3), state(4,full,5,5), state(4,full,5,4), state(4,full,5,3), state(4,full,5,2), state(4,full,5,1), state(4,full,4,5), state(4,full,4,2), state(4,full,4,1), state(4,full,3,5), state(4,full,3,4), state(4,full,3,2), state(4,full,3,1), state(4,full,6,3), state(4,full,7,3), state(3,full,1,3), state(3,full,5,5), state(3,full,5,4), state(3,full,5,3), state(3,full,5,2), state(3,full,5,1), state(3,full,4,5), state(3,full,4,4), state(3,full,4,3), state(3,full,4,1), state(3,full,3,5), state(3,full,3,4), state(3,full,3,2), state(3,full,3,1), state(3,full,6,3), state(3,full,7,3), state(2,full,1,3), state(2,full,2,3), state(2,full,5,5), state(2,full,5,4), state(2,full,5,3), state(2,full,5,2), 
state(2,full,5,1), state(2,full,4,5), state(2,full,4,4), state(2,full,4,1), state(2,full,3,5), state(2,full,3,4), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,2,5), state(4,full,5,7), state(4,full,5,6), state(4,full,4,7), state(4,full,4,6), state(4,full,3,7), state(4,full,3,6), state(4,full,6,5), state(4,full,7,5), state(3,full,1,5), state(3,full,2,5), state(3,full,5,7), state(3,full,5,6), state(3,full,4,7), state(3,full,4,6), state(3,full,3,7), state(3,full,3,6), state(3,full,6,5), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,4,6), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,2,4), state(4,full,1,4), state(3,full,2,4), state(3,full,1,4), state(2,full,2,4), state(2,full,1,4), state(4,full,7,4), state(4,full,6,4), state(3,full,7,4), state(3,full,6,4), state(2,full,7,4), state(2,full,6,4), changed(4,4,4), changed(4,4,3), changed(4,4,2), changed(3,2,3), changed(3,4,3), changed(3,3,3), changed(2,4,4), changed(2,4,3), changed(2,4,2), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(3,up,3,5), possibleMove(3,up,4,4), possibleMove(3,up,2,5), possibleMove(2,up,4,5), possibleMove(1,up,4,6), possibleMove(3,down,3,1), possibleMove(1,down,4,2), possibleMove(3,left,5,3), 
possibleMove(2,left,6,3), possibleMove(1,left,6,4), possibleMove(2,right,2,3), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,up,2,5), move(2,right,2,3), move(1,down,4,2), state(4,empty,2,5), state(4,empty,2,4), state(4,empty,4,2), state(4,empty,3,3), state(3,empty,2,3), state(3,empty,4,2), state(3,empty,3,3), state(2,empty,4,3), state(2,empty,4,2), state(4,full,1,3), state(4,full,2,3), state(4,full,5,5), state(4,full,5,4), state(4,full,5,3), state(4,full,5,2), state(4,full,5,1), state(4,full,4,5), state(4,full,4,4), state(4,full,4,3), state(4,full,4,1), state(4,full,3,5), state(4,full,3,4), state(4,full,3,2), state(4,full,3,1), state(4,full,6,3), state(4,full,7,3), state(3,full,1,3), state(3,full,5,5), state(3,full,5,4), state(3,full,5,3), state(3,full,5,2), state(3,full,5,1), state(3,full,4,5), state(3,full,4,4), state(3,full,4,3), state(3,full,4,1), state(3,full,3,5), state(3,full,3,4), state(3,full,3,2), state(3,full,3,1), state(3,full,6,3), state(3,full,7,3), state(2,full,1,3), state(2,full,2,3), state(2,full,5,5), state(2,full,5,4), 
state(2,full,5,3), state(2,full,5,2), state(2,full,5,1), state(2,full,4,5), state(2,full,4,4), state(2,full,4,1), state(2,full,3,5), state(2,full,3,4), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,5,7), state(4,full,5,6), state(4,full,4,7), state(4,full,4,6), state(4,full,3,7), state(4,full,3,6), state(4,full,6,5), state(4,full,7,5), state(3,full,1,5), state(3,full,2,5), state(3,full,5,7), state(3,full,5,6), state(3,full,4,7), state(3,full,4,6), state(3,full,3,7), state(3,full,3,6), state(3,full,6,5), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,4,6), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,1,4), state(3,full,2,4), state(3,full,1,4), state(2,full,2,4), state(2,full,1,4), state(4,full,7,4), state(4,full,6,4), state(3,full,7,4), state(3,full,6,4), state(2,full,7,4), state(2,full,6,4), changed(4,2,5), changed(4,2,4), changed(4,2,3), changed(3,2,3), changed(3,4,3), changed(3,3,3), changed(2,4,4), changed(2,4,3), changed(2,4,2), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(3,up,3,5), possibleMove(3,up,4,4), possibleMove(3,up,2,5), possibleMove(2,up,4,5), possibleMove(1,up,4,6), possibleMove(3,down,3,1), possibleMove(1,down,4,2), possibleMove(3,left,5,3), 
possibleMove(2,left,6,3), possibleMove(1,left,6,4), possibleMove(2,right,2,3), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,right,3,3), move(2,left,6,3), move(1,down,4,2), state(4,empty,4,3), state(4,empty,4,2), state(4,empty,3,3), state(4,empty,6,3), state(3,empty,5,3), state(3,empty,4,2), state(3,empty,6,3), state(2,empty,4,3), state(2,empty,4,2), state(4,full,1,3), state(4,full,2,3), state(4,full,5,5), state(4,full,5,4), state(4,full,5,3), state(4,full,5,2), state(4,full,5,1), state(4,full,4,5), state(4,full,4,4), state(4,full,4,1), state(4,full,3,5), state(4,full,3,4), state(4,full,3,2), state(4,full,3,1), state(4,full,7,3), state(3,full,1,3), state(3,full,2,3), state(3,full,5,5), state(3,full,5,4), state(3,full,5,2), state(3,full,5,1), state(3,full,4,5), state(3,full,4,4), state(3,full,4,3), state(3,full,4,1), state(3,full,3,5), state(3,full,3,4), state(3,full,3,3), state(3,full,3,2), state(3,full,3,1), state(3,full,7,3), state(2,full,1,3), state(2,full,2,3), state(2,full,5,5), state(2,full,5,4), state(2,full,5,3), state(2,full,5,2), 
state(2,full,5,1), state(2,full,4,5), state(2,full,4,4), state(2,full,4,1), state(2,full,3,5), state(2,full,3,4), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,2,5), state(4,full,5,7), state(4,full,5,6), state(4,full,4,7), state(4,full,4,6), state(4,full,3,7), state(4,full,3,6), state(4,full,6,5), state(4,full,7,5), state(3,full,1,5), state(3,full,2,5), state(3,full,5,7), state(3,full,5,6), state(3,full,4,7), state(3,full,4,6), state(3,full,3,7), state(3,full,3,6), state(3,full,6,5), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,4,6), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,2,4), state(4,full,1,4), state(3,full,2,4), state(3,full,1,4), state(2,full,2,4), state(2,full,1,4), state(4,full,7,4), state(4,full,6,4), state(3,full,7,4), state(3,full,6,4), state(2,full,7,4), state(2,full,6,4), changed(4,5,3), changed(4,4,3), changed(4,3,3), changed(3,5,3), changed(3,4,3), changed(3,6,3), changed(2,4,4), changed(2,4,3), changed(2,4,2), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(3,up,6,5), possibleMove(3,up,4,4), possibleMove(3,up,5,5), possibleMove(2,up,4,5), possibleMove(1,up,4,6), possibleMove(3,down,5,1), possibleMove(1,down,4,2), possibleMove(2,left,6,3), 
possibleMove(1,left,6,4), possibleMove(3,right,3,3), possibleMove(2,right,2,3), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,down,5,1), move(2,left,6,3), move(1,down,4,2), state(4,empty,5,2), state(4,empty,5,1), state(4,empty,4,2), state(4,empty,6,3), state(3,empty,5,3), state(3,empty,4,2), state(3,empty,6,3), state(2,empty,4,3), state(2,empty,4,2), state(4,full,1,3), state(4,full,2,3), state(4,full,5,5), state(4,full,5,4), state(4,full,5,3), state(4,full,4,5), state(4,full,4,4), state(4,full,4,3), state(4,full,4,1), state(4,full,3,5), state(4,full,3,4), state(4,full,3,3), state(4,full,3,2), state(4,full,3,1), state(4,full,7,3), state(3,full,1,3), state(3,full,2,3), state(3,full,5,5), state(3,full,5,4), state(3,full,5,2), state(3,full,5,1), state(3,full,4,5), state(3,full,4,4), state(3,full,4,3), state(3,full,4,1), state(3,full,3,5), state(3,full,3,4), state(3,full,3,3), state(3,full,3,2), state(3,full,3,1), state(3,full,7,3), state(2,full,1,3), state(2,full,2,3), state(2,full,5,5), state(2,full,5,4), state(2,full,5,3), state(2,full,5,2), 
state(2,full,5,1), state(2,full,4,5), state(2,full,4,4), state(2,full,4,1), state(2,full,3,5), state(2,full,3,4), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,2,5), state(4,full,5,7), state(4,full,5,6), state(4,full,4,7), state(4,full,4,6), state(4,full,3,7), state(4,full,3,6), state(4,full,6,5), state(4,full,7,5), state(3,full,1,5), state(3,full,2,5), state(3,full,5,7), state(3,full,5,6), state(3,full,4,7), state(3,full,4,6), state(3,full,3,7), state(3,full,3,6), state(3,full,6,5), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,4,6), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,2,4), state(4,full,1,4), state(3,full,2,4), state(3,full,1,4), state(2,full,2,4), state(2,full,1,4), state(4,full,7,4), state(4,full,6,4), state(3,full,7,4), state(3,full,6,4), state(2,full,7,4), state(2,full,6,4), changed(4,5,3), changed(4,5,2), changed(4,5,1), changed(3,5,3), changed(3,4,3), changed(3,6,3), changed(2,4,4), changed(2,4,3), changed(2,4,2), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(3,up,6,5), possibleMove(3,up,4,4), possibleMove(3,up,5,5), possibleMove(2,up,4,5), possibleMove(1,up,4,6), possibleMove(3,down,5,1), possibleMove(1,down,4,2), possibleMove(2,left,6,3), 
possibleMove(1,left,6,4), possibleMove(3,right,3,3), possibleMove(2,right,2,3), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,up,6,5), move(2,left,6,3), move(1,down,4,2), state(4,empty,5,3), state(4,empty,4,2), state(4,empty,6,5), state(4,empty,6,4), state(3,empty,5,3), state(3,empty,4,2), state(3,empty,6,3), state(2,empty,4,3), state(2,empty,4,2), state(4,full,1,3), state(4,full,2,3), state(4,full,5,5), state(4,full,5,4), state(4,full,5,2), state(4,full,5,1), state(4,full,4,5), state(4,full,4,4), state(4,full,4,3), state(4,full,4,1), state(4,full,3,5), state(4,full,3,4), state(4,full,3,3), state(4,full,3,2), state(4,full,3,1), state(4,full,6,3), state(4,full,7,3), state(3,full,1,3), state(3,full,2,3), state(3,full,5,5), state(3,full,5,4), state(3,full,5,2), state(3,full,5,1), state(3,full,4,5), state(3,full,4,4), state(3,full,4,3), state(3,full,4,1), state(3,full,3,5), state(3,full,3,4), state(3,full,3,3), state(3,full,3,2), state(3,full,3,1), state(3,full,7,3), state(2,full,1,3), state(2,full,2,3), state(2,full,5,5), state(2,full,5,4), 
state(2,full,5,3), state(2,full,5,2), state(2,full,5,1), state(2,full,4,5), state(2,full,4,4), state(2,full,4,1), state(2,full,3,5), state(2,full,3,4), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,2,5), state(4,full,5,7), state(4,full,5,6), state(4,full,4,7), state(4,full,4,6), state(4,full,3,7), state(4,full,3,6), state(4,full,7,5), state(3,full,1,5), state(3,full,2,5), state(3,full,5,7), state(3,full,5,6), state(3,full,4,7), state(3,full,4,6), state(3,full,3,7), state(3,full,3,6), state(3,full,6,5), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,4,6), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,2,4), state(4,full,1,4), state(3,full,2,4), state(3,full,1,4), state(2,full,2,4), state(2,full,1,4), state(4,full,7,4), state(3,full,7,4), state(3,full,6,4), state(2,full,7,4), state(2,full,6,4), changed(4,6,5), changed(4,6,4), changed(4,6,3), changed(3,5,3), changed(3,4,3), changed(3,6,3), changed(2,4,4), changed(2,4,3), changed(2,4,2), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(3,up,6,5), possibleMove(3,up,4,4), possibleMove(3,up,5,5), possibleMove(2,up,4,5), possibleMove(1,up,4,6), possibleMove(3,down,5,1), possibleMove(1,down,4,2), possibleMove(2,left,6,3), 
possibleMove(1,left,6,4), possibleMove(3,right,3,3), possibleMove(2,right,2,3), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,up,4,4), move(2,left,6,3), move(1,down,4,2), state(4,empty,5,3), state(4,empty,4,4), state(4,empty,4,3), state(4,empty,6,3), state(3,empty,5,3), state(3,empty,4,2), state(3,empty,6,3), state(2,empty,4,3), state(2,empty,4,2), state(4,full,1,3), state(4,full,2,3), state(4,full,5,5), state(4,full,5,4), state(4,full,5,2), state(4,full,5,1), state(4,full,4,5), state(4,full,4,2), state(4,full,4,1), state(4,full,3,5), state(4,full,3,4), state(4,full,3,3), state(4,full,3,2), state(4,full,3,1), state(4,full,7,3), state(3,full,1,3), state(3,full,2,3), state(3,full,5,5), state(3,full,5,4), state(3,full,5,2), state(3,full,5,1), state(3,full,4,5), state(3,full,4,4), state(3,full,4,3), state(3,full,4,1), state(3,full,3,5), state(3,full,3,4), state(3,full,3,3), state(3,full,3,2), state(3,full,3,1), state(3,full,7,3), state(2,full,1,3), state(2,full,2,3), state(2,full,5,5), state(2,full,5,4), state(2,full,5,3), state(2,full,5,2), 
state(2,full,5,1), state(2,full,4,5), state(2,full,4,4), state(2,full,4,1), state(2,full,3,5), state(2,full,3,4), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,2,5), state(4,full,5,7), state(4,full,5,6), state(4,full,4,7), state(4,full,4,6), state(4,full,3,7), state(4,full,3,6), state(4,full,6,5), state(4,full,7,5), state(3,full,1,5), state(3,full,2,5), state(3,full,5,7), state(3,full,5,6), state(3,full,4,7), state(3,full,4,6), state(3,full,3,7), state(3,full,3,6), state(3,full,6,5), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,4,6), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,2,4), state(4,full,1,4), state(3,full,2,4), state(3,full,1,4), state(2,full,2,4), state(2,full,1,4), state(4,full,7,4), state(4,full,6,4), state(3,full,7,4), state(3,full,6,4), state(2,full,7,4), state(2,full,6,4), changed(4,4,4), changed(4,4,3), changed(4,4,2), changed(3,5,3), changed(3,4,3), changed(3,6,3), changed(2,4,4), changed(2,4,3), changed(2,4,2), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(3,up,6,5), possibleMove(3,up,4,4), possibleMove(3,up,5,5), possibleMove(2,up,4,5), possibleMove(1,up,4,6), possibleMove(3,down,5,1), possibleMove(1,down,4,2), possibleMove(2,left,6,3), 
possibleMove(1,left,6,4), possibleMove(3,right,3,3), possibleMove(2,right,2,3), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,up,5,5), move(2,left,6,3), move(1,down,4,2), state(4,empty,5,5), state(4,empty,5,4), state(4,empty,4,2), state(4,empty,6,3), state(3,empty,5,3), state(3,empty,4,2), state(3,empty,6,3), state(2,empty,4,3), state(2,empty,4,2), state(4,full,1,3), state(4,full,2,3), state(4,full,5,3), state(4,full,5,2), state(4,full,5,1), state(4,full,4,5), state(4,full,4,4), state(4,full,4,3), state(4,full,4,1), state(4,full,3,5), state(4,full,3,4), state(4,full,3,3), state(4,full,3,2), state(4,full,3,1), state(4,full,7,3), state(3,full,1,3), state(3,full,2,3), state(3,full,5,5), state(3,full,5,4), state(3,full,5,2), state(3,full,5,1), state(3,full,4,5), state(3,full,4,4), state(3,full,4,3), state(3,full,4,1), state(3,full,3,5), state(3,full,3,4), state(3,full,3,3), state(3,full,3,2), state(3,full,3,1), state(3,full,7,3), state(2,full,1,3), state(2,full,2,3), state(2,full,5,5), state(2,full,5,4), state(2,full,5,3), state(2,full,5,2), 
state(2,full,5,1), state(2,full,4,5), state(2,full,4,4), state(2,full,4,1), state(2,full,3,5), state(2,full,3,4), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,2,5), state(4,full,5,7), state(4,full,5,6), state(4,full,4,7), state(4,full,4,6), state(4,full,3,7), state(4,full,3,6), state(4,full,6,5), state(4,full,7,5), state(3,full,1,5), state(3,full,2,5), state(3,full,5,7), state(3,full,5,6), state(3,full,4,7), state(3,full,4,6), state(3,full,3,7), state(3,full,3,6), state(3,full,6,5), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,4,6), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,2,4), state(4,full,1,4), state(3,full,2,4), state(3,full,1,4), state(2,full,2,4), state(2,full,1,4), state(4,full,7,4), state(4,full,6,4), state(3,full,7,4), state(3,full,6,4), state(2,full,7,4), state(2,full,6,4), changed(4,5,5), changed(4,5,4), changed(4,5,3), changed(3,5,3), changed(3,4,3), changed(3,6,3), changed(2,4,4), changed(2,4,3), changed(2,4,2), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(3,up,6,5), possibleMove(3,up,4,4), possibleMove(3,up,5,5), possibleMove(2,up,4,5), possibleMove(1,up,4,6), possibleMove(3,down,5,1), possibleMove(1,down,4,2), possibleMove(2,left,6,3), 
possibleMove(1,left,6,4), possibleMove(3,right,3,3), possibleMove(2,right,2,3), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,right,3,2), move(2,down,5,2), move(1,left,6,4), state(4,empty,5,3), state(4,empty,4,2), state(4,empty,3,2), state(4,empty,6,4), state(3,empty,5,3), state(3,empty,5,2), state(3,empty,6,4), state(2,empty,5,4), state(2,empty,6,4), state(4,full,1,3), state(4,full,2,3), state(4,full,5,5), state(4,full,5,4), state(4,full,5,2), state(4,full,5,1), state(4,full,4,5), state(4,full,4,4), state(4,full,4,3), state(4,full,4,1), state(4,full,3,5), state(4,full,3,4), state(4,full,3,3), state(4,full,3,1), state(4,full,6,3), state(4,full,7,3), state(3,full,1,3), state(3,full,2,3), state(3,full,5,5), state(3,full,5,4), state(3,full,5,1), state(3,full,4,5), state(3,full,4,4), state(3,full,4,3), state(3,full,4,2), state(3,full,4,1), state(3,full,3,5), state(3,full,3,4), state(3,full,3,3), state(3,full,3,2), state(3,full,3,1), state(3,full,6,3), state(3,full,7,3), state(2,full,1,3), state(2,full,2,3), state(2,full,5,5), state(2,full,5,3), 
state(2,full,5,2), state(2,full,5,1), state(2,full,4,5), state(2,full,4,4), state(2,full,4,3), state(2,full,4,2), state(2,full,4,1), state(2,full,3,5), state(2,full,3,4), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,2,5), state(4,full,5,7), state(4,full,5,6), state(4,full,4,7), state(4,full,4,6), state(4,full,3,7), state(4,full,3,6), state(4,full,6,5), state(4,full,7,5), state(3,full,1,5), state(3,full,2,5), state(3,full,5,7), state(3,full,5,6), state(3,full,4,7), state(3,full,4,6), state(3,full,3,7), state(3,full,3,6), state(3,full,6,5), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,4,6), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,2,4), state(4,full,1,4), state(3,full,2,4), state(3,full,1,4), state(2,full,2,4), state(2,full,1,4), state(4,full,7,4), state(3,full,7,4), state(2,full,7,4), changed(4,5,2), changed(4,4,2), changed(4,3,2), changed(3,5,4), changed(3,5,3), changed(3,5,2), changed(2,5,4), changed(2,4,4), changed(2,6,4), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(3,up,5,5), possibleMove(2,up,5,6), possibleMove(1,up,4,6), possibleMove(2,down,5,2), possibleMove(1,down,4,2), possibleMove(3,left,7,3), possibleMove(1,left,6,4), possibleMove(3,right,4,4), 
possibleMove(3,right,3,2), possibleMove(3,right,3,3), possibleMove(2,right,3,4), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,right,3,3), move(2,down,5,2), move(1,left,6,4), state(4,empty,5,2), state(4,empty,4,3), state(4,empty,3,3), state(4,empty,6,4), state(3,empty,5,3), state(3,empty,5,2), state(3,empty,6,4), state(2,empty,5,4), state(2,empty,6,4), state(4,full,1,3), state(4,full,2,3), state(4,full,5,5), state(4,full,5,4), state(4,full,5,3), state(4,full,5,1), state(4,full,4,5), state(4,full,4,4), state(4,full,4,2), state(4,full,4,1), state(4,full,3,5), state(4,full,3,4), state(4,full,3,2), state(4,full,3,1), state(4,full,6,3), state(4,full,7,3), state(3,full,1,3), state(3,full,2,3), state(3,full,5,5), state(3,full,5,4), state(3,full,5,1), state(3,full,4,5), state(3,full,4,4), state(3,full,4,3), state(3,full,4,2), state(3,full,4,1), state(3,full,3,5), state(3,full,3,4), state(3,full,3,3), state(3,full,3,2), state(3,full,3,1), state(3,full,6,3), state(3,full,7,3), state(2,full,1,3), state(2,full,2,3), state(2,full,5,5), state(2,full,5,3), 
state(2,full,5,2), state(2,full,5,1), state(2,full,4,5), state(2,full,4,4), state(2,full,4,3), state(2,full,4,2), state(2,full,4,1), state(2,full,3,5), state(2,full,3,4), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,2,5), state(4,full,5,7), state(4,full,5,6), state(4,full,4,7), state(4,full,4,6), state(4,full,3,7), state(4,full,3,6), state(4,full,6,5), state(4,full,7,5), state(3,full,1,5), state(3,full,2,5), state(3,full,5,7), state(3,full,5,6), state(3,full,4,7), state(3,full,4,6), state(3,full,3,7), state(3,full,3,6), state(3,full,6,5), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,4,6), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,2,4), state(4,full,1,4), state(3,full,2,4), state(3,full,1,4), state(2,full,2,4), state(2,full,1,4), state(4,full,7,4), state(3,full,7,4), state(2,full,7,4), changed(4,5,3), changed(4,4,3), changed(4,3,3), changed(3,5,4), changed(3,5,3), changed(3,5,2), changed(2,5,4), changed(2,4,4), changed(2,6,4), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(3,up,5,5), possibleMove(2,up,5,6), possibleMove(1,up,4,6), possibleMove(2,down,5,2), possibleMove(1,down,4,2), possibleMove(3,left,7,3), possibleMove(1,left,6,4), possibleMove(3,right,4,4), 
possibleMove(3,right,3,2), possibleMove(3,right,3,3), possibleMove(2,right,3,4), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,right,4,4), move(2,down,5,2), move(1,left,6,4), state(4,empty,5,4), state(4,empty,5,3), state(4,empty,5,2), state(4,empty,4,4), state(3,empty,5,3), state(3,empty,5,2), state(3,empty,6,4), state(2,empty,5,4), state(2,empty,6,4), state(4,full,1,3), state(4,full,2,3), state(4,full,5,5), state(4,full,5,1), state(4,full,4,5), state(4,full,4,3), state(4,full,4,2), state(4,full,4,1), state(4,full,3,5), state(4,full,3,4), state(4,full,3,3), state(4,full,3,2), state(4,full,3,1), state(4,full,6,3), state(4,full,7,3), state(3,full,1,3), state(3,full,2,3), state(3,full,5,5), state(3,full,5,4), state(3,full,5,1), state(3,full,4,5), state(3,full,4,4), state(3,full,4,3), state(3,full,4,2), state(3,full,4,1), state(3,full,3,5), state(3,full,3,4), state(3,full,3,3), state(3,full,3,2), state(3,full,3,1), state(3,full,6,3), state(3,full,7,3), state(2,full,1,3), state(2,full,2,3), state(2,full,5,5), state(2,full,5,3), state(2,full,5,2), 
state(2,full,5,1), state(2,full,4,5), state(2,full,4,4), state(2,full,4,3), state(2,full,4,2), state(2,full,4,1), state(2,full,3,5), state(2,full,3,4), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,2,5), state(4,full,5,7), state(4,full,5,6), state(4,full,4,7), state(4,full,4,6), state(4,full,3,7), state(4,full,3,6), state(4,full,6,5), state(4,full,7,5), state(3,full,1,5), state(3,full,2,5), state(3,full,5,7), state(3,full,5,6), state(3,full,4,7), state(3,full,4,6), state(3,full,3,7), state(3,full,3,6), state(3,full,6,5), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,4,6), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,2,4), state(4,full,1,4), state(3,full,2,4), state(3,full,1,4), state(2,full,2,4), state(2,full,1,4), state(4,full,7,4), state(4,full,6,4), state(3,full,7,4), state(2,full,7,4), changed(4,5,4), changed(4,4,4), changed(4,6,4), changed(3,5,4), changed(3,5,3), changed(3,5,2), changed(2,5,4), changed(2,4,4), changed(2,6,4), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(3,up,5,5), possibleMove(2,up,5,6), possibleMove(1,up,4,6), possibleMove(2,down,5,2), possibleMove(1,down,4,2), possibleMove(3,left,7,3), possibleMove(1,left,6,4), possibleMove(3,right,4,4), 
possibleMove(3,right,3,2), possibleMove(3,right,3,3), possibleMove(2,right,3,4), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,left,7,3), move(2,down,5,2), move(1,left,6,4), state(4,empty,5,2), state(4,empty,6,4), state(4,empty,6,3), state(4,empty,7,3), state(3,empty,5,3), state(3,empty,5,2), state(3,empty,6,4), state(2,empty,5,4), state(2,empty,6,4), state(4,full,1,3), state(4,full,2,3), state(4,full,5,5), state(4,full,5,4), state(4,full,5,3), state(4,full,5,1), state(4,full,4,5), state(4,full,4,4), state(4,full,4,3), state(4,full,4,2), state(4,full,4,1), state(4,full,3,5), state(4,full,3,4), state(4,full,3,3), state(4,full,3,2), state(4,full,3,1), state(3,full,1,3), state(3,full,2,3), state(3,full,5,5), state(3,full,5,4), state(3,full,5,1), state(3,full,4,5), state(3,full,4,4), state(3,full,4,3), state(3,full,4,2), state(3,full,4,1), state(3,full,3,5), state(3,full,3,4), state(3,full,3,3), state(3,full,3,2), state(3,full,3,1), state(3,full,6,3), state(3,full,7,3), state(2,full,1,3), state(2,full,2,3), state(2,full,5,5), state(2,full,5,3), 
state(2,full,5,2), state(2,full,5,1), state(2,full,4,5), state(2,full,4,4), state(2,full,4,3), state(2,full,4,2), state(2,full,4,1), state(2,full,3,5), state(2,full,3,4), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,2,5), state(4,full,5,7), state(4,full,5,6), state(4,full,4,7), state(4,full,4,6), state(4,full,3,7), state(4,full,3,6), state(4,full,6,5), state(4,full,7,5), state(3,full,1,5), state(3,full,2,5), state(3,full,5,7), state(3,full,5,6), state(3,full,4,7), state(3,full,4,6), state(3,full,3,7), state(3,full,3,6), state(3,full,6,5), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,4,6), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,2,4), state(4,full,1,4), state(3,full,2,4), state(3,full,1,4), state(2,full,2,4), state(2,full,1,4), state(4,full,7,4), state(3,full,7,4), state(2,full,7,4), changed(4,5,3), changed(4,6,3), changed(4,7,3), changed(3,5,4), changed(3,5,3), changed(3,5,2), changed(2,5,4), changed(2,4,4), changed(2,6,4), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(3,up,5,5), possibleMove(2,up,5,6), possibleMove(1,up,4,6), possibleMove(2,down,5,2), possibleMove(1,down,4,2), possibleMove(3,left,7,3), possibleMove(1,left,6,4), possibleMove(3,right,4,4), 
possibleMove(3,right,3,2), possibleMove(3,right,3,3), possibleMove(2,right,3,4), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,up,5,5), move(2,down,5,2), move(1,left,6,4), state(4,empty,5,5), state(4,empty,5,4), state(4,empty,5,2), state(4,empty,6,4), state(3,empty,5,3), state(3,empty,5,2), state(3,empty,6,4), state(2,empty,5,4), state(2,empty,6,4), state(4,full,1,3), state(4,full,2,3), state(4,full,5,3), state(4,full,5,1), state(4,full,4,5), state(4,full,4,4), state(4,full,4,3), state(4,full,4,2), state(4,full,4,1), state(4,full,3,5), state(4,full,3,4), state(4,full,3,3), state(4,full,3,2), state(4,full,3,1), state(4,full,6,3), state(4,full,7,3), state(3,full,1,3), state(3,full,2,3), state(3,full,5,5), state(3,full,5,4), state(3,full,5,1), state(3,full,4,5), state(3,full,4,4), state(3,full,4,3), state(3,full,4,2), state(3,full,4,1), state(3,full,3,5), state(3,full,3,4), state(3,full,3,3), state(3,full,3,2), state(3,full,3,1), state(3,full,6,3), state(3,full,7,3), state(2,full,1,3), state(2,full,2,3), state(2,full,5,5), state(2,full,5,3), 
state(2,full,5,2), state(2,full,5,1), state(2,full,4,5), state(2,full,4,4), state(2,full,4,3), state(2,full,4,2), state(2,full,4,1), state(2,full,3,5), state(2,full,3,4), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,2,5), state(4,full,5,7), state(4,full,5,6), state(4,full,4,7), state(4,full,4,6), state(4,full,3,7), state(4,full,3,6), state(4,full,6,5), state(4,full,7,5), state(3,full,1,5), state(3,full,2,5), state(3,full,5,7), state(3,full,5,6), state(3,full,4,7), state(3,full,4,6), state(3,full,3,7), state(3,full,3,6), state(3,full,6,5), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,4,6), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,2,4), state(4,full,1,4), state(3,full,2,4), state(3,full,1,4), state(2,full,2,4), state(2,full,1,4), state(4,full,7,4), state(3,full,7,4), state(2,full,7,4), changed(4,5,5), changed(4,5,4), changed(4,5,3), changed(3,5,4), changed(3,5,3), changed(3,5,2), changed(2,5,4), changed(2,4,4), changed(2,6,4), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(3,up,5,5), possibleMove(2,up,5,6), possibleMove(1,up,4,6), possibleMove(2,down,5,2), possibleMove(1,down,4,2), possibleMove(3,left,7,3), possibleMove(1,left,6,4), possibleMove(3,right,4,4), 
possibleMove(3,right,3,2), possibleMove(3,right,3,3), possibleMove(2,right,3,4), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,right,3,6), move(2,up,5,6), move(1,left,6,4), state(4,empty,5,5), state(4,empty,4,6), state(4,empty,3,6), state(4,empty,6,4), state(3,empty,5,6), state(3,empty,5,5), state(3,empty,6,4), state(2,empty,5,4), state(2,empty,6,4), state(4,full,1,3), state(4,full,2,3), state(4,full,5,4), state(4,full,5,3), state(4,full,5,2), state(4,full,5,1), state(4,full,4,5), state(4,full,4,4), state(4,full,4,3), state(4,full,4,2), state(4,full,4,1), state(4,full,3,5), state(4,full,3,4), state(4,full,3,3), state(4,full,3,2), state(4,full,3,1), state(4,full,6,3), state(4,full,7,3), state(3,full,1,3), state(3,full,2,3), state(3,full,5,4), state(3,full,5,3), state(3,full,5,2), state(3,full,5,1), state(3,full,4,5), state(3,full,4,4), state(3,full,4,3), state(3,full,4,2), state(3,full,4,1), state(3,full,3,5), state(3,full,3,4), state(3,full,3,3), state(3,full,3,2), state(3,full,3,1), state(3,full,6,3), state(3,full,7,3), state(2,full,1,3), 
state(2,full,2,3), state(2,full,5,5), state(2,full,5,3), state(2,full,5,2), state(2,full,5,1), state(2,full,4,5), state(2,full,4,4), state(2,full,4,3), state(2,full,4,2), state(2,full,4,1), state(2,full,3,5), state(2,full,3,4), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,2,5), state(4,full,5,7), state(4,full,5,6), state(4,full,4,7), state(4,full,3,7), state(4,full,6,5), state(4,full,7,5), state(3,full,1,5), state(3,full,2,5), state(3,full,5,7), state(3,full,4,7), state(3,full,4,6), state(3,full,3,7), state(3,full,3,6), state(3,full,6,5), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,4,6), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,2,4), state(4,full,1,4), state(3,full,2,4), state(3,full,1,4), state(2,full,2,4), state(2,full,1,4), state(4,full,7,4), state(3,full,7,4), state(2,full,7,4), changed(4,5,6), changed(4,4,6), changed(4,3,6), changed(3,5,6), changed(3,5,5), changed(3,5,4), changed(2,5,4), changed(2,4,4), changed(2,6,4), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(2,up,5,6), possibleMove(1,up,4,6), possibleMove(3,down,5,3), possibleMove(2,down,5,2), possibleMove(1,down,4,2), possibleMove(3,left,7,5), possibleMove(1,left,6,4), possibleMove(3,right,4,4), 
possibleMove(3,right,3,5), possibleMove(3,right,3,6), possibleMove(2,right,3,4), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,right,4,4), move(2,up,5,6), move(1,left,6,4), state(4,empty,5,6), state(4,empty,5,5), state(4,empty,5,4), state(4,empty,4,4), state(3,empty,5,6), state(3,empty,5,5), state(3,empty,6,4), state(2,empty,5,4), state(2,empty,6,4), state(4,full,1,3), state(4,full,2,3), state(4,full,5,3), state(4,full,5,2), state(4,full,5,1), state(4,full,4,5), state(4,full,4,3), state(4,full,4,2), state(4,full,4,1), state(4,full,3,5), state(4,full,3,4), state(4,full,3,3), state(4,full,3,2), state(4,full,3,1), state(4,full,6,3), state(4,full,7,3), state(3,full,1,3), state(3,full,2,3), state(3,full,5,4), state(3,full,5,3), state(3,full,5,2), state(3,full,5,1), state(3,full,4,5), state(3,full,4,4), state(3,full,4,3), state(3,full,4,2), state(3,full,4,1), state(3,full,3,5), state(3,full,3,4), state(3,full,3,3), state(3,full,3,2), state(3,full,3,1), state(3,full,6,3), state(3,full,7,3), state(2,full,1,3), state(2,full,2,3), state(2,full,5,5), 
state(2,full,5,3), state(2,full,5,2), state(2,full,5,1), state(2,full,4,5), state(2,full,4,4), state(2,full,4,3), state(2,full,4,2), state(2,full,4,1), state(2,full,3,5), state(2,full,3,4), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,2,5), state(4,full,5,7), state(4,full,4,7), state(4,full,4,6), state(4,full,3,7), state(4,full,3,6), state(4,full,6,5), state(4,full,7,5), state(3,full,1,5), state(3,full,2,5), state(3,full,5,7), state(3,full,4,7), state(3,full,4,6), state(3,full,3,7), state(3,full,3,6), state(3,full,6,5), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,4,6), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,2,4), state(4,full,1,4), state(3,full,2,4), state(3,full,1,4), state(2,full,2,4), state(2,full,1,4), state(4,full,7,4), state(4,full,6,4), state(3,full,7,4), state(2,full,7,4), changed(4,5,4), changed(4,4,4), changed(4,6,4), changed(3,5,6), changed(3,5,5), changed(3,5,4), changed(2,5,4), changed(2,4,4), changed(2,6,4), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(2,up,5,6), possibleMove(1,up,4,6), possibleMove(3,down,5,3), possibleMove(2,down,5,2), possibleMove(1,down,4,2), possibleMove(3,left,7,5), possibleMove(1,left,6,4), possibleMove(3,right,4,4), 
possibleMove(3,right,3,5), possibleMove(3,right,3,6), possibleMove(2,right,3,4), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,left,7,5), move(2,up,5,6), move(1,left,6,4), state(4,empty,5,6), state(4,empty,6,5), state(4,empty,6,4), state(4,empty,7,5), state(3,empty,5,6), state(3,empty,5,5), state(3,empty,6,4), state(2,empty,5,4), state(2,empty,6,4), state(4,full,1,3), state(4,full,2,3), state(4,full,5,5), state(4,full,5,4), state(4,full,5,3), state(4,full,5,2), state(4,full,5,1), state(4,full,4,5), state(4,full,4,4), state(4,full,4,3), state(4,full,4,2), state(4,full,4,1), state(4,full,3,5), state(4,full,3,4), state(4,full,3,3), state(4,full,3,2), state(4,full,3,1), state(4,full,6,3), state(4,full,7,3), state(3,full,1,3), state(3,full,2,3), state(3,full,5,4), state(3,full,5,3), state(3,full,5,2), state(3,full,5,1), state(3,full,4,5), state(3,full,4,4), state(3,full,4,3), state(3,full,4,2), state(3,full,4,1), state(3,full,3,5), state(3,full,3,4), state(3,full,3,3), state(3,full,3,2), state(3,full,3,1), state(3,full,6,3), state(3,full,7,3), 
state(2,full,1,3), state(2,full,2,3), state(2,full,5,5), state(2,full,5,3), state(2,full,5,2), state(2,full,5,1), state(2,full,4,5), state(2,full,4,4), state(2,full,4,3), state(2,full,4,2), state(2,full,4,1), state(2,full,3,5), state(2,full,3,4), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,2,5), state(4,full,5,7), state(4,full,4,7), state(4,full,4,6), state(4,full,3,7), state(4,full,3,6), state(3,full,1,5), state(3,full,2,5), state(3,full,5,7), state(3,full,4,7), state(3,full,4,6), state(3,full,3,7), state(3,full,3,6), state(3,full,6,5), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,4,6), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,2,4), state(4,full,1,4), state(3,full,2,4), state(3,full,1,4), state(2,full,2,4), state(2,full,1,4), state(4,full,7,4), state(3,full,7,4), state(2,full,7,4), changed(4,5,5), changed(4,6,5), changed(4,7,5), changed(3,5,6), changed(3,5,5), changed(3,5,4), changed(2,5,4), changed(2,4,4), changed(2,6,4), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(2,up,5,6), possibleMove(1,up,4,6), possibleMove(3,down,5,3), possibleMove(2,down,5,2), possibleMove(1,down,4,2), possibleMove(3,left,7,5), possibleMove(1,left,6,4), possibleMove(3,right,4,4), 
possibleMove(3,right,3,5), possibleMove(3,right,3,6), possibleMove(2,right,3,4), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,down,5,3), move(2,up,5,6), move(1,left,6,4), state(4,empty,5,6), state(4,empty,5,4), state(4,empty,5,3), state(4,empty,6,4), state(3,empty,5,6), state(3,empty,5,5), state(3,empty,6,4), state(2,empty,5,4), state(2,empty,6,4), state(4,full,1,3), state(4,full,2,3), state(4,full,5,5), state(4,full,5,2), state(4,full,5,1), state(4,full,4,5), state(4,full,4,4), state(4,full,4,3), state(4,full,4,2), state(4,full,4,1), state(4,full,3,5), state(4,full,3,4), state(4,full,3,3), state(4,full,3,2), state(4,full,3,1), state(4,full,6,3), state(4,full,7,3), state(3,full,1,3), state(3,full,2,3), state(3,full,5,4), state(3,full,5,3), state(3,full,5,2), state(3,full,5,1), state(3,full,4,5), state(3,full,4,4), state(3,full,4,3), state(3,full,4,2), state(3,full,4,1), state(3,full,3,5), state(3,full,3,4), state(3,full,3,3), state(3,full,3,2), state(3,full,3,1), state(3,full,6,3), state(3,full,7,3), state(2,full,1,3), state(2,full,2,3), 
state(2,full,5,5), state(2,full,5,3), state(2,full,5,2), state(2,full,5,1), state(2,full,4,5), state(2,full,4,4), state(2,full,4,3), state(2,full,4,2), state(2,full,4,1), state(2,full,3,5), state(2,full,3,4), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,2,5), state(4,full,5,7), state(4,full,4,7), state(4,full,4,6), state(4,full,3,7), state(4,full,3,6), state(4,full,6,5), state(4,full,7,5), state(3,full,1,5), state(3,full,2,5), state(3,full,5,7), state(3,full,4,7), state(3,full,4,6), state(3,full,3,7), state(3,full,3,6), state(3,full,6,5), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,4,6), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,2,4), state(4,full,1,4), state(3,full,2,4), state(3,full,1,4), state(2,full,2,4), state(2,full,1,4), state(4,full,7,4), state(3,full,7,4), state(2,full,7,4), changed(4,5,5), changed(4,5,4), changed(4,5,3), changed(3,5,6), changed(3,5,5), changed(3,5,4), changed(2,5,4), changed(2,4,4), changed(2,6,4), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(2,up,5,6), possibleMove(1,up,4,6), possibleMove(3,down,5,3), possibleMove(2,down,5,2), possibleMove(1,down,4,2), possibleMove(3,left,7,5), possibleMove(1,left,6,4), possibleMove(3,right,4,4), 
possibleMove(3,right,3,5), possibleMove(3,right,3,6), possibleMove(2,right,3,4), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,right,1,4), move(2,right,3,4), move(1,left,6,4), state(4,empty,1,4), state(4,empty,2,4), state(4,empty,4,4), state(4,empty,6,4), state(3,empty,4,4), state(3,empty,3,4), state(3,empty,6,4), state(2,empty,5,4), state(2,empty,6,4), state(4,full,1,3), state(4,full,2,3), state(4,full,5,5), state(4,full,5,4), state(4,full,5,3), state(4,full,5,2), state(4,full,5,1), state(4,full,4,5), state(4,full,4,3), state(4,full,4,2), state(4,full,4,1), state(4,full,3,5), state(4,full,3,4), state(4,full,3,3), state(4,full,3,2), state(4,full,3,1), state(4,full,6,3), state(4,full,7,3), state(3,full,1,3), state(3,full,2,3), state(3,full,5,5), state(3,full,5,4), state(3,full,5,3), state(3,full,5,2), state(3,full,5,1), state(3,full,4,5), state(3,full,4,3), state(3,full,4,2), state(3,full,4,1), state(3,full,3,5), state(3,full,3,3), state(3,full,3,2), state(3,full,3,1), state(3,full,6,3), state(3,full,7,3), state(2,full,1,3), state(2,full,2,3), 
state(2,full,5,5), state(2,full,5,3), state(2,full,5,2), state(2,full,5,1), state(2,full,4,5), state(2,full,4,4), state(2,full,4,3), state(2,full,4,2), state(2,full,4,1), state(2,full,3,5), state(2,full,3,4), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,2,5), state(4,full,5,7), state(4,full,5,6), state(4,full,4,7), state(4,full,4,6), state(4,full,3,7), state(4,full,3,6), state(4,full,6,5), state(4,full,7,5), state(3,full,1,5), state(3,full,2,5), state(3,full,5,7), state(3,full,5,6), state(3,full,4,7), state(3,full,4,6), state(3,full,3,7), state(3,full,3,6), state(3,full,6,5), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,4,6), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(3,full,2,4), state(3,full,1,4), state(2,full,2,4), state(2,full,1,4), state(4,full,7,4), state(3,full,7,4), state(2,full,7,4), changed(4,1,4), changed(4,2,4), changed(4,3,4), changed(3,5,4), changed(3,4,4), changed(3,3,4), changed(2,5,4), changed(2,4,4), changed(2,6,4), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(3,up,3,6), possibleMove(3,up,4,6), possibleMove(2,up,5,6), possibleMove(1,up,4,6), possibleMove(3,down,3,2), possibleMove(3,down,4,2), possibleMove(2,down,5,2), possibleMove(1,down,4,2), 
possibleMove(1,left,6,4), possibleMove(3,right,1,4), possibleMove(2,right,3,4), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,down,3,2), move(2,right,3,4), move(1,left,6,4), state(4,empty,4,4), state(4,empty,3,3), state(4,empty,3,2), state(4,empty,6,4), state(3,empty,4,4), state(3,empty,3,4), state(3,empty,6,4), state(2,empty,5,4), state(2,empty,6,4), state(4,full,1,3), state(4,full,2,3), state(4,full,5,5), state(4,full,5,4), state(4,full,5,3), state(4,full,5,2), state(4,full,5,1), state(4,full,4,5), state(4,full,4,3), state(4,full,4,2), state(4,full,4,1), state(4,full,3,5), state(4,full,3,4), state(4,full,3,1), state(4,full,6,3), state(4,full,7,3), state(3,full,1,3), state(3,full,2,3), state(3,full,5,5), state(3,full,5,4), state(3,full,5,3), state(3,full,5,2), state(3,full,5,1), state(3,full,4,5), state(3,full,4,3), state(3,full,4,2), state(3,full,4,1), state(3,full,3,5), state(3,full,3,3), state(3,full,3,2), state(3,full,3,1), state(3,full,6,3), state(3,full,7,3), state(2,full,1,3), state(2,full,2,3), state(2,full,5,5), state(2,full,5,3), 
state(2,full,5,2), state(2,full,5,1), state(2,full,4,5), state(2,full,4,4), state(2,full,4,3), state(2,full,4,2), state(2,full,4,1), state(2,full,3,5), state(2,full,3,4), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,2,5), state(4,full,5,7), state(4,full,5,6), state(4,full,4,7), state(4,full,4,6), state(4,full,3,7), state(4,full,3,6), state(4,full,6,5), state(4,full,7,5), state(3,full,1,5), state(3,full,2,5), state(3,full,5,7), state(3,full,5,6), state(3,full,4,7), state(3,full,4,6), state(3,full,3,7), state(3,full,3,6), state(3,full,6,5), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,4,6), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,2,4), state(4,full,1,4), state(3,full,2,4), state(3,full,1,4), state(2,full,2,4), state(2,full,1,4), state(4,full,7,4), state(3,full,7,4), state(2,full,7,4), changed(4,3,4), changed(4,3,3), changed(4,3,2), changed(3,5,4), changed(3,4,4), changed(3,3,4), changed(2,5,4), changed(2,4,4), changed(2,6,4), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(3,up,3,6), possibleMove(3,up,4,6), possibleMove(2,up,5,6), possibleMove(1,up,4,6), possibleMove(3,down,3,2), possibleMove(3,down,4,2), possibleMove(2,down,5,2), possibleMove(1,down,4,2), 
possibleMove(1,left,6,4), possibleMove(3,right,1,4), possibleMove(2,right,3,4), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,down,4,2), move(2,right,3,4), move(1,left,6,4), state(4,empty,4,3), state(4,empty,4,2), state(4,empty,3,4), state(4,empty,6,4), state(3,empty,4,4), state(3,empty,3,4), state(3,empty,6,4), state(2,empty,5,4), state(2,empty,6,4), state(4,full,1,3), state(4,full,2,3), state(4,full,5,5), state(4,full,5,4), state(4,full,5,3), state(4,full,5,2), state(4,full,5,1), state(4,full,4,5), state(4,full,4,4), state(4,full,4,1), state(4,full,3,5), state(4,full,3,3), state(4,full,3,2), state(4,full,3,1), state(4,full,6,3), state(4,full,7,3), state(3,full,1,3), state(3,full,2,3), state(3,full,5,5), state(3,full,5,4), state(3,full,5,3), state(3,full,5,2), state(3,full,5,1), state(3,full,4,5), state(3,full,4,3), state(3,full,4,2), state(3,full,4,1), state(3,full,3,5), state(3,full,3,3), state(3,full,3,2), state(3,full,3,1), state(3,full,6,3), state(3,full,7,3), state(2,full,1,3), state(2,full,2,3), state(2,full,5,5), state(2,full,5,3), 
state(2,full,5,2), state(2,full,5,1), state(2,full,4,5), state(2,full,4,4), state(2,full,4,3), state(2,full,4,2), state(2,full,4,1), state(2,full,3,5), state(2,full,3,4), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,2,5), state(4,full,5,7), state(4,full,5,6), state(4,full,4,7), state(4,full,4,6), state(4,full,3,7), state(4,full,3,6), state(4,full,6,5), state(4,full,7,5), state(3,full,1,5), state(3,full,2,5), state(3,full,5,7), state(3,full,5,6), state(3,full,4,7), state(3,full,4,6), state(3,full,3,7), state(3,full,3,6), state(3,full,6,5), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,4,6), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,2,4), state(4,full,1,4), state(3,full,2,4), state(3,full,1,4), state(2,full,2,4), state(2,full,1,4), state(4,full,7,4), state(3,full,7,4), state(2,full,7,4), changed(4,4,4), changed(4,4,3), changed(4,4,2), changed(3,5,4), changed(3,4,4), changed(3,3,4), changed(2,5,4), changed(2,4,4), changed(2,6,4), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(3,up,3,6), possibleMove(3,up,4,6), possibleMove(2,up,5,6), possibleMove(1,up,4,6), possibleMove(3,down,3,2), possibleMove(3,down,4,2), possibleMove(2,down,5,2), possibleMove(1,down,4,2), 
possibleMove(1,left,6,4), possibleMove(3,right,1,4), possibleMove(2,right,3,4), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,up,4,6), move(2,right,3,4), move(1,left,6,4), state(4,empty,4,6), state(4,empty,4,5), state(4,empty,3,4), state(4,empty,6,4), state(3,empty,4,4), state(3,empty,3,4), state(3,empty,6,4), state(2,empty,5,4), state(2,empty,6,4), state(4,full,1,3), state(4,full,2,3), state(4,full,5,5), state(4,full,5,4), state(4,full,5,3), state(4,full,5,2), state(4,full,5,1), state(4,full,4,4), state(4,full,4,3), state(4,full,4,2), state(4,full,4,1), state(4,full,3,5), state(4,full,3,3), state(4,full,3,2), state(4,full,3,1), state(4,full,6,3), state(4,full,7,3), state(3,full,1,3), state(3,full,2,3), state(3,full,5,5), state(3,full,5,4), state(3,full,5,3), state(3,full,5,2), state(3,full,5,1), state(3,full,4,5), state(3,full,4,3), state(3,full,4,2), state(3,full,4,1), state(3,full,3,5), state(3,full,3,3), state(3,full,3,2), state(3,full,3,1), state(3,full,6,3), state(3,full,7,3), state(2,full,1,3), state(2,full,2,3), state(2,full,5,5), 
state(2,full,5,3), state(2,full,5,2), state(2,full,5,1), state(2,full,4,5), state(2,full,4,4), state(2,full,4,3), state(2,full,4,2), state(2,full,4,1), state(2,full,3,5), state(2,full,3,4), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,2,5), state(4,full,5,7), state(4,full,5,6), state(4,full,4,7), state(4,full,3,7), state(4,full,3,6), state(4,full,6,5), state(4,full,7,5), state(3,full,1,5), state(3,full,2,5), state(3,full,5,7), state(3,full,5,6), state(3,full,4,7), state(3,full,4,6), state(3,full,3,7), state(3,full,3,6), state(3,full,6,5), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,4,6), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,2,4), state(4,full,1,4), state(3,full,2,4), state(3,full,1,4), state(2,full,2,4), state(2,full,1,4), state(4,full,7,4), state(3,full,7,4), state(2,full,7,4), changed(4,4,6), changed(4,4,5), changed(4,4,4), changed(3,5,4), changed(3,4,4), changed(3,3,4), changed(2,5,4), changed(2,4,4), changed(2,6,4), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(3,up,3,6), possibleMove(3,up,4,6), possibleMove(2,up,5,6), possibleMove(1,up,4,6), possibleMove(3,down,3,2), possibleMove(3,down,4,2), possibleMove(2,down,5,2), possibleMove(1,down,4,2), 
possibleMove(1,left,6,4), possibleMove(3,right,1,4), possibleMove(2,right,3,4), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,right,3,5), move(2,left,6,5), move(1,up,4,6), state(4,empty,4,6), state(4,empty,4,5), state(4,empty,3,5), state(4,empty,6,5), state(3,empty,5,5), state(3,empty,4,6), state(3,empty,6,5), state(2,empty,4,6), state(2,empty,4,5), state(4,full,1,3), state(4,full,2,3), state(4,full,5,5), state(4,full,5,4), state(4,full,5,3), state(4,full,5,2), state(4,full,5,1), state(4,full,4,4), state(4,full,4,3), state(4,full,4,2), state(4,full,4,1), state(4,full,3,4), state(4,full,3,3), state(4,full,3,2), state(4,full,3,1), state(4,full,6,3), state(4,full,7,3), state(3,full,1,3), state(3,full,2,3), state(3,full,5,4), state(3,full,5,3), state(3,full,5,2), state(3,full,5,1), state(3,full,4,5), state(3,full,4,4), state(3,full,4,3), state(3,full,4,2), state(3,full,4,1), state(3,full,3,5), state(3,full,3,4), state(3,full,3,3), state(3,full,3,2), state(3,full,3,1), state(3,full,6,3), state(3,full,7,3), state(2,full,1,3), state(2,full,2,3), 
state(2,full,5,5), state(2,full,5,4), state(2,full,5,3), state(2,full,5,2), state(2,full,5,1), state(2,full,4,4), state(2,full,4,3), state(2,full,4,2), state(2,full,4,1), state(2,full,3,5), state(2,full,3,4), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,2,5), state(4,full,5,7), state(4,full,5,6), state(4,full,4,7), state(4,full,3,7), state(4,full,3,6), state(4,full,7,5), state(3,full,1,5), state(3,full,2,5), state(3,full,5,7), state(3,full,5,6), state(3,full,4,7), state(3,full,3,7), state(3,full,3,6), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,2,4), state(4,full,1,4), state(3,full,2,4), state(3,full,1,4), state(2,full,2,4), state(2,full,1,4), state(4,full,7,4), state(4,full,6,4), state(3,full,7,4), state(3,full,6,4), state(2,full,7,4), state(2,full,6,4), changed(4,5,5), changed(4,4,5), changed(4,3,5), changed(3,5,5), changed(3,4,5), changed(3,6,5), changed(2,4,6), changed(2,4,5), changed(2,4,4), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(3,up,5,7), possibleMove(1,up,4,6), possibleMove(3,down,6,3), possibleMove(3,down,4,4), possibleMove(3,down,5,3), possibleMove(2,down,4,3), possibleMove(1,down,4,2), possibleMove(2,left,6,5), 
possibleMove(1,left,6,4), possibleMove(3,right,3,5), possibleMove(2,right,2,5), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,left,5,5), move(2,right,2,5), move(1,up,4,6), state(4,empty,2,5), state(4,empty,5,5), state(4,empty,4,6), state(4,empty,4,5), state(3,empty,2,5), state(3,empty,4,6), state(3,empty,3,5), state(2,empty,4,6), state(2,empty,4,5), state(4,full,1,3), state(4,full,2,3), state(4,full,5,4), state(4,full,5,3), state(4,full,5,2), state(4,full,5,1), state(4,full,4,4), state(4,full,4,3), state(4,full,4,2), state(4,full,4,1), state(4,full,3,5), state(4,full,3,4), state(4,full,3,3), state(4,full,3,2), state(4,full,3,1), state(4,full,6,3), state(4,full,7,3), state(3,full,1,3), state(3,full,2,3), state(3,full,5,5), state(3,full,5,4), state(3,full,5,3), state(3,full,5,2), state(3,full,5,1), state(3,full,4,5), state(3,full,4,4), state(3,full,4,3), state(3,full,4,2), state(3,full,4,1), state(3,full,3,4), state(3,full,3,3), state(3,full,3,2), state(3,full,3,1), state(3,full,6,3), state(3,full,7,3), state(2,full,1,3), state(2,full,2,3), 
state(2,full,5,5), state(2,full,5,4), state(2,full,5,3), state(2,full,5,2), state(2,full,5,1), state(2,full,4,4), state(2,full,4,3), state(2,full,4,2), state(2,full,4,1), state(2,full,3,5), state(2,full,3,4), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,5,7), state(4,full,5,6), state(4,full,4,7), state(4,full,3,7), state(4,full,3,6), state(4,full,6,5), state(4,full,7,5), state(3,full,1,5), state(3,full,5,7), state(3,full,5,6), state(3,full,4,7), state(3,full,3,7), state(3,full,3,6), state(3,full,6,5), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,2,4), state(4,full,1,4), state(3,full,2,4), state(3,full,1,4), state(2,full,2,4), state(2,full,1,4), state(4,full,7,4), state(4,full,6,4), state(3,full,7,4), state(3,full,6,4), state(2,full,7,4), state(2,full,6,4), changed(4,5,5), changed(4,4,5), changed(4,3,5), changed(3,2,5), changed(3,4,5), changed(3,3,5), changed(2,4,6), changed(2,4,5), changed(2,4,4), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(3,up,3,7), possibleMove(1,up,4,6), possibleMove(3,down,3,3), possibleMove(3,down,4,4), possibleMove(3,down,2,3), possibleMove(2,down,4,3), possibleMove(1,down,4,2), possibleMove(3,left,5,5), 
possibleMove(2,left,6,5), possibleMove(1,left,6,4), possibleMove(2,right,2,5), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,up,3,7), move(2,right,2,5), move(1,up,4,6), state(4,empty,2,5), state(4,empty,4,6), state(4,empty,3,7), state(4,empty,3,6), state(3,empty,2,5), state(3,empty,4,6), state(3,empty,3,5), state(2,empty,4,6), state(2,empty,4,5), state(4,full,1,3), state(4,full,2,3), state(4,full,5,5), state(4,full,5,4), state(4,full,5,3), state(4,full,5,2), state(4,full,5,1), state(4,full,4,5), state(4,full,4,4), state(4,full,4,3), state(4,full,4,2), state(4,full,4,1), state(4,full,3,5), state(4,full,3,4), state(4,full,3,3), state(4,full,3,2), state(4,full,3,1), state(4,full,6,3), state(4,full,7,3), state(3,full,1,3), state(3,full,2,3), state(3,full,5,5), state(3,full,5,4), state(3,full,5,3), state(3,full,5,2), state(3,full,5,1), state(3,full,4,5), state(3,full,4,4), state(3,full,4,3), state(3,full,4,2), state(3,full,4,1), state(3,full,3,4), state(3,full,3,3), state(3,full,3,2), state(3,full,3,1), state(3,full,6,3), state(3,full,7,3), 
state(2,full,1,3), state(2,full,2,3), state(2,full,5,5), state(2,full,5,4), state(2,full,5,3), state(2,full,5,2), state(2,full,5,1), state(2,full,4,4), state(2,full,4,3), state(2,full,4,2), state(2,full,4,1), state(2,full,3,5), state(2,full,3,4), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,5,7), state(4,full,5,6), state(4,full,4,7), state(4,full,6,5), state(4,full,7,5), state(3,full,1,5), state(3,full,5,7), state(3,full,5,6), state(3,full,4,7), state(3,full,3,7), state(3,full,3,6), state(3,full,6,5), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,2,4), state(4,full,1,4), state(3,full,2,4), state(3,full,1,4), state(2,full,2,4), state(2,full,1,4), state(4,full,7,4), state(4,full,6,4), state(3,full,7,4), state(3,full,6,4), state(2,full,7,4), state(2,full,6,4), changed(4,3,7), changed(4,3,6), changed(4,3,5), changed(3,2,5), changed(3,4,5), changed(3,3,5), changed(2,4,6), changed(2,4,5), changed(2,4,4), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(3,up,3,7), possibleMove(1,up,4,6), possibleMove(3,down,3,3), possibleMove(3,down,4,4), possibleMove(3,down,2,3), possibleMove(2,down,4,3), possibleMove(1,down,4,2), possibleMove(3,left,5,5), 
possibleMove(2,left,6,5), possibleMove(1,left,6,4), possibleMove(2,right,2,5), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,right,2,5), move(2,up,4,5), move(1,down,4,2), state(4,empty,2,5), state(4,empty,4,4), state(4,empty,4,2), state(4,empty,3,5), state(3,empty,4,5), state(3,empty,4,4), state(3,empty,4,2), state(2,empty,4,3), state(2,empty,4,2), state(4,full,1,3), state(4,full,2,3), state(4,full,5,5), state(4,full,5,4), state(4,full,5,3), state(4,full,5,2), state(4,full,5,1), state(4,full,4,5), state(4,full,4,3), state(4,full,4,1), state(4,full,3,4), state(4,full,3,3), state(4,full,3,2), state(4,full,3,1), state(4,full,6,3), state(4,full,7,3), state(3,full,1,3), state(3,full,2,3), state(3,full,5,5), state(3,full,5,4), state(3,full,5,3), state(3,full,5,2), state(3,full,5,1), state(3,full,4,3), state(3,full,4,1), state(3,full,3,5), state(3,full,3,4), state(3,full,3,3), state(3,full,3,2), state(3,full,3,1), state(3,full,6,3), state(3,full,7,3), state(2,full,1,3), state(2,full,2,3), state(2,full,5,5), state(2,full,5,4), state(2,full,5,3), 
state(2,full,5,2), state(2,full,5,1), state(2,full,4,5), state(2,full,4,4), state(2,full,4,1), state(2,full,3,5), state(2,full,3,4), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,5,7), state(4,full,5,6), state(4,full,4,7), state(4,full,4,6), state(4,full,3,7), state(4,full,3,6), state(4,full,6,5), state(4,full,7,5), state(3,full,1,5), state(3,full,2,5), state(3,full,5,7), state(3,full,5,6), state(3,full,4,7), state(3,full,4,6), state(3,full,3,7), state(3,full,3,6), state(3,full,6,5), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,4,6), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,2,4), state(4,full,1,4), state(3,full,2,4), state(3,full,1,4), state(2,full,2,4), state(2,full,1,4), state(4,full,7,4), state(4,full,6,4), state(3,full,7,4), state(3,full,6,4), state(2,full,7,4), state(2,full,6,4), changed(4,2,5), changed(4,4,5), changed(4,3,5), changed(3,4,5), changed(3,4,4), changed(3,4,3), changed(2,4,4), changed(2,4,3), changed(2,4,2), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(3,up,4,7), possibleMove(2,up,4,5), possibleMove(1,up,4,6), possibleMove(1,down,4,2), possibleMove(3,left,6,4), possibleMove(3,left,6,5), possibleMove(2,left,6,3), possibleMove(1,left,6,4), 
possibleMove(3,right,2,4), possibleMove(3,right,2,5), possibleMove(2,right,2,3), possibleMove(1,right,2,4)}
{range(1), direction(up), direction(down), direction(left), direction(right), status(full), status(empty), full(3,1), full(4,1), full(5,1), full(3,2), full(4,2), full(5,2), full(1,3), full(2,3), full(3,3), full(4,3), full(5,3), full(6,3), full(7,3), full(1,4), full(2,4), full(3,4), empty(4,4), full(5,4), full(6,4), full(7,4), full(1,5), full(2,5), full(3,5), full(4,5), full(5,5), full(6,5), full(7,5), full(3,6), full(4,6), full(5,6), full(3,7), full(4,7), full(5,7), range(2), range(3), range(4), range(5), range(6), range(7), location(1,5), location(1,4), location(1,3), location(2,5), location(2,4), location(2,3), location(5,7), location(5,6), location(5,5), location(5,4), location(5,3), location(5,2), location(5,1), location(4,7), location(4,6), location(4,5), location(4,4), location(4,3), location(4,2), location(4,1), location(3,7), location(3,6), location(3,5), location(3,4), location(3,3), location(3,2), location(3,1), location(6,5), location(6,4), location(6,3), location(7,5), location(7,4), location(7,3), time(1), time(2), time(3), move(3,right,3,5), move(2,up,5,6), move(1,left,6,4), state(4,empty,5,6), state(4,empty,4,5), state(4,empty,3,5), state(4,empty,6,4), state(3,empty,5,6), state(3,empty,5,5), state(3,empty,6,4), state(2,empty,5,4), state(2,empty,6,4), state(4,full,1,3), state(4,full,2,3), state(4,full,5,5), state(4,full,5,4), state(4,full,5,3), state(4,full,5,2), state(4,full,5,1), state(4,full,4,4), state(4,full,4,3), state(4,full,4,2), state(4,full,4,1), state(4,full,3,4), state(4,full,3,3), state(4,full,3,2), state(4,full,3,1), state(4,full,6,3), state(4,full,7,3), state(3,full,1,3), state(3,full,2,3), state(3,full,5,4), state(3,full,5,3), state(3,full,5,2), state(3,full,5,1), state(3,full,4,5), state(3,full,4,4), state(3,full,4,3), state(3,full,4,2), state(3,full,4,1), state(3,full,3,5), state(3,full,3,4), state(3,full,3,3), state(3,full,3,2), state(3,full,3,1), state(3,full,6,3), state(3,full,7,3), state(2,full,1,3), state(2,full,2,3), 
state(2,full,5,5), state(2,full,5,3), state(2,full,5,2), state(2,full,5,1), state(2,full,4,5), state(2,full,4,4), state(2,full,4,3), state(2,full,4,2), state(2,full,4,1), state(2,full,3,5), state(2,full,3,4), state(2,full,3,3), state(2,full,3,2), state(2,full,3,1), state(2,full,6,3), state(2,full,7,3), state(4,full,1,5), state(4,full,2,5), state(4,full,5,7), state(4,full,4,7), state(4,full,4,6), state(4,full,3,7), state(4,full,3,6), state(4,full,6,5), state(4,full,7,5), state(3,full,1,5), state(3,full,2,5), state(3,full,5,7), state(3,full,4,7), state(3,full,4,6), state(3,full,3,7), state(3,full,3,6), state(3,full,6,5), state(3,full,7,5), state(2,full,1,5), state(2,full,2,5), state(2,full,5,7), state(2,full,5,6), state(2,full,4,7), state(2,full,4,6), state(2,full,3,7), state(2,full,3,6), state(2,full,6,5), state(2,full,7,5), state(4,full,2,4), state(4,full,1,4), state(3,full,2,4), state(3,full,1,4), state(2,full,2,4), state(2,full,1,4), state(4,full,7,4), state(3,full,7,4), state(2,full,7,4), changed(4,5,5), changed(4,4,5), changed(4,3,5), changed(3,5,6), changed(3,5,5), changed(3,5,4), changed(2,5,4), changed(2,4,4), changed(2,6,4), state(1,full,5,7), state(1,full,4,7), state(1,full,3,7), state(1,full,5,6), state(1,full,4,6), state(1,full,3,6), state(1,full,7,5), state(1,full,6,5), state(1,full,5,5), state(1,full,4,5), state(1,full,3,5), state(1,full,2,5), state(1,full,1,5), state(1,full,7,4), state(1,full,6,4), state(1,full,5,4), state(1,full,3,4), state(1,full,2,4), state(1,full,1,4), state(1,full,7,3), state(1,full,6,3), state(1,full,5,3), state(1,full,4,3), state(1,full,3,3), state(1,full,2,3), state(1,full,1,3), state(1,full,5,2), state(1,full,4,2), state(1,full,3,2), state(1,full,5,1), state(1,full,4,1), state(1,full,3,1), state(1,empty,4,4), possibleMove(2,up,5,6), possibleMove(1,up,4,6), possibleMove(3,down,5,3), possibleMove(2,down,5,2), possibleMove(1,down,4,2), possibleMove(3,left,7,5), possibleMove(1,left,6,4), possibleMove(3,right,4,4), 
possibleMove(3,right,3,5), possibleMove(3,right,3,6), possibleMove(2,right,3,4), possibleMove(1,right,2,4)}
"""
| 82.373103
| 4,081
| 0.642977
| 80,825
| 309,311
| 2.460625
| 0.012286
| 0.056642
| 0.09815
| 0.023617
| 0.829138
| 0.80804
| 0.807779
| 0.801373
| 0.801307
| 0.800784
| 0
| 0.254099
| 0.101755
| 309,311
| 3,754
| 4,082
| 82.395045
| 0.461717
| 0
| 0
| 0.000533
| 0
| 0.01838
| 0.9999
| 0.06959
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
49525ffd61df5b8b36d953c4aad2c6e99b85208c
| 3,681
|
py
|
Python
|
galaxy/coralsnp_reports/lib/galaxy/webapps/coralsnp_reports/controllers/colonies.py
|
skitchen19/galaxy_tools
|
b935f36cfe430263564503ebb71f78dc79315acb
|
[
"MIT"
] | 3
|
2017-04-05T18:01:59.000Z
|
2019-05-03T14:15:31.000Z
|
galaxy/coralsnp_reports/lib/galaxy/webapps/coralsnp_reports/controllers/colonies.py
|
skitchen19/galaxy_tools
|
b935f36cfe430263564503ebb71f78dc79315acb
|
[
"MIT"
] | 6
|
2019-02-27T15:45:58.000Z
|
2021-01-12T15:18:50.000Z
|
galaxy/coralsnp_reports/lib/galaxy/webapps/coralsnp_reports/controllers/colonies.py
|
skitchen19/galaxy_tools
|
b935f36cfe430263564503ebb71f78dc79315acb
|
[
"MIT"
] | 2
|
2018-10-26T18:36:39.000Z
|
2019-01-28T15:12:39.000Z
|
import logging
import sqlalchemy as sa
from markupsafe import escape
import galaxy.model
from galaxy import util
from . import BaseUIController
from galaxy.web.base.controller import web
from galaxy.webapps.reports.controllers.query import ReportQueryBuilder
log = logging.getLogger(__name__)
class Colonies(BaseUIController, ReportQueryBuilder):
    """Reports controller exposing coral colony location listings.

    Latitude/longitude are masked with a "Private until <date>" placeholder
    for samples whose ``public`` flag is not set.
    """

    @staticmethod
    def _masked_lat_lon(row):
        """Return ``(latitude, longitude)`` for *row*, masking both values
        when the joined sample is not public.

        The comparison goes through ``str(row.public) == "True"`` so the
        behavior is identical for boolean and string-typed ``public``
        columns, matching the original inline logic.
        """
        if str(row.public) == "True":
            return row.latitude, row.longitude
        # Only the date part (first 10 chars, YYYY-MM-DD) of the timestamp
        # is shown to the user.
        public_after_date = str(row.public_after_date)[:10]
        masked = "Private until %s" % public_after_date
        return masked, masked

    @web.expose
    def all(self, trans, **kwd):
        """Render a page listing every colony joined with its sample.

        Coordinates are privacy-masked via :meth:`_masked_lat_lon`.
        """
        message = escape(util.restore_text(kwd.get('message', '')))
        q = sa.select((galaxy.model.corals.Colony.table.c.id,
                       galaxy.model.corals.Colony.table.c.latitude,
                       galaxy.model.corals.Colony.table.c.longitude,
                       galaxy.model.corals.Colony.table.c.depth,
                       galaxy.model.corals.Colony.table.c.reef_id,
                       galaxy.model.corals.Sample.table.c.public,
                       galaxy.model.corals.Sample.table.c.public_after_date),
                      from_obj=[galaxy.model.corals.Colony.table,
                                galaxy.model.corals.Sample.table],
                      whereclause=galaxy.model.corals.Colony.table.c.id == galaxy.model.corals.Sample.table.c.colony_id,
                      order_by=[galaxy.model.corals.Colony.table.c.id])
        colonies = []
        for row in q.execute():
            latitude, longitude = self._masked_lat_lon(row)
            colonies.append((row.id, latitude, longitude, row.depth, row.reef_id))
        return trans.fill_template('/webapps/coralsnp_reports/colonies.mako', colonies=colonies, message=message)

    @web.expose
    def of_sample(self, trans, **kwd):
        """Render the colony belonging to one sample (``colony_id`` kwarg).

        ``affy_id`` is passed straight through to the template for display.
        """
        message = escape(util.restore_text(kwd.get('message', '')))
        affy_id = kwd.get('affy_id')
        colony_id = kwd.get('colony_id')
        q = sa.select((galaxy.model.corals.Colony.table.c.latitude,
                       galaxy.model.corals.Colony.table.c.longitude,
                       galaxy.model.corals.Colony.table.c.depth,
                       galaxy.model.corals.Colony.table.c.reef_id,
                       galaxy.model.corals.Sample.table.c.public,
                       galaxy.model.corals.Sample.table.c.public_after_date),
                      from_obj=[galaxy.model.corals.Colony.table,
                                galaxy.model.corals.Sample.table],
                      whereclause=sa.and_(galaxy.model.corals.Colony.table.c.id == colony_id,
                                          galaxy.model.corals.Colony.table.c.id == galaxy.model.corals.Sample.table.c.colony_id),
                      order_by=[galaxy.model.corals.Colony.table.c.id])
        colonies = []
        for row in q.execute():
            latitude, longitude = self._masked_lat_lon(row)
            colonies.append((latitude, longitude, row.depth, row.reef_id))
        return trans.fill_template('/webapps/coralsnp_reports/colony_of_sample.mako',
                                   affy_id=affy_id,
                                   colonies=colonies,
                                   message=message)
| 49.08
| 129
| 0.581907
| 416
| 3,681
| 5.016827
| 0.185096
| 0.131768
| 0.195496
| 0.17633
| 0.767609
| 0.767609
| 0.767609
| 0.751797
| 0.751797
| 0.743172
| 0
| 0.001579
| 0.3116
| 3,681
| 74
| 130
| 49.743243
| 0.822021
| 0
| 0
| 0.597015
| 0
| 0
| 0.051073
| 0.023363
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029851
| false
| 0
| 0.119403
| 0
| 0.19403
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
497c98258d82606e90508a4c90bf4f00ae4e6795
| 63,107
|
py
|
Python
|
pycqed/measurement/awg_sweep_functions.py
|
nuttamas/PycQED_py3
|
1ee35c7428d36ed42ba4afb5d4bda98140b2283e
|
[
"MIT"
] | 60
|
2016-08-03T10:00:18.000Z
|
2021-11-10T11:46:16.000Z
|
pycqed/measurement/awg_sweep_functions.py
|
nuttamas/PycQED_py3
|
1ee35c7428d36ed42ba4afb5d4bda98140b2283e
|
[
"MIT"
] | 512
|
2016-08-03T17:10:02.000Z
|
2022-03-31T14:03:43.000Z
|
pycqed/measurement/awg_sweep_functions.py
|
nuttamas/PycQED_py3
|
1ee35c7428d36ed42ba4afb5d4bda98140b2283e
|
[
"MIT"
] | 34
|
2016-10-19T12:00:52.000Z
|
2022-03-19T04:43:26.000Z
|
import numpy as np
import logging
from pycqed.measurement import sweep_functions as swf
from pycqed.measurement.randomized_benchmarking import randomized_benchmarking as rb
from pycqed.measurement.pulse_sequences import standard_sequences as st_seqs
from pycqed.measurement.pulse_sequences import single_qubit_tek_seq_elts as sqs
from pycqed.measurement.pulse_sequences import single_qubit_2nd_exc_seqs as sqs2
from pycqed.measurement.pulse_sequences import fluxing_sequences as fsqs
from pycqed.measurement.pulse_sequences import multi_qubit_tek_seq_elts as mq_sqs
import time
class File(swf.Hard_Sweep):
    """Hard sweep that loads a pre-generated AWG setup file by name."""

    def __init__(self, filename, AWG, title=None, NoElements=None, upload=True):
        """
        Args:
            filename: base name of the setup file; '_FILE' is appended.
            AWG: AWG instrument used to load the setup file.
            title: optional display name; falls back to *filename*.
            NoElements: unused; kept for interface compatibility.
            upload: when False, prepare() does nothing.
        """
        # Call the base initializer like every other sweep in this module.
        super().__init__()
        # NOTE: the original assigned self.upload twice; once is enough.
        self.upload = upload
        self.AWG = AWG
        if title:
            self.name = title
        else:
            self.name = filename
        self.filename = filename + '_FILE'
        self.parameter_name = 'amplitude'
        self.unit = 'V'

    def prepare(self, **kw):
        """Point the AWG at the setup file (no-op when upload is False)."""
        if self.upload:
            self.AWG.set_setup_filename(self.filename)
class awg_seq_swf(swf.Hard_Sweep):
    """
    A wrapper for awg sequence generating functions.
    Works as a general awg sweep function.
    """

    def __init__(self, awg_seq_func, awg_seq_func_kwargs,
                 parameter_name=None, unit='a.u.',
                 AWG=None,
                 fluxing_channels=None,
                 upload=True, return_seq=False):
        """
        Args:
            awg_seq_func: sequence-generating function called in prepare().
            awg_seq_func_kwargs: keyword arguments passed to awg_seq_func.
            parameter_name: sweep-parameter name; defaults to 'points'.
            unit: unit of the sweep parameter.
            AWG: AWG instrument whose channel amplitudes are rescaled.
            fluxing_channels: channels temporarily set to amplitude 2 during
                upload; defaults to no channels.
            upload: when False, prepare() skips sequence generation.
            return_seq: kept for interface compatibility (unused here).
        """
        super().__init__()
        self.upload = upload
        self.awg_seq_func = awg_seq_func
        self.awg_seq_func_kwargs = awg_seq_func_kwargs
        self.unit = unit
        self.name = 'swf_' + awg_seq_func.__name__
        # FIX: the default used to be a shared mutable list ([]); use a None
        # sentinel so instances never share state (backward-compatible).
        self.fluxing_channels = [] if fluxing_channels is None else fluxing_channels
        self.AWG = AWG
        if parameter_name is not None:
            self.parameter_name = parameter_name
        else:
            self.parameter_name = 'points'

    def prepare(self, **kw):
        """Insert the sweep points into the kwargs and generate the sequence."""
        if self.parameter_name != 'points':
            self.awg_seq_func_kwargs[self.parameter_name] = self.sweep_points
        if self.upload:
            # Save each fluxing channel's amplitude, force it to 2 while the
            # sequence is generated, then restore the saved values.
            old_vals = np.zeros(len(self.fluxing_channels))
            for i, ch in enumerate(self.fluxing_channels):
                old_vals[i] = self.AWG.get('{}_amp'.format(ch))
                self.AWG.set('{}_amp'.format(ch), 2)
            self.awg_seq_func(**self.awg_seq_func_kwargs)
            for i, ch in enumerate(self.fluxing_channels):
                self.AWG.set('{}_amp'.format(ch), old_vals[i])

    def set_parameter(self, val, **kw):
        # exists for compatibility reasons with 2D sweeps
        pass
class Rabi(swf.Hard_Sweep):
    """Hard sweep over drive amplitudes for a Rabi experiment."""

    def __init__(self, pulse_pars, RO_pars, n=1, upload=True, return_seq=False):
        super().__init__()
        # Sweep metadata shown by the measurement framework.
        self.name = 'Rabi'
        self.parameter_name = 'amplitude'
        self.unit = 'V'
        # Inputs forwarded to the sequence generator.
        self.pulse_pars = pulse_pars
        self.RO_pars = RO_pars
        self.n = n
        self.upload = upload
        self.return_seq = return_seq

    def prepare(self, **kw):
        """Generate the Rabi sequence unless uploading is disabled."""
        if not self.upload:
            return
        sqs.Rabi_seq(amps=self.sweep_points,
                     pulse_pars=self.pulse_pars,
                     RO_pars=self.RO_pars,
                     n=self.n, return_seq=self.return_seq)
class Flipping(swf.Hard_Sweep):
    """Hard sweep over the number of repeated pulses (flipping experiment)."""

    def __init__(self, pulse_pars, RO_pars, upload=True, return_seq=False):
        super().__init__()
        # Sweep metadata shown by the measurement framework.
        self.name = 'Flipping'
        self.parameter_name = 'Number of Pulses'
        self.unit = '#'
        # Inputs forwarded to the sequence generator.
        self.pulse_pars = pulse_pars
        self.RO_pars = RO_pars
        self.upload = upload
        self.return_seq = return_seq

    def prepare(self, **kw):
        """Generate the flipping sequence unless uploading is disabled."""
        if not self.upload:
            return
        sqs.Flipping_seq(N=self.sweep_points,
                         pulse_pars=self.pulse_pars,
                         RO_pars=self.RO_pars,
                         return_seq=self.return_seq)
class two_qubit_tomo_cardinal(swf.Hard_Sweep):
    """Hard sweep generating a two-qubit tomography sequence for the
    cardinal-state preparation indexed by *cardinal*."""

    def __init__(self, cardinal, q0_pulse_pars, q1_pulse_pars,
                 RO_pars, timings_dict, upload=True, return_seq=False):
        super().__init__()
        self.cardinal = cardinal
        self.q0_pulse_pars = q0_pulse_pars
        self.q1_pulse_pars = q1_pulse_pars
        self.RO_pars = RO_pars
        self.timings_dict = timings_dict
        self.upload = upload
        self.return_seq = return_seq
        # Sweep metadata shown by the measurement framework.
        self.name = 'Tomo2Q_%d' % cardinal
        self.parameter_name = 'Tomo Pulses'
        self.unit = 'V'

    def prepare(self, **kw):
        """Generate the tomography sequence; the result is kept on self.seq."""
        if self.upload:
            self.seq = mq_sqs.two_qubit_tomo_cardinal(cardinal=self.cardinal,
                                                      q0_pulse_pars=self.q0_pulse_pars,
                                                      q1_pulse_pars=self.q1_pulse_pars,
                                                      RO_pars=self.RO_pars,
                                                      timings_dict=self.timings_dict,
                                                      upload=self.upload,
                                                      return_seq=self.return_seq)
class two_qubit_tomo_bell(swf.Hard_Sweep):
    """Hard sweep generating a two-qubit tomography sequence on the Bell
    state indexed by *bell_state*."""

    def __init__(self, bell_state, q0_pulse_pars, q1_pulse_pars,
                 q0_flux_pars, q1_flux_pars,
                 RO_pars, distortion_dict, AWG,
                 timings_dict, CPhase=True, upload=True, return_seq=False):
        super().__init__()
        self.bell_state = bell_state
        self.q0_pulse_pars = q0_pulse_pars
        self.q1_pulse_pars = q1_pulse_pars
        self.q0_flux_pars = q0_flux_pars
        self.q1_flux_pars = q1_flux_pars
        self.RO_pars = RO_pars
        self.CPhase = CPhase
        self.distortion_dict = distortion_dict
        self.timings_dict = timings_dict
        self.AWG = AWG
        self.upload = upload
        self.return_seq = return_seq
        # Sweep metadata shown by the measurement framework.
        self.name = 'Tomo2Q_%d' % bell_state
        self.parameter_name = 'Tomo Pulses'
        self.unit = 'V'

    def prepare(self, **kw):
        """Generate the Bell-state tomography sequence, temporarily forcing
        both flux channels' AWG amplitude to 2 and restoring the saved
        values afterwards. Uploads only once (see the final line)."""
        if self.upload:
            old_val_qS = self.AWG.get(
                '{}_amp'.format(self.q0_flux_pars['channel']))
            old_val_qCP = self.AWG.get(
                '{}_amp'.format(self.q1_flux_pars['channel']))
            # Rescaling the AWG channel amp is done to ensure that the dac
            # values of the flux pulses (including kernels) are defined on
            # a 2Vpp scale.
            self.AWG.set(
                '{}_amp'.format(self.q1_flux_pars['channel']), 2.)
            self.AWG.set(
                '{}_amp'.format(self.q0_flux_pars['channel']), 2.)
            self.seq = mq_sqs.two_qubit_tomo_bell(bell_state=self.bell_state,
                                                  q0_pulse_pars=self.q0_pulse_pars,
                                                  q1_pulse_pars=self.q1_pulse_pars,
                                                  q0_flux_pars=self.q0_flux_pars,
                                                  q1_flux_pars=self.q1_flux_pars,
                                                  RO_pars=self.RO_pars,
                                                  distortion_dict=self.distortion_dict,
                                                  timings_dict=self.timings_dict,
                                                  CPhase=self.CPhase,
                                                  upload=self.upload,
                                                  return_seq=self.return_seq)
            self.AWG.set('{}_amp'.format(self.q1_flux_pars['channel']),
                         old_val_qCP)
            self.AWG.set('{}_amp'.format(self.q0_flux_pars['channel']),
                         old_val_qS)
            # Disable further uploads: subsequent prepare() calls are no-ops.
            self.upload = False
class Rabi_amp90(swf.Hard_Sweep):
    """Hard sweep over scaling factors of the amp90/amp180 ratio."""

    def __init__(self, pulse_pars, RO_pars, n=1, upload=True):
        super().__init__()
        # Sweep metadata shown by the measurement framework.
        self.name = 'Rabi_amp90'
        self.parameter_name = 'ratio_amp90_amp180'
        self.unit = ''
        # Inputs forwarded to the sequence generator.
        self.pulse_pars = pulse_pars
        self.RO_pars = RO_pars
        self.n = n
        self.upload = upload

    def prepare(self, **kw):
        """Generate the Rabi_amp90 sequence unless uploading is disabled."""
        if not self.upload:
            return
        sqs.Rabi_amp90_seq(scales=self.sweep_points,
                           pulse_pars=self.pulse_pars,
                           RO_pars=self.RO_pars,
                           n=self.n)
class Rabi_2nd_exc(swf.Hard_Sweep):
    """Hard sweep over drive amplitudes for a Rabi experiment addressing the
    second excited state."""

    def __init__(self, pulse_pars, pulse_pars_2nd,
                 RO_pars, amps=None, n=1, cal_points=True, upload=True):
        super().__init__()
        self.pulse_pars = pulse_pars
        self.pulse_pars_2nd = pulse_pars_2nd
        self.RO_pars = RO_pars
        self.n = n
        self.upload = upload
        # Sweep metadata shown by the measurement framework.
        self.name = 'Rabi 2nd excited state'
        self.parameter_name = 'amplitude'
        self.unit = 'V'
        # Append six extra sweep points at 1.05x..1.1x the largest amplitude;
        # presumably these act as calibration points for the analysis —
        # TODO(review): confirm against the analysis code.
        if cal_points and amps is not None:
            self.sweep_points = np.concatenate([amps,
                                                [amps[-1]*1.05,
                                                 amps[-1]*1.06,
                                                 amps[-1]*1.07,
                                                 amps[-1]*1.08,
                                                 amps[-1]*1.09,
                                                 amps[-1]*1.1]])

    def prepare(self, **kw):
        """Generate the 2nd-excited-state Rabi sequence when upload is set."""
        if self.upload:
            sqs2.Rabi_2nd_exc_seq(amps=self.sweep_points,
                                  pulse_pars=self.pulse_pars,
                                  pulse_pars_2nd=self.pulse_pars_2nd,
                                  RO_pars=self.RO_pars,
                                  n=self.n)
# class chevron_length(swf.Hard_Sweep):
# def __init__(self, operation_dict,
# # mw_pulse_pars, RO_pars,
# # flux_pulse_pars,
# dist_dict, AWG, upload=True,
# return_seq=False):
# super().__init__()
# # self.mw_pulse_pars = mw_pulse_pars
# # self.RO_pars = RO_pars
# # self.flux_pulse_pars = flux_pulse_pars
# self.operation_dict = operation_dict
# self.fluxing_channel = fluxing_channel
# self.dist_dict = dist_dict
# self.upload = upload
# self.name = 'Chevron'
# self.parameter_name = 'Time'
# self.unit = 's'
# self.return_seq = return_seq
# self.AWG = AWG
# def prepare(self, **kw):
# if self.upload:
# old_val = self.AWG.get(
# '{}_amp'.format(self.flux_pulse_pars['channel']))
# # Rescaling the AWG channel amp is done to ensure that the dac
# # values of the flux pulses (including kernels) are defined on
# # a 2Vpp scale.
# self.AWG.set('{}_amp'.format(self.flux_pulse_pars['channel']), 2.)
# fsqs.chevron_seq(self.mw_pulse_pars,
# self.RO_pars,
# self.flux_pulse_pars,
# pulse_lengths=self.sweep_points,
# distortion_dict=self.dist_dict)
# self.AWG.set('{}_amp'.format(self.flux_pulse_pars['channel']),
# old_val)
# def set_parameter(self, val, **kw):
# pass
class chevron_single_element(swf.Soft_Sweep):
    """Soft sweep generating a chevron sequence for a single, fixed flux
    pulse length (no calibration points)."""

    def __init__(self, pulse_length, mw_pulse_pars, RO_pars,
                 flux_pulse_pars, dist_dict, AWG, upload=True,
                 return_seq=False):
        super().__init__()
        self.mw_pulse_pars = mw_pulse_pars
        self.RO_pars = RO_pars
        self.flux_pulse_pars = flux_pulse_pars
        self.dist_dict = dist_dict
        self.upload = upload
        # Sweep metadata shown by the measurement framework.
        self.name = 'Chevron'
        self.parameter_name = 'Time'
        self.unit = 's'
        self.return_seq = return_seq
        self.AWG = AWG
        # The single flux pulse length this element is generated for.
        self.pulse_length = pulse_length

    def prepare(self, **kw):
        """Generate the single-length chevron sequence, temporarily forcing
        the flux channel's AWG amplitude to 2 and restoring it afterwards."""
        if self.upload:
            old_val = self.AWG.get(
                '{}_amp'.format(self.flux_pulse_pars['channel']))
            # Rescaling the AWG channel amp is done to ensure that the dac
            # values of the flux pulses (including kernels) are defined on
            # a 2Vpp scale.
            self.AWG.set('{}_amp'.format(self.flux_pulse_pars['channel']), 2.)
            fsqs.chevron_seq(self.mw_pulse_pars,
                             self.RO_pars,
                             self.flux_pulse_pars,
                             pulse_lengths=[self.pulse_length],
                             distortion_dict=self.dist_dict,
                             cal_points=False)
            self.AWG.set('{}_amp'.format(self.flux_pulse_pars['channel']),
                         old_val)

    def set_parameter(self, val, **kw):
        # exists for compatibility reasons with 2D sweeps
        pass
class swap_swap_wait(swf.Hard_Sweep):
    """Hard sweep over phases for a swap - wait - swap sequence."""

    def __init__(self, mw_pulse_pars, RO_pars,
                 flux_pulse_pars, dist_dict, AWG,
                 inter_swap_wait=100e-9,
                 upload=True,
                 return_seq=False):
        super().__init__()
        self.mw_pulse_pars = mw_pulse_pars
        self.RO_pars = RO_pars
        self.flux_pulse_pars = flux_pulse_pars
        self.dist_dict = dist_dict
        self.upload = upload
        # Sweep metadata shown by the measurement framework.
        self.name = 'swap-wait-swap'
        self.parameter_name = 'phase'
        self.unit = 'deg'
        self.return_seq = return_seq
        self.AWG = AWG
        # Wait time between the two swap pulses, in seconds.
        self.inter_swap_wait = inter_swap_wait

    def prepare(self, **kw):
        """Generate the swap-wait-swap sequence, temporarily forcing the flux
        channel's AWG amplitude to 2 and restoring it afterwards."""
        if self.upload:
            old_val = self.AWG.get(
                '{}_amp'.format(self.flux_pulse_pars['channel']))
            # Rescaling the AWG channel amp is done to ensure that the dac
            # values of the flux pulses (including kernels) are defined on
            # a 2Vpp scale.
            self.AWG.set('{}_amp'.format(self.flux_pulse_pars['channel']), 2.)
            fsqs.swap_swap_wait(self.mw_pulse_pars,
                                self.RO_pars,
                                self.flux_pulse_pars,
                                phases=self.sweep_points,
                                inter_swap_wait=self.inter_swap_wait,
                                distortion_dict=self.dist_dict)
            self.AWG.set('{}_amp'.format(self.flux_pulse_pars['channel']),
                         old_val)

    def set_parameter(self, val, **kw):
        # exists for compatibility reasons with 2D sweeps
        pass
class swap_CP_swap_2Qubits(swf.Hard_Sweep):
    """Hard sweep over phases for a two-qubit swap - CPhase - swap
    sequence (qCP = control-phase qubit, qS = swap qubit)."""

    def __init__(self,
                 mw_pulse_pars_qCP, mw_pulse_pars_qS,
                 flux_pulse_pars_qCP, flux_pulse_pars_qS,
                 RO_pars,
                 dist_dict,
                 AWG,
                 CPhase=True,
                 excitations='both',
                 inter_swap_wait=100e-9,
                 upload=True,
                 identity=False,
                 return_seq=False,
                 reverse_control_target=False):
        # NOTE(review): the `identity` parameter is accepted but never stored
        # or used in this class.
        super().__init__()
        self.mw_pulse_pars_qCP = mw_pulse_pars_qCP
        self.mw_pulse_pars_qS = mw_pulse_pars_qS
        self.flux_pulse_pars_qCP = flux_pulse_pars_qCP
        self.flux_pulse_pars_qS = flux_pulse_pars_qS
        self.RO_pars = RO_pars
        self.dist_dict = dist_dict
        self.CPhase = CPhase
        self.excitations = excitations
        self.inter_swap_wait = inter_swap_wait
        self.upload = upload
        # Sweep metadata shown by the measurement framework.
        self.name = 'swap-CP-swap'
        self.parameter_name = 'phase'
        self.unit = 'deg'
        self.return_seq = return_seq
        self.AWG = AWG
        self.reverse_control_target = reverse_control_target

    def prepare(self, **kw):
        """Generate the swap-CP-swap sequence, temporarily forcing both flux
        channels' AWG amplitude to 2 and restoring the saved values."""
        if self.upload:
            old_val_qS = self.AWG.get(
                '{}_amp'.format(self.flux_pulse_pars_qS['channel']))
            old_val_qCP = self.AWG.get(
                '{}_amp'.format(self.flux_pulse_pars_qCP['channel']))
            # Rescaling the AWG channel amp is done to ensure that the dac
            # values of the flux pulses (including kernels) are defined on
            # a 2Vpp scale.
            self.AWG.set(
                '{}_amp'.format(self.flux_pulse_pars_qCP['channel']), 2.)
            self.AWG.set(
                '{}_amp'.format(self.flux_pulse_pars_qS['channel']), 2.)
            fsqs.swap_CP_swap_2Qubits(
                mw_pulse_pars_qCP=self.mw_pulse_pars_qCP,
                mw_pulse_pars_qS=self.mw_pulse_pars_qS,
                flux_pulse_pars_qCP=self.flux_pulse_pars_qCP,
                flux_pulse_pars_qS=self.flux_pulse_pars_qS,
                RO_pars=self.RO_pars,
                distortion_dict=self.dist_dict,
                CPhase=self.CPhase,
                excitations=self.excitations,
                phases=self.sweep_points,
                inter_swap_wait=self.inter_swap_wait,
                reverse_control_target=self.reverse_control_target
            )
            self.AWG.set('{}_amp'.format(self.flux_pulse_pars_qCP['channel']),
                         old_val_qCP)
            self.AWG.set('{}_amp'.format(self.flux_pulse_pars_qS['channel']),
                         old_val_qS)

    def set_parameter(self, val, **kw):
        # exists for compatibility reasons with 2D sweeps
        pass
class swap_CP_swap_2Qubits_1qphasesweep(swf.Hard_Sweep):
    """Hard sweep over single-qubit phases for the swap - CPhase - swap
    sequence; the swept qubit is selected by *sweep_q*."""

    def __init__(self,
                 mw_pulse_pars_qCP, mw_pulse_pars_qS,
                 flux_pulse_pars_qCP, flux_pulse_pars_qS,
                 RO_pars,
                 dist_dict,
                 timings_dict,
                 AWG,
                 CPhase=True,
                 excitations='both',
                 inter_swap_wait=100e-9,
                 upload=True,
                 identity=False,
                 return_seq=False,
                 reverse_control_target=False,
                 sweep_q=0):
        # NOTE(review): the `identity` parameter is accepted but never stored
        # or used in this class.
        super().__init__()
        self.mw_pulse_pars_qCP = mw_pulse_pars_qCP
        self.mw_pulse_pars_qS = mw_pulse_pars_qS
        self.flux_pulse_pars_qCP = flux_pulse_pars_qCP
        self.flux_pulse_pars_qS = flux_pulse_pars_qS
        self.RO_pars = RO_pars
        self.dist_dict = dist_dict
        self.timings_dict = timings_dict
        self.CPhase = CPhase
        self.excitations = excitations
        self.inter_swap_wait = inter_swap_wait
        self.upload = upload
        # Sweep metadata shown by the measurement framework.
        self.name = 'swap-CP-swap'
        self.parameter_name = 'phase'
        self.unit = 'deg'
        self.return_seq = return_seq
        self.AWG = AWG
        self.sweep_q = sweep_q
        self.reverse_control_target = reverse_control_target

    def prepare(self, **kw):
        """Generate the phase-sweep sequence, temporarily forcing both flux
        channels' AWG amplitude to 2 and restoring the saved values.

        Returns the generated sequence (also kept on self.last_seq)."""
        if self.upload:
            old_val_qS = self.AWG.get(
                '{}_amp'.format(self.flux_pulse_pars_qS['channel']))
            old_val_qCP = self.AWG.get(
                '{}_amp'.format(self.flux_pulse_pars_qCP['channel']))
            # Rescaling the AWG channel amp is done to ensure that the dac
            # values of the flux pulses (including kernels) are defined on
            # a 2Vpp scale.
            self.AWG.set(
                '{}_amp'.format(self.flux_pulse_pars_qCP['channel']), 2.)
            self.AWG.set(
                '{}_amp'.format(self.flux_pulse_pars_qS['channel']), 2.)
            self.last_seq = fsqs.swap_CP_swap_2Qubits_1qphasesweep(
                mw_pulse_pars_qCP=self.mw_pulse_pars_qCP,
                mw_pulse_pars_qS=self.mw_pulse_pars_qS,
                flux_pulse_pars_qCP=self.flux_pulse_pars_qCP,
                flux_pulse_pars_qS=self.flux_pulse_pars_qS,
                RO_pars=self.RO_pars,
                distortion_dict=self.dist_dict,
                timings_dict=self.timings_dict,
                CPhase=self.CPhase,
                excitations=self.excitations,
                sphasesweep=self.sweep_points,
                inter_swap_wait=self.inter_swap_wait,
                reverse_control_target=self.reverse_control_target,
                sweep_q=self.sweep_q)
            self.AWG.set('{}_amp'.format(self.flux_pulse_pars_qCP['channel']),
                         old_val_qCP)
            self.AWG.set('{}_amp'.format(self.flux_pulse_pars_qS['channel']),
                         old_val_qS)
            return self.last_seq

    def set_parameter(self, val, **kw):
        # exists for compatibility reasons with 2D sweeps
        pass
class swap_CP_swap_2Qubits_1qphasesweep_amp(swf.Hard_Sweep):
    """Amplitude variant of swap_CP_swap_2Qubits_1qphasesweep: same sweep
    structure, but delegates to the *_amp sequence generator."""

    def __init__(self,
                 mw_pulse_pars_qCP, mw_pulse_pars_qS,
                 flux_pulse_pars_qCP, flux_pulse_pars_qS,
                 RO_pars,
                 dist_dict,
                 timings_dict,
                 AWG,
                 CPhase=True,
                 excitations='both',
                 inter_swap_wait=100e-9,
                 upload=True,
                 identity=False,
                 return_seq=False,
                 reverse_control_target=False,
                 sweep_q=0):
        # NOTE(review): the `identity` parameter is accepted but never stored
        # or used in this class.
        super().__init__()
        self.mw_pulse_pars_qCP = mw_pulse_pars_qCP
        self.mw_pulse_pars_qS = mw_pulse_pars_qS
        self.flux_pulse_pars_qCP = flux_pulse_pars_qCP
        self.flux_pulse_pars_qS = flux_pulse_pars_qS
        self.RO_pars = RO_pars
        self.dist_dict = dist_dict
        self.timings_dict = timings_dict
        self.CPhase = CPhase
        self.excitations = excitations
        self.inter_swap_wait = inter_swap_wait
        self.upload = upload
        # Sweep metadata shown by the measurement framework.
        self.name = 'swap-CP-swap'
        self.parameter_name = 'phase'
        self.unit = 'deg'
        self.return_seq = return_seq
        self.AWG = AWG
        self.sweep_q = sweep_q
        self.reverse_control_target = reverse_control_target

    def prepare(self, **kw):
        """Generate the amplitude-variant sequence, temporarily forcing both
        flux channels' AWG amplitude to 2 and restoring the saved values.

        Returns the generated sequence (also kept on self.last_seq)."""
        if self.upload:
            old_val_qS = self.AWG.get(
                '{}_amp'.format(self.flux_pulse_pars_qS['channel']))
            old_val_qCP = self.AWG.get(
                '{}_amp'.format(self.flux_pulse_pars_qCP['channel']))
            # Rescaling the AWG channel amp is done to ensure that the dac
            # values of the flux pulses (including kernels) are defined on
            # a 2Vpp scale.
            self.AWG.set(
                '{}_amp'.format(self.flux_pulse_pars_qCP['channel']), 2.)
            self.AWG.set(
                '{}_amp'.format(self.flux_pulse_pars_qS['channel']), 2.)
            self.last_seq = fsqs.swap_CP_swap_2Qubits_1qphasesweep_amp(
                mw_pulse_pars_qCP=self.mw_pulse_pars_qCP,
                mw_pulse_pars_qS=self.mw_pulse_pars_qS,
                flux_pulse_pars_qCP=self.flux_pulse_pars_qCP,
                flux_pulse_pars_qS=self.flux_pulse_pars_qS,
                RO_pars=self.RO_pars,
                distortion_dict=self.dist_dict,
                timings_dict=self.timings_dict,
                CPhase=self.CPhase,
                excitations=self.excitations,
                sphasesweep=self.sweep_points,
                inter_swap_wait=self.inter_swap_wait,
                reverse_control_target=self.reverse_control_target,
                sweep_q=self.sweep_q)
            self.AWG.set('{}_amp'.format(self.flux_pulse_pars_qCP['channel']),
                         old_val_qCP)
            self.AWG.set('{}_amp'.format(self.flux_pulse_pars_qS['channel']),
                         old_val_qS)
            return self.last_seq

    def set_parameter(self, val, **kw):
        # exists for compatibility reasons with 2D sweeps
        pass
class chevron_with_excited_bus_2Qubits(swf.Hard_Sweep):
    """Hard sweep over chevron pulse lengths for a two-qubit experiment with
    an excited bus (qCP = control-phase qubit, qS = swap qubit)."""

    def __init__(self,
                 mw_pulse_pars_qCP, mw_pulse_pars_qS,
                 flux_pulse_pars_qCP, flux_pulse_pars_qS,
                 RO_pars,
                 dist_dict,
                 AWG,
                 CPhase=True,
                 excitations=1,
                 upload=True,
                 return_seq=False):
        super().__init__()
        self.mw_pulse_pars_qCP = mw_pulse_pars_qCP
        self.mw_pulse_pars_qS = mw_pulse_pars_qS
        self.flux_pulse_pars_qCP = flux_pulse_pars_qCP
        self.flux_pulse_pars_qS = flux_pulse_pars_qS
        self.RO_pars = RO_pars
        self.dist_dict = dist_dict
        # NOTE(review): CPhase is stored but not forwarded to the sequence
        # generator below — confirm whether that is intentional.
        self.CPhase = CPhase
        self.excitations = excitations
        self.upload = upload
        # Sweep metadata shown by the measurement framework.
        self.name = 'swap-CP length'
        self.parameter_name = 'swap-CP length'
        self.unit = 's'
        self.return_seq = return_seq
        self.AWG = AWG

    def prepare(self, **kw):
        """Generate the chevron sequence, temporarily forcing both flux
        channels' AWG amplitude to 2 and restoring the saved values."""
        if self.upload:
            old_val_qS = self.AWG.get(
                '{}_amp'.format(self.flux_pulse_pars_qS['channel']))
            old_val_qCP = self.AWG.get(
                '{}_amp'.format(self.flux_pulse_pars_qCP['channel']))
            # Rescaling the AWG channel amp is done to ensure that the dac
            # values of the flux pulses (including kernels) are defined on
            # a 2Vpp scale.
            self.AWG.set(
                '{}_amp'.format(self.flux_pulse_pars_qCP['channel']), 2.)
            self.AWG.set(
                '{}_amp'.format(self.flux_pulse_pars_qS['channel']), 2.)
            fsqs.chevron_with_excited_bus_2Qubits(
                mw_pulse_pars_qCP=self.mw_pulse_pars_qCP,
                mw_pulse_pars_qS=self.mw_pulse_pars_qS,
                flux_pulse_pars_qCP=self.flux_pulse_pars_qCP,
                flux_pulse_pars_qS=self.flux_pulse_pars_qS,
                RO_pars=self.RO_pars,
                distortion_dict=self.dist_dict,
                excitations=self.excitations,
                chevron_pulse_lengths=self.sweep_points,
            )
            self.AWG.set('{}_amp'.format(self.flux_pulse_pars_qCP['channel']),
                         old_val_qCP)
            self.AWG.set('{}_amp'.format(self.flux_pulse_pars_qS['channel']),
                         old_val_qS)

    def set_parameter(self, val, **kw):
        # exists for compatibility reasons with 2D sweeps
        pass
class chevron_cphase_length(swf.Hard_Sweep):
    """Hard sweep of the chevron/CPhase pulse length.

    Deprecated (see TODO below); kept for backwards compatibility.
    """
    # TODO: Delete this function it is deprecasted

    def __init__(self, length_vec, mw_pulse_pars, RO_pars,
                 flux_pulse_pars, cphase_pulse_pars, phase_2, dist_dict, AWG,
                 upload=True, return_seq=False, cal_points=True,
                 toggle_amplitude_sign=False):
        super().__init__()
        self.length_vec = length_vec
        self.mw_pulse_pars = mw_pulse_pars
        self.RO_pars = RO_pars
        self.flux_pulse_pars = flux_pulse_pars
        self.dist_dict = dist_dict
        # 4 oscillation periods over the full length range.
        self.artificial_detuning = 4./length_vec[-1]
        self.upload = upload
        self.name = 'Chevron'
        self.parameter_name = 'Time'
        self.unit = 's'
        self.return_seq = return_seq
        self.cphase_pulse_pars = cphase_pulse_pars
        self.phase_2 = phase_2
        self.AWG = AWG
        self.cal_points = cal_points
        self.toggle_amplitude_sign = toggle_amplitude_sign

    def prepare(self, **kw):
        """Generate and upload the chevron-CPhase sequence."""
        if self.upload:
            fsqs.chevron_seq_cphase(lengths=self.length_vec,
                                    mw_pulse_pars=self.mw_pulse_pars,
                                    RO_pars=self.RO_pars,
                                    flux_pulse_pars=self.flux_pulse_pars,
                                    cphase_pulse_pars=self.cphase_pulse_pars,
                                    artificial_detuning=self.artificial_detuning,
                                    phase_2=self.phase_2,
                                    distortion_dict=self.dist_dict,
                                    toggle_amplitude_sign=self.toggle_amplitude_sign,
                                    cal_points=self.cal_points)

    def pre_upload(self, **kw):
        """Generate the sequence and keep the returned sequence object."""
        self.seq = fsqs.chevron_seq_cphase(lengths=self.length_vec,
                                           mw_pulse_pars=self.mw_pulse_pars,
                                           RO_pars=self.RO_pars,
                                           flux_pulse_pars=self.flux_pulse_pars,
                                           cphase_pulse_pars=self.cphase_pulse_pars,
                                           artificial_detuning=self.artificial_detuning,
                                           phase_2=self.phase_2,
                                           distortion_dict=self.dist_dict,
                                           toggle_amplitude_sign=self.toggle_amplitude_sign,
                                           cal_points=self.cal_points,
                                           return_seq=True)
class BusT2(swf.Hard_Sweep):
    """Hard sweep of the bus T2 experiment vs. wait time.

    NOTE(review): self.name is 'Chevron' -- looks copy-pasted from the
    chevron sweep; confirm before relying on it in analysis.
    """

    def __init__(self, times_vec, mw_pulse_pars, RO_pars,
                 flux_pulse_pars, dist_dict, AWG, upload=True,
                 return_seq=False):
        super().__init__()
        # Experiment inputs
        self.times_vec = times_vec
        self.mw_pulse_pars = mw_pulse_pars
        self.RO_pars = RO_pars
        self.flux_pulse_pars = flux_pulse_pars
        self.dist_dict = dist_dict
        self.AWG = AWG
        # Sweep behaviour / metadata
        self.upload = upload
        self.return_seq = return_seq
        self.name = 'Chevron'
        self.parameter_name = 'Time'
        self.unit = 's'

    def prepare(self, **kw):
        """Generate and upload the BusT2 sequence (unless disabled)."""
        if not self.upload:
            return
        fsqs.BusT2(self.times_vec,
                   self.mw_pulse_pars,
                   self.RO_pars,
                   self.flux_pulse_pars,
                   distortion_dict=self.dist_dict)

    def pre_upload(self, **kw):
        """Generate the sequence and keep the returned sequence object."""
        self.seq = fsqs.BusT2(self.times_vec,
                              self.mw_pulse_pars,
                              self.RO_pars,
                              self.flux_pulse_pars,
                              distortion_dict=self.dist_dict,
                              return_seq=True)
class BusEcho(swf.Hard_Sweep):
    """Hard sweep of the bus echo experiment vs. wait time.

    NOTE(review): self.name is 'Chevron' -- looks copy-pasted from the
    chevron sweep; confirm before relying on it in analysis.
    """

    def __init__(self, times_vec, mw_pulse_pars, RO_pars, artificial_detuning,
                 flux_pulse_pars, dist_dict, AWG, upload=True,
                 return_seq=False):
        super().__init__()
        # Experiment inputs
        self.times_vec = times_vec
        self.mw_pulse_pars = mw_pulse_pars
        self.RO_pars = RO_pars
        self.artificial_detuning = artificial_detuning
        self.flux_pulse_pars = flux_pulse_pars
        self.dist_dict = dist_dict
        self.AWG = AWG
        # Sweep behaviour / metadata
        self.upload = upload
        self.return_seq = return_seq
        self.name = 'Chevron'
        self.parameter_name = 'Time'
        self.unit = 's'

    def prepare(self, **kw):
        """Generate and upload the BusEcho sequence (unless disabled)."""
        if not self.upload:
            return
        fsqs.BusEcho(self.times_vec,
                     self.mw_pulse_pars,
                     self.RO_pars,
                     self.artificial_detuning,
                     self.flux_pulse_pars,
                     distortion_dict=self.dist_dict)

    def pre_upload(self, **kw):
        """Generate the sequence and keep the returned sequence object."""
        self.seq = fsqs.BusEcho(self.times_vec,
                                self.mw_pulse_pars,
                                self.RO_pars,
                                self.artificial_detuning,
                                self.flux_pulse_pars,
                                distortion_dict=self.dist_dict,
                                return_seq=True)
class Ramsey_2nd_exc(swf.Hard_Sweep):
    """Hard sweep: Ramsey experiment on the 2nd excited (ef) transition.

    FIX: the sweep metadata was copy-pasted from the Rabi 2nd excited
    state sweep (name 'Rabi 2nd excited state', parameter 'amplitude' in
    'V') although this class sweeps waiting times; the labels now
    describe a time sweep.
    """

    def __init__(self, pulse_pars, pulse_pars_2nd,
                 RO_pars, times=None, n=1, cal_points=True, upload=True):
        super().__init__()
        self.pulse_pars = pulse_pars
        self.pulse_pars_2nd = pulse_pars_2nd
        self.RO_pars = RO_pars
        self.n = n
        self.upload = upload
        self.name = 'Ramsey 2nd excited state'
        self.parameter_name = 't'
        self.unit = 's'
        # NOTE(review): if cal_points is False or times is None,
        # sweep_points is expected to be set externally (e.g. by MC)
        # before prepare() is called.
        if cal_points and times is not None:
            # Append 6 calibration points just beyond the last time so
            # they are visible next to the data in plots.
            self.sweep_points = np.concatenate([times,
                                                [times[-1]*1.05,
                                                 times[-1]*1.06,
                                                 times[-1]*1.07,
                                                 times[-1]*1.08,
                                                 times[-1]*1.09,
                                                 times[-1]*1.1]])

    def prepare(self, **kw):
        """Generate and upload the ef-Ramsey sequence."""
        if self.upload:
            sqs2.Ramsey_2nd_exc_seq(times=self.sweep_points,
                                    pulse_pars=self.pulse_pars,
                                    pulse_pars_2nd=self.pulse_pars_2nd,
                                    RO_pars=self.RO_pars,
                                    n=self.n)
class cphase_fringes(swf.Hard_Sweep):
    """Hard sweep of CPhase fringes vs. phase (degrees).

    FIX: the attribute assignments in __init__ had trailing commas,
    which wrapped every parameter in a 1-tuple (``self.phases = phases,``
    makes ``self.phases`` a tuple), and those tuples were then passed on
    to ``mq_sqs.cphase_fringes``.  The trailing commas are removed.
    """

    def __init__(self, phases, q0_pulse_pars, q1_pulse_pars, RO_pars,
                 swap_pars_q0, cphase_pars_q1, timings_dict,
                 dist_dict, upload=True, return_seq=False):
        super().__init__()
        self.phases = phases
        self.q0_pulse_pars = q0_pulse_pars
        self.q1_pulse_pars = q1_pulse_pars
        self.RO_pars = RO_pars
        self.swap_pars_q0 = swap_pars_q0
        self.cphase_pars_q1 = cphase_pars_q1
        self.timings_dict = timings_dict
        self.dist_dict = dist_dict
        self.upload = upload
        self.name = 'CPhase'
        self.parameter_name = 'Phase'
        self.unit = 'deg'
        self.return_seq = return_seq

    def prepare(self, **kw):
        """Generate and upload the CPhase-fringes sequence."""
        if self.upload:
            mq_sqs.cphase_fringes(phases=self.phases,
                                  q0_pulse_pars=self.q0_pulse_pars,
                                  q1_pulse_pars=self.q1_pulse_pars,
                                  RO_pars=self.RO_pars,
                                  swap_pars_q0=self.swap_pars_q0,
                                  cphase_pars_q1=self.cphase_pars_q1,
                                  timings_dict=self.timings_dict,
                                  distortion_dict=self.dist_dict)

    def pre_upload(self, **kw):
        """Generate the sequence and keep the returned sequence object."""
        self.seq = mq_sqs.cphase_fringes(phases=self.phases,
                                         q0_pulse_pars=self.q0_pulse_pars,
                                         q1_pulse_pars=self.q1_pulse_pars,
                                         RO_pars=self.RO_pars,
                                         swap_pars_q0=self.swap_pars_q0,
                                         cphase_pars_q1=self.cphase_pars_q1,
                                         timings_dict=self.timings_dict,
                                         distortion_dict=self.dist_dict,
                                         return_seq=True)
class T1(swf.Hard_Sweep):
    """Hard sweep measuring qubit relaxation (T1) vs. wait time."""

    def __init__(self, pulse_pars, RO_pars, upload=True):
        super().__init__()
        self.pulse_pars = pulse_pars
        self.RO_pars = RO_pars
        self.upload = upload
        # Sweep metadata used for plotting/analysis.
        self.name = 'T1'
        self.parameter_name = 't'
        self.unit = 's'

    def prepare(self, **kw):
        """Generate and upload the T1 sequence (unless disabled)."""
        if not self.upload:
            return
        sqs.T1_seq(times=self.sweep_points,
                   pulse_pars=self.pulse_pars,
                   RO_pars=self.RO_pars)
class T1_qp(swf.Hard_Sweep):
    """Hard sweep: T1 with interleaved trains of pi pulses
    (quasiparticle-injection style experiment)."""

    def __init__(self, pulse_pars, RO_pars,
                 N_pi_pulses, N_pi_pulse_delay,
                 cal_points=True,
                 upload=True):
        super().__init__()
        self.pulse_pars = pulse_pars
        self.RO_pars = RO_pars
        self.N_pi_pulses = N_pi_pulses
        self.N_pi_pulse_delay = N_pi_pulse_delay
        self.cal_points = cal_points
        self.upload = upload
        # Sweep metadata used for plotting/analysis.
        self.name = 'T1_qp'
        self.parameter_name = 't'
        self.unit = 's'

    def prepare(self, **kw):
        """Generate and upload the T1-qp sequence (unless disabled)."""
        if not self.upload:
            return
        sqs.T1_qp_seq(times=self.sweep_points,
                      N_pi_pulses=self.N_pi_pulses,
                      N_pi_pulse_delay=self.N_pi_pulse_delay,
                      pulse_pars=self.pulse_pars,
                      RO_pars=self.RO_pars,
                      cal_points=self.cal_points)
class T1_2pi_qp(swf.Hard_Sweep):
    """Hard sweep: T1 with interleaved trains of 2-pi pulses
    (quasiparticle-injection style experiment)."""

    def __init__(self, pulse_pars, RO_pars,
                 N_2pi_pulses, N_2pi_pulse_delay,
                 cal_points=True,
                 upload=True):
        super().__init__()
        self.pulse_pars = pulse_pars
        self.RO_pars = RO_pars
        self.N_2pi_pulses = N_2pi_pulses
        self.N_2pi_pulse_delay = N_2pi_pulse_delay
        self.cal_points = cal_points
        self.upload = upload
        # Sweep metadata used for plotting/analysis.
        self.name = 'T1_2pi_qp'
        self.parameter_name = 't'
        self.unit = 's'

    def prepare(self, **kw):
        """Generate and upload the T1-2pi-qp sequence (unless disabled)."""
        if not self.upload:
            return
        sqs.T1_2pi_qp_seq(times=self.sweep_points,
                          N_2pi_pulses=self.N_2pi_pulses,
                          N_2pi_pulse_delay=self.N_2pi_pulse_delay,
                          pulse_pars=self.pulse_pars,
                          RO_pars=self.RO_pars,
                          cal_points=self.cal_points)
class AllXY(swf.Hard_Sweep):
    """Hard sweep over the 21 AllXY gate combinations (42 when each
    element is measured twice)."""

    def __init__(self, pulse_pars, RO_pars, double_points=False, upload=True):
        super().__init__()
        self.pulse_pars = pulse_pars
        self.RO_pars = RO_pars
        self.double_points = double_points
        self.upload = upload
        # Sweep metadata used for plotting/analysis.
        self.parameter_name = 'AllXY element'
        self.unit = '#'
        self.name = 'AllXY'
        # 21 AllXY elements, or 42 if each one is measured twice.
        self.sweep_points = np.arange(42 if double_points else 21)

    def prepare(self, **kw):
        """Generate and upload the AllXY sequence (unless disabled)."""
        if not self.upload:
            return
        sqs.AllXY_seq(pulse_pars=self.pulse_pars,
                      RO_pars=self.RO_pars,
                      double_points=self.double_points)
class OffOn(swf.Hard_Sweep):
    """Hard sweep alternating between no pulse and a pi pulse (Off/On)."""

    def __init__(self, pulse_pars, RO_pars, upload=True,
                 pulse_comb='OffOn', nr_samples=2):
        super().__init__()
        self.pulse_pars = pulse_pars
        self.RO_pars = RO_pars
        self.upload = upload
        # Sweep metadata; the pulse combination doubles as the name and
        # is forwarded to the sequence generator in prepare().
        self.parameter_name = 'sample'
        self.unit = '#'
        self.name = pulse_comb
        self.sweep_points = np.arange(nr_samples)

    def prepare(self, **kw):
        """Generate and upload the Off/On sequence (unless disabled)."""
        if not self.upload:
            return
        sqs.OffOn_seq(pulse_pars=self.pulse_pars,
                      RO_pars=self.RO_pars, pulse_comb=self.name)
class Butterfly(swf.Hard_Sweep):
    """Hard sweep for the butterfly (repeated-measurement) experiment.

    FIX: corrected the typo 'Buttefly element' in the user-facing
    parameter name.
    """

    def __init__(self, pulse_pars, RO_pars, initialize=False, upload=True,
                 post_msmt_delay=2000e-9):
        super().__init__()
        self.pulse_pars = pulse_pars
        self.RO_pars = RO_pars
        self.upload = upload
        self.parameter_name = 'Butterfly element'
        self.unit = '#'
        self.name = 'Butterfly'
        # Two elements: initialized-off and initialized-on branches.
        self.sweep_points = np.arange(2)
        self.initialize = initialize
        # Delay after each measurement before the next pulse (s).
        self.post_msmt_delay = post_msmt_delay

    def prepare(self, **kw):
        """Generate and upload the butterfly sequence."""
        if self.upload:
            sqs.Butterfly_seq(pulse_pars=self.pulse_pars,
                              post_msmt_delay=self.post_msmt_delay,
                              RO_pars=self.RO_pars, initialize=self.initialize)
class Randomized_Benchmarking(swf.Hard_Sweep):
    """Hard sweep: single-qubit randomized benchmarking vs. nr of
    Cliffords.

    If nr_cliffords is None it still needs to be specified when setting
    up the experiment.

    FIX: sweep_points was assigned twice and before the double_curves
    repeat, so with double_curves=True and cal_points=False the sweep
    points did not match the doubled set of curves.  sweep_points is now
    set once, after the optional repeat.
    """

    def __init__(self, pulse_pars, RO_pars,
                 nr_seeds, nr_cliffords,
                 cal_points=True,
                 double_curves=False, seq_name=None,
                 upload=True):
        super().__init__()
        self.pulse_pars = pulse_pars
        self.RO_pars = RO_pars
        self.upload = upload
        self.nr_seeds = nr_seeds
        self.cal_points = cal_points
        self.double_curves = double_curves
        self.seq_name = seq_name
        self.parameter_name = 'Nr of Cliffords'
        self.unit = '#'
        self.name = 'Randomized_Benchmarking'
        if double_curves:
            # Each nr of Cliffords is measured twice (interleaved curves),
            # so the x-axis values are repeated to match.
            nr_cliffords = np.repeat(nr_cliffords, 2)
        self.sweep_points = nr_cliffords
        if self.cal_points:
            # Fractional offsets put the 4 calibration points at sensible
            # (visible) x positions next to the last RB point.
            self.sweep_points = np.concatenate([nr_cliffords,
                                                [nr_cliffords[-1]+.2,
                                                 nr_cliffords[-1]+.3,
                                                 nr_cliffords[-1]+.7,
                                                 nr_cliffords[-1]+.8]])

    def prepare(self, **kw):
        """Generate and upload the RB sequence."""
        if self.upload:
            sqs.Randomized_Benchmarking_seq(
                self.pulse_pars, self.RO_pars,
                nr_cliffords=self.sweep_points,
                nr_seeds=self.nr_seeds,
                cal_points=self.cal_points,
                double_curves=self.double_curves,
                seq_name=self.seq_name)
class Ramsey(swf.Hard_Sweep):
    """Hard sweep measuring dephasing (T2*) with a Ramsey experiment."""

    def __init__(self, pulse_pars, RO_pars,
                 artificial_detuning=None,
                 cal_points=True,
                 upload=True):
        super().__init__()
        self.pulse_pars = pulse_pars
        self.RO_pars = RO_pars
        self.artificial_detuning = artificial_detuning
        self.cal_points = cal_points
        self.upload = upload
        # Sweep metadata used for plotting/analysis.
        self.name = 'Ramsey'
        self.parameter_name = 't'
        self.unit = 's'

    def prepare(self, **kw):
        """Generate and upload the Ramsey sequence (unless disabled)."""
        if not self.upload:
            return
        sqs.Ramsey_seq(times=self.sweep_points,
                       pulse_pars=self.pulse_pars,
                       RO_pars=self.RO_pars,
                       artificial_detuning=self.artificial_detuning,
                       cal_points=self.cal_points)
class Echo(swf.Hard_Sweep):
    """Hard sweep measuring T2-echo with a Hahn-echo experiment."""

    def __init__(self, pulse_pars, RO_pars,
                 artificial_detuning=None,
                 cal_points=True,
                 upload=True):
        super().__init__()
        self.pulse_pars = pulse_pars
        self.RO_pars = RO_pars
        self.artificial_detuning = artificial_detuning
        self.cal_points = cal_points
        self.upload = upload
        # Sweep metadata used for plotting/analysis.
        self.name = 'Echo'
        self.parameter_name = 't'
        self.unit = 's'

    def prepare(self, **kw):
        """Generate and upload the echo sequence (unless disabled)."""
        if not self.upload:
            return
        sqs.Echo_seq(times=self.sweep_points,
                     pulse_pars=self.pulse_pars,
                     RO_pars=self.RO_pars,
                     artificial_detuning=self.artificial_detuning,
                     cal_points=self.cal_points)
class CPMG(swf.Hard_Sweep):
    """Hard sweep measuring T2 with a CPMG dynamical-decoupling
    sequence of the given order."""

    def __init__(self, pulse_pars, CPMG_order, RO_pars,
                 artificial_detuning=None,
                 cal_points=True,
                 upload=True):
        super().__init__()
        self.pulse_pars = pulse_pars
        self.RO_pars = RO_pars
        self.CPMG_order = CPMG_order
        self.artificial_detuning = artificial_detuning
        self.cal_points = cal_points
        self.upload = upload
        # Sweep metadata used for plotting/analysis.
        self.name = 'CPMG'
        self.parameter_name = 't'
        self.unit = 's'

    def prepare(self, **kw):
        """Generate and upload the CPMG sequence (unless disabled)."""
        if not self.upload:
            return
        sqs.CPMG_seq(times=self.sweep_points,
                     CPMG_order=self.CPMG_order,
                     pulse_pars=self.pulse_pars,
                     RO_pars=self.RO_pars,
                     artificial_detuning=self.artificial_detuning,
                     cal_points=self.cal_points)
class Motzoi_XY(swf.Hard_Sweep):
    """Hard sweep of the DRAG (motzoi) parameter using the XY calibration
    sequence.

    Measures 2 points per motzoi value specified in motzois and adds 4
    calibration points to it.
    """

    def __init__(self, motzois, pulse_pars, RO_pars, upload=True):
        super().__init__()
        self.pulse_pars = pulse_pars
        self.RO_pars = RO_pars
        self.upload = upload
        # Sweep metadata used for plotting/analysis.
        self.name = 'Motzoi_XY'
        self.parameter_name = 'motzoi'
        self.unit = ' '
        # Two measurements per motzoi value, then 4 calibration points
        # placed one step beyond the last value.
        doubled = np.repeat(motzois, 2)
        cal_value = motzois[-1] + (motzois[-1] - motzois[-2])
        self.sweep_points = np.append(doubled, [cal_value] * 4)

    def prepare(self, **kw):
        """Generate and upload the Motzoi-XY sequence (unless disabled)."""
        if not self.upload:
            return
        sqs.Motzoi_XY(motzois=self.sweep_points,
                      pulse_pars=self.pulse_pars,
                      RO_pars=self.RO_pars)
class Freq_XY(swf.Hard_Sweep):
    """Hard sweep of the drive frequency using the XY calibration
    sequence.

    Measures 2 points per frequency value specified in freqs and adds 4
    calibration points to it.

    FIX: name/parameter labels were copy-pasted from Motzoi_XY
    ('Motzoi_XY', 'motzoi'); they now describe a frequency sweep.
    NOTE(review): the underlying sequence generator is still
    sqs.Motzoi_XY (whose 'motzois' argument receives the frequencies) --
    confirm that is the intended generator.
    """

    def __init__(self, freqs, pulse_pars, RO_pars, upload=True):
        super().__init__()
        self.pulse_pars = pulse_pars
        self.RO_pars = RO_pars
        self.upload = upload
        self.name = 'Freq_XY'
        self.parameter_name = 'freq'
        self.unit = 'Hz'
        # Two measurements per frequency, then 4 calibration points
        # placed one step beyond the last value.
        sweep_pts = np.repeat(freqs, 2)
        self.sweep_points = np.append(sweep_pts,
                                      [freqs[-1]+(freqs[-1]-freqs[-2])]*4)

    def prepare(self, **kw):
        """Generate and upload the sequence (unless disabled)."""
        if self.upload:
            sqs.Motzoi_XY(motzois=self.sweep_points,
                          pulse_pars=self.pulse_pars,
                          RO_pars=self.RO_pars)
class CBox_T1(swf.Hard_Sweep):
    """Hard sweep for a CBox-driven T1 measurement; the Tektronix AWG
    provides the marker/trigger sequence."""

    def __init__(self, IF, RO_pulse_delay, RO_trigger_delay, mod_amp, AWG,
                 upload=True):
        super().__init__()
        self.IF = IF
        self.RO_pulse_delay = RO_pulse_delay
        self.RO_trigger_delay = RO_trigger_delay
        self.name = 'T1'
        self.parameter_name = 'tau'
        self.unit = 's'
        self.AWG = AWG
        self.mod_amp = mod_amp
        self.upload = upload

    def prepare(self, **kw):
        """Upload the T1 marker sequence.

        ch3/ch4 amplitudes are saved and restored around the upload --
        presumably because loading a sequence can alter them (TODO
        confirm).
        """
        if self.upload:
            ch3_amp = self.AWG.get('ch3_amp')
            ch4_amp = self.AWG.get('ch4_amp')
            st_seqs.CBox_T1_marker_seq(IF=self.IF, times=self.sweep_points,
                                       RO_pulse_delay=self.RO_pulse_delay,
                                       RO_trigger_delay=self.RO_trigger_delay,
                                       verbose=False)
            self.AWG.set('ch3_amp', ch3_amp)
            self.AWG.set('ch4_amp', ch4_amp)
class CBox_v3_T1(swf.Hard_Sweep):
    """Hard sweep for a T1 measurement on the CBox v3 (instruction
    based).

    NOTE(review): this class is defined twice in this module with an
    identical body; the later definition shadows this one.
    """

    def __init__(self, CBox, upload=True):
        super().__init__()
        self.name = 'T1'
        self.parameter_name = 'tau'
        self.unit = 's'
        self.upload = upload
        self.CBox = CBox

    def prepare(self, **kw):
        """Load the T1 instruction program into the CBox."""
        if self.upload:
            self.CBox.AWG0_mode('Codeword-trigger mode')
            self.CBox.AWG1_mode('Codeword-trigger mode')
            self.CBox.AWG2_mode('Codeword-trigger mode')
            # Stop the master controller while loading instructions.
            self.CBox.set_master_controller_working_state(0, 0, 0)
            # FIX: raw string so the backslash in this Windows-style path
            # can never be read as an escape sequence (same bytes as
            # before; '\T' just happened not to be a valid escape).
            self.CBox.load_instructions(r'CBox_v3_test_program\T1.asm')
            self.CBox.set_master_controller_working_state(1, 0, 0)
class CBox_v3_T1(swf.Hard_Sweep):
    """Hard sweep for a T1 measurement on the CBox v3 (instruction
    based).

    NOTE(review): exact duplicate of the CBox_v3_T1 definition directly
    above; this one shadows it.  One of the two should be deleted.
    """

    def __init__(self, CBox, upload=True):
        super().__init__()
        self.name = 'T1'
        self.parameter_name = 'tau'
        self.unit = 's'
        self.upload = upload
        self.CBox = CBox

    def prepare(self, **kw):
        """Load the T1 instruction program into the CBox."""
        if self.upload:
            self.CBox.AWG0_mode('Codeword-trigger mode')
            self.CBox.AWG1_mode('Codeword-trigger mode')
            self.CBox.AWG2_mode('Codeword-trigger mode')
            # Stop the master controller while loading instructions.
            self.CBox.set_master_controller_working_state(0, 0, 0)
            # FIX: raw string so the backslash in this Windows-style path
            # can never be read as an escape sequence (same bytes as
            # before; '\T' just happened not to be a valid escape).
            self.CBox.load_instructions(r'CBox_v3_test_program\T1.asm')
            self.CBox.set_master_controller_working_state(1, 0, 0)
class CBox_Ramsey(swf.Hard_Sweep):
    """Hard sweep: CBox-driven Ramsey (T2*) experiment; the Tektronix AWG
    provides markers and the CBox plays a segmented pulse tape.

    FIX: ch4_amp was read from 'ch3_amp', so the restore at the end of
    prepare() clobbered ch4's amplitude with ch3's value.
    """

    def __init__(self, IF, RO_pulse_length,
                 RO_pulse_delay, RO_trigger_delay, pulse_delay,
                 AWG, CBox, cal_points=True,
                 upload=True):
        super().__init__()
        self.IF = IF
        self.RO_pulse_delay = RO_pulse_delay
        self.RO_trigger_delay = RO_trigger_delay
        self.pulse_delay = pulse_delay
        self.RO_pulse_length = RO_pulse_length
        self.name = 'T2*'
        self.parameter_name = 'tau'
        self.unit = 's'
        self.AWG = AWG
        self.CBox = CBox
        self.upload = upload
        self.cal_points = cal_points

    def prepare(self, **kw):
        """Upload the Ramsey marker sequence and set the CBox tape."""
        if self.upload:
            ch3_amp = self.AWG.get('ch3_amp')
            ch4_amp = self.AWG.get('ch4_amp')  # FIX: was 'ch3_amp'
            st_seqs.CBox_Ramsey_marker_seq(
                IF=self.IF, times=self.sweep_points,
                RO_pulse_delay=self.RO_pulse_delay,
                RO_pulse_length=self.RO_pulse_length,
                RO_trigger_delay=self.RO_trigger_delay,
                pulse_delay=self.pulse_delay,
                verbose=False)
            self.AWG.set('ch3_amp', ch3_amp)
            self.AWG.set('ch4_amp', ch4_amp)
        # gets assigned in MC.set sweep_points
        nr_elts = len(self.sweep_points)
        if self.cal_points:  # append the calibration points to the tape
            tape = [3, 3] * (nr_elts-4) + [0, 0, 0, 0, 0, 1, 0, 1]
        else:
            tape = [3, 3] * nr_elts
        self.AWG.stop()
        # TODO Change to segmented tape if we have the new timing tape
        self.CBox.AWG0_mode.set('segmented tape')
        self.CBox.AWG1_mode.set('segmented tape')
        self.CBox.restart_awg_tape(0)
        self.CBox.restart_awg_tape(1)
        self.CBox.set('AWG0_tape', tape)
        self.CBox.set('AWG1_tape', tape)
class CBox_Echo(swf.Hard_Sweep):
    """Hard sweep: CBox-driven echo (T2) experiment.

    FIX: ch4_amp was read from 'ch3_amp', so the restore at the end of
    prepare() clobbered ch4's amplitude with ch3's value.
    """

    def __init__(self, IF,
                 RO_pulse_delay, RO_trigger_delay, pulse_delay,
                 AWG, CBox, cal_points=True,
                 upload=True):
        super().__init__()
        self.IF = IF
        self.RO_pulse_delay = RO_pulse_delay
        self.RO_trigger_delay = RO_trigger_delay
        self.pulse_delay = pulse_delay
        self.name = 'T2-echo'
        self.parameter_name = 'tau'
        self.unit = 's'
        self.AWG = AWG
        self.CBox = CBox
        self.upload = upload
        self.cal_points = cal_points
        logging.warning('Underlying sequence is not implemented')
        logging.warning('Replace it with the multi-pulse sequence')

    def prepare(self, **kw):
        """Upload the echo marker sequence and set the CBox tape."""
        if self.upload:
            ch3_amp = self.AWG.get('ch3_amp')
            ch4_amp = self.AWG.get('ch4_amp')  # FIX: was 'ch3_amp'
            st_seqs.CBox_Echo_marker_seq(
                IF=self.IF, times=self.sweep_points,
                RO_pulse_delay=self.RO_pulse_delay,
                RO_trigger_delay=self.RO_trigger_delay,
                verbose=False)
            self.AWG.set('ch3_amp', ch3_amp)
            self.AWG.set('ch4_amp', ch4_amp)
        # gets assigned in MC.set sweep_points
        nr_elts = len(self.sweep_points)
        # NOTE(review): only 2 calibration entries here vs. 8 in
        # CBox_Ramsey -- confirm this is intended.
        if self.cal_points:
            tape = [3, 3] * (nr_elts-4) + [0, 1]
        else:
            tape = [3, 3] * nr_elts
        self.AWG.stop()
        self.CBox.AWG0_mode.set('Segmented tape')
        self.CBox.AWG1_mode.set('Segmented tape')
        self.CBox.restart_awg_tape(0)
        self.CBox.restart_awg_tape(1)
        self.CBox.set('AWG0_tape', tape)
        self.CBox.set('AWG1_tape', tape)
class CBox_OffOn(swf.Hard_Sweep):
    """Hard sweep: CBox-driven Off/On experiment via a 2-element tape.

    FIX: ch4_amp was read from 'ch3_amp', so the restore at the end of
    prepare() clobbered ch4's amplitude with ch3's value.
    """

    def __init__(self, IF, RO_pulse_delay, RO_trigger_delay,
                 RO_pulse_length,
                 AWG, CBox,
                 upload=True):
        super().__init__()
        self.IF = IF
        self.RO_pulse_delay = RO_pulse_delay
        self.RO_trigger_delay = RO_trigger_delay
        self.parameter_name = 'Tape element'
        self.unit = ''
        self.name = 'Off-On'
        self.tape = [0, 1]
        self.sweep_points = np.array(self.tape)  # array for transpose in MC
        self.AWG = AWG
        self.CBox = CBox
        self.RO_pulse_length = RO_pulse_length
        # would actually like to check if file is already loaded
        # filename can be get using AWG.get('setup_filename')
        self.upload = upload

    def prepare(self, **kw):
        """Set the CBox tape and (once) upload the marker sequence."""
        self.AWG.stop()
        self.CBox.AWG0_mode.set('Segmented tape')
        self.CBox.AWG1_mode.set('Segmented tape')
        self.CBox.restart_awg_tape(0)
        self.CBox.restart_awg_tape(1)
        self.CBox.set('AWG0_tape', self.tape)
        self.CBox.set('AWG1_tape', self.tape)
        if self.upload:
            ch3_amp = self.AWG.get('ch3_amp')
            ch4_amp = self.AWG.get('ch4_amp')  # FIX: was 'ch3_amp'
            st_seqs.CBox_single_pulse_seq(
                IF=self.IF,
                RO_pulse_delay=self.RO_pulse_delay,
                RO_trigger_delay=self.RO_trigger_delay,
                RO_pulse_length=self.RO_pulse_length,
                verbose=False)
            self.AWG.set('ch3_amp', ch3_amp)
            self.AWG.set('ch4_amp', ch4_amp)
            # Prevents reloading, potentially bug prone as reusing the swf
            # does not rest the upload flag
            self.upload = False
class CBox_AllXY(swf.Hard_Sweep):
    """Generates a sequence for the AWG to trigger the CBox and sets the
    tape in the CBox to measure an AllXY.

    double_points: True will measure the tape twice per element, this
        should give insight wether the deviation is real.

    FIX: ch4_amp was read from 'ch3_amp', so the restore at the end of
    prepare() clobbered ch4's amplitude with ch3's value.
    """

    def __init__(self, IF, pulse_delay,
                 RO_pulse_delay,
                 RO_trigger_delay,
                 RO_pulse_length,
                 AWG, CBox,
                 double_points=True,
                 upload=True):
        super().__init__()
        self.parameter_name = 'AllXY element'
        self.unit = '#'
        self.name = 'AllXY'
        # would actually like to check if file is already loaded
        # filename can be get using AWG.get('setup_filename')
        self.upload = upload
        # The AllXY tape: pairs of pulse codewords, one pair per element.
        self.tape = np.array([0, 0, 1, 1,  # 1, 2
                              2, 2, 1, 2,  # 3, 4
                              2, 1, 3, 0,  # 5, 6
                              4, 0, 3, 4,  # 7, 8
                              4, 3, 3, 2,  # 9, 10
                              4, 1, 1, 4,  # 11, 12
                              2, 3, 3, 1,  # 13, 14
                              1, 3, 4, 2,  # 15, 16
                              2, 4, 1, 0,  # 17, 18
                              2, 0, 3, 3,  # 19, 20
                              4, 4])       # 21
        if double_points:
            # Repeat each 2-pulse element back to back.
            double_tape = []
            for i in range(len(self.tape)//2):
                pair = list(self.tape[2*i:2*i+2])
                double_tape.extend(pair + pair)
            self.tape = double_tape
        self.sweep_points = np.arange(
            int(len(self.tape)/2))  # 2 pulses per elt
        # Making input pars available to prepare
        # Required instruments
        self.AWG = AWG
        self.CBox = CBox
        self.IF = IF
        self.RO_pulse_delay = RO_pulse_delay
        self.RO_trigger_delay = RO_trigger_delay
        self.RO_pulse_length = RO_pulse_length
        self.pulse_delay = pulse_delay

    def prepare(self, **kw):
        """Set the CBox tape and upload the two-pulse marker sequence."""
        self.AWG.stop()
        self.CBox.AWG0_mode.set('Segmented tape')
        self.CBox.AWG1_mode.set('Segmented tape')
        self.CBox.restart_awg_tape(0)
        self.CBox.restart_awg_tape(1)
        self.CBox.set('AWG0_tape', self.tape)
        self.CBox.set('AWG1_tape', self.tape)
        if self.upload:
            ch3_amp = self.AWG.get('ch3_amp')
            ch4_amp = self.AWG.get('ch4_amp')  # FIX: was 'ch3_amp'
            st_seqs.CBox_two_pulse_seq(
                IF=self.IF,
                pulse_delay=self.pulse_delay,
                RO_pulse_delay=self.RO_pulse_delay,
                RO_pulse_length=self.RO_pulse_length,
                RO_trigger_delay=self.RO_trigger_delay, verbose=False)
            self.AWG.set('ch3_amp', ch3_amp)
            self.AWG.set('ch4_amp', ch4_amp)
class CBox_multi_element_tape(swf.Hard_Sweep):
    """Sets an arbitrary tape as a sequence.

    n_pulses is the number of pulses per element in the sequence by
    default.

    FIX: ch4_amp was read from 'ch3_amp', so the restore at the end of
    prepare() clobbered ch4's amplitude with ch3's value.
    """

    def __init__(self, n_pulses, tape,
                 pulse_delay,
                 IF, RO_pulse_delay, RO_trigger_delay,
                 RO_pulse_length,
                 AWG, CBox,
                 upload=True):
        super().__init__()
        self.n_pulses = n_pulses
        self.parameter_name = 'Element'
        self.unit = '#'
        self.name = 'multi-element tape'
        self.tape = tape
        self.upload = upload
        # One sweep point per element of n_pulses pulses.
        self.sweep_points = np.arange(int(len(self.tape)/n_pulses))
        self.AWG = AWG
        self.CBox = CBox
        self.IF = IF
        self.RO_pulse_delay = RO_pulse_delay
        self.RO_trigger_delay = RO_trigger_delay
        self.RO_pulse_length = RO_pulse_length
        self.pulse_delay = pulse_delay

    def prepare(self, **kw):
        """Set the CBox tape and upload the multi-pulse marker sequence."""
        self.AWG.stop()
        self.CBox.AWG0_mode.set('Segmented tape')
        self.CBox.AWG1_mode.set('Segmented tape')
        self.CBox.restart_awg_tape(0)
        self.CBox.restart_awg_tape(1)
        self.CBox.set('AWG0_tape', self.tape)
        self.CBox.set('AWG1_tape', self.tape)
        if self.upload:
            ch3_amp = self.AWG.get('ch3_amp')
            ch4_amp = self.AWG.get('ch4_amp')  # FIX: was 'ch3_amp'
            st_seqs.CBox_multi_pulse_seq(
                n_pulses=self.n_pulses, pulse_delay=self.pulse_delay,
                IF=self.IF,
                RO_pulse_delay=self.RO_pulse_delay,
                RO_trigger_delay=self.RO_trigger_delay,
                RO_pulse_length=self.RO_pulse_length,
                verbose=False)
            self.AWG.set('ch3_amp', ch3_amp)
            self.AWG.set('ch4_amp', ch4_amp)
class Resetless_tape(swf.Hard_Sweep):
    """Hard sweep playing an arbitrary tape resetlessly (fixed repetition
    interval, no qubit reset between runs).

    FIX: ch4_amp was read from 'ch3_amp', so the restore at the end of
    prepare() clobbered ch4's amplitude with ch3's value.
    """

    def __init__(self, n_pulses, tape,
                 pulse_delay, resetless_interval,
                 IF, RO_pulse_delay, RO_trigger_delay,
                 RO_pulse_length,
                 AWG, CBox,
                 upload=True):
        super().__init__()
        self.IF = IF
        self.RO_pulse_delay = RO_pulse_delay
        self.RO_trigger_delay = RO_trigger_delay
        self.parameter_name = 'Tape element'
        self.unit = ''
        self.name = 'Resetless_tape'
        self.tape = tape
        # array for transpose in MC these values are bs
        self.sweep_points = np.array(self.tape)
        self.AWG = AWG
        self.CBox = CBox
        self.RO_pulse_length = RO_pulse_length
        # would actually like to check if file is already loaded
        # filename can be get using AWG.get('setup_filename')
        self.upload = upload
        self.n_pulses = n_pulses
        self.resetless_interval = resetless_interval
        self.pulse_delay = pulse_delay

    def prepare(self, **kw):
        """Set the CBox tape and upload the resetless marker sequence."""
        self.AWG.stop()
        self.CBox.AWG0_mode.set('Segmented tape')
        self.CBox.AWG1_mode.set('Segmented tape')
        self.CBox.restart_awg_tape(0)
        self.CBox.restart_awg_tape(1)
        self.CBox.set('AWG0_tape', self.tape)
        self.CBox.set('AWG1_tape', self.tape)
        if self.upload:
            ch3_amp = self.AWG.get('ch3_amp')
            ch4_amp = self.AWG.get('ch4_amp')  # FIX: was 'ch3_amp'
            st_seqs.CBox_resetless_multi_pulse_seq(
                n_pulses=self.n_pulses, pulse_delay=self.pulse_delay,
                resetless_interval=self.resetless_interval,
                IF=self.IF,
                RO_pulse_delay=self.RO_pulse_delay,
                RO_trigger_delay=self.RO_trigger_delay,
                RO_pulse_length=self.RO_pulse_length,
                verbose=False)
            self.AWG.set('ch3_amp', ch3_amp)
            self.AWG.set('ch4_amp', ch4_amp)
class CBox_RB_sweep(swf.Hard_Sweep):
    """Hard sweep: randomized benchmarking with a CBox timing tape.

    FIX: the mutable default argument nr_cliffords=[1, 3, 5, 10, 20] is
    replaced with a tuple; it is only read (converted with np.array), so
    behavior is unchanged, but a shared mutable default is a known
    pitfall.
    """

    def __init__(self,
                 IF, RO_pulse_length,
                 RO_pulse_delay, RO_trigger_delay,
                 pulse_delay,
                 AWG, CBox, LutMan,
                 cal_points=True,
                 nr_cliffords=(1, 3, 5, 10, 20),
                 nr_seeds=3, max_seq_duration=15e-6,
                 safety_margin=500e-9,
                 upload=True):
        super().__init__()
        self.parameter_name = 'Nr of Cliffords'
        self.unit = '#'
        self.name = 'Randomized_Benchmarking'
        self.safety_margin = safety_margin
        # Making input pars available to prepare
        # Required instruments
        self.AWG = AWG
        self.CBox = CBox
        self.LutMan = LutMan
        self.nr_seeds = nr_seeds
        self.cal_points = [0, 0, 1, 1]
        self.nr_cliffords = np.array(nr_cliffords)
        self.max_seq_duration = max_seq_duration
        self.pulse_delay_ns = pulse_delay*1e9
        self.IF = IF
        self.RO_pulse_length = RO_pulse_length
        self.RO_pulse_delay = RO_pulse_delay
        self.RO_trigger_delay = RO_trigger_delay
        # Funny last sweep point values are to make the cal points appear
        # in sensible (visible) places in the plot
        self.sweep_points = np.concatenate([self.nr_cliffords,
                                            [self.nr_cliffords[-1]+.2,
                                             self.nr_cliffords[-1]+.3,
                                             self.nr_cliffords[-1]+.7,
                                             self.nr_cliffords[-1]+.8]])

    def prepare(self, upload_tek_seq=True, **kw):
        """Generate random Clifford tapes, load them as a segmented
        timing tape on all three CBox AWGs and (optionally) upload the
        Tektronix trigger sequence."""
        self.AWG.stop()
        n_cls = self.nr_cliffords
        time_tape = []
        pulse_length = self.LutMan.gauss_width.get()*4
        for seed in range(self.nr_seeds):
            for n_cl in n_cls:
                cliffords = rb.randomized_benchmarking_sequence(n_cl)
                cl_tape = rb.convert_clifford_sequence_to_tape(
                    cliffords,
                    self.LutMan.lut_mapping.get())
                for i, tape_elt in enumerate(cl_tape):
                    if i == 0:
                        # wait_time is in ns; pad the first pulse so every
                        # element has the same total duration.
                        wait_time = (self.max_seq_duration*1e9 -
                                     (len(cl_tape)-1)*self.pulse_delay_ns -
                                     pulse_length)
                    else:
                        wait_time = self.pulse_delay_ns - pulse_length
                    end_of_marker = (i == (len(cl_tape)-1))
                    entry = self.CBox.create_timing_tape_entry(
                        wait_time, tape_elt, end_of_marker, prepend_elt=0)
                    time_tape.extend(entry)
        for cal_pt in self.cal_points:
            wait_time = self.max_seq_duration*1e9 - pulse_length
            time_tape.extend(self.CBox.create_timing_tape_entry(
                wait_time, cal_pt, True, prepend_elt=0))
        for awg in range(3):
            self.CBox.set('AWG{}_mode'.format(awg), 'Segmented')
            self.CBox.set_segmented_tape(awg, time_tape)
            self.CBox.restart_awg_tape(awg)
        if upload_tek_seq:
            self.upload_tek_seq()

    def upload_tek_seq(self):
        """Upload the Tektronix trigger sequence matching the tape."""
        st_seqs.CBox_single_pulse_seq(
            IF=self.IF,
            RO_pulse_delay=self.RO_pulse_delay +
            self.max_seq_duration+self.safety_margin,
            RO_trigger_delay=self.RO_trigger_delay,
            RO_pulse_length=self.RO_pulse_length)
class Two_d_CBox_RB_seq(swf.Soft_Sweep):
    """Soft sweep that re-randomizes the RB tape at every 2D index by
    re-running the wrapped CBox_RB_sweep's prepare()."""

    def __init__(self, CBox_RB_sweepfunction):
        super().__init__()
        self.parameter_name = 'Idx'
        self.unit = '#'
        self.name = 'Randomized_Benchmarking_random_seeds'
        self.CBox_RB_sweepfunction = CBox_RB_sweepfunction

    def set_parameter(self, val):
        '''
        Uses the CBox RB sweepfunction to upload a new tape of random cliffords
        explicitly does not reupload the AWG sequence.
        '''
        self.CBox_RB_sweepfunction.prepare(upload_tek_seq=False)
class Load_Sequence_Tek(swf.Hard_Sweep):
    """Hard sweep that loads a pre-existing Tektronix AWG setup file as
    the sequence."""

    def __init__(self, AWG, sequence_name, seq_elements, upload=True):
        super().__init__()
        self.AWG = AWG
        self.sequence_name = sequence_name
        self.upload = upload
        # Sweep over the given sequence elements.
        self.sweep_points = seq_elements
        self.len = len(seq_elements)
        # Sweep metadata used for plotting/analysis.
        self.name = sequence_name
        self.parameter_name = 'amplitude'
        self.unit = 'V'

    def prepare(self, **kw):
        """Point the AWG at the named setup file (unless disabled)."""
        if not self.upload:
            return
        self.AWG.set_setup_filename(self.sequence_name)
| 37.653341
| 92
| 0.551112
| 7,652
| 63,107
| 4.194982
| 0.050836
| 0.085794
| 0.041308
| 0.033894
| 0.837882
| 0.809564
| 0.776168
| 0.752212
| 0.720374
| 0.705732
| 0
| 0.013338
| 0.360832
| 63,107
| 1,675
| 93
| 37.675821
| 0.782477
| 0.069469
| 0
| 0.730312
| 0
| 0
| 0.034447
| 0.002328
| 0
| 0
| 0
| 0.001194
| 0
| 1
| 0.072808
| false
| 0.005201
| 0.007429
| 0
| 0.11367
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7732306bf2308f1ada027789f77eff447432ceee
| 2,619
|
py
|
Python
|
crashes/migrations/0023_auto_20210812_0337.py
|
grigoryk/sentry-analyzer
|
8a48706297ea4bc8033c8c9d3223b3e59cd1cb6d
|
[
"Apache-2.0"
] | 2
|
2021-07-23T19:51:39.000Z
|
2021-08-07T01:50:19.000Z
|
crashes/migrations/0023_auto_20210812_0337.py
|
grigoryk/sentry-analyzer
|
8a48706297ea4bc8033c8c9d3223b3e59cd1cb6d
|
[
"Apache-2.0"
] | 6
|
2021-03-30T13:02:30.000Z
|
2021-07-19T17:01:53.000Z
|
crashes/migrations/0023_auto_20210812_0337.py
|
grigoryk/sentry-analyzer
|
8a48706297ea4bc8033c8c9d3223b3e59cd1cb6d
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.2.5 on 2021-08-12 03:37
# Auto-generated migration: switches every model's implicit primary key
# to BigAutoField (the Django 3.2 DEFAULT_AUTO_FIELD change).
# Do not edit by hand.
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('crashes', '0022_remove_projectendpointcache_processed'),
    ]

    operations = [
        migrations.AlterField(
            model_name='assignedcategory',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='category',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='categorycount',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='computedtrend',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='event',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='eventgroup',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='eventtag',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='eventtagkeyed',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='project',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='projectendpointcache',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='stacktrace',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
    ]
| 37.956522
| 111
| 0.608247
| 260
| 2,619
| 5.946154
| 0.192308
| 0.085382
| 0.177878
| 0.206339
| 0.792367
| 0.792367
| 0.792367
| 0.792367
| 0.792367
| 0.792367
| 0
| 0.009974
| 0.272623
| 2,619
| 68
| 112
| 38.514706
| 0.801575
| 0.017182
| 0
| 0.709677
| 1
| 0
| 0.083981
| 0.01633
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.016129
| 0
| 0.064516
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
91ffa46c9e2440a886202ddccd456b77e50ba1cb
| 7,327
|
py
|
Python
|
square/api/terminal_api.py
|
okenshields/test-python
|
4f763090b7b29e3b33a03d966f1202fe5634ad7d
|
[
"Apache-2.0"
] | 1
|
2021-07-28T21:37:00.000Z
|
2021-07-28T21:37:00.000Z
|
square/api/terminal_api.py
|
okenshields/test-python
|
4f763090b7b29e3b33a03d966f1202fe5634ad7d
|
[
"Apache-2.0"
] | null | null | null |
square/api/terminal_api.py
|
okenshields/test-python
|
4f763090b7b29e3b33a03d966f1202fe5634ad7d
|
[
"Apache-2.0"
] | 2
|
2020-11-13T12:00:13.000Z
|
2021-08-16T23:59:00.000Z
|
# -*- coding: utf-8 -*-
from square.api_helper import APIHelper
from square.http.api_response import ApiResponse
from square.api.base_api import BaseApi
from square.http.auth.o_auth_2 import OAuth2
class TerminalApi(BaseApi):
"""A Controller to access Endpoints in the square API."""
    def __init__(self, config, call_back=None):
        """Initialize the Terminal controller with the client
        configuration and an optional HTTP call-back."""
        super(TerminalApi, self).__init__(config, call_back)
def create_terminal_checkout(self,
body):
"""Does a POST request to /v2/terminals/checkouts.
Creates a new Terminal checkout request and sends it to the specified
device to take a payment for the requested amount.
Args:
body (CreateTerminalCheckoutRequest): An object containing the
fields to POST for the request. See the corresponding object
definition for field details.
Returns:
CreateTerminalCheckoutResponse: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v2/terminals/checkouts'
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8'
}
# Prepare and execute request
_request = self.config.http_client.post(_query_url, headers=_headers, parameters=APIHelper.json_serialize(body))
OAuth2.apply(self.config, _request)
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
def search_terminal_checkouts(self,
body):
"""Does a POST request to /v2/terminals/checkouts/search.
Retrieves a filtered list of Terminal checkout requests created by the
account making the request.
Args:
body (SearchTerminalCheckoutsRequest): An object containing the
fields to POST for the request. See the corresponding object
definition for field details.
Returns:
SearchTerminalCheckoutsResponse: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v2/terminals/checkouts/search'
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8'
}
# Prepare and execute request
_request = self.config.http_client.post(_query_url, headers=_headers, parameters=APIHelper.json_serialize(body))
OAuth2.apply(self.config, _request)
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
def get_terminal_checkout(self,
checkout_id):
"""Does a GET request to /v2/terminals/checkouts/{checkout_id}.
Retrieves a Terminal checkout request by checkout_id.
Args:
checkout_id (string): Unique ID for the desired
`TerminalCheckout`
Returns:
GetTerminalCheckoutResponse: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v2/terminals/checkouts/{checkout_id}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'checkout_id': checkout_id
})
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
_request = self.config.http_client.get(_query_url, headers=_headers)
OAuth2.apply(self.config, _request)
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
def cancel_terminal_checkout(self,
checkout_id):
"""Does a POST request to /v2/terminals/checkouts/{checkout_id}/cancel.
Cancels a Terminal checkout request, if the status of the request
permits it.
Args:
checkout_id (string): Unique ID for the desired
`TerminalCheckout`
Returns:
CancelTerminalCheckoutResponse: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v2/terminals/checkouts/{checkout_id}/cancel'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'checkout_id': checkout_id
})
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
_request = self.config.http_client.post(_query_url, headers=_headers)
OAuth2.apply(self.config, _request)
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
| 35.225962
| 120
| 0.624539
| 795
| 7,327
| 5.522013
| 0.181132
| 0.029613
| 0.036446
| 0.018223
| 0.807517
| 0.807517
| 0.804784
| 0.778815
| 0.770159
| 0.770159
| 0
| 0.003346
| 0.306537
| 7,327
| 207
| 121
| 35.396135
| 0.860657
| 0.367681
| 0
| 0.769231
| 0
| 0
| 0.085035
| 0.032188
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054945
| false
| 0
| 0.043956
| 0
| 0.153846
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
62263b9d4fd131f09864efab43e13bdbfe559903
| 114
|
py
|
Python
|
values/__init__.py
|
joey00072/Marathi-Programing-Language
|
190cbb40eca25413f3c69056b13a4399c1e06fdd
|
[
"MIT"
] | 52
|
2021-07-11T14:49:33.000Z
|
2022-03-16T02:45:06.000Z
|
values/__init__.py
|
d-kaustubh/Baji-Marathi-Programing-Language
|
0a03193c1af8f2d718ed0439931335ba961ca756
|
[
"MIT"
] | 1
|
2021-07-15T04:13:41.000Z
|
2021-08-06T19:41:01.000Z
|
values/__init__.py
|
d-kaustubh/Baji-Marathi-Programing-Language
|
0a03193c1af8f2d718ed0439931335ba961ca756
|
[
"MIT"
] | 6
|
2021-07-13T02:27:21.000Z
|
2022-01-12T10:33:45.000Z
|
from Values.number import *
from Values.function import *
from Values.string import *
from Values.list import *
| 28.5
| 30
| 0.77193
| 16
| 114
| 5.5
| 0.4375
| 0.454545
| 0.545455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157895
| 114
| 4
| 31
| 28.5
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
622a03fe7a01a775552e07a12b86d50733616f26
| 21,176
|
py
|
Python
|
lib/installed_clients/kb_SetUtilitiesClient.py
|
dcchivian/kb_blast
|
51519274df133fc25204c59e8379381e7b3554e6
|
[
"MIT"
] | null | null | null |
lib/installed_clients/kb_SetUtilitiesClient.py
|
dcchivian/kb_blast
|
51519274df133fc25204c59e8379381e7b3554e6
|
[
"MIT"
] | 25
|
2017-03-21T22:11:49.000Z
|
2021-08-05T18:09:58.000Z
|
lib/installed_clients/kb_SetUtilitiesClient.py
|
dcchivian/kb_blast
|
51519274df133fc25204c59e8379381e7b3554e6
|
[
"MIT"
] | 8
|
2017-03-18T22:00:42.000Z
|
2020-03-18T17:25:00.000Z
|
# -*- coding: utf-8 -*-
############################################################
#
# Autogenerated by the KBase type compiler -
# any changes made here will be overwritten
#
############################################################
from __future__ import print_function
# the following is a hack to get the baseclient to import whether we're in a
# package or not. This makes pep8 unhappy hence the annotations.
try:
# baseclient and this client are in a package
from .baseclient import BaseClient as _BaseClient # @UnusedImport
except ImportError:
# no they aren't
from baseclient import BaseClient as _BaseClient # @Reimport
class kb_SetUtilities(object):
    """Autogenerated JSON-RPC client for the KBase 'kb_SetUtilities' service.

    Every KButil_* method forwards its ``params`` dict to the shared
    ``_BaseClient`` via ``run_job`` and returns the job's result.  Per the
    generated type docs: ``params['workspace_name']`` names the target
    workspace; a 'data_obj_ref' combines workspace id and object name
    (``params['workspace_id'] + '/' + params['obj_name']``); and every
    method's result is a dict with 'report_name' and 'report_ref' entries.
    """

    def __init__(
            self, url=None, timeout=30 * 60, user_id=None,
            password=None, token=None, ignore_authrc=False,
            trust_all_ssl_certificates=False,
            auth_svc='https://ci.kbase.us/services/auth/api/legacy/KBase/Sessions/Login',
            service_ver='release',
            async_job_check_time_ms=100, async_job_check_time_scale_percent=150,
            async_job_check_max_time_ms=300000):
        """Create a client bound to the service endpoint *url*.

        All connection, auth, and async-job polling options are handed
        straight to ``_BaseClient``; *service_ver* selects which registered
        version of the service the jobs run against.

        Raises:
            ValueError: if *url* is not supplied.
        """
        # A service endpoint URL is mandatory; everything else has defaults.
        if url is None:
            raise ValueError('A url is required')
        self._service_ver = service_ver
        self._client = _BaseClient(
            url, timeout=timeout, user_id=user_id, password=password,
            token=token, ignore_authrc=ignore_authrc,
            trust_all_ssl_certificates=trust_all_ssl_certificates,
            auth_svc=auth_svc,
            async_job_check_time_ms=async_job_check_time_ms,
            async_job_check_time_scale_percent=async_job_check_time_scale_percent,
            async_job_check_max_time_ms=async_job_check_max_time_ms)

    def KButil_Localize_GenomeSet(self, params, context=None):
        """Run KButil_Localize_GenomeSet: create a Genome Set with all
        local Genomes.

        :param params: dict with 'workspace_name', 'input_ref'
            (data_obj_ref), and 'output_name' (data_obj_name).
        :returns: dict with 'report_name' and 'report_ref'.
        """
        return self._client.run_job('kb_SetUtilities.KButil_Localize_GenomeSet',
                                    [params], self._service_ver, context)

    def KButil_Localize_FeatureSet(self, params, context=None):
        """Run KButil_Localize_FeatureSet: create a Feature Set with all
        local Genomes.

        :param params: dict with 'workspace_name', 'input_ref'
            (data_obj_ref), and 'output_name' (data_obj_name).
        :returns: dict with 'report_name' and 'report_ref'.
        """
        return self._client.run_job('kb_SetUtilities.KButil_Localize_FeatureSet',
                                    [params], self._service_ver, context)

    def KButil_Merge_FeatureSet_Collection(self, params, context=None):
        """Run KButil_Merge_FeatureSet_Collection: merge FeatureSets.

        :param params: dict with 'workspace_name', 'input_refs'
            (data_obj_ref), 'output_name' (data_obj_name), and 'desc'.
        :returns: dict with 'report_name' and 'report_ref'.
        """
        return self._client.run_job('kb_SetUtilities.KButil_Merge_FeatureSet_Collection',
                                    [params], self._service_ver, context)

    def KButil_Slice_FeatureSets_by_Genomes(self, params, context=None):
        """Run KButil_Slice_FeatureSets_by_Genomes: slice a FeatureSet or
        FeatureSets by a Genome, Genomes, or GenomeSet.

        :param params: dict with 'workspace_name', 'input_featureSet_refs'
            (data_obj_ref), 'input_genome_refs' (data_obj_ref),
            'output_name' (data_obj_name), and 'desc'.
        :returns: dict with 'report_name' and 'report_ref'.
        """
        return self._client.run_job('kb_SetUtilities.KButil_Slice_FeatureSets_by_Genomes',
                                    [params], self._service_ver, context)

    def KButil_Logical_Slice_Two_FeatureSets(self, params, context=None):
        """Run KButil_Logical_Slice_Two_FeatureSets: slice two FeatureSets
        by Venn overlap.

        :param params: dict with 'workspace_name',
            'input_featureSet_ref_A' and 'input_featureSet_ref_B'
            (data_obj_ref), 'operator', 'desc', and 'output_name'
            (data_obj_name).
        :returns: dict with 'report_name' and 'report_ref'.
        """
        return self._client.run_job('kb_SetUtilities.KButil_Logical_Slice_Two_FeatureSets',
                                    [params], self._service_ver, context)

    def KButil_Merge_GenomeSets(self, params, context=None):
        """Run KButil_Merge_GenomeSets: merge GenomeSets.

        :param params: dict with 'workspace_name', 'input_refs'
            (data_obj_ref), 'output_name' (data_obj_name), and 'desc'.
        :returns: dict with 'report_name' and 'report_ref'.
        """
        return self._client.run_job('kb_SetUtilities.KButil_Merge_GenomeSets',
                                    [params], self._service_ver, context)

    def KButil_Build_GenomeSet(self, params, context=None):
        """Run KButil_Build_GenomeSet: create a GenomeSet.

        :param params: dict with 'workspace_name', 'input_refs'
            (data_obj_ref), 'output_name' (data_obj_name), and 'desc'.
        :returns: dict with 'report_name' and 'report_ref'.
        """
        return self._client.run_job('kb_SetUtilities.KButil_Build_GenomeSet',
                                    [params], self._service_ver, context)

    def KButil_Build_GenomeSet_from_FeatureSet(self, params, context=None):
        """Run KButil_Build_GenomeSet_from_FeatureSet: obtain a GenomeSet
        from a FeatureSet.

        :param params: dict with 'workspace_name', 'input_ref'
            (data_obj_ref), 'output_name' (data_obj_name), and 'desc'.
        :returns: dict with 'report_name' and 'report_ref'.
        """
        return self._client.run_job('kb_SetUtilities.KButil_Build_GenomeSet_from_FeatureSet',
                                    [params], self._service_ver, context)

    def KButil_Add_Genomes_to_GenomeSet(self, params, context=None):
        """Run KButil_Add_Genomes_to_GenomeSet: add a Genome to a
        GenomeSet.

        :param params: dict with 'workspace_name', 'input_genome_refs'
            (data_obj_ref), 'input_genomeset_ref' (data_obj_ref),
            'output_name' (data_obj_name), and 'desc'.
        :returns: dict with 'report_name' and 'report_ref'.
        """
        return self._client.run_job('kb_SetUtilities.KButil_Add_Genomes_to_GenomeSet',
                                    [params], self._service_ver, context)

    def KButil_Build_ReadsSet(self, params, context=None):
        """Run KButil_Build_ReadsSet: create a ReadsSet.

        :param params: dict with 'workspace_name', 'input_refs'
            (data_obj_ref), 'output_name' (data_obj_name), and 'desc'.
        :returns: dict with 'report_name' and 'report_ref'.
        """
        return self._client.run_job('kb_SetUtilities.KButil_Build_ReadsSet',
                                    [params], self._service_ver, context)

    def KButil_Merge_MultipleReadsSets_to_OneReadsSet(self, params, context=None):
        """Run KButil_Merge_MultipleReadsSets_to_OneReadsSet: merge
        multiple ReadsSets into one ReadsSet.

        :param params: dict with 'workspace_name', 'input_refs'
            (data_obj_ref), 'output_name' (data_obj_name), and 'desc'.
        :returns: dict with 'report_name' and 'report_ref'.
        """
        return self._client.run_job('kb_SetUtilities.KButil_Merge_MultipleReadsSets_to_OneReadsSet',
                                    [params], self._service_ver, context)

    def KButil_Build_AssemblySet(self, params, context=None):
        """Run KButil_Build_AssemblySet: create an AssemblySet.

        :param params: dict with 'workspace_name', 'input_refs'
            (data_obj_ref), 'output_name' (data_obj_name), and 'desc'.
        :returns: dict with 'report_name' and 'report_ref'.
        """
        return self._client.run_job('kb_SetUtilities.KButil_Build_AssemblySet',
                                    [params], self._service_ver, context)

    def KButil_Batch_Create_AssemblySet(self, params, context=None):
        """Run KButil_Batch_Create_AssemblySet: create an AssemblySet
        without specifying individual objects.

        :param params: dict with 'workspace_name', 'name_pattern',
            'output_name' (data_obj_name), and 'desc'.
        :returns: dict with 'report_name' and 'report_ref'.
        """
        return self._client.run_job('kb_SetUtilities.KButil_Batch_Create_AssemblySet',
                                    [params], self._service_ver, context)

    def KButil_Batch_Create_GenomeSet(self, params, context=None):
        """Run KButil_Batch_Create_GenomeSet: create a GenomeSet without
        specifying individual objects.

        :param params: dict with 'workspace_name', 'name_pattern',
            'output_name' (data_obj_name), and 'desc'.
        :returns: dict with 'report_name' and 'report_ref'.
        """
        return self._client.run_job('kb_SetUtilities.KButil_Batch_Create_GenomeSet',
                                    [params], self._service_ver, context)

    def status(self, context=None):
        """Run the service's 'status' method (takes no parameters)."""
        return self._client.run_job('kb_SetUtilities.status',
                                    [], self._service_ver, context)
| 58.822222
| 100
| 0.639309
| 2,540
| 21,176
| 5.079134
| 0.072835
| 0.046043
| 0.044183
| 0.057437
| 0.886133
| 0.861406
| 0.820479
| 0.802108
| 0.763119
| 0.743741
| 0
| 0.00116
| 0.267331
| 21,176
| 359
| 101
| 58.986072
| 0.830358
| 0.67227
| 0
| 0.2
| 1
| 0.014286
| 0.158914
| 0.140181
| 0
| 0
| 0
| 0
| 0
| 1
| 0.228571
| false
| 0.028571
| 0.057143
| 0.014286
| 0.514286
| 0.014286
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 9
|
624d3bfc88c68778b91e3141bf3150c5a8f4721e
| 28,491
|
py
|
Python
|
enc/enc_open.py
|
Alpha-Demon404/RE-14
|
b5b46a9f0eee218f2a642b615c77135c33c6f4ad
|
[
"MIT"
] | 39
|
2020-02-26T09:44:36.000Z
|
2022-03-23T00:18:25.000Z
|
enc/enc_open.py
|
B4BY-DG/reverse-enginnering
|
b5b46a9f0eee218f2a642b615c77135c33c6f4ad
|
[
"MIT"
] | 15
|
2020-05-14T10:07:26.000Z
|
2022-01-06T02:55:32.000Z
|
enc/enc_open.py
|
B4BY-DG/reverse-enginnering
|
b5b46a9f0eee218f2a642b615c77135c33c6f4ad
|
[
"MIT"
] | 41
|
2020-03-16T22:36:38.000Z
|
2022-03-17T14:47:19.000Z
|
# Time Succses Parser : Sun Jun 28 23:28:56 2020
# Auto Parser Dis Version : 1.1.0
# Source : https://www.github.com/Datez-Kun
import base64, zlib, marshal, sys, os
def keluar():
    # "keluar" = Indonesian for "exit": print a notice and terminate the
    # interpreter.  NOTE: this is Python 2 code (print statements).
    print '[!] Exit'
    os.sys.exit()
logo = '\x1b[1;33m\xe2\x95\x94\xe2\x95\x90\xe2\x95\x97\xe2\x94\x8c\xe2\x94\x90\xe2\x94\x8c\xe2\x94\x8c\xe2\x94\x80\xe2\x94\x90\xe2\x94\xac\xe2\x94\x80\xe2\x94\x90\xe2\x94\xac \xe2\x94\xac\xe2\x94\x8c\xe2\x94\x80\xe2\x94\x90\xe2\x94\x8c\xe2\x94\xac\xe2\x94\x90\x1b[0;36m<<<\x1b[0;34m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x97\x1b[0m\n\x1b[1;37m\xe2\x95\x91\xe2\x95\xa3 \xe2\x94\x82\xe2\x94\x82\xe2\x94\x82\xe2\x94\x82 \xe2\x94\x9c\xe2\x94\xac\xe2\x94\x98\xe2\x94\x94\xe2\x94\xac\xe2\x94\x98\xe2\x94\x9c\xe2\x94\x80\xe2\x94\x98 \xe2\x94\x82 \x1b[41m \x1b[0m\x1b[0;34m\xe2\x95\x91\x1b[0m\n\x1b[0;32m\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x9d\xe2\x94\x98\xe2\x94\x94\xe2\x94\x98\xe2\x94\x94\xe2\x94\x80\xe2\x94\x98\xe2\x94\xb4\xe2\x94\x94\xe2\x94\x80 \xe2\x94\xb4 \xe2\x94\xb4 \xe2\x94\xb4 \x1b[0;33mpython\x1b[0m \x1b[41m \x1b[0m\x1b[0;34m\xe2\x95\x91\x1b[0m\n\x1b[0;32mAuthor \x1b[1;37m: \x1b[1;30mSumarr ID \x1b[47m \x1b[0m\x1b[0;34m\xe2\x95\x91\x1b[0m\n\x1b[0;32mKontak \x1b[1;37m: \x1b[1;30m0895615431xxx \x1b[47m \x1b[0m\x1b[0;34m\xe2\x95\x91\x1b[0m\n\x1b[0;32mGitlab \x1b[1;37m: \x1b[1;30mHttps://github.com/Sumarr-ID \x1b[0;34m\xe2\x95\x91\x1b[0m\n\x1b[0;34m\xe2\x95\x91\x1b[1;35m-\xe2\x96\xba \x1b[0;31m{\x1b[1;37m01\x1b[0;31m}\x1b[0m Encrypt \x1b[1;30mBase16 \x1b[0;34m \xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x9d\x1b[0m\n\x1b[0;34m\xe2\x95\x91\x1b[1;35m-\xe2\x96\xba \x1b[0;31m{\x1b[1;37m02\x1b[0;31m}\x1b[0m Encrypt \x1b[1;30mBase32 \x1b[0;34m\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x97\x1b[0m\n\x1b[0;34m\xe2\x95\x91\x1b[1;35m-\xe2\x96\xba \x1b[0;31m{\x1b[1;37m03\x1b[0;31m}\x1b[0m Encrypt \x1b[1;30mBase64 \x1b[0;34m\xe2\x95\x91\x1b[0;30m\x1b[47mraz 
p5w\x1b[0m\x1b[0;34m\xe2\x95\x91\x1b[0;32m]\x1b[0;34m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x97\x1b[0m\n\x1b[0;34m\xe2\x95\x91\x1b[1;35m-\xe2\x96\xba \x1b[0;31m{\x1b[1;37m04\x1b[0;31m}\x1b[0m Encrypt \x1b[1;30mMarshal \x1b[0;34m \xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x9d \xe2\x95\x91\x1b[0m\n\x1b[0;34m\xe2\x95\x91\x1b[1;35m-\xe2\x96\xba \x1b[0;31m{\x1b[1;37m05\x1b[0;31m}\x1b[0m Encrypt \x1b[1;30mZlib,Base16 \x1b[0;34m\xe2\x95\x91\x1b[0m\n\x1b[0;34m\xe2\x95\x91\x1b[1;35m-\xe2\x96\xba \x1b[0;31m{\x1b[1;37m06\x1b[0;31m}\x1b[0m Encrypt \x1b[1;30mZlib,Base32 \x1b[0;34m\xe2\x95\x91\x1b[0m\n\x1b[0;34m\xe2\x95\x91\x1b[1;35m-\xe2\x96\xba \x1b[0;31m{\x1b[1;37m07\x1b[0;31m}\x1b[0m Encrypt \x1b[1;30mZlib,Base64 \x1b[0;34m\xe2\x95\x91\x1b[0m\n\x1b[0;34m\xe2\x95\x91\x1b[1;35m-\xe2\x96\xba \x1b[0;31m{\x1b[1;37m08\x1b[0;31m}\x1b[0m Encrypt \x1b[1;30mMarshal,Zlib,Base16 \x1b[0;34m\xe2\x95\x91\x1b[0m\n\x1b[0;34m\xe2\x95\x91\x1b[1;35m-\xe2\x96\xba \x1b[0;31m{\x1b[1;37m09\x1b[0;31m}\x1b[0m Encrypt \x1b[1;30mMarshal,Zlib,Base32 \x1b[0;34m\xe2\x95\x91\x1b[0m\n\x1b[0;34m\xe2\x95\x91\x1b[1;35m-\xe2\x96\xba \x1b[0;31m{\x1b[1;37m10\x1b[0;31m}\x1b[0m Encrypt \x1b[1;30mMarshal,Zlib,Base64 \x1b[0;34m\xe2\x95\x91\x1b[0m\n\x1b[0;34m\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\x1b[0;32m[\x1b[0;35mEncrypt python\x1b[0;32m]\x1b[0;34m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x9d\x1b[0m'
def menu():
    # Clear the screen, print the ANSI-art banner, then dispatch on the
    # user's choice.  Choices '1'..'10' call the encoder routines named
    # with Indonesian numerals (satu .. sepuluh); '0', empty, or unknown
    # input exits via keluar().  Python 2 (print statement / raw_input).
    os.system('clear')
    print logo
    print
    masuk = raw_input('\x1b[0;31m[\x1b[1;33m+\x1b[0;31m] \x1b[0;34mChoice \x1b[1;37m\xe2\x9e\xa4\x1b[0m ')
    if masuk == '':
        print '[!] Wrong input'
        keluar()
    elif masuk == '1':
        satu()
    elif masuk == '2':
        dua()
    elif masuk == '3':
        tiga()
    elif masuk == '4':
        empat()
    elif masuk == '5':
        lima()
    elif masuk == '6':
        enam()
    elif masuk == '7':
        tujuh()
    elif masuk == '8':
        delapan()
    elif masuk == '9':
        sembilan()
    elif masuk == '10':
        sepuluh()
    elif masuk == '0':
        keluar()
    else:
        print '[!] Wrong input'
        keluar()
def satu():
    # Menu option 1: base16-"encrypt" a Python source file.  Reads the file
    # named by the user, base16-encodes its bytes, and writes a loader stub
    # ('<name>-enc.py') that exec()s the decoded payload at import time.
    # Python 2 code (print statement / raw_input).
    try:
        file = raw_input('\x1b[0;36m[\x1b[0;31m!\x1b[0;36m] \x1b[1;37mFile \x1b[0;31m-\xe2\x96\xba\x1b[0m ')
        fileopen = open(file).read()
        a = base64.b16encode(fileopen)
        # Self-decoding stub: imports base64 and exec()s the b16 payload.
        b = "#Encrypt by Sumarr ID\n#Gitlab : Https://gitlab.com/Sumarr-ID\nimport base64\nexec(base64.b16decode('" + a + "'))"
        c = file.replace('.py', '-enc.py')
        d = open(c, 'w')
        d.write(b)
        d.close()
        # "Hasil" = result; show the output filename, then wait for Enter.
        print '\x1b[0;31m[\x1b[0;34m+\x1b[0;31m] \x1b[0;32mHasil \x1b[1;37m:\x1b[0m ', c
        print '\x1b[0;31m[\x1b[1;33m+\x1b[0;31m] \x1b[0;31m[\x1b[0;32mTekan enter untuk kembali\x1b[0;31m]\x1b[0;34m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x97'
        print '\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x9d'
        raw_input('\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\x1b[0;31m[\x1b[1;37mBack\x1b[0;31m]\x1b[0m')
        menu()
    # NOTE(review): bare except swallows *every* error (KeyboardInterrupt
    # included on Py2) and always reports "file not found" — narrowing to
    # IOError would be more accurate; left as-is to preserve behavior.
    except:
        print '[?] File tidak ada'
        print '\x1b[0;31m[\x1b[1;33m+\x1b[0;31m] \x1b[0;31m[\x1b[0;32mTekan enter untuk kembali\x1b[0;31m]\x1b[0;34m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x97'
        print '\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x9d'
        raw_input('\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\x1b[0;31m[\x1b[1;37mBack\x1b[0;31m]\x1b[0m')
        menu()
def dua():
try:
file = raw_input('\x1b[0;36m[\x1b[0;31m!\x1b[0;36m] \x1b[1;37mFile \x1b[0;31m-\xe2\x96\xba\x1b[0m ')
fileopen = open(file).read()
a = base64.b32encode(fileopen)
b = "#Encrypt by Sumarr ID\n#Gitlab : Https://gitlab.com/Sumarr-ID\nimport base64\nexec(base64.b32decode('" + a + "'))"
c = file.replace('.py', '-enc.py')
d = open(c, 'w')
d.write(b)
d.close()
print '\x1b[0;31m[\x1b[0;34m+\x1b[0;31m] \x1b[0;32mHasil \x1b[1;37m:\x1b[0m ', c
print '\x1b[0;31m[\x1b[1;33m+\x1b[0;31m] \x1b[0;31m[\x1b[0;32mTekan enter untuk kembali\x1b[0;31m]\x1b[0;34m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x97'
print '\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x9d'
raw_input('\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\x1b[0;31m[\x1b[1;37mBack\x1b[0;31m]\x1b[0m')
menu()
except:
print '[?] File tidak ada'
print '\x1b[0;31m[\x1b[1;33m+\x1b[0;31m] \x1b[0;31m[\x1b[0;32mTekan enter untuk kembali\x1b[0;31m]\x1b[0;34m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x97'
print '\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x9d'
raw_input('\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\x1b[0;31m[\x1b[1;37mBack\x1b[0;31m]\x1b[0m')
menu()
def tiga():
try:
file = raw_input('\x1b[0;36m[\x1b[0;31m!\x1b[0;36m] \x1b[1;37mFile \x1b[0;31m-\xe2\x96\xba\x1b[0m ')
fileopen = open(file).read()
a = base64.b64encode(fileopen)
b = "#Encrypt by Sumarr ID\n#Gitlab : Https://gitlab.com/Sumarr-ID\nimport base64\nexec(base64.b64decode('" + a + "'))"
c = file.replace('.py', '-enc.py')
d = open(c, 'w')
d.write(b)
d.close()
print '\x1b[0;31m[\x1b[0;34m+\x1b[0;31m] \x1b[0;32mHasil \x1b[1;37m:\x1b[0m ', c
print '\x1b[0;31m[\x1b[1;33m+\x1b[0;31m] \x1b[0;31m[\x1b[0;32mTekan enter untuk kembali\x1b[0;31m]\x1b[0;34m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x97'
print '\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x9d'
raw_input('\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\x1b[0;31m[\x1b[1;37mBack\x1b[0;31m]\x1b[0m')
menu()
except:
print '[?] File tidak ada'
print '\x1b[0;31m[\x1b[1;33m+\x1b[0;31m] \x1b[0;31m[\x1b[0;32mTekan enter untuk kembali\x1b[0;31m]\x1b[0;34m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x97'
print '\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x9d'
raw_input('\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\x1b[0;31m[\x1b[1;37mBack\x1b[0;31m]\x1b[0m')
menu()
def empat():
try:
file = raw_input('\x1b[0;36m[\x1b[0;31m!\x1b[0;36m] \x1b[1;37mFile \x1b[0;31m-\xe2\x96\xba\x1b[0m ')
fileopen = open(file).read()
a = compile(fileopen, 'Sumarr ID', 'exec')
m = marshal.dumps(a)
s = repr(m)
b = '#Encrypt by Sumarr ID\n#Gitlab : Https://gitlab.com/Sumarr-ID\nimport marshal\nexec(marshal.loads(' + s + '))'
c = file.replace('.py', '-enc.py')
d = open(c, 'w')
d.write(b)
d.close()
print '\x1b[0;31m[\x1b[0;34m+\x1b[0;31m] \x1b[0;32mHasil \x1b[1;37m:\x1b[0m ', c
print '\x1b[0;31m[\x1b[1;33m+\x1b[0;31m] \x1b[0;31m[\x1b[0;32mTekan enter untuk kembali\x1b[0;31m]\x1b[0;34m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x97'
print '\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x9d'
raw_input('\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\x1b[0;31m[\x1b[1;37mBack\x1b[0;31m]\x1b[0m')
main()
except:
print '[?] File tidak ada'
print '\x1b[0;31m[\x1b[1;33m+\x1b[0;31m] \x1b[0;31m[\x1b[0;32mTekan enter untuk kembali\x1b[0;31m]\x1b[0;34m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x97'
print '\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x9d'
raw_input('\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\x1b[0;31m[\x1b[1;37mBack\x1b[0;31m]\x1b[0m')
main()
def lima():
try:
file = raw_input('\x1b[0;36m[\x1b[0;31m!\x1b[0;36m] \x1b[1;37mFile \x1b[0;31m-\xe2\x96\xba\x1b[0m ')
fileopen = open(file).read()
c = zlib.compress(fileopen)
d = base64.b16encode(c)
e = '#Encrypt by Sumarr ID\n#Gitlab : Https://gitlab.com/Sumarr-ID\nimport marshal,zlib,base64\nexec(zlib.decompress(base64.b16decode("' + d + '")))'
f = file.replace('.py', '-enc.py')
g = open(f, 'w')
g.write(e)
g.close()
print '\x1b[0;31m[\x1b[0;34m+\x1b[0;31m] \x1b[0;32mHasil \x1b[1;37m:\x1b[0m ', f
print '\x1b[0;31m[\x1b[1;33m+\x1b[0;31m] \x1b[0;31m[\x1b[0;32mTekan enter untuk kembali\x1b[0;31m]\x1b[0;34m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x97'
print '\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x9d'
raw_input('\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\x1b[0;31m[\x1b[1;37mBack\x1b[0;31m]\x1b[0m')
menu()
except:
print '[?] File tidak ada'
print '\x1b[0;31m[\x1b[1;33m+\x1b[0;31m] \x1b[0;31m[\x1b[0;32mTekan enter untuk kembali\x1b[0;31m]\x1b[0;34m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x97'
print '\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x9d'
raw_input('\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\x1b[0;31m[\x1b[1;37mBack\x1b[0;31m]\x1b[0m')
menu()
def enam():
try:
file = raw_input('\x1b[0;36m[\x1b[0;31m!\x1b[0;36m] \x1b[1;37mFile \x1b[0;31m-\xe2\x96\xba\x1b[0m ')
fileopen = open(file).read()
c = zlib.compress(fileopen)
d = base64.b32encode(c)
e = '#Encrypt by Sumarr ID\n#Gitlab : Https://gitlab.com/Sumarr-ID\nimport marshal,zlib,base64\nexec(zlib.decompress(base64.b32decode("' + d + '")))'
f = file.replace('.py', '-enc.py')
g = open(f, 'w')
g.write(e)
g.close()
print '\x1b[0;31m[\x1b[0;34m+\x1b[0;31m] \x1b[0;32mHasil \x1b[1;37m:\x1b[0m ', f
print '\x1b[0;31m[\x1b[1;33m+\x1b[0;31m] \x1b[0;31m[\x1b[0;32mTekan enter untuk kembali\x1b[0;31m]\x1b[0;34m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x97'
print '\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x9d'
raw_input('\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\x1b[0;31m[\x1b[1;37mBack\x1b[0;31m]\x1b[0m')
menu()
except:
print '[?] File tidak ada'
print '\x1b[0;31m[\x1b[1;33m+\x1b[0;31m] \x1b[0;31m[\x1b[0;32mTekan enter untuk kembali\x1b[0;31m]\x1b[0;34m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x97'
print '\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x9d'
raw_input('\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\x1b[0;31m[\x1b[1;37mBack\x1b[0;31m]\x1b[0m')
menu()
def tujuh():
try:
file = raw_input('\x1b[0;36m[\x1b[0;31m!\x1b[0;36m] \x1b[1;37mFile \x1b[0;31m-\xe2\x96\xba\x1b[0m ')
fileopen = open(file).read()
c = zlib.compress(fileopen)
d = base64.b64encode(c)
e = '#Encrypt by Sumarr ID\n#Gitlab : Https://gitlab.com/Sumarr-ID\nimport marshal,zlib,base64\nexec(zlib.decompress(base64.b64decode("' + d + '")))'
f = file.replace('.py', '-enc.py')
g = open(f, 'w')
g.write(e)
g.close()
print '\x1b[0;31m[\x1b[0;34m+\x1b[0;31m] \x1b[0;32mHasil \x1b[1;37m:\x1b[0m ', f
print '\x1b[0;31m[\x1b[1;33m+\x1b[0;31m] \x1b[0;31m[\x1b[0;32mTekan enter untuk kembali\x1b[0;31m]\x1b[0;34m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x97'
print '\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x9d'
raw_input('\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\x1b[0;31m[\x1b[1;37mBack\x1b[0;31m]\x1b[0m')
menu()
except:
print '[?] File tidak ada'
print '\x1b[0;31m[\x1b[1;33m+\x1b[0;31m] \x1b[0;31m[\x1b[0;32mTekan enter untuk kembali\x1b[0;31m]\x1b[0;34m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x97'
print '\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x9d'
raw_input('\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\x1b[0;31m[\x1b[1;37mBack\x1b[0;31m]\x1b[0m')
menu()
def delapan():
try:
file = raw_input('\x1b[0;36m[\x1b[0;31m!\x1b[0;36m] \x1b[1;37mFile \x1b[0;31m-\xe2\x96\xba\x1b[0m ')
fileopen = open(file).read()
sa = compile(fileopen, 'Sumarr ID', 'exec')
sb = marshal.dumps(sa)
c = zlib.compress(sb)
d = base64.b16encode(c)
e = '#Encrypt by Sumarr ID\n#Gitlab : Https://gitlab.com/Sumarr-ID\nimport marshal,zlib,base64\nexec(marshal.loads(zlib.decompress(base64.b16decode("' + str(d) + '"))))'
f = file.replace('.py', '-enc.py')
g = open(f, 'w')
g.write(e)
g.close()
print '\x1b[0;31m[\x1b[0;34m+\x1b[0;31m] \x1b[0;32mHasil \x1b[1;37m:\x1b[0m ', f
print '\x1b[0;31m[\x1b[1;33m+\x1b[0;31m] \x1b[0;31m[\x1b[0;32mTekan enter untuk kembali\x1b[0;31m]\x1b[0;34m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x97'
print '\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x9d'
raw_input('\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\x1b[0;31m[\x1b[1;37mBack\x1b[0;31m]\x1b[0m')
menu()
except:
print '[?] File tidak ada'
print '[+] [Tekan enter untuk kembali]\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x97'
print '\x1b[0;31m[\x1b[1;33m+\x1b[0;31m] \x1b[0;31m[\x1b[0;32mTekan enter untuk kembali\x1b[0;31m]\x1b[0;34m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x97'
print '\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x9d'
raw_input('\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\x1b[0;31m[\x1b[1;37mBack\x1b[0;31m]\x1b[0m')
menu()
def sembilan():
try:
file = raw_input('\x1b[0;36m[\x1b[0;31m!\x1b[0;36m] \x1b[1;37mFile \x1b[0;31m-\xe2\x96\xba\x1b[0m ')
fileopen = open(file).read()
sa = compile(fileopen, 'Sumarr ID', 'exec')
sb = marshal.dumps(sa)
c = zlib.compress(sb)
d = base64.b32encode(c)
e = '#Encrypt by Sumarr ID\n#Gitlab : Https://gitlab.com/Sumarr-ID\nimport marshal,zlib,base64\nexec(marshal.loads(zlib.decompress(base64.b32decode("' + str(d) + '"))))'
f = file.replace('.py', '-enc.py')
g = open(f, 'w')
g.write(e)
g.close()
print '\x1b[0;31m[\x1b[0;34m+\x1b[0;31m] \x1b[0;32mHasil \x1b[1;37m:\x1b[0m ', f
print '\x1b[0;31m[\x1b[1;33m+\x1b[0;31m] \x1b[0;31m[\x1b[0;32mTekan enter untuk kembali\x1b[0;31m]\x1b[0;34m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x97'
print '\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x9d'
raw_input('\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\x1b[0;31m[\x1b[1;37mBack\x1b[0;31m]\x1b[0m')
menu()
except:
print '[?] File tidak ada'
print '[+] [Tekan enter untuk kembali]\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x97'
print '\x1b[0;31m[\x1b[1;33m+\x1b[0;31m] \x1b[0;31m[\x1b[0;32mTekan enter untuk kembali\x1b[0;31m]\x1b[0;34m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x97'
print '\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x9d'
raw_input('\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\x1b[0;31m[\x1b[1;37mBack\x1b[0;31m]\x1b[0m')
menu()
def sepuluh():
try:
file = raw_input('\x1b[0;36m[\x1b[0;31m!\x1b[0;36m] \x1b[1;37mFile \x1b[0;31m-\xe2\x96\xba\x1b[0m ')
fileopen = open(file).read()
sa = compile(fileopen, 'Sumarr ID', 'exec')
sb = marshal.dumps(sa)
c = zlib.compress(sb)
d = base64.b64encode(c)
e = '#Encrypt by Sumarr ID\n#Gitlab : Https://gitlab.com/Sumarr-ID\nimport marshal,zlib,base64\nexec(marshal.loads(zlib.decompress(base64.b64decode("' + str(d) + '"))))'
f = file.replace('.py', '-enc.py')
g = open(f, 'w')
g.write(e)
g.close()
print '\x1b[0;31m[\x1b[0;34m+\x1b[0;31m] \x1b[0;32mHasil \x1b[1;37m:\x1b[0m ', f
print '\x1b[0;31m[\x1b[1;33m+\x1b[0;31m] \x1b[0;31m[\x1b[0;32mTekan enter untuk kembali\x1b[0;31m]\x1b[0;34m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x97'
print '\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x9d'
raw_input('\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\x1b[0;31m[\x1b[1;37mBack\x1b[0;31m]\x1b[0m')
menu()
except:
print '[?] File tidak ada'
print '\x1b[0;31m[\x1b[1;33m+\x1b[0;31m] \x1b[0;31m[\x1b[0;32mTekan enter untuk kembali\x1b[0;31m]\x1b[0;34m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x97'
print '\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x9d'
raw_input('\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\x1b[0;31m[\x1b[1;37mBack\x1b[0;31m]\x1b[0m')
menu()
# Entry point: show the interactive encoder menu when run as a script.
if __name__ == '__main__':
    menu()
| 96.253378
| 3,782
| 0.662034
| 6,052
| 28,491
| 3.110212
| 0.029577
| 0.375498
| 0.504914
| 0.659831
| 0.951867
| 0.946342
| 0.945067
| 0.944908
| 0.927748
| 0.920098
| 0
| 0.315253
| 0.104419
| 28,491
| 296
| 3,783
| 96.253378
| 0.422441
| 0.004212
| 0
| 0.774436
| 0
| 0.353383
| 0.803441
| 0.727439
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.041353
| null | null | 0.25188
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 17
|
626fbc73487c99101049a5b53bc2147eddb13fd1
| 54,490
|
py
|
Python
|
boto3_type_annotations_with_docs/boto3_type_annotations/elbv2/paginator.py
|
cowboygneox/boto3_type_annotations
|
450dce1de4e066b939de7eac2ec560ed1a7ddaa2
|
[
"MIT"
] | 119
|
2018-12-01T18:20:57.000Z
|
2022-02-02T10:31:29.000Z
|
boto3_type_annotations_with_docs/boto3_type_annotations/elbv2/paginator.py
|
cowboygneox/boto3_type_annotations
|
450dce1de4e066b939de7eac2ec560ed1a7ddaa2
|
[
"MIT"
] | 15
|
2018-11-16T00:16:44.000Z
|
2021-11-13T03:44:18.000Z
|
boto3_type_annotations_with_docs/boto3_type_annotations/elbv2/paginator.py
|
cowboygneox/boto3_type_annotations
|
450dce1de4e066b939de7eac2ec560ed1a7ddaa2
|
[
"MIT"
] | 11
|
2019-05-06T05:26:51.000Z
|
2021-09-28T15:27:59.000Z
|
from typing import Dict
from typing import List
from botocore.paginate import Paginator
class DescribeAccountLimits(Paginator):
    def paginate(self, PaginationConfig: Dict = None) -> Dict:
        """
        Create an iterator over pages returned by
        ``ElasticLoadBalancingv2.Client.describe_account_limits``.

        Each response page contains:

        * ``Limits`` -- list of ``{'Name': str, 'Max': str}`` dicts describing
          the Elastic Load Balancing limits for the AWS account (e.g.
          ``application-load-balancers``, ``target-groups``,
          ``listeners-per-network-load-balancer``).
        * ``NextToken`` -- token to resume pagination.

        :type PaginationConfig: dict
        :param PaginationConfig: Optional pagination controls: ``MaxItems``
            (int, total items to return), ``PageSize`` (int, items per page)
            and ``StartingToken`` (str, the ``NextToken`` from a previous
            response).
        :rtype: dict
        :returns: The response pages described above.
        """
        pass
class DescribeListenerCertificates(Paginator):
    def paginate(self, ListenerArn: str, PaginationConfig: Dict = None) -> Dict:
        """
        Create an iterator over pages returned by
        ``ElasticLoadBalancingv2.Client.describe_listener_certificates``.

        Each response page contains:

        * ``Certificates`` -- list of
          ``{'CertificateArn': str, 'IsDefault': bool}`` dicts describing the
          SSL server certificates attached to the listener. ``IsDefault``
          marks the listener's default certificate.
        * ``NextToken`` -- token to resume pagination.

        :type ListenerArn: string
        :param ListenerArn: **[REQUIRED]** The Amazon Resource Name (ARN) of
            the listener.
        :type PaginationConfig: dict
        :param PaginationConfig: Optional pagination controls: ``MaxItems``
            (int), ``PageSize`` (int) and ``StartingToken`` (str, the
            ``NextToken`` from a previous response).
        :rtype: dict
        :returns: The response pages described above.
        """
        pass
class DescribeListeners(Paginator):
    def paginate(self, LoadBalancerArn: str = None, ListenerArns: List = None, PaginationConfig: Dict = None) -> Dict:
        """
        Create an iterator over pages returned by
        ``ElasticLoadBalancingv2.Client.describe_listeners``.

        Each response page contains ``Listeners`` (a list of listener dicts)
        and ``NextToken``. A listener dict carries:

        * ``ListenerArn``, ``LoadBalancerArn`` -- ARNs of the listener and its
          load balancer.
        * ``Port`` (int) and ``Protocol`` (``'HTTP'|'HTTPS'|'TCP'|'TLS'``).
        * ``Certificates`` -- list of ``{'CertificateArn', 'IsDefault'}``; a
          certificate is required when the protocol is HTTPS or TLS.
        * ``SslPolicy`` -- the security policy naming supported ciphers and
          protocols.
        * ``DefaultActions`` -- list of action dicts, each with a ``Type`` of
          ``'forward'``, ``'authenticate-oidc'``, ``'authenticate-cognito'``,
          ``'redirect'`` or ``'fixed-response'`` plus the matching
          configuration block (``TargetGroupArn``, ``AuthenticateOidcConfig``,
          ``AuthenticateCognitoConfig``, ``RedirectConfig`` or
          ``FixedResponseConfig``) and an optional ``Order`` (int) when a rule
          has multiple actions.

        :type LoadBalancerArn: string
        :param LoadBalancerArn: The Amazon Resource Name (ARN) of the load
            balancer whose listeners to describe.
        :type ListenerArns: list
        :param ListenerArns: The Amazon Resource Names (ARN) of specific
            listeners to describe.
        :type PaginationConfig: dict
        :param PaginationConfig: Optional pagination controls: ``MaxItems``
            (int), ``PageSize`` (int) and ``StartingToken`` (str, the
            ``NextToken`` from a previous response).
        :rtype: dict
        :returns: The response pages described above.
        """
        pass
class DescribeLoadBalancers(Paginator):
    def paginate(self, LoadBalancerArns: List = None, Names: List = None, PaginationConfig: Dict = None) -> Dict:
        """
        Create an iterator that paginates over responses from
        :py:meth:`ElasticLoadBalancingv2.Client.describe_load_balancers`.

        See also: `AWS API Documentation
        <https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DescribeLoadBalancers>`_

        :type LoadBalancerArns: list
        :param LoadBalancerArns: The Amazon Resource Names (ARN) of the load
            balancers. You can specify up to 20 load balancers in a single call.
        :type Names: list
        :param Names: The names of the load balancers.
        :type PaginationConfig: dict
        :param PaginationConfig: Pagination controls: ``MaxItems`` (total number
            of items to return), ``PageSize`` (size of each page) and
            ``StartingToken`` (the ``NextToken`` from a previous response).
        :rtype: dict
        :returns: A dict with a ``LoadBalancers`` list (ARN, DNS name, hosted
            zone, creation time, scheme, VPC, state, type, Availability Zones,
            security groups and IP address type per load balancer) and a
            ``NextToken`` string for resuming pagination.
        """
        pass
class DescribeRules(Paginator):
    def paginate(self, ListenerArn: str = None, RuleArns: List = None, PaginationConfig: Dict = None) -> Dict:
        """
        Create an iterator that paginates over responses from
        :py:meth:`ElasticLoadBalancingv2.Client.describe_rules`.

        See also: `AWS API Documentation
        <https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DescribeRules>`_

        :type ListenerArn: string
        :param ListenerArn: The Amazon Resource Name (ARN) of the listener.
        :type RuleArns: list
        :param RuleArns: The Amazon Resource Names (ARN) of the rules.
        :type PaginationConfig: dict
        :param PaginationConfig: Pagination controls: ``MaxItems`` (total number
            of items to return), ``PageSize`` (size of each page) and
            ``StartingToken`` (the ``NextToken`` from a previous response).
        :rtype: dict
        :returns: A dict with a ``Rules`` list (each rule carries its ARN,
            priority, match ``Conditions``, ``Actions`` — forward,
            authenticate-oidc, authenticate-cognito, redirect or fixed-response
            — and an ``IsDefault`` flag) and a ``NextToken`` string for
            resuming pagination.
        """
        pass
class DescribeSSLPolicies(Paginator):
    def paginate(self, Names: List = None, PaginationConfig: Dict = None) -> Dict:
        """
        Create an iterator that paginates over responses from
        :py:meth:`ElasticLoadBalancingv2.Client.describe_ssl_policies`.

        See also: `AWS API Documentation
        <https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DescribeSSLPolicies>`_

        :type Names: list
        :param Names: The names of the policies.
        :type PaginationConfig: dict
        :param PaginationConfig: Pagination controls: ``MaxItems`` (total number
            of items to return), ``PageSize`` (size of each page) and
            ``StartingToken`` (the ``NextToken`` from a previous response).
        :rtype: dict
        :returns: A dict with an ``SslPolicies`` list (each policy carries its
            ``SslProtocols``, prioritized ``Ciphers`` and ``Name``) and a
            ``NextToken`` string for resuming pagination.
        """
        pass
class DescribeTargetGroups(Paginator):
    def paginate(self, LoadBalancerArn: str = None, TargetGroupArns: List = None, Names: List = None, PaginationConfig: Dict = None) -> Dict:
        """
        Create an iterator that paginates over responses from
        :py:meth:`ElasticLoadBalancingv2.Client.describe_target_groups`.

        See also: `AWS API Documentation
        <https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DescribeTargetGroups>`_

        :type LoadBalancerArn: string
        :param LoadBalancerArn: The Amazon Resource Name (ARN) of the load balancer.
        :type TargetGroupArns: list
        :param TargetGroupArns: The Amazon Resource Names (ARN) of the target groups.
        :type Names: list
        :param Names: The names of the target groups.
        :type PaginationConfig: dict
        :param PaginationConfig: Pagination controls: ``MaxItems`` (total number
            of items to return), ``PageSize`` (size of each page) and
            ``StartingToken`` (the ``NextToken`` from a previous response).
        :rtype: dict
        :returns: A dict with a ``TargetGroups`` list (ARN, name, protocol,
            port, VPC, health-check configuration, ``Matcher`` HTTP codes,
            associated load balancer ARNs and target type per group) and a
            ``NextToken`` string for resuming pagination.
        """
        pass
| 55.602041
| 313
| 0.469536
| 4,457
| 54,490
| 5.733453
| 0.107471
| 0.030289
| 0.013305
| 0.00767
| 0.757142
| 0.732723
| 0.704078
| 0.686624
| 0.667449
| 0.649331
| 0
| 0.0099
| 0.434612
| 54,490
| 979
| 314
| 55.658836
| 0.819527
| 0.839182
| 0
| 0.291667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.291667
| false
| 0.291667
| 0.125
| 0
| 0.708333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 7
|
6278adde6558fcef6b606b4e564f5c1c3758a3f9
| 23
|
py
|
Python
|
virtual/lib/python3.9/site-packages/package_name/__init__.py
|
marykamau2/Blog
|
71619902c6b09490f092bfac7fcdcd7097ec4354
|
[
"MIT"
] | null | null | null |
virtual/lib/python3.9/site-packages/package_name/__init__.py
|
marykamau2/Blog
|
71619902c6b09490f092bfac7fcdcd7097ec4354
|
[
"MIT"
] | null | null | null |
virtual/lib/python3.9/site-packages/package_name/__init__.py
|
marykamau2/Blog
|
71619902c6b09490f092bfac7fcdcd7097ec4354
|
[
"MIT"
] | null | null | null |
def foo():
    """Return the constant value 100."""
    return 100
| 7.666667
| 21
| 0.652174
| 4
| 23
| 3.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 0.217391
| 23
| 2
| 22
| 11.5
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| true
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
6574b774f07cb8e398d0eaca0f39f3553a894b50
| 31,712
|
py
|
Python
|
baselines/ppo1/Environment/vehicle.py
|
idthanm/baselines
|
05f595d600e55d5a39d98e215fc0da51c7d3a08a
|
[
"MIT"
] | 1
|
2019-03-06T12:27:32.000Z
|
2019-03-06T12:27:32.000Z
|
baselines/ppo1/Environment/vehicle.py
|
idthanm/baselines
|
05f595d600e55d5a39d98e215fc0da51c7d3a08a
|
[
"MIT"
] | null | null | null |
baselines/ppo1/Environment/vehicle.py
|
idthanm/baselines
|
05f595d600e55d5a39d98e215fc0da51c7d3a08a
|
[
"MIT"
] | null | null | null |
import random
import numpy as np
from math import pi, hypot, sin, cos, asin, tan
# Module-level vehicle and simulation parameters.
V_max = 40/3.6; V_min = 10/3.6 # speed limits (m/s), converted from km/h
a_max = 1; a_min = -1 # acceleration limits (m/s^2)
delta_max = pi / 6; delta_min = - pi / 6 # limit of delta (steering angle, rad)
T = 0.1 # sample time (s)
epsilon = 1 # threshold of the target check
regionScale = 0.8 # start position random region
C = 2.7 # distance from rear to forward (wheelbase)
L = 4.7 # length of the vehicle
W = 2.0 # width of the vehicle
offset = 1.35 # distance from rear to center
latDist = 0.2 # lateral safety margin around the car box (see carSafeBox)
longDist = 0.2 # longitudinal safety margin around the car box (see carSafeBox)
THW = 0.1 # time headway (s): scales the forward safety margin with speed when driving straight
class Vehicle(object):
def __init__(self, trafficModel, manualDist = 0):
    """
    Initialize a vehicle for one crossing maneuver.

    Picks a randomized start pose from the approach direction encoded in
    ``trafficModel.flag[0]`` ('D', 'R', 'U' or 'L' — presumably down/right/
    up/left approaches; confirm against the traffic model), then builds a
    four-point reference path ``self.ref`` and a ``self.middlePoint`` for
    the two-letter start->end maneuver encoded by ``trafficModel.flag``.

    :param trafficModel: object with attributes ``flag`` (two-letter string,
        e.g. 'DR'), ``start`` (x, y tuple) and ``end`` (x, y tuple).
    :param manualDist: extra offset added to the randomized start position;
        only applied for the 'D' approach.
    """
    self.trafficModel = trafficModel
    # Randomize the start pose along the approach axis; theta is the heading.
    if trafficModel.flag[0] == 'D':
        self.posx = trafficModel.start[0]
        self.posy = trafficModel.start[1] + regionScale * abs(trafficModel.start[1]) * random.random() + manualDist
        self.theta = 0.5 * pi
    if trafficModel.flag[0] == 'R':
        self.posx = (1 - regionScale) * trafficModel.start[0] + regionScale * abs(trafficModel.start[0]) * random.random()
        self.posy = trafficModel.start[1]
        self.theta = pi
    if trafficModel.flag[0] == 'U':
        self.posx = trafficModel.start[0]
        self.posy = (1 - regionScale) * trafficModel.start[1] + regionScale * abs(trafficModel.start[1]) * random.random()
        self.theta = - 0.5 * pi
    if trafficModel.flag[0] == 'L':
        self.posx = trafficModel.start[0] + regionScale * abs(trafficModel.start[0]) * random.random()
        self.posy = trafficModel.start[1]
        self.theta = 0
    self.target = trafficModel.end # self.target is a tuple i.e. (x, y)
    # Random initial speed in [V_min, V_max).
    self.vel = V_min + (V_max - V_min) * random.random()
    # self.vel = V_min
    self.C = C # distance from rear to forward
    self.L = L # length of the vehicle
    self.W = W # width of the vehicle
    self.offset = offset # distance from rear to center
    self.R = hypot(self.L, self.W) / 2.0 # radius of the vehicle
    self.delta = 0  # current steering angle
    self.cen_x, self.cen_y = self.cenPos()
    self.boundX, self.boundY = self.carBox()
    self.safeX, self.safeY = self.carSafeBox()
    self.history = [] # self.history.append((x, y, theta))
    # Minimum turning radius at full steering lock.
    self.R_max = self.C / tan(delta_max)
    self.alpha = 0
    self.endFlag = False
    # Reference path: [start, turn-entry, turn-exit, end]. middlePoint is the
    # midpoint of the turning arc (or of the straight segment for pass-through
    # maneuvers 'DU', 'RL', 'LR', 'UD').
    if trafficModel.flag == 'DR':#1
        self.ref = [(trafficModel.start[0],trafficModel.start[1]),
                    (trafficModel.start[0], trafficModel.end[1]-self.R_max),
                    (trafficModel.start[0]+self.R_max, trafficModel.end[1]),
                    (trafficModel.end[0],trafficModel.end[1])]
        self.middlePoint = (self.ref[2][0] - self.R_max * cos(0.25 * pi),
                            self.ref[1][1] + self.R_max * cos(0.25 * pi))
    elif trafficModel.flag == 'DU':#2
        self.ref = [(trafficModel.start[0], trafficModel.start[1]),
                    (trafficModel.start[0], trafficModel.start[1] + self.R_max),
                    (trafficModel.start[0], trafficModel.end[1]- self.R_max),
                    (trafficModel.start[0], trafficModel.end[1])]
        self.middlePoint = (self.ref[0][0], 0)
    elif trafficModel.flag == 'DL':#3
        self.ref = [(trafficModel.start[0], trafficModel.start[1]),
                    (trafficModel.start[0], trafficModel.end[1] - self.R_max),
                    (trafficModel.start[0] - self.R_max, trafficModel.end[1]),
                    (trafficModel.end[0], trafficModel.end[1])]
        self.middlePoint = (self.ref[2][0] + self.R_max * cos(0.25 * pi),
                            self.ref[1][1] + self.R_max * cos(0.25 * pi))
    elif trafficModel.flag == 'RU':#4
        self.ref = [(trafficModel.start[0], trafficModel.start[1]),
                    (trafficModel.end[0] + self.R_max, trafficModel.start[1]),
                    (trafficModel.end[0], trafficModel.start[1] + self.R_max),
                    (trafficModel.end[0], trafficModel.end[1])]
        self.middlePoint = (self.ref[1][0] - self.R_max * cos(0.25 * pi),
                            self.ref[2][1] - self.R_max * cos(0.25 * pi))
    elif trafficModel.flag == 'RL':#5
        self.ref = [(trafficModel.start[0], trafficModel.start[1]),
                    (trafficModel.start[0] - self.R_max, trafficModel.end[1] ),
                    (trafficModel.end[0] + self.R_max, trafficModel.end[1]),
                    (trafficModel.end[0], trafficModel.end[1])]
        self.middlePoint = (0, self.ref[0][1])
    elif trafficModel.flag == 'RD':#6
        self.ref = [(trafficModel.start[0], trafficModel.start[1]),
                    (trafficModel.end[0] + self.R_max, trafficModel.start[1] ),
                    (trafficModel.end[0] , trafficModel.start[1]- self.R_max),
                    (trafficModel.end[0], trafficModel.end[1])]
        self.middlePoint = (self.ref[1][0] - self.R_max * cos(0.25 * pi),
                            self.ref[2][1] + self.R_max * cos(0.25 * pi))
    elif trafficModel.flag == 'LD':#7
        self.ref = [(trafficModel.start[0], trafficModel.start[1]),
                    (trafficModel.end[0] - self.R_max, trafficModel.start[1] ),
                    (trafficModel.end[0] , trafficModel.start[1]- self.R_max),
                    (trafficModel.end[0], trafficModel.end[1])]
        self.middlePoint = (self.ref[1][0] + self.R_max * cos(0.25 * pi),
                            self.ref[2][1] + self.R_max * cos(0.25 * pi))
    elif trafficModel.flag == 'LR': # 8
        self.ref = [(trafficModel.start[0], trafficModel.start[1]),
                    (trafficModel.start[0] + self.R_max, trafficModel.end[1]),
                    (trafficModel.end[0] - self.R_max, trafficModel.end[1]),
                    (trafficModel.end[0], trafficModel.end[1])]
        self.middlePoint = (0, self.ref[0][1])
    elif trafficModel.flag == 'LU': # 9
        self.ref = [(trafficModel.start[0], trafficModel.start[1]),
                    (trafficModel.end[0] - self.R_max, trafficModel.start[1]),
                    (trafficModel.end[0] , trafficModel.start[1]+ self.R_max),
                    (trafficModel.end[0], trafficModel.end[1])]
        self.middlePoint = (self.ref[1][0] + self.R_max * cos(0.25 * pi),
                            self.ref[2][1] - self.R_max * cos(0.25 * pi))
    elif trafficModel.flag == 'UL': # 10
        self.ref = [(trafficModel.start[0], trafficModel.start[1]),
                    (trafficModel.start[0] , trafficModel.end[1]+ self.R_max),
                    (trafficModel.start[0] - self.R_max, trafficModel.end[1]),
                    (trafficModel.end[0], trafficModel.end[1])]
        self.middlePoint = (self.ref[2][0] + self.R_max * cos(0.25 * pi),
                            self.ref[1][1] - self.R_max * cos(0.25 * pi))
    elif trafficModel.flag == 'UD': # 11
        self.ref = [(trafficModel.start[0], trafficModel.start[1]),
                    (trafficModel.start[0] , trafficModel.start[1]- self.R_max),
                    (trafficModel.end[0] , trafficModel.end[1]+ self.R_max),
                    (trafficModel.end[0], trafficModel.end[1])]
        self.middlePoint = (self.ref[1][0], 0)
    else: # 12 ('UR')
        self.ref = [(trafficModel.start[0], trafficModel.start[1]),
                    (trafficModel.start[0] , trafficModel.end[1]+ self.R_max),
                    (trafficModel.start[0] + self.R_max, trafficModel.end[1]),
                    (trafficModel.end[0], trafficModel.end[1])]
        self.middlePoint = (self.ref[2][0] - self.R_max * cos(0.25 * pi),
                            self.ref[1][1] - self.R_max * cos(0.25 * pi))
def carBox(self):
    """
    Return the vehicle's bounding rectangle as a closed polygon.

    The rectangle is centered at (cen_x, cen_y), aligned with heading theta,
    with half-length L/2 and half-width W/2. Returns (x, y): two 5-element
    lists tracing the four corners with the first corner repeated so the
    polygon is closed (e.g. for plotting).
    """
    # Hoist the trig terms: the original rebuilt the same np.mat offset
    # vectors eight times. hx/hy is the half-length vector along the heading,
    # wx/wy the half-width vector perpendicular to it.
    hx = np.cos(self.theta) * self.L / 2
    hy = np.sin(self.theta) * self.L / 2
    wx = np.sin(self.theta) * self.W / 2
    wy = -np.cos(self.theta) * self.W / 2
    cx, cy = self.cen_x, self.cen_y
    car1 = (cx + hx + wx, cy + hy + wy)
    car2 = (cx + hx - wx, cy + hy - wy)
    car3 = (cx - hx + wx, cy - hy + wy)
    car4 = (cx - hx - wx, cy - hy - wy)
    # Corner order car1, car2, car4, car3 traces the rectangle perimeter.
    x = [car1[0], car2[0], car4[0], car3[0], car1[0]]
    y = [car1[1], car2[1], car4[1], car3[1], car1[1]]
    return x, y
def carSafeBox(self):
    """
    Return the vehicle's safety envelope as a closed polygon.

    Expands the bounding box (self.boundX/self.boundY from carBox) by latDist
    laterally and longDist longitudinally. When driving straight
    (self.delta == 0) the front margin additionally grows with speed by
    self.vel * THW (time headway). Returns (x, y): two 5-element lists with
    the first corner repeated to close the polygon.
    """
    if self.delta == 0:
        forwardDist = longDist + self.vel * THW
    else:
        forwardDist = longDist
    # Hoist the trig terms instead of rebuilding the same np.mat offset
    # vectors for every corner. f = forward margin, b = rear margin,
    # l = lateral margin components.
    cos_t = np.cos(self.theta)
    sin_t = np.sin(self.theta)
    fx, fy = cos_t * forwardDist, sin_t * forwardDist
    bx, by = cos_t * longDist, sin_t * longDist
    lx, ly = sin_t * latDist, -cos_t * latDist
    # Front corners (bound indices 0, 1) move forward; rear corners use
    # bound indices 3 and 2 respectively, matching the original pairing.
    car1 = (self.boundX[0] + fx + lx, self.boundY[0] + fy + ly)
    car2 = (self.boundX[1] + fx - lx, self.boundY[1] + fy - ly)
    car3 = (self.boundX[3] - bx + lx, self.boundY[3] - by + ly)
    car4 = (self.boundX[2] - bx - lx, self.boundY[2] - by - ly)
    x = [car1[0], car2[0], car4[0], car3[0], car1[0]]
    y = [car1[1], car2[1], car4[1], car3[1], car1[1]]
    return x, y
def cenPos(self):
x0 = np.mat([[self.posx], [self.posy]])
Rot0 = np.mat([[np.cos(self.theta), -np.sin(self.theta)],
[np.sin(self.theta), np.cos(self.theta)]])
centerCar0 = x0 + Rot0 * np.mat([[self.offset], [0]])
return centerCar0[0, 0], centerCar0[1, 0]
def stateupdate(self, action):
if not self.endFlag:
v_temp = self.vel + action * T
if 0 <= v_temp <= V_max:
self.vel += action * T
s = self.vel * T + 1 / 2 * action * T ** 2
elif v_temp > V_max:
v_temp = V_max
s = 0.5 * (self.vel + v_temp) * T
self.vel = V_max
else:
v_temp = 0
s = 0.5 * (self.vel + v_temp) * T
self.vel = 0
if self.trafficModel.flag == 'DR':
self.DR(s)
elif self.trafficModel.flag == 'DU':
self.DU(s)
elif self.trafficModel.flag == 'DL':
self.DL(s)
elif self.trafficModel.flag == 'RU':
self.RU(s)
elif self.trafficModel.flag == 'RL':
self.RL(s)
elif self.trafficModel.flag == 'RD':
self.RD(s)
elif self.trafficModel.flag == 'LD':
self.LD(s)
elif self.trafficModel.flag == 'LR':
self.LR(s)
elif self.trafficModel.flag == 'LU':
self.LU(s)
elif self.trafficModel.flag == 'UL':
self.UL(s)
elif self.trafficModel.flag == 'UD':
self.UD(s)
elif self.trafficModel.flag == 'UR':
self.UR(s)
self.cen_x, self.cen_y = self.cenPos()
self.boundX, self.boundY = self.carBox()
self.safeX, self.safeY = self.carSafeBox()
self.endFlag = self.targetCheck()
def getRelPos(self):
if self.trafficModel.flag == 'DR':
if self.posy < self.middlePoint[1]:
pos = self.middlePoint[1] - self.posy
else:
pos = self.middlePoint[0] - self.posx
elif self.trafficModel.flag == 'DU':
if self.posy < self.middlePoint[1]:
pos = self.middlePoint[1] - self.posy
else:
pos = self.middlePoint[1] - self.posy
elif self.trafficModel.flag == 'DL':
if self.posy < self.middlePoint[1]:
pos = self.middlePoint[1] - self.posy
else:
pos = - (self.middlePoint[0] - self.posx)
elif self.trafficModel.flag == 'RU':
if self.posx > self.middlePoint[0]:
pos = - (self.middlePoint[0] - self.posx)
else:
pos = self.middlePoint[1] - self.posy
elif self.trafficModel.flag == 'RL':
if self.posx > self.middlePoint[0]:
pos = - (self.middlePoint[0] - self.posx)
else:
pos = - (self.middlePoint[0] - self.posx)
elif self.trafficModel.flag == 'RD':
if self.posx > self.middlePoint[0]:
pos = - (self.middlePoint[0] - self.posx)
else:
pos = - (self.middlePoint[1] - self.posy)
elif self.trafficModel.flag == 'LD':
if self.posx < self.middlePoint[0]:
pos = self.middlePoint[0] - self.posx
else:
pos = - (self.middlePoint[1] - self.posy)
elif self.trafficModel.flag == 'LR':
if self.posx < self.middlePoint[0]:
pos = self.middlePoint[0] - self.posx
else:
pos = self.middlePoint[0] - self.posx
elif self.trafficModel.flag == 'LU':
if self.posx < self.middlePoint[0]:
pos = self.middlePoint[0] - self.posx
else:
pos = self.middlePoint[1] - self.posy
elif self.trafficModel.flag == 'UL':
if self.posy > self.middlePoint[1]:
pos = - (self.middlePoint[1] - self.posy)
else:
pos = - (self.middlePoint[0] - self.posx)
elif self.trafficModel.flag == 'UD':
if self.posy > self.middlePoint[1]:
pos = - (self.middlePoint[1] - self.posy)
else:
pos = - (self.middlePoint[1] - self.posy)
else:
if self.posy > self.middlePoint[1]:
pos = - (self.middlePoint[1] - self.posy)
else:
pos = self.middlePoint[0] - self.posx
return pos
def targetCheck(self):
if abs(self.posy - self.ref[3][1]) < epsilon and abs(self.posx - self.ref[3][0]) < epsilon:
return True
else:
return False
    def DR(self, s):
        """Advance the down->right turn by travelled distance ``s``.

        Three sequential phases (plain ``if``s, so a single call may fall
        through from one phase into the next):
          1. straight segment heading north up to the turn-in point ref[1];
          2. right-hand arc of radius ``R_max`` centred at
             (ref[2][0], ref[1][1]);
          3. straight segment heading east to the goal ref[3].
        Each executed phase appends ``(posx, posy, theta)`` to the history.
        """
        if self.posy < self.ref[1][1] and self.posx == self.ref[1][0]:
            # Phase 1: straight approach heading north.
            x=self.posx
            y=self.posy+s
            if y< self.ref[1][1]:
                self.posy = y
                self.posx = x
            if y > self.ref[1][1]:
                # Overshot the turn-in point: convert the overshoot into the
                # initial sweep angle and place the car on the arc.
                # (original author's note asked whether this alpha can be
                # used below -- it can: it is stored on ``self``.)
                self.alpha = abs(y-self.ref[1][1])/self.R_max
                self.posx = self.ref[2][0] - self.R_max * cos(self.alpha)
                self.posy = self.ref[1][1] + self.R_max * sin(self.alpha)
                self.delta = 0
                self.theta = pi / 2
            # NOTE(review): when y == ref[1][1] exactly, neither branch above
            # runs and only a history entry is added -- confirm intended.
            self.history.append((self.posx, self.posy, self.theta))
        if self.posx >= self.ref[1][0] and self.ref[1][1] <= self.posy < self.ref[2][1]:
            # Phase 2: sweep along the arc.
            # (original author's note asked how to reuse the alpha from
            # above -- it is the instance attribute set in phase 1.)
            self.alpha = self.alpha + s / self.R_max
            x = self.ref[2][0] - self.R_max * cos(self.alpha)
            y = self.ref[1][1] + self.R_max * sin(self.alpha)
            if x < self.ref[2][0]:
                self.posy = y
                self.posx = x
            if x > self.ref[2][0]:
                # Past the arc end: clamp onto the eastbound straight.
                beta = asin(abs( x - self.ref[2][0])/self.R_max)
                self.posy = self.ref[2][1]
                self.posx = self.ref[2][0]+ self.R_max*beta
            self.delta = delta_max
            self.theta = pi / 2 - self.alpha
            self.history.append((self.posx, self.posy, self.theta))
        if self.posy == self.ref[2][1]:
            # Phase 3: straight segment heading east to the goal.
            x = self.posx + s
            y = self.ref[2][1]
            if x < self.ref[3][0]:
                self.posy = y
                self.posx = x
            if x >= self.ref[3][0]:
                self.posy = self.ref[3][1]
                self.posx = self.ref[3][0]
                self.delta = 0
                self.theta = 0
            self.history.append((self.posx, self.posy, self.theta))
def DU(self, s):
if self.posy < self.ref[3][1] and self.posx == self.ref[1][0]:
x = self.posx
y = self.posy + s
if y < self.ref[3][1]:
self.posy = y
self.posx = x
if y >= self.ref[3][1]:
self.posy = self.ref[3][1]
self.posx = self.ref[3][0]
self.delta = 0
self.theta = pi / 2
self.history.append((self.posx, self.posy, self.theta))
def DL(self, s):
if self.posy < self.ref[1][1] and self.posx == self.ref[1][0]:
x = self.posx
y = self.posy + s
if y < self.ref[1][1]:
self.posy = y
self.posx = x
if y >= self.ref[1][1]:
alpha = abs(y - self.ref[1][1]) / self.R_max
self.posx = self.ref[2][0] + self.R_max * cos(self.alpha) # it was minus in ‘DR’
self.posy = self.ref[1][1] + self.R_max * sin(self.alpha)
self.delta = 0
self.theta = pi / 2
self.history.append((self.posx, self.posy, self.theta))
# self.delta = delta_max
elif self.posx <= self.ref[1][0] and self.ref[1][1] <= self.posy < self.ref[2][1]:
self.alpha = self.alpha + s / self.R_max
x = self.ref[2][0] + self.R_max * cos(self.alpha)
y = self.ref[1][1] + self.R_max * sin(self.alpha)
if x > self.ref[2][0]:
self.posy = y
self.posx = x
if x < self.ref[2][0]:
beta = asin(abs(x - self.ref[2][0]) / self.R_max)
self.posy = self.ref[2][1]
self.posx = self.ref[2][0] - self.R_max * beta
self.delta = delta_max
self.theta = pi / 2 + self.alpha
self.history.append((self.posx, self.posy, self.theta))
elif self.posy == self.ref[2][1]:
x = self.posx - s
y = self.ref[2][1]
if x > self.ref[3][0]:
self.posy = y
self.posx = x
if x <= self.ref[3][0]:
self.posy = self.ref[3][1]
self.posx = self.ref[3][0]
self.delta = 0
self.theta = - pi
self.history.append((self.posx, self.posy, self.theta))
    def RU(self, s):
        """Advance the right->up turn by travelled distance ``s``.

        Sequential phases (plain ``if``s, so one call may fall through):
          1. straight segment heading west to the turn-in point ref[1];
          2. arc of radius ``R_max`` centred at (ref[1][0], ref[2][1]);
          3. straight segment heading north to the goal ref[3].
        Each executed phase appends ``(posx, posy, theta)`` to the history.
        """
        if self.posx > self.ref[1][0] and self.posy == self.ref[1][1]:
            # Phase 1: straight approach heading west.
            x = self.posx - s
            y = self.posy
            if x > self.ref[1][0]:
                self.posy = y
                self.posx = x
            if x <= self.ref[1][0]:
                # Overshot the turn-in point: convert the overshoot into the
                # initial sweep angle and place the car on the arc.
                # (original author's note: alpha is stored on ``self`` so
                # phase 2 can continue from it.)
                self.alpha = abs(x - self.ref[1][0]) / self.R_max
                self.posx = self.ref[1][0] - self.R_max * sin(self.alpha)
                self.posy = self.ref[2][1] - self.R_max * cos(self.alpha)
                self.delta = 0
                self.theta = - pi
            self.history.append((self.posx, self.posy, self.theta))
        if self.posx <= self.ref[1][0] and self.posy < self.ref[2][1]:
            # Phase 2: sweep along the arc toward a northbound heading.
            self.alpha = self.alpha + s / self.R_max
            x = self.ref[1][0] - self.R_max * sin(self.alpha)
            y = self.ref[2][1] - self.R_max * cos(self.alpha)
            if y < self.ref[2][1]:
                self.posy = y
                self.posx = x
            if y > self.ref[2][1]:
                # Past the arc end: clamp onto the northbound straight.
                beta = asin(abs(y - self.ref[2][1]) / self.R_max)
                self.posy = self.ref[2][1] + self.R_max * beta
                self.posx = self.ref[2][0]
            self.delta = delta_max
            self.theta = pi - self.alpha
            self.history.append((self.posx, self.posy, self.theta))
        if self.posx == self.ref[2][0]:
            # Phase 3: straight segment heading north to the goal.
            y = self.posy + s
            x = self.ref[2][0]
            if y < self.ref[3][1]:
                self.posy = y
                self.posx = x
            if y >= self.ref[3][1]:
                self.posy = self.ref[3][1]
                self.posx = self.ref[3][0]
                self.delta = 0
                self.theta = pi / 2
            self.history.append((self.posx, self.posy, self.theta))
def RL(self, s):
if self.posx >= self.ref[3][0] and self.posy == self.ref[1][1]:
x = self.posx - s
y = self.posy
if x > self.ref[3][0]:
self.posy = y
self.posx = x
if x <= self.ref[3][0]:
self.posy = self.ref[3][1]
self.posx = self.ref[3][0]
self.delta = 0
# self.theta = -pi
self.history.append((self.posx, self.posy, self.theta))
    def RD(self, s):
        """Advance the right->down turn by travelled distance ``s``.

        Sequential phases (plain ``if``s, so one call may fall through):
          1. straight segment heading west to the turn-in point ref[1];
          2. arc of radius ``R_max`` centred at (ref[1][0], ref[2][1]);
          3. straight segment heading south to the goal ref[3].
        Each executed phase appends ``(posx, posy, theta)`` to the history.
        """
        if self.posx > self.ref[1][0] and self.posy == self.ref[1][1]:
            # Phase 1: straight approach heading west.
            x = self.posx - s
            y = self.posy
            if x > self.ref[1][0]:
                self.posy = y
                self.posx = x
            if x <= self.ref[1][0]:
                # Overshot the turn-in point: convert the overshoot into the
                # initial sweep angle and place the car on the arc.
                self.alpha = abs(x - self.ref[1][0]) / self.R_max
                self.posx = self.ref[1][0] - self.R_max * sin(self.alpha)
                self.posy = self.ref[2][1] + self.R_max * cos(self.alpha)
                self.delta = 0
                #self.theta = - pi
                # NOTE(review): unlike RU, the heading update above is
                # commented out here -- confirm whether that is deliberate.
            self.history.append((self.posx, self.posy, self.theta))
        if self.ref[2][0] < self.posx <= self.ref[1][0] and self.posy >= self.ref[2][1]:
            # Phase 2: sweep along the arc toward a southbound heading.
            self.alpha = self.alpha + s / self.R_max
            x = self.ref[1][0] - self.R_max * sin(self.alpha)
            y = self.ref[2][1] + self.R_max * cos(self.alpha)
            if y > self.ref[2][1]:
                self.posy = y
                self.posx = x
            if y <= self.ref[2][1]:
                # Past the arc end: clamp onto the southbound straight.
                beta = asin(abs(y - self.ref[2][1]) / self.R_max)
                self.posy = self.ref[2][1] - self.R_max * beta
                self.posx = self.ref[2][0]
            self.delta = delta_max
            self.theta = - (pi - self.alpha)
            self.history.append((self.posx, self.posy, self.theta))
        if self.posx == self.ref[2][0]:
            # Phase 3: straight segment heading south to the goal.
            y = self.posy - s
            x = self.ref[2][0]
            if y > self.ref[3][1]:
                self.posy = y
                self.posx = x
            if y <= self.ref[3][1]:
                self.posy = self.ref[3][1]
                self.posx = self.ref[3][0]
                self.delta = 0
                self.theta = - pi / 2
            self.history.append((self.posx, self.posy, self.theta))
    def LD(self, s):
        """Advance the left->down turn by travelled distance ``s``.

        Sequential phases (plain ``if``s, so one call may fall through):
          1. straight segment heading east to the turn-in point ref[1];
          2. arc of radius ``R_max`` centred at (ref[1][0], ref[2][1]);
          3. straight segment heading south to the goal ref[3].
        Each executed phase appends ``(posx, posy, theta)`` to the history.
        """
        if self.posx < self.ref[1][0] and self.posy == self.ref[1][1]:
            # Phase 1: straight approach heading east.
            x = self.posx + s
            y = self.posy
            if x < self.ref[1][0]:
                self.posy = y
                self.posx = x
            if x >= self.ref[1][0]:
                # Overshot the turn-in point: convert the overshoot into the
                # initial sweep angle and place the car on the arc.
                self.alpha = abs(x - self.ref[1][0]) / self.R_max
                self.posx = self.ref[1][0] + self.R_max * sin(self.alpha)
                self.posy = self.ref[2][1] + self.R_max * cos(self.alpha)
                self.delta = 0
                self.theta = 0
            self.history.append((self.posx, self.posy, self.theta))
        if self.posx > self.ref[1][0] and self.posy > self.ref[2][1]:
            # Phase 2: sweep along the arc toward a southbound heading.
            self.alpha = self.alpha + s / self.R_max
            x = self.ref[1][0] + self.R_max * sin(self.alpha)
            y = self.ref[2][1] + self.R_max * cos(self.alpha)
            if y > self.ref[2][1]:
                self.posy = y
                self.posx = x
            if y < self.ref[2][1]:
                # Past the arc end: clamp onto the southbound straight.
                beta = asin(abs(y - self.ref[2][1]) / self.R_max)
                self.posy = self.ref[2][1] - self.R_max * beta
                self.posx = self.ref[2][0]
            self.delta = delta_max
            self.theta = - self.alpha
            self.history.append((self.posx, self.posy, self.theta))
        if self.posx == self.ref[2][0]:
            # Phase 3: straight segment heading south to the goal.
            y = self.posy - s
            x = self.ref[2][0]
            if y > self.ref[3][1]:
                self.posy = y
                self.posx = x
            if y <= self.ref[3][1]:
                self.posy = self.ref[3][1]
                self.posx = self.ref[3][0]
                self.delta = 0
                self.theta = -pi / 2
            self.history.append((self.posx, self.posy, self.theta))
def LR(self, s):
if self.posx < self.ref[3][0] and self.posy == self.ref[1][1]:
x = self.posx + s
y = self.posy
if x < self.ref[3][0]:
self.posy = y
self.posx = x
if x >= self.ref[3][0]:
self.posy = self.ref[3][1]
self.posx = self.ref[3][0]
self.delta = 0
self.theta = 0
self.history.append((self.posx, self.posy, self.theta))
    def LU(self, s):
        """Advance the left->up turn by travelled distance ``s``.

        Sequential phases (plain ``if``s, so one call may fall through):
          1. straight segment heading east to the turn-in point ref[1];
          2. arc of radius ``R_max`` centred at (ref[1][0], ref[2][1]);
          3. straight segment heading north to the goal ref[3].
        Each executed phase appends ``(posx, posy, theta)`` to the history.
        """
        if self.posx < self.ref[1][0] and self.posy == self.ref[1][1]:
            # Phase 1: straight approach heading east.
            x = self.posx + s
            y = self.posy
            if x < self.ref[1][0]:
                self.posy = y
                self.posx = x
            if x >= self.ref[1][0]:
                # Overshot the turn-in point: convert the overshoot into the
                # initial sweep angle and place the car on the arc.
                # (original author's note: alpha is stored on ``self`` so
                # phase 2 can continue from it.)
                self.alpha = abs(x - self.ref[1][0]) / self.R_max
                self.posx = self.ref[1][0] + self.R_max * sin(self.alpha)
                self.posy = self.ref[2][1] - self.R_max * cos(self.alpha)
                self.delta = 0
                self.theta = 0
            self.history.append((self.posx, self.posy, self.theta))
        if self.posx >= self.ref[1][0] and self.posy < self.ref[2][1]:
            # Phase 2: sweep along the arc toward a northbound heading.
            self.alpha = self.alpha + s / self.R_max
            x = self.ref[1][0] + self.R_max * sin(self.alpha)
            y = self.ref[2][1] - self.R_max * cos(self.alpha)
            if y <self.ref[2][1]:
                self.posy = y
                self.posx = x
            if y >= self.ref[2][1]:
                # Past the arc end: clamp onto the northbound straight.
                beta = asin(abs(y - self.ref[2][1]) / self.R_max)
                self.posy = self.ref[2][1] + self.R_max * beta
                self.posx = self.ref[2][0]
            self.delta = delta_max
            self.theta = self.alpha
            self.history.append((self.posx, self.posy, self.theta))
        if self.posx == self.ref[2][0]:
            # Phase 3: straight segment heading north to the goal.
            y = self.posy + s
            x = self.ref[2][0]
            if y < self.ref[3][1]:
                self.posy = y
                self.posx = x
            if y >= self.ref[3][1]:
                self.posy = self.ref[3][1]
                self.posx = self.ref[3][0]
                self.delta = 0
                self.theta = pi / 2
            self.history.append((self.posx, self.posy, self.theta))
def UL(self, s):
if self.posy > self.ref[1][1]and self.posx == self.ref[1][0]:
x=self.posx
y=self.posy - s
if y > self.ref[1][1]:
self.posy = y
self.posx = x
if y < self.ref[1][1]:
self.alpha = abs(y-self.ref[1][1])/self.R_max #这里的alpha下面可以用吗
self.posx = self.ref[2][0] + self.R_max * cos(self.alpha)
self.posy = self.ref[1][1] - self.R_max * sin(self.alpha)
self.delta = 0
self.theta = -pi / 2
self.history.append((self.posx, self.posy, self.theta))
if self.posx < self.ref[1][0] and self.posy > self.ref[2][1]:
self.alpha = self.alpha + s / self.R_max #这里怎么调用上面的alpha
x = self.ref[2][0] + self.R_max * cos(self.alpha)
y = self.ref[1][1] - self.R_max * sin(self.alpha)
if x > self.ref[2][0]:
self.posy = y
self.posx = x
if x < self.ref[2][0]:
beta = asin(abs( x - self.ref[2][0])/self.R_max)
self.posy = self.ref[2][1]
self.theta = -pi / 2 - self.alpha
self.posx = self.ref[2][0] - self.R_max*beta
self.delta = delta_max
self.theta = -pi / 2 - self.alpha
self.history.append((self.posx, self.posy, self.theta))
if self.posy == self.ref[2][1]:
x = self.posx - s
y = self.ref[2][1]
if x > self.ref[3][0]:
self.posy = y
self.posx = x
if x <= self.ref[3][0]:
self.posy = self.ref[3][1]
self.posx = self.ref[3][0]
self.delta = 0
self.theta = -pi
self.history.append((self.posx, self.posy, self.theta))
def UD(self, s):
if self.posy > self.ref[3][1] and self.posx == self.ref[1][0]:
x = self.posx
y = self.posy - s
if y > self.ref[3][1]:
self.posy = y
self.posx = x
if y <= self.ref[3][1]:
self.posy = self.ref[3][1]
self.posx = self.ref[3][0]
self.delta = 0
self.theta = -pi / 2
self.history.append((self.posx, self.posy, self.theta))
    def UR(self, s):
        """Advance the up->right turn by travelled distance ``s``.

        Sequential phases (plain ``if``s, so one call may fall through):
          1. straight segment heading south to the turn-in point ref[1];
          2. arc of radius ``R_max`` centred at (ref[2][0], ref[1][1]);
          3. straight segment heading east to the goal ref[3].
        Each executed phase appends ``(posx, posy, theta)`` to the history.
        """
        if self.posy > self.ref[1][1]and self.posx == self.ref[1][0]:
            # Phase 1: straight approach heading south.
            x=self.posx
            y=self.posy - s
            if y > self.ref[1][1]:
                self.posy = y
                self.posx = x
            if y < self.ref[1][1]:
                # Overshot the turn-in point: convert the overshoot into the
                # initial sweep angle and place the car on the arc.
                # (original author's note: alpha is stored on ``self`` so
                # phase 2 can continue from it.)
                self.alpha = abs(y-self.ref[1][1])/self.R_max
                self.posx = self.ref[2][0] - self.R_max * cos(self.alpha)
                self.posy = self.ref[1][1] - self.R_max * sin(self.alpha)
                self.delta = 0
                self.theta = - pi/2
            self.history.append((self.posx, self.posy, self.theta))
        if self.posx > self.ref[1][0] and self.posy > self.ref[2][1]:
            # Phase 2: sweep along the arc toward an eastbound heading.
            self.alpha = self.alpha + s / self.R_max
            x = self.ref[2][0] - self.R_max * cos(self.alpha)
            y = self.ref[1][1] - self.R_max * sin(self.alpha)
            if x < self.ref[2][0]:
                self.posy = y
                self.posx = x
            if x >= self.ref[2][0]:
                # Past the arc end: clamp onto the eastbound straight.
                beta = asin(abs( x - self.ref[2][0])/self.R_max)
                self.posy = self.ref[2][1]
                self.posx = self.ref[2][0] + self.R_max*beta
            self.delta = delta_max
            self.theta = -pi / 2 + self.alpha
            self.history.append((self.posx, self.posy, self.theta))
        if self.posy == self.ref[2][1]:
            # Phase 3: straight segment heading east to the goal.
            x = self.posx + s
            y = self.ref[2][1]
            if x < self.ref[3][0]:
                self.posy = y
                self.posx = x
            if x >= self.ref[3][0]:
                self.posy = self.ref[3][1]
                self.posx = self.ref[3][0]
                self.delta = 0
                self.theta = 0
            self.history.append((self.posx, self.posy, self.theta))
| 41.453595
| 126
| 0.477106
| 4,359
| 31,712
| 3.437486
| 0.033723
| 0.110718
| 0.05606
| 0.053057
| 0.897491
| 0.86272
| 0.857381
| 0.848772
| 0.847571
| 0.8433
| 0
| 0.043845
| 0.364941
| 31,712
| 764
| 127
| 41.507853
| 0.700184
| 0.024502
| 0
| 0.768875
| 0
| 0
| 0.002331
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029276
| false
| 0
| 0.004623
| 0
| 0.044684
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
65d8050e67ba8a741a81a899efba7984245bd270
| 231,099
|
py
|
Python
|
dask-fargate/.env/lib/python3.6/site-packages/aws_cdk/aws_stepfunctions/__init__.py
|
chriscoombs/amazon-sagemaker-cdk-examples
|
ba848218dab59abb03f68dc92bcad7929841fcc9
|
[
"Apache-2.0"
] | 41
|
2019-08-22T13:03:42.000Z
|
2022-02-24T05:07:32.000Z
|
dask-fargate/.env/lib/python3.6/site-packages/aws_cdk/aws_stepfunctions/__init__.py
|
chriscoombs/amazon-sagemaker-cdk-examples
|
ba848218dab59abb03f68dc92bcad7929841fcc9
|
[
"Apache-2.0"
] | 1
|
2020-06-17T17:44:28.000Z
|
2021-02-12T22:40:01.000Z
|
dask-fargate/.env/lib/python3.6/site-packages/aws_cdk/aws_stepfunctions/__init__.py
|
chriscoombs/amazon-sagemaker-cdk-examples
|
ba848218dab59abb03f68dc92bcad7929841fcc9
|
[
"Apache-2.0"
] | 31
|
2019-08-23T17:33:41.000Z
|
2022-03-28T09:20:07.000Z
|
"""
## AWS Step Functions Construct Library
<!--BEGIN STABILITY BANNER-->---

> **This is a *developer preview* (public beta) module. Releases might lack important features and might have
> future breaking changes.**
>
> This API is still under active development and subject to non-backward
> compatible changes or removal in any future version. Use of the API is not recommended in production
> environments. Experimental APIs are not subject to the Semantic Versioning model.
---
<!--END STABILITY BANNER-->
The `@aws-cdk/aws-stepfunctions` package contains constructs for building
serverless workflows using objects. Use this in conjunction with the
`@aws-cdk/aws-stepfunctions-tasks` package, which contains classes used
to call other AWS services.
Defining a workflow looks like this (for the [Step Functions Job Poller
example](https://docs.aws.amazon.com/step-functions/latest/dg/job-status-poller-sample.html)):
### TypeScript example
```python
# Example automatically generated. See https://github.com/aws/jsii/issues/826
import aws_cdk.aws_stepfunctions as sfn
import aws_cdk.aws_stepfunctions_tasks as tasks
submit_lambda = lambda.Function(self, "SubmitLambda", ...)
get_status_lambda = lambda.Function(self, "CheckLambda", ...)
submit_job = sfn.Task(self, "Submit Job",
task=tasks.InvokeFunction(submit_lambda),
# Put Lambda's result here in the execution's state object
result_path="$.guid"
)
wait_x = sfn.Wait(self, "Wait X Seconds",
duration=sfn.WaitDuration.seconds_path("$.wait_time")
)
get_status = sfn.Task(self, "Get Job Status",
task=tasks.InvokeFunction(get_status_lambda),
# Pass just the field named "guid" into the Lambda, put the
# Lambda's result in a field called "status"
input_path="$.guid",
result_path="$.status"
)
job_failed = sfn.Fail(self, "Job Failed",
cause="AWS Batch Job Failed",
error="DescribeJob returned FAILED"
)
final_status = sfn.Task(self, "Get Final Job Status",
task=tasks.InvokeFunction(get_status_lambda),
# Use "guid" field as input, output of the Lambda becomes the
# entire state machine output.
input_path="$.guid"
)
definition = submit_job.next(wait_x).next(get_status).next(sfn.Choice(self, "Job Complete?").when(sfn.Condition.string_equals("$.status", "FAILED"), job_failed).when(sfn.Condition.string_equals("$.status", "SUCCEEDED"), final_status).otherwise(wait_x))
sfn.StateMachine(self, "StateMachine",
definition=definition,
timeout=Duration.minutes(5)
)
```
## State Machine
A `stepfunctions.StateMachine` is a resource that takes a state machine
definition. The definition is specified by its start state, and encompasses
all states reachable from the start state:
```python
# Example automatically generated. See https://github.com/aws/jsii/issues/826
start_state = stepfunctions.Pass(self, "StartState")
stepfunctions.StateMachine(self, "StateMachine",
definition=start_state
)
```
State machines execute using an IAM Role, which will automatically have all
permissions added that are required to make all state machine tasks execute
properly (for example, permissions to invoke any Lambda functions you add to
your workflow). A role will be created by default, but you can supply an
existing one as well.
## Amazon States Language
This library comes with a set of classes that model the [Amazon States
Language](https://states-language.net/spec.html). The following State classes
are supported:
* `Task`
* `Pass`
* `Wait`
* `Choice`
* `Parallel`
* `Succeed`
* `Fail`
An arbitrary JSON object (specified at execution start) is passed from state to
state and transformed during the execution of the workflow. For more
information, see the States Language spec.
### Task
A `Task` represents some work that needs to be done. The exact work to be
done is determine by a class that implements `IStepFunctionsTask`, a collection
of which can be found in the `@aws-cdk/aws-stepfunctions-tasks` package. A
couple of the tasks available are:
* `tasks.InvokeActivity` -- start an Activity (Activities represent a work
queue that you poll on a compute fleet you manage yourself)
* `tasks.InvokeFunction` -- invoke a Lambda function with function ARN
* `tasks.RunLambdaTask` -- call Lambda as integrated service with magic ARN
* `tasks.PublishToTopic` -- publish a message to an SNS topic
* `tasks.SendToQueue` -- send a message to an SQS queue
* `tasks.RunEcsFargateTask`/`ecs.RunEcsEc2Task` -- run a container task,
depending on the type of capacity.
* `tasks.SagemakerTrainTask` -- run a SageMaker training job
* `tasks.SagemakerTransformTask` -- run a SageMaker transform job
* `tasks.StartExecution` -- call StartExecution to a state machine of Step Functions
* `tasks.EvaluateExpression` -- evaluate an expression referencing state paths
Except `tasks.InvokeActivity` and `tasks.InvokeFunction`, the [service integration
pattern](https://docs.aws.amazon.com/step-functions/latest/dg/connect-to-resource.html)
(`integrationPattern`) are supposed to be given as parameter when customers want
to call integrated services within a Task state. The default value is `FIRE_AND_FORGET`.
#### Task parameters from the state json
Many tasks take parameters. The values for those can either be supplied
directly in the workflow definition (by specifying their values), or at
runtime by passing a value obtained from the static functions on `Data`,
such as `Data.stringAt()`.
If so, the value is taken from the indicated location in the state JSON,
similar to (for example) `inputPath`.
#### Lambda example - InvokeFunction
```python
# Example automatically generated. See https://github.com/aws/jsii/issues/826
task = sfn.Task(self, "Invoke1",
task=tasks.InvokeFunction(my_lambda),
input_path="$.input",
timeout=Duration.minutes(5)
)
# Add a retry policy
task.add_retry(
interval=Duration.seconds(5),
max_attempts=10
)
# Add an error handler
task.add_catch(error_handler_state)
# Set the next state
task.next(next_state)
```
#### Lambda example - RunLambdaTask
```python
# Example automatically generated. See https://github.com/aws/jsii/issues/826
task = sfn.Task(stack, "Invoke2",
task=tasks.RunLambdaTask(my_lambda,
integration_pattern=sfn.ServiceIntegrationPattern.WAIT_FOR_TASK_TOKEN,
payload={
"token": sfn.Context.task_token
}
)
)
```
#### SNS example
```python
# Example automatically generated. See https://github.com/aws/jsii/issues/826
import aws_cdk.aws_sns as sns
# ...
topic = sns.Topic(self, "Topic")
# Use a field from the execution data as message.
task1 = sfn.Task(self, "Publish1",
task=tasks.PublishToTopic(topic,
integration_pattern=sfn.ServiceIntegrationPattern.FIRE_AND_FORGET,
message=TaskInput.from_data_at("$.state.message")
)
)
# Combine a field from the execution data with
# a literal object.
task2 = sfn.Task(self, "Publish2",
task=tasks.PublishToTopic(topic,
message=TaskInput.from_object(
field1="somedata",
field2=Data.string_at("$.field2")
)
)
)
```
#### SQS example
```python
# Example automatically generated. See https://github.com/aws/jsii/issues/826
import aws_cdk.aws_sqs as sqs
# ...
queue = sqs.Queue(self, "Queue")
# Use a field from the execution data as message.
task1 = sfn.Task(self, "Send1",
task=tasks.SendToQueue(queue,
message_body=TaskInput.from_data_at("$.message"),
# Only for FIFO queues
message_group_id="1234"
)
)
# Combine a field from the execution data with
# a literal object.
task2 = sfn.Task(self, "Send2",
task=tasks.SendToQueue(queue,
message_body=TaskInput.from_object(
field1="somedata",
field2=Data.string_at("$.field2")
),
# Only for FIFO queues
message_group_id="1234"
)
)
```
#### ECS example
```python
# Example automatically generated. See https://github.com/aws/jsii/issues/826
import aws_cdk.aws_ecs as ecs
# See examples in ECS library for initialization of 'cluster' and 'taskDefinition'
fargate_task = ecs.RunEcsFargateTask(
cluster=cluster,
task_definition=task_definition,
container_overrides=[{
"container_name": "TheContainer",
"environment": [{
"name": "CONTAINER_INPUT",
"value": Data.string_at("$.valueFromStateData")
}
]
}
]
)
fargate_task.connections.allow_to_default_port(rds_cluster, "Read the database")
task = sfn.Task(self, "CallFargate",
task=fargate_task
)
```
#### SageMaker Transform example
```python
# Example automatically generated. See https://github.com/aws/jsii/issues/826
transform_job = tasks.SagemakerTransformTask(transform_job_name, "MyTransformJob", model_name, "MyModelName", role, transform_input, {
"transform_data_source": {
"s3_data_source": {
"s3_uri": "s3://inputbucket/train",
"s3_data_type": S3DataType.S3Prefix
}
}
}, transform_output, {
"s3_output_path": "s3://outputbucket/TransformJobOutputPath"
}, transform_resources,
instance_count=1,
instance_type=ec2.InstanceType.of(ec2.InstanceClass.M4, ec2.InstanceSize.XLarge)
)
task = sfn.Task(self, "Batch Inference",
task=transform_job
)
```
#### Step Functions example
```python
# Example automatically generated. See https://github.com/aws/jsii/issues/826
# Define a state machine with one Pass state
child = sfn.StateMachine(stack, "ChildStateMachine",
definition=sfn.Chain.start(sfn.Pass(stack, "PassState"))
)
# Include the state machine in a Task state with callback pattern
task = sfn.Task(stack, "ChildTask",
task=tasks.ExecuteStateMachine(child,
integration_pattern=sfn.ServiceIntegrationPattern.WAIT_FOR_TASK_TOKEN,
input={
"token": sfn.Context.task_token,
"foo": "bar"
},
name="MyExecutionName"
)
)
# Define a second state machine with the Task state above
sfn.StateMachine(stack, "ParentStateMachine",
definition=task
)
```
#### Eval example
Use the `EvaluateExpression` to perform simple operations referencing state paths. The
`expression` referenced in the task will be evaluated in a Lambda function
(`eval()`). This allows you to not have to write Lambda code for simple operations.
Example: convert a wait time from milliseconds to seconds, concat this in a message and wait:
```python
# Example automatically generated. See https://github.com/aws/jsii/issues/826
convert_to_seconds = sfn.Task(self, "Convert to seconds",
task=tasks.EvaluateExpression(expression="$.waitMilliseconds / 1000"),
result_path="$.waitSeconds"
)
create_message = sfn.Task(self, "Create message",
# Note: this is a string inside a string.
task=tasks.EvaluateExpression(expression="`Now waiting ${$.waitSeconds} seconds...`"),
result_path="$.message"
)
publish_message = sfn.Task(self, "Publish message",
task=tasks.PublishToTopic(topic,
message=sfn.TaskInput.from_data_at("$.message")
),
result_path="$.sns"
)
wait = sfn.Wait(self, "Wait",
time=sfn.WaitTime.seconds_path("$.waitSeconds")
)
sfn.StateMachine(self, "StateMachine",
definition=convert_to_seconds.next(create_message).next(publish_message).next(wait)
)
```
The `EvaluateExpression` supports a `runtime` prop to specify the Lambda
runtime to use to evaluate the expression. Currently, the only runtime
supported is `lambda.Runtime.NODEJS_10_X`.
### Pass
A `Pass` state does no work, but it can optionally transform the execution's
JSON state.
```python
# Example automatically generated. See https://github.com/aws/jsii/issues/826
# Makes the current JSON state { ..., "subObject": { "hello": "world" } }
pass = stepfunctions.Pass(self, "Add Hello World",
result={"hello": "world"},
result_path="$.subObject"
)
# Set the next state
pass.next(next_state)
```
### Wait
A `Wait` state waits for a given number of seconds, or until the current time
hits a particular time. The time to wait may be taken from the execution's JSON
state.
```python
# Example automatically generated. See https://github.com/aws/jsii/issues/826
# Wait until it's the time mentioned in the state object's "triggerTime"
# field.
wait = stepfunctions.Wait(self, "Wait For Trigger Time",
time=stepfunctions.WaitTime.timestamp_path("$.triggerTime")
)
# Set the next state
wait.next(start_the_work)
```
### Choice
A `Choice` state can take a different path through the workflow based on the
values in the execution's JSON state:
```python
# Example automatically generated. See https://github.com/aws/jsii/issues/826
choice = stepfunctions.Choice(self, "Did it work?")
# Add conditions with .when()
choice.when(stepfunctions.Condition.string_equals("$.status", "SUCCESS"), success_state)
choice.when(stepfunctions.Condition.number_greater_than("$.attempts", 5), failure_state)
# Use .otherwise() to indicate what should be done if none of the conditions match
choice.otherwise(try_again_state)
```
If you want to temporarily branch your workflow based on a condition, but have
all branches come together and continue as one (similar to how an `if ... then ... else` works in a programming language), use the `.afterwards()` method:
```python
# Example automatically generated. See https://github.com/aws/jsii/issues/826
choice = stepfunctions.Choice(self, "What color is it?")
choice.when(stepfunctions.Condition.string_equals("$.color", "BLUE"), handle_blue_item)
choice.when(stepfunctions.Condition.string_equals("$.color", "RED"), handle_red_item)
choice.otherwise(handle_other_item_color)
# Use .afterwards() to join all possible paths back together and continue
choice.afterwards().next(ship_the_item)
```
If your `Choice` doesn't have an `otherwise()` and none of the conditions match
the JSON state, a `NoChoiceMatched` error will be thrown. Wrap the state machine
in a `Parallel` state if you want to catch and recover from this.
### Parallel
A `Parallel` state executes one or more subworkflows in parallel. It can also
be used to catch and recover from errors in subworkflows.
```python
# Example automatically generated. See https://github.com/aws/jsii/issues/826
parallel = stepfunctions.Parallel(self, "Do the work in parallel")
# Add branches to be executed in parallel
parallel.branch(ship_item)
parallel.branch(send_invoice)
parallel.branch(restock)
# Retry the whole workflow if something goes wrong
parallel.add_retry(max_attempts=1)
# How to recover from errors
parallel.add_catch(send_failure_notification)
# What to do in case everything succeeded
parallel.next(close_order)
```
### Succeed
Reaching a `Succeed` state terminates the state machine execution with a
successful status.
```python
# Example automatically generated. See https://github.com/aws/jsii/issues/826
success = stepfunctions.Succeed(self, "We did it!")
```
### Fail
Reaching a `Fail` state terminates the state machine execution with a
failure status. The fail state should report the reason for the failure.
Failures can be caught by encompassing `Parallel` states.
```python
# Example automatically generated. See https://github.com/aws/jsii/issues/826
success = stepfunctions.Fail(self, "Fail",
error="WorkflowFailure",
cause="Something went wrong"
)
```
## Task Chaining
To make defining work flows as convenient (and readable in a top-to-bottom way)
as writing regular programs, it is possible to chain most methods invocations.
In particular, the `.next()` method can be repeated. The result of a series of
`.next()` calls is called a **Chain**, and can be used when defining the jump
targets of `Choice.on` or `Parallel.branch`:
```python
# Example automatically generated. See https://github.com/aws/jsii/issues/826
definition = step1.next(step2).next(choice.when(condition1, step3.next(step4).next(step5)).otherwise(step6).afterwards()).next(parallel.branch(step7.next(step8)).branch(step9.next(step10))).next(finish)
stepfunctions.StateMachine(self, "StateMachine",
definition=definition
)
```
If you don't like the visual look of starting a chain directly off the first
step, you can use `Chain.start`:
```python
# Example automatically generated. See https://github.com/aws/jsii/issues/826
definition = stepfunctions.Chain.start(step1).next(step2).next(step3)
```
## State Machine Fragments
It is possible to define reusable (or abstracted) mini-state machines by
defining a construct that implements `IChainable`, which requires you to define
two fields:
* `startState: State`, representing the entry point into this state machine.
* `endStates: INextable[]`, representing the (one or more) states that outgoing
transitions will be added to if you chain onto the fragment.
Since states will be named after their construct IDs, you may need to prefix the
IDs of states if you plan to instantiate the same state machine fragment
multiples times (otherwise all states in every instantiation would have the same
name).
The class `StateMachineFragment` contains some helper functions (like
`prefixStates()`) to make it easier for you to do this. If you define your state
machine as a subclass of this, it will be convenient to use:
```python
# Example automatically generated. See https://github.com/aws/jsii/issues/826
class MyJob(stepfunctions.StateMachineFragment):
def __init__(self, parent, id, *, jobFlavor):
super().__init__(parent, id)
first = stepfunctions.Task(self, "First", ...)
# ...
last = stepfunctions.Task(self, "Last", ...)
self.start_state = first
self.end_states = [last]
# Do 3 different variants of MyJob in parallel
stepfunctions.Parallel(self, "All jobs").branch(MyJob(self, "Quick", job_flavor="quick").prefix_states()).branch(MyJob(self, "Medium", job_flavor="medium").prefix_states()).branch(MyJob(self, "Slow", job_flavor="slow").prefix_states())
```
## Activity
**Activities** represent work that is done on some non-Lambda worker pool. The
Step Functions workflow will submit work to this Activity, and a worker pool
that you run yourself, probably on EC2, will pull jobs from the Activity and
submit the results of individual jobs back.
You need the ARN to do so, so if you use Activities be sure to pass the Activity
ARN into your worker pool:
```python
# Example automatically generated. See https://github.com/aws/jsii/issues/826
activity = stepfunctions.Activity(self, "Activity")
# Read this CloudFormation Output from your application and use it to poll for work on
# the activity.
cdk.CfnOutput(self, "ActivityArn", value=activity.activity_arn)
```
## Metrics
`Task` objects expose various metrics on the execution of that particular task. For example,
to create an alarm on a particular task failing:
```python
# Example automatically generated. See https://github.com/aws/jsii/issues/826
cloudwatch.Alarm(self, "TaskAlarm",
metric=task.metric_failed(),
threshold=1,
evaluation_periods=1
)
```
There are also metrics on the complete state machine:
```python
# Example automatically generated. See https://github.com/aws/jsii/issues/826
cloudwatch.Alarm(self, "StateMachineAlarm",
metric=state_machine.metric_failed(),
threshold=1,
evaluation_periods=1
)
```
And there are metrics on the capacity of all state machines in your account:
```python
# Example automatically generated. See https://github.com/aws/jsii/issues/826
cloudwatch.Alarm(self, "ThrottledAlarm",
metric=StateTransitionMetrics.metric_throttled_events(),
threshold=10,
evaluation_periods=2
)
```
## Future work
Contributions welcome:
* [ ] A single `LambdaTask` class that is both a `Lambda` and a `Task` in one
might make for a nice API.
* [ ] Expression parser for Conditions.
* [ ] Simulate state machines in unit tests.
"""
import abc
import datetime
import enum
import typing
import jsii
import jsii.compat
import publication
from jsii.python import classproperty
import aws_cdk.aws_cloudwatch
import aws_cdk.aws_events
import aws_cdk.aws_iam
import aws_cdk.core
__jsii_assembly__ = jsii.JSIIAssembly.load("@aws-cdk/aws-stepfunctions", "1.18.0", __name__, "aws-stepfunctions@1.18.0.jsii.tgz")
@jsii.data_type(jsii_type="@aws-cdk/aws-stepfunctions.ActivityProps", jsii_struct_bases=[], name_mapping={'activity_name': 'activityName'})
class ActivityProps():
    def __init__(self, *, activity_name: typing.Optional[str]=None):
        """Construction properties for an Activity.

        :param activity_name: The name for this activity. Default: If not supplied, a name is generated

        stability
        :stability: experimental
        """
        # Record only the options the caller actually supplied.
        supplied = {"activity_name": activity_name}
        self._values = {key: val for key, val in supplied.items() if val is not None}

    @property
    def activity_name(self) -> typing.Optional[str]:
        """The name for this activity.

        default
        :default: If not supplied, a name is generated

        stability
        :stability: experimental
        """
        return self._values.get('activity_name')

    def __eq__(self, rhs) -> bool:
        return isinstance(rhs, type(self)) and self._values == rhs._values

    def __ne__(self, rhs) -> bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        rendered = ', '.join(k + '=' + repr(v) for k, v in self._values.items())
        return 'ActivityProps(%s)' % rendered
@jsii.data_type(jsii_type="@aws-cdk/aws-stepfunctions.AfterwardsOptions", jsii_struct_bases=[], name_mapping={'include_error_handlers': 'includeErrorHandlers', 'include_otherwise': 'includeOtherwise'})
class AfterwardsOptions():
    def __init__(self, *, include_error_handlers: typing.Optional[bool]=None, include_otherwise: typing.Optional[bool]=None):
        """Options for selecting the choice paths.

        :param include_error_handlers: Whether to include error handling states. If this is true, all states which are error handlers (added through 'onError') and states reachable via error handlers will be included as well. Default: false
        :param include_otherwise: Whether to include the default/otherwise transition for the current Choice state. If this is true and the current Choice does not have a default outgoing transition, one will be added when .next() is called on the chain. Default: false

        stability
        :stability: experimental
        """
        # Record only the options the caller actually supplied.
        supplied = {
            "include_error_handlers": include_error_handlers,
            "include_otherwise": include_otherwise,
        }
        self._values = {key: val for key, val in supplied.items() if val is not None}

    @property
    def include_error_handlers(self) -> typing.Optional[bool]:
        """Whether to include error handling states.

        If this is true, all states which are error handlers (added through 'onError')
        and states reachable via error handlers will be included as well.

        default
        :default: false

        stability
        :stability: experimental
        """
        return self._values.get('include_error_handlers')

    @property
    def include_otherwise(self) -> typing.Optional[bool]:
        """Whether to include the default/otherwise transition for the current Choice state.

        If this is true and the current Choice does not have a default outgoing
        transition, one will be added when .next() is called on the chain.

        default
        :default: false

        stability
        :stability: experimental
        """
        return self._values.get('include_otherwise')

    def __eq__(self, rhs) -> bool:
        return isinstance(rhs, type(self)) and self._values == rhs._values

    def __ne__(self, rhs) -> bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        rendered = ', '.join(k + '=' + repr(v) for k, v in self._values.items())
        return 'AfterwardsOptions(%s)' % rendered
@jsii.data_type(jsii_type="@aws-cdk/aws-stepfunctions.CatchProps", jsii_struct_bases=[], name_mapping={'errors': 'errors', 'result_path': 'resultPath'})
class CatchProps():
    def __init__(self, *, errors: typing.Optional[typing.List[str]]=None, result_path: typing.Optional[str]=None):
        """Error handler details.

        :param errors: Errors to recover from by going to the given state. A list of error strings to retry, which can be either predefined errors (for example Errors.NoChoiceMatched) or a self-defined error. Default: All errors
        :param result_path: JSONPath expression to indicate where to inject the error data. May also be the special value DISCARD, which will cause the error data to be discarded. Default: $

        stability
        :stability: experimental
        """
        # Record only the options the caller actually supplied.
        supplied = {"errors": errors, "result_path": result_path}
        self._values = {key: val for key, val in supplied.items() if val is not None}

    @property
    def errors(self) -> typing.Optional[typing.List[str]]:
        """Errors to recover from by going to the given state.

        A list of error strings to retry, which can be either predefined errors
        (for example Errors.NoChoiceMatched) or a self-defined error.

        default
        :default: All errors

        stability
        :stability: experimental
        """
        return self._values.get('errors')

    @property
    def result_path(self) -> typing.Optional[str]:
        """JSONPath expression to indicate where to inject the error data.

        May also be the special value DISCARD, which will cause the error
        data to be discarded.

        default
        :default: $

        stability
        :stability: experimental
        """
        return self._values.get('result_path')

    def __eq__(self, rhs) -> bool:
        return isinstance(rhs, type(self)) and self._values == rhs._values

    def __ne__(self, rhs) -> bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        rendered = ', '.join(k + '=' + repr(v) for k, v in self._values.items())
        return 'CatchProps(%s)' % rendered
@jsii.implements(aws_cdk.core.IInspectable)
class CfnActivity(aws_cdk.core.CfnResource, metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-stepfunctions.CfnActivity"):
    """A CloudFormation ``AWS::StepFunctions::Activity``.

    see
    :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-stepfunctions-activity.html
    cloudformationResource:
    :cloudformationResource:: AWS::StepFunctions::Activity
    """

    def __init__(self, scope: aws_cdk.core.Construct, id: str, *, name: str, tags: typing.Optional[typing.List["TagsEntryProperty"]]=None) -> None:
        """Create a new ``AWS::StepFunctions::Activity``.

        :param scope: - scope in which this resource is defined.
        :param id: - scoped id of the resource.
        :param name: ``AWS::StepFunctions::Activity.Name``.
        :param tags: ``AWS::StepFunctions::Activity.Tags``.
        """
        # Bundle the keyword arguments into a props struct and delegate
        # construction to the jsii kernel (the real implementation is in JS).
        props = CfnActivityProps(name=name, tags=tags)

        jsii.create(CfnActivity, self, [scope, id, props])

    @jsii.member(jsii_name="inspect")
    def inspect(self, inspector: aws_cdk.core.TreeInspector) -> None:
        """Examines the CloudFormation resource and discloses attributes.

        :param inspector: - tree inspector to collect and process attributes.

        stability
        :stability: experimental
        """
        return jsii.invoke(self, "inspect", [inspector])

    @jsii.member(jsii_name="renderProperties")
    def _render_properties(self, props: typing.Mapping[str,typing.Any]) -> typing.Mapping[str,typing.Any]:
        """Render the given property map for CloudFormation synthesis.

        :param props: -
        """
        return jsii.invoke(self, "renderProperties", [props])

    @classproperty
    @jsii.member(jsii_name="CFN_RESOURCE_TYPE_NAME")
    def CFN_RESOURCE_TYPE_NAME(cls) -> str:
        """The CloudFormation resource type name for this resource class."""
        return jsii.sget(cls, "CFN_RESOURCE_TYPE_NAME")

    @property
    @jsii.member(jsii_name="attrName")
    def attr_name(self) -> str:
        """
        cloudformationAttribute:
        :cloudformationAttribute:: Name
        """
        return jsii.get(self, "attrName")

    @property
    @jsii.member(jsii_name="cfnProperties")
    def _cfn_properties(self) -> typing.Mapping[str,typing.Any]:
        # Raw property map; consumed by _render_properties during synthesis.
        return jsii.get(self, "cfnProperties")

    @property
    @jsii.member(jsii_name="tags")
    def tags(self) -> aws_cdk.core.TagManager:
        """``AWS::StepFunctions::Activity.Tags``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-stepfunctions-activity.html#cfn-stepfunctions-activity-tags
        """
        return jsii.get(self, "tags")

    @property
    @jsii.member(jsii_name="name")
    def name(self) -> str:
        """``AWS::StepFunctions::Activity.Name``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-stepfunctions-activity.html#cfn-stepfunctions-activity-name
        """
        return jsii.get(self, "name")

    @name.setter
    def name(self, value: str):
        return jsii.set(self, "name", value)

    @jsii.data_type(jsii_type="@aws-cdk/aws-stepfunctions.CfnActivity.TagsEntryProperty", jsii_struct_bases=[], name_mapping={'key': 'key', 'value': 'value'})
    class TagsEntryProperty():
        def __init__(self, *, key: str, value: str):
            """A single key/value tag entry for the activity.

            :param key: ``CfnActivity.TagsEntryProperty.Key``.
            :param value: ``CfnActivity.TagsEntryProperty.Value``.

            see
            :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-stepfunctions-activity-tagsentry.html
            """
            # Both fields are required, so they are stored unconditionally.
            self._values = {
                'key': key,
                'value': value,
            }

        @property
        def key(self) -> str:
            """``CfnActivity.TagsEntryProperty.Key``.

            see
            :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-stepfunctions-activity-tagsentry.html#cfn-stepfunctions-activity-tagsentry-key
            """
            return self._values.get('key')

        @property
        def value(self) -> str:
            """``CfnActivity.TagsEntryProperty.Value``.

            see
            :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-stepfunctions-activity-tagsentry.html#cfn-stepfunctions-activity-tagsentry-value
            """
            return self._values.get('value')

        def __eq__(self, rhs) -> bool:
            return isinstance(rhs, self.__class__) and rhs._values == self._values

        def __ne__(self, rhs) -> bool:
            return not (rhs == self)

        def __repr__(self) -> str:
            return 'TagsEntryProperty(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
@jsii.data_type(jsii_type="@aws-cdk/aws-stepfunctions.CfnActivityProps", jsii_struct_bases=[], name_mapping={'name': 'name', 'tags': 'tags'})
class CfnActivityProps():
    def __init__(self, *, name: str, tags: typing.Optional[typing.List["CfnActivity.TagsEntryProperty"]]=None):
        """Properties for defining a ``AWS::StepFunctions::Activity``.

        :param name: ``AWS::StepFunctions::Activity.Name``.
        :param tags: ``AWS::StepFunctions::Activity.Tags``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-stepfunctions-activity.html
        """
        # 'name' is required; optional keys are stored only when supplied.
        values = {'name': name}
        if tags is not None:
            values["tags"] = tags
        self._values = values

    @property
    def name(self) -> str:
        """``AWS::StepFunctions::Activity.Name``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-stepfunctions-activity.html#cfn-stepfunctions-activity-name
        """
        return self._values.get('name')

    @property
    def tags(self) -> typing.Optional[typing.List["CfnActivity.TagsEntryProperty"]]:
        """``AWS::StepFunctions::Activity.Tags``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-stepfunctions-activity.html#cfn-stepfunctions-activity-tags
        """
        return self._values.get('tags')

    def __eq__(self, rhs) -> bool:
        return isinstance(rhs, type(self)) and self._values == rhs._values

    def __ne__(self, rhs) -> bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        rendered = ', '.join(k + '=' + repr(v) for k, v in self._values.items())
        return 'CfnActivityProps(%s)' % rendered
@jsii.implements(aws_cdk.core.IInspectable)
class CfnStateMachine(aws_cdk.core.CfnResource, metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-stepfunctions.CfnStateMachine"):
    """A CloudFormation ``AWS::StepFunctions::StateMachine``.

    see
    :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-stepfunctions-statemachine.html
    cloudformationResource:
    :cloudformationResource:: AWS::StepFunctions::StateMachine
    """

    def __init__(self, scope: aws_cdk.core.Construct, id: str, *, definition_string: str, role_arn: str, state_machine_name: typing.Optional[str]=None, tags: typing.Optional[typing.List["TagsEntryProperty"]]=None) -> None:
        """Create a new ``AWS::StepFunctions::StateMachine``.

        :param scope: - scope in which this resource is defined.
        :param id: - scoped id of the resource.
        :param definition_string: ``AWS::StepFunctions::StateMachine.DefinitionString``.
        :param role_arn: ``AWS::StepFunctions::StateMachine.RoleArn``.
        :param state_machine_name: ``AWS::StepFunctions::StateMachine.StateMachineName``.
        :param tags: ``AWS::StepFunctions::StateMachine.Tags``.
        """
        # Bundle the keyword arguments into a props struct and delegate
        # construction to the jsii kernel (the real implementation is in JS).
        props = CfnStateMachineProps(definition_string=definition_string, role_arn=role_arn, state_machine_name=state_machine_name, tags=tags)

        jsii.create(CfnStateMachine, self, [scope, id, props])

    @jsii.member(jsii_name="inspect")
    def inspect(self, inspector: aws_cdk.core.TreeInspector) -> None:
        """Examines the CloudFormation resource and discloses attributes.

        :param inspector: - tree inspector to collect and process attributes.

        stability
        :stability: experimental
        """
        return jsii.invoke(self, "inspect", [inspector])

    @jsii.member(jsii_name="renderProperties")
    def _render_properties(self, props: typing.Mapping[str,typing.Any]) -> typing.Mapping[str,typing.Any]:
        """Render the given property map for CloudFormation synthesis.

        :param props: -
        """
        return jsii.invoke(self, "renderProperties", [props])

    @classproperty
    @jsii.member(jsii_name="CFN_RESOURCE_TYPE_NAME")
    def CFN_RESOURCE_TYPE_NAME(cls) -> str:
        """The CloudFormation resource type name for this resource class."""
        return jsii.sget(cls, "CFN_RESOURCE_TYPE_NAME")

    @property
    @jsii.member(jsii_name="attrName")
    def attr_name(self) -> str:
        """
        cloudformationAttribute:
        :cloudformationAttribute:: Name
        """
        return jsii.get(self, "attrName")

    @property
    @jsii.member(jsii_name="cfnProperties")
    def _cfn_properties(self) -> typing.Mapping[str,typing.Any]:
        # Raw property map; consumed by _render_properties during synthesis.
        return jsii.get(self, "cfnProperties")

    @property
    @jsii.member(jsii_name="tags")
    def tags(self) -> aws_cdk.core.TagManager:
        """``AWS::StepFunctions::StateMachine.Tags``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-stepfunctions-statemachine.html#cfn-stepfunctions-statemachine-tags
        """
        return jsii.get(self, "tags")

    @property
    @jsii.member(jsii_name="definitionString")
    def definition_string(self) -> str:
        """``AWS::StepFunctions::StateMachine.DefinitionString``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-stepfunctions-statemachine.html#cfn-stepfunctions-statemachine-definitionstring
        """
        return jsii.get(self, "definitionString")

    @definition_string.setter
    def definition_string(self, value: str):
        return jsii.set(self, "definitionString", value)

    @property
    @jsii.member(jsii_name="roleArn")
    def role_arn(self) -> str:
        """``AWS::StepFunctions::StateMachine.RoleArn``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-stepfunctions-statemachine.html#cfn-stepfunctions-statemachine-rolearn
        """
        return jsii.get(self, "roleArn")

    @role_arn.setter
    def role_arn(self, value: str):
        return jsii.set(self, "roleArn", value)

    @property
    @jsii.member(jsii_name="stateMachineName")
    def state_machine_name(self) -> typing.Optional[str]:
        """``AWS::StepFunctions::StateMachine.StateMachineName``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-stepfunctions-statemachine.html#cfn-stepfunctions-statemachine-statemachinename
        """
        return jsii.get(self, "stateMachineName")

    @state_machine_name.setter
    def state_machine_name(self, value: typing.Optional[str]):
        return jsii.set(self, "stateMachineName", value)

    @jsii.data_type(jsii_type="@aws-cdk/aws-stepfunctions.CfnStateMachine.TagsEntryProperty", jsii_struct_bases=[], name_mapping={'key': 'key', 'value': 'value'})
    class TagsEntryProperty():
        def __init__(self, *, key: str, value: str):
            """A single key/value tag entry for the state machine.

            :param key: ``CfnStateMachine.TagsEntryProperty.Key``.
            :param value: ``CfnStateMachine.TagsEntryProperty.Value``.

            see
            :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-stepfunctions-statemachine-tagsentry.html
            """
            # Both fields are required, so they are stored unconditionally.
            self._values = {
                'key': key,
                'value': value,
            }

        @property
        def key(self) -> str:
            """``CfnStateMachine.TagsEntryProperty.Key``.

            see
            :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-stepfunctions-statemachine-tagsentry.html#cfn-stepfunctions-statemachine-tagsentry-key
            """
            return self._values.get('key')

        @property
        def value(self) -> str:
            """``CfnStateMachine.TagsEntryProperty.Value``.

            see
            :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-stepfunctions-statemachine-tagsentry.html#cfn-stepfunctions-statemachine-tagsentry-value
            """
            return self._values.get('value')

        def __eq__(self, rhs) -> bool:
            return isinstance(rhs, self.__class__) and rhs._values == self._values

        def __ne__(self, rhs) -> bool:
            return not (rhs == self)

        def __repr__(self) -> str:
            return 'TagsEntryProperty(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
@jsii.data_type(jsii_type="@aws-cdk/aws-stepfunctions.CfnStateMachineProps", jsii_struct_bases=[], name_mapping={'definition_string': 'definitionString', 'role_arn': 'roleArn', 'state_machine_name': 'stateMachineName', 'tags': 'tags'})
class CfnStateMachineProps():
    def __init__(self, *, definition_string: str, role_arn: str, state_machine_name: typing.Optional[str]=None, tags: typing.Optional[typing.List["CfnStateMachine.TagsEntryProperty"]]=None):
        """Properties for defining a ``AWS::StepFunctions::StateMachine``.

        :param definition_string: ``AWS::StepFunctions::StateMachine.DefinitionString``.
        :param role_arn: ``AWS::StepFunctions::StateMachine.RoleArn``.
        :param state_machine_name: ``AWS::StepFunctions::StateMachine.StateMachineName``.
        :param tags: ``AWS::StepFunctions::StateMachine.Tags``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-stepfunctions-statemachine.html
        """
        # Required keys are stored unconditionally; optional keys only when supplied.
        values = {
            'definition_string': definition_string,
            'role_arn': role_arn,
        }
        if state_machine_name is not None:
            values["state_machine_name"] = state_machine_name
        if tags is not None:
            values["tags"] = tags
        self._values = values

    @property
    def definition_string(self) -> str:
        """``AWS::StepFunctions::StateMachine.DefinitionString``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-stepfunctions-statemachine.html#cfn-stepfunctions-statemachine-definitionstring
        """
        return self._values.get('definition_string')

    @property
    def role_arn(self) -> str:
        """``AWS::StepFunctions::StateMachine.RoleArn``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-stepfunctions-statemachine.html#cfn-stepfunctions-statemachine-rolearn
        """
        return self._values.get('role_arn')

    @property
    def state_machine_name(self) -> typing.Optional[str]:
        """``AWS::StepFunctions::StateMachine.StateMachineName``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-stepfunctions-statemachine.html#cfn-stepfunctions-statemachine-statemachinename
        """
        return self._values.get('state_machine_name')

    @property
    def tags(self) -> typing.Optional[typing.List["CfnStateMachine.TagsEntryProperty"]]:
        """``AWS::StepFunctions::StateMachine.Tags``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-stepfunctions-statemachine.html#cfn-stepfunctions-statemachine-tags
        """
        return self._values.get('tags')

    def __eq__(self, rhs) -> bool:
        return isinstance(rhs, type(self)) and self._values == rhs._values

    def __ne__(self, rhs) -> bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        rendered = ', '.join(k + '=' + repr(v) for k, v in self._values.items())
        return 'CfnStateMachineProps(%s)' % rendered
@jsii.data_type(jsii_type="@aws-cdk/aws-stepfunctions.ChoiceProps", jsii_struct_bases=[], name_mapping={'comment': 'comment', 'input_path': 'inputPath', 'output_path': 'outputPath'})
class ChoiceProps():
    def __init__(self, *, comment: typing.Optional[str]=None, input_path: typing.Optional[str]=None, output_path: typing.Optional[str]=None):
        """Properties for defining a Choice state.

        :param comment: An optional description for this state. Default: No comment
        :param input_path: JSONPath expression to select part of the state to be the input to this state. May also be the special value DISCARD, which will cause the effective input to be the empty object {}. Default: $
        :param output_path: JSONPath expression to select part of the state to be the output to this state. May also be the special value DISCARD, which will cause the effective output to be the empty object {}. Default: $

        stability
        :stability: experimental
        """
        # Record only the options the caller actually supplied.
        supplied = {
            "comment": comment,
            "input_path": input_path,
            "output_path": output_path,
        }
        self._values = {key: val for key, val in supplied.items() if val is not None}

    @property
    def comment(self) -> typing.Optional[str]:
        """An optional description for this state.

        default
        :default: No comment

        stability
        :stability: experimental
        """
        return self._values.get('comment')

    @property
    def input_path(self) -> typing.Optional[str]:
        """JSONPath expression to select part of the state to be the input to this state.

        May also be the special value DISCARD, which will cause the effective
        input to be the empty object {}.

        default
        :default: $

        stability
        :stability: experimental
        """
        return self._values.get('input_path')

    @property
    def output_path(self) -> typing.Optional[str]:
        """JSONPath expression to select part of the state to be the output to this state.

        May also be the special value DISCARD, which will cause the effective
        output to be the empty object {}.

        default
        :default: $

        stability
        :stability: experimental
        """
        return self._values.get('output_path')

    def __eq__(self, rhs) -> bool:
        return isinstance(rhs, type(self)) and self._values == rhs._values

    def __ne__(self, rhs) -> bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        rendered = ', '.join(k + '=' + repr(v) for k, v in self._values.items())
        return 'ChoiceProps(%s)' % rendered
class Condition(metaclass=jsii.JSIIAbstractClass, jsii_type="@aws-cdk/aws-stepfunctions.Condition"):
    """A Condition for use in a Choice state branch.

    Instances are built with the static factory methods below; each factory
    delegates to the jsii kernel (the real implementation is in JS).

    stability
    :stability: experimental
    """
    @staticmethod
    def __jsii_proxy_class__():
        # Concrete class the jsii runtime instantiates for values of this abstract type.
        return _ConditionProxy

    def __init__(self) -> None:
        jsii.create(Condition, self, [])

    @jsii.member(jsii_name="and")
    @classmethod
    def and_(cls, *conditions: "Condition") -> "Condition":
        """Combine two or more conditions with a logical AND.

        :param conditions: -

        stability
        :stability: experimental
        """
        return jsii.sinvoke(cls, "and", [*conditions])

    @jsii.member(jsii_name="booleanEquals")
    @classmethod
    def boolean_equals(cls, variable: str, value: bool) -> "Condition":
        """Matches if a boolean field has the given value.

        :param variable: -
        :param value: -

        stability
        :stability: experimental
        """
        return jsii.sinvoke(cls, "booleanEquals", [variable, value])

    @jsii.member(jsii_name="not")
    @classmethod
    def not_(cls, condition: "Condition") -> "Condition":
        """Negate a condition.

        :param condition: -

        stability
        :stability: experimental
        """
        return jsii.sinvoke(cls, "not", [condition])

    @jsii.member(jsii_name="numberEquals")
    @classmethod
    def number_equals(cls, variable: str, value: jsii.Number) -> "Condition":
        """Matches if a numeric field has the given value.

        :param variable: -
        :param value: -

        stability
        :stability: experimental
        """
        return jsii.sinvoke(cls, "numberEquals", [variable, value])

    @jsii.member(jsii_name="numberGreaterThan")
    @classmethod
    def number_greater_than(cls, variable: str, value: jsii.Number) -> "Condition":
        """Matches if a numeric field is greater than the given value.

        :param variable: -
        :param value: -

        stability
        :stability: experimental
        """
        return jsii.sinvoke(cls, "numberGreaterThan", [variable, value])

    @jsii.member(jsii_name="numberGreaterThanEquals")
    @classmethod
    def number_greater_than_equals(cls, variable: str, value: jsii.Number) -> "Condition":
        """Matches if a numeric field is greater than or equal to the given value.

        :param variable: -
        :param value: -

        stability
        :stability: experimental
        """
        return jsii.sinvoke(cls, "numberGreaterThanEquals", [variable, value])

    @jsii.member(jsii_name="numberLessThan")
    @classmethod
    def number_less_than(cls, variable: str, value: jsii.Number) -> "Condition":
        """Matches if a numeric field is less than the given value.

        :param variable: -
        :param value: -

        stability
        :stability: experimental
        """
        return jsii.sinvoke(cls, "numberLessThan", [variable, value])

    @jsii.member(jsii_name="numberLessThanEquals")
    @classmethod
    def number_less_than_equals(cls, variable: str, value: jsii.Number) -> "Condition":
        """Matches if a numeric field is less than or equal to the given value.

        :param variable: -
        :param value: -

        stability
        :stability: experimental
        """
        return jsii.sinvoke(cls, "numberLessThanEquals", [variable, value])

    @jsii.member(jsii_name="or")
    @classmethod
    def or_(cls, *conditions: "Condition") -> "Condition":
        """Combine two or more conditions with a logical OR.

        :param conditions: -

        stability
        :stability: experimental
        """
        return jsii.sinvoke(cls, "or", [*conditions])

    @jsii.member(jsii_name="stringEquals")
    @classmethod
    def string_equals(cls, variable: str, value: str) -> "Condition":
        """Matches if a string field has the given value.

        :param variable: -
        :param value: -

        stability
        :stability: experimental
        """
        return jsii.sinvoke(cls, "stringEquals", [variable, value])

    @jsii.member(jsii_name="stringGreaterThan")
    @classmethod
    def string_greater_than(cls, variable: str, value: str) -> "Condition":
        """Matches if a string field sorts after a given value.

        :param variable: -
        :param value: -

        stability
        :stability: experimental
        """
        return jsii.sinvoke(cls, "stringGreaterThan", [variable, value])

    @jsii.member(jsii_name="stringGreaterThanEquals")
    @classmethod
    def string_greater_than_equals(cls, variable: str, value: str) -> "Condition":
        """Matches if a string field sorts after or equal to a given value.

        :param variable: -
        :param value: -

        stability
        :stability: experimental
        """
        return jsii.sinvoke(cls, "stringGreaterThanEquals", [variable, value])

    @jsii.member(jsii_name="stringLessThan")
    @classmethod
    def string_less_than(cls, variable: str, value: str) -> "Condition":
        """Matches if a string field sorts before a given value.

        :param variable: -
        :param value: -

        stability
        :stability: experimental
        """
        return jsii.sinvoke(cls, "stringLessThan", [variable, value])

    @jsii.member(jsii_name="stringLessThanEquals")
    @classmethod
    def string_less_than_equals(cls, variable: str, value: str) -> "Condition":
        """Matches if a string field sorts equal to or before a given value.

        :param variable: -
        :param value: -

        stability
        :stability: experimental
        """
        return jsii.sinvoke(cls, "stringLessThanEquals", [variable, value])

    @jsii.member(jsii_name="timestampEquals")
    @classmethod
    def timestamp_equals(cls, variable: str, value: str) -> "Condition":
        """Matches if a timestamp field is the same time as the given timestamp.

        :param variable: -
        :param value: -

        stability
        :stability: experimental
        """
        return jsii.sinvoke(cls, "timestampEquals", [variable, value])

    @jsii.member(jsii_name="timestampGreaterThan")
    @classmethod
    def timestamp_greater_than(cls, variable: str, value: str) -> "Condition":
        """Matches if a timestamp field is after the given timestamp.

        :param variable: -
        :param value: -

        stability
        :stability: experimental
        """
        return jsii.sinvoke(cls, "timestampGreaterThan", [variable, value])

    @jsii.member(jsii_name="timestampGreaterThanEquals")
    @classmethod
    def timestamp_greater_than_equals(cls, variable: str, value: str) -> "Condition":
        """Matches if a timestamp field is after or equal to the given timestamp.

        :param variable: -
        :param value: -

        stability
        :stability: experimental
        """
        return jsii.sinvoke(cls, "timestampGreaterThanEquals", [variable, value])

    @jsii.member(jsii_name="timestampLessThan")
    @classmethod
    def timestamp_less_than(cls, variable: str, value: str) -> "Condition":
        """Matches if a timestamp field is before the given timestamp.

        :param variable: -
        :param value: -

        stability
        :stability: experimental
        """
        return jsii.sinvoke(cls, "timestampLessThan", [variable, value])

    @jsii.member(jsii_name="timestampLessThanEquals")
    @classmethod
    def timestamp_less_than_equals(cls, variable: str, value: str) -> "Condition":
        """Matches if a timestamp field is before or equal to the given timestamp.

        :param variable: -
        :param value: -

        stability
        :stability: experimental
        """
        return jsii.sinvoke(cls, "timestampLessThanEquals", [variable, value])

    @jsii.member(jsii_name="renderCondition")
    @abc.abstractmethod
    def render_condition(self) -> typing.Any:
        """Render Amazon States Language JSON for the condition.

        stability
        :stability: experimental
        """
        ...
class _ConditionProxy(Condition):
    # Concrete proxy returned by Condition.__jsii_proxy_class__(); forwards the
    # abstract renderCondition member to the jsii kernel.
    @jsii.member(jsii_name="renderCondition")
    def render_condition(self) -> typing.Any:
        """Render Amazon States Language JSON for the condition.

        stability
        :stability: experimental
        """
        return jsii.invoke(self, "renderCondition", [])
class Context(metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-stepfunctions.Context"):
    """Extract a field from the State Machine Context data.

    see
    :see: https://docs.aws.amazon.com/step-functions/latest/dg/connect-to-resource.html#wait-token-contextobject
    stability
    :stability: experimental
    """

    @jsii.member(jsii_name="numberAt")
    @classmethod
    def number_at(cls, path: str) -> jsii.Number:
        """Instead of using a literal number, get the value from a JSON path.

        :param path: -

        stability
        :stability: experimental
        """
        return jsii.sinvoke(cls, "numberAt", [path])

    @jsii.member(jsii_name="stringAt")
    @classmethod
    def string_at(cls, path: str) -> str:
        """Instead of using a literal string, get the value from a JSON path.

        :param path: -

        stability
        :stability: experimental
        """
        return jsii.sinvoke(cls, "stringAt", [path])

    @classproperty
    @jsii.member(jsii_name="entireContext")
    def entire_context(cls) -> str:
        """Use the entire context data structure.

        Will be an object at invocation time, but is represented in the CDK
        application as a string.

        stability
        :stability: experimental
        """
        return jsii.sget(cls, "entireContext")

    @classproperty
    @jsii.member(jsii_name="taskToken")
    def task_token(cls) -> str:
        """Return the Task Token field.

        External actions will need this token to report step completion
        back to StepFunctions using the ``SendTaskSuccess`` or ``SendTaskFailure``
        calls.

        stability
        :stability: experimental
        """
        return jsii.sget(cls, "taskToken")
class Data(metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-stepfunctions.Data"):
    """Extract a field from the State Machine data that gets passed around between states.

    All members delegate to the JavaScript implementation through jsii.

    stability
    :stability: experimental
    """
    @jsii.member(jsii_name="isJsonPathString")
    @classmethod
    def is_json_path_string(cls, value: str) -> bool:
        """Return whether the given string is a JSON-path reference.

        :param value: -

        stability
        :stability: experimental
        """
        return jsii.sinvoke(cls, "isJsonPathString", [value])
    @jsii.member(jsii_name="listAt")
    @classmethod
    def list_at(cls, path: str) -> typing.List[str]:
        """Instead of using a literal string list, get the value from a JSON path.

        :param path: -

        stability
        :stability: experimental
        """
        return jsii.sinvoke(cls, "listAt", [path])
    @jsii.member(jsii_name="numberAt")
    @classmethod
    def number_at(cls, path: str) -> jsii.Number:
        """Instead of using a literal number, get the value from a JSON path.

        :param path: -

        stability
        :stability: experimental
        """
        return jsii.sinvoke(cls, "numberAt", [path])
    @jsii.member(jsii_name="stringAt")
    @classmethod
    def string_at(cls, path: str) -> str:
        """Instead of using a literal string, get the value from a JSON path.

        :param path: -

        stability
        :stability: experimental
        """
        return jsii.sinvoke(cls, "stringAt", [path])
    @classproperty
    @jsii.member(jsii_name="entirePayload")
    def entire_payload(cls) -> str:
        """Use the entire data structure.

        Will be an object at invocation time, but is represented in the CDK
        application as a string.

        stability
        :stability: experimental
        """
        return jsii.sget(cls, "entirePayload")
class Errors(metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-stepfunctions.Errors"):
    """Predefined error strings.

    Each class-level property reads the corresponding constant from the
    JavaScript implementation via jsii.

    stability
    :stability: experimental
    """
    def __init__(self) -> None:
        jsii.create(Errors, self, [])
    @classproperty
    @jsii.member(jsii_name="ALL")
    def ALL(cls) -> str:
        """Matches any Error.

        stability
        :stability: experimental
        """
        return jsii.sget(cls, "ALL")
    @classproperty
    @jsii.member(jsii_name="BRANCH_FAILED")
    def BRANCH_FAILED(cls) -> str:
        """A branch of a Parallel state failed.

        stability
        :stability: experimental
        """
        return jsii.sget(cls, "BRANCH_FAILED")
    @classproperty
    @jsii.member(jsii_name="NO_CHOICE_MATCHED")
    def NO_CHOICE_MATCHED(cls) -> str:
        """A Choice state failed to find a match for the condition field extracted from its input.

        stability
        :stability: experimental
        """
        return jsii.sget(cls, "NO_CHOICE_MATCHED")
    @classproperty
    @jsii.member(jsii_name="PERMISSIONS")
    def PERMISSIONS(cls) -> str:
        """A Task State failed because it had insufficient privileges to execute the specified code.

        stability
        :stability: experimental
        """
        return jsii.sget(cls, "PERMISSIONS")
    @classproperty
    @jsii.member(jsii_name="RESULT_PATH_MATCH_FAILURE")
    def RESULT_PATH_MATCH_FAILURE(cls) -> str:
        """A Task State’s “ResultPath” field cannot be applied to the input the state received.

        stability
        :stability: experimental
        """
        return jsii.sget(cls, "RESULT_PATH_MATCH_FAILURE")
    @classproperty
    @jsii.member(jsii_name="TASKS_FAILED")
    def TASKS_FAILED(cls) -> str:
        """A Task State failed during the execution.

        stability
        :stability: experimental
        """
        return jsii.sget(cls, "TASKS_FAILED")
    @classproperty
    @jsii.member(jsii_name="TIMEOUT")
    def TIMEOUT(cls) -> str:
        """A Task State either ran longer than the “TimeoutSeconds” value, or failed to heartbeat for a time longer than the “HeartbeatSeconds” value.

        stability
        :stability: experimental
        """
        return jsii.sget(cls, "TIMEOUT")
@jsii.data_type(jsii_type="@aws-cdk/aws-stepfunctions.FailProps", jsii_struct_bases=[], name_mapping={'cause': 'cause', 'comment': 'comment', 'error': 'error'})
class FailProps():
    def __init__(self, *, cause: typing.Optional[str]=None, comment: typing.Optional[str]=None, error: typing.Optional[str]=None):
        """Properties for defining a Fail state.

        :param cause: A description for the cause of the failure. Default: No description
        :param comment: An optional description for this state. Default: No comment
        :param error: Error code used to represent this failure. Default: No error code

        stability
        :stability: experimental
        """
        # Store only the keyword arguments that were explicitly provided.
        candidates = {'cause': cause, 'comment': comment, 'error': error}
        self._values = {key: val for key, val in candidates.items() if val is not None}
    @property
    def cause(self) -> typing.Optional[str]:
        """A description for the cause of the failure.

        default
        :default: No description

        stability
        :stability: experimental
        """
        return self._values.get('cause')
    @property
    def comment(self) -> typing.Optional[str]:
        """An optional description for this state.

        default
        :default: No comment

        stability
        :stability: experimental
        """
        return self._values.get('comment')
    @property
    def error(self) -> typing.Optional[str]:
        """Error code used to represent this failure.

        default
        :default: No error code

        stability
        :stability: experimental
        """
        return self._values.get('error')
    def __eq__(self, rhs) -> bool:
        # Equal only to another FailProps carrying the same explicit values.
        if not isinstance(rhs, self.__class__):
            return False
        return rhs._values == self._values
    def __ne__(self, rhs) -> bool:
        return not (rhs == self)
    def __repr__(self) -> str:
        rendered = ', '.join(key + '=' + repr(val) for key, val in self._values.items())
        return 'FailProps(%s)' % rendered
class FieldUtils(metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-stepfunctions.FieldUtils"):
    """Helper functions to work with structures containing fields.

    All members delegate to the JavaScript implementation through jsii.

    stability
    :stability: experimental
    """
    @jsii.member(jsii_name="containsTaskToken")
    @classmethod
    def contains_task_token(cls, obj: typing.Optional[typing.Mapping[str,typing.Any]]=None) -> bool:
        """Returns whether the given task structure contains the TaskToken field anywhere.

        The field is considered included if the field itself or one of its containing
        fields occurs anywhere in the payload.

        :param obj: -

        stability
        :stability: experimental
        """
        return jsii.sinvoke(cls, "containsTaskToken", [obj])
    @jsii.member(jsii_name="findReferencedPaths")
    @classmethod
    def find_referenced_paths(cls, obj: typing.Optional[typing.Mapping[str,typing.Any]]=None) -> typing.List[str]:
        """Return all JSON paths used in the given structure.

        :param obj: -

        stability
        :stability: experimental
        """
        return jsii.sinvoke(cls, "findReferencedPaths", [obj])
    @jsii.member(jsii_name="renderObject")
    @classmethod
    def render_object(cls, obj: typing.Optional[typing.Mapping[str,typing.Any]]=None) -> typing.Optional[typing.Mapping[str,typing.Any]]:
        """Render a JSON structure containing fields to the right StepFunctions structure.

        :param obj: -

        stability
        :stability: experimental
        """
        return jsii.sinvoke(cls, "renderObject", [obj])
@jsii.data_type(jsii_type="@aws-cdk/aws-stepfunctions.FindStateOptions", jsii_struct_bases=[], name_mapping={'include_error_handlers': 'includeErrorHandlers'})
class FindStateOptions():
    def __init__(self, *, include_error_handlers: typing.Optional[bool]=None):
        """Options for finding reachable states.

        :param include_error_handlers: Whether or not to follow error-handling transitions. Default: false

        stability
        :stability: experimental
        """
        # Record the option only when it was explicitly provided.
        values = {}
        if include_error_handlers is not None:
            values["include_error_handlers"] = include_error_handlers
        self._values = values
    @property
    def include_error_handlers(self) -> typing.Optional[bool]:
        """Whether or not to follow error-handling transitions.

        default
        :default: false

        stability
        :stability: experimental
        """
        return self._values.get('include_error_handlers')
    def __eq__(self, rhs) -> bool:
        # Equal only to another FindStateOptions carrying the same values.
        if not isinstance(rhs, self.__class__):
            return False
        return rhs._values == self._values
    def __ne__(self, rhs) -> bool:
        return not (rhs == self)
    def __repr__(self) -> str:
        rendered = ', '.join(key + '=' + repr(val) for key, val in self._values.items())
        return 'FindStateOptions(%s)' % rendered
@jsii.interface(jsii_type="@aws-cdk/aws-stepfunctions.IActivity")
class IActivity(aws_cdk.core.IResource, jsii.compat.Protocol):
    """Represents a StepFunctions activity resource.

    stability
    :stability: experimental
    """
    @staticmethod
    def __jsii_proxy_class__():
        # Tells jsii which class to instantiate for values of this interface type.
        return _IActivityProxy
    @property
    @jsii.member(jsii_name="activityArn")
    def activity_arn(self) -> str:
        """The ARN of the activity.

        stability
        :stability: experimental
        attribute:
        :attribute:: true
        """
        ...
    @property
    @jsii.member(jsii_name="activityName")
    def activity_name(self) -> str:
        """The name of the activity.

        stability
        :stability: experimental
        attribute:
        :attribute:: true
        """
        ...
class _IActivityProxy(jsii.proxy_for(aws_cdk.core.IResource)):
    """Internal jsii proxy implementing IActivity by delegating to the JS object.

    stability
    :stability: experimental
    """
    __jsii_type__ = "@aws-cdk/aws-stepfunctions.IActivity"
    @property
    @jsii.member(jsii_name="activityArn")
    def activity_arn(self) -> str:
        """The ARN of the activity.

        stability
        :stability: experimental
        attribute:
        :attribute:: true
        """
        return jsii.get(self, "activityArn")
    @property
    @jsii.member(jsii_name="activityName")
    def activity_name(self) -> str:
        """The name of the activity.

        stability
        :stability: experimental
        attribute:
        :attribute:: true
        """
        return jsii.get(self, "activityName")
@jsii.implements(IActivity)
class Activity(aws_cdk.core.Resource, metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-stepfunctions.Activity"):
    """Define a new StepFunctions activity.

    All members delegate to the JavaScript implementation through jsii; the
    metric* helpers each build a CloudWatch MetricOptions struct from their
    keyword arguments and forward it.

    stability
    :stability: experimental
    """
    def __init__(self, scope: aws_cdk.core.Construct, id: str, *, activity_name: typing.Optional[str]=None) -> None:
        """
        :param scope: -
        :param id: -
        :param props: -
        :param activity_name: The name for this activity. Default: If not supplied, a name is generated

        stability
        :stability: experimental
        """
        props = ActivityProps(activity_name=activity_name)
        jsii.create(Activity, self, [scope, id, props])
    @jsii.member(jsii_name="fromActivityArn")
    @classmethod
    def from_activity_arn(cls, scope: aws_cdk.core.Construct, id: str, activity_arn: str) -> "IActivity":
        """Construct an Activity from an existing Activity ARN.

        :param scope: -
        :param id: -
        :param activity_arn: -

        stability
        :stability: experimental
        """
        return jsii.sinvoke(cls, "fromActivityArn", [scope, id, activity_arn])
    @jsii.member(jsii_name="fromActivityName")
    @classmethod
    def from_activity_name(cls, scope: aws_cdk.core.Construct, id: str, activity_name: str) -> "IActivity":
        """Construct an Activity from an existing Activity Name.

        :param scope: -
        :param id: -
        :param activity_name: -

        stability
        :stability: experimental
        """
        return jsii.sinvoke(cls, "fromActivityName", [scope, id, activity_name])
    @jsii.member(jsii_name="metric")
    def metric(self, metric_name: str, *, color: typing.Optional[str]=None, dimensions: typing.Optional[typing.Mapping[str,typing.Any]]=None, label: typing.Optional[str]=None, period: typing.Optional[aws_cdk.core.Duration]=None, statistic: typing.Optional[str]=None, unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit]=None) -> aws_cdk.aws_cloudwatch.Metric:
        """Return the given named metric for this Activity.

        :param metric_name: -
        :param props: -
        :param color: Color for this metric when added to a Graph in a Dashboard.
        :param dimensions: Dimensions of the metric. Default: - No dimensions.
        :param label: Label for this metric when added to a Graph in a Dashboard.
        :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
        :param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
        :param unit: Unit for the metric that is associated with the alarm.

        default
        :default: sum over 5 minutes

        stability
        :stability: experimental
        """
        props = aws_cdk.aws_cloudwatch.MetricOptions(color=color, dimensions=dimensions, label=label, period=period, statistic=statistic, unit=unit)
        return jsii.invoke(self, "metric", [metric_name, props])
    @jsii.member(jsii_name="metricFailed")
    def metric_failed(self, *, color: typing.Optional[str]=None, dimensions: typing.Optional[typing.Mapping[str,typing.Any]]=None, label: typing.Optional[str]=None, period: typing.Optional[aws_cdk.core.Duration]=None, statistic: typing.Optional[str]=None, unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit]=None) -> aws_cdk.aws_cloudwatch.Metric:
        """Metric for the number of times this activity fails.

        :param props: -
        :param color: Color for this metric when added to a Graph in a Dashboard.
        :param dimensions: Dimensions of the metric. Default: - No dimensions.
        :param label: Label for this metric when added to a Graph in a Dashboard.
        :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
        :param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
        :param unit: Unit for the metric that is associated with the alarm.

        default
        :default: sum over 5 minutes

        stability
        :stability: experimental
        """
        props = aws_cdk.aws_cloudwatch.MetricOptions(color=color, dimensions=dimensions, label=label, period=period, statistic=statistic, unit=unit)
        return jsii.invoke(self, "metricFailed", [props])
    @jsii.member(jsii_name="metricHeartbeatTimedOut")
    def metric_heartbeat_timed_out(self, *, color: typing.Optional[str]=None, dimensions: typing.Optional[typing.Mapping[str,typing.Any]]=None, label: typing.Optional[str]=None, period: typing.Optional[aws_cdk.core.Duration]=None, statistic: typing.Optional[str]=None, unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit]=None) -> aws_cdk.aws_cloudwatch.Metric:
        """Metric for the number of times the heartbeat times out for this activity.

        :param props: -
        :param color: Color for this metric when added to a Graph in a Dashboard.
        :param dimensions: Dimensions of the metric. Default: - No dimensions.
        :param label: Label for this metric when added to a Graph in a Dashboard.
        :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
        :param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
        :param unit: Unit for the metric that is associated with the alarm.

        default
        :default: sum over 5 minutes

        stability
        :stability: experimental
        """
        props = aws_cdk.aws_cloudwatch.MetricOptions(color=color, dimensions=dimensions, label=label, period=period, statistic=statistic, unit=unit)
        return jsii.invoke(self, "metricHeartbeatTimedOut", [props])
    @jsii.member(jsii_name="metricRunTime")
    def metric_run_time(self, *, color: typing.Optional[str]=None, dimensions: typing.Optional[typing.Mapping[str,typing.Any]]=None, label: typing.Optional[str]=None, period: typing.Optional[aws_cdk.core.Duration]=None, statistic: typing.Optional[str]=None, unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit]=None) -> aws_cdk.aws_cloudwatch.Metric:
        """The interval, in milliseconds, between the time the activity starts and the time it closes.

        :param props: -
        :param color: Color for this metric when added to a Graph in a Dashboard.
        :param dimensions: Dimensions of the metric. Default: - No dimensions.
        :param label: Label for this metric when added to a Graph in a Dashboard.
        :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
        :param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
        :param unit: Unit for the metric that is associated with the alarm.

        default
        :default: average over 5 minutes

        stability
        :stability: experimental
        """
        props = aws_cdk.aws_cloudwatch.MetricOptions(color=color, dimensions=dimensions, label=label, period=period, statistic=statistic, unit=unit)
        return jsii.invoke(self, "metricRunTime", [props])
    @jsii.member(jsii_name="metricScheduled")
    def metric_scheduled(self, *, color: typing.Optional[str]=None, dimensions: typing.Optional[typing.Mapping[str,typing.Any]]=None, label: typing.Optional[str]=None, period: typing.Optional[aws_cdk.core.Duration]=None, statistic: typing.Optional[str]=None, unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit]=None) -> aws_cdk.aws_cloudwatch.Metric:
        """Metric for the number of times this activity is scheduled.

        :param props: -
        :param color: Color for this metric when added to a Graph in a Dashboard.
        :param dimensions: Dimensions of the metric. Default: - No dimensions.
        :param label: Label for this metric when added to a Graph in a Dashboard.
        :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
        :param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
        :param unit: Unit for the metric that is associated with the alarm.

        default
        :default: sum over 5 minutes

        stability
        :stability: experimental
        """
        props = aws_cdk.aws_cloudwatch.MetricOptions(color=color, dimensions=dimensions, label=label, period=period, statistic=statistic, unit=unit)
        return jsii.invoke(self, "metricScheduled", [props])
    @jsii.member(jsii_name="metricScheduleTime")
    def metric_schedule_time(self, *, color: typing.Optional[str]=None, dimensions: typing.Optional[typing.Mapping[str,typing.Any]]=None, label: typing.Optional[str]=None, period: typing.Optional[aws_cdk.core.Duration]=None, statistic: typing.Optional[str]=None, unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit]=None) -> aws_cdk.aws_cloudwatch.Metric:
        """The interval, in milliseconds, for which the activity stays in the schedule state.

        :param props: -
        :param color: Color for this metric when added to a Graph in a Dashboard.
        :param dimensions: Dimensions of the metric. Default: - No dimensions.
        :param label: Label for this metric when added to a Graph in a Dashboard.
        :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
        :param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
        :param unit: Unit for the metric that is associated with the alarm.

        default
        :default: average over 5 minutes

        stability
        :stability: experimental
        """
        props = aws_cdk.aws_cloudwatch.MetricOptions(color=color, dimensions=dimensions, label=label, period=period, statistic=statistic, unit=unit)
        return jsii.invoke(self, "metricScheduleTime", [props])
    @jsii.member(jsii_name="metricStarted")
    def metric_started(self, *, color: typing.Optional[str]=None, dimensions: typing.Optional[typing.Mapping[str,typing.Any]]=None, label: typing.Optional[str]=None, period: typing.Optional[aws_cdk.core.Duration]=None, statistic: typing.Optional[str]=None, unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit]=None) -> aws_cdk.aws_cloudwatch.Metric:
        """Metric for the number of times this activity is started.

        :param props: -
        :param color: Color for this metric when added to a Graph in a Dashboard.
        :param dimensions: Dimensions of the metric. Default: - No dimensions.
        :param label: Label for this metric when added to a Graph in a Dashboard.
        :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
        :param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
        :param unit: Unit for the metric that is associated with the alarm.

        default
        :default: sum over 5 minutes

        stability
        :stability: experimental
        """
        props = aws_cdk.aws_cloudwatch.MetricOptions(color=color, dimensions=dimensions, label=label, period=period, statistic=statistic, unit=unit)
        return jsii.invoke(self, "metricStarted", [props])
    @jsii.member(jsii_name="metricSucceeded")
    def metric_succeeded(self, *, color: typing.Optional[str]=None, dimensions: typing.Optional[typing.Mapping[str,typing.Any]]=None, label: typing.Optional[str]=None, period: typing.Optional[aws_cdk.core.Duration]=None, statistic: typing.Optional[str]=None, unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit]=None) -> aws_cdk.aws_cloudwatch.Metric:
        """Metric for the number of times this activity succeeds.

        :param props: -
        :param color: Color for this metric when added to a Graph in a Dashboard.
        :param dimensions: Dimensions of the metric. Default: - No dimensions.
        :param label: Label for this metric when added to a Graph in a Dashboard.
        :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
        :param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
        :param unit: Unit for the metric that is associated with the alarm.

        default
        :default: sum over 5 minutes

        stability
        :stability: experimental
        """
        props = aws_cdk.aws_cloudwatch.MetricOptions(color=color, dimensions=dimensions, label=label, period=period, statistic=statistic, unit=unit)
        return jsii.invoke(self, "metricSucceeded", [props])
    @jsii.member(jsii_name="metricTime")
    def metric_time(self, *, color: typing.Optional[str]=None, dimensions: typing.Optional[typing.Mapping[str,typing.Any]]=None, label: typing.Optional[str]=None, period: typing.Optional[aws_cdk.core.Duration]=None, statistic: typing.Optional[str]=None, unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit]=None) -> aws_cdk.aws_cloudwatch.Metric:
        """The interval, in milliseconds, between the time the activity is scheduled and the time it closes.

        :param props: -
        :param color: Color for this metric when added to a Graph in a Dashboard.
        :param dimensions: Dimensions of the metric. Default: - No dimensions.
        :param label: Label for this metric when added to a Graph in a Dashboard.
        :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
        :param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
        :param unit: Unit for the metric that is associated with the alarm.

        default
        :default: average over 5 minutes

        stability
        :stability: experimental
        """
        props = aws_cdk.aws_cloudwatch.MetricOptions(color=color, dimensions=dimensions, label=label, period=period, statistic=statistic, unit=unit)
        return jsii.invoke(self, "metricTime", [props])
    @jsii.member(jsii_name="metricTimedOut")
    def metric_timed_out(self, *, color: typing.Optional[str]=None, dimensions: typing.Optional[typing.Mapping[str,typing.Any]]=None, label: typing.Optional[str]=None, period: typing.Optional[aws_cdk.core.Duration]=None, statistic: typing.Optional[str]=None, unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit]=None) -> aws_cdk.aws_cloudwatch.Metric:
        """Metric for the number of times this activity times out.

        :param props: -
        :param color: Color for this metric when added to a Graph in a Dashboard.
        :param dimensions: Dimensions of the metric. Default: - No dimensions.
        :param label: Label for this metric when added to a Graph in a Dashboard.
        :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
        :param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
        :param unit: Unit for the metric that is associated with the alarm.

        default
        :default: sum over 5 minutes

        stability
        :stability: experimental
        """
        props = aws_cdk.aws_cloudwatch.MetricOptions(color=color, dimensions=dimensions, label=label, period=period, statistic=statistic, unit=unit)
        return jsii.invoke(self, "metricTimedOut", [props])
    @property
    @jsii.member(jsii_name="activityArn")
    def activity_arn(self) -> str:
        """The ARN of the activity.

        stability
        :stability: experimental
        attribute:
        :attribute:: true
        """
        return jsii.get(self, "activityArn")
    @property
    @jsii.member(jsii_name="activityName")
    def activity_name(self) -> str:
        """The name of the activity.

        stability
        :stability: experimental
        attribute:
        :attribute:: true
        """
        return jsii.get(self, "activityName")
@jsii.interface(jsii_type="@aws-cdk/aws-stepfunctions.IChainable")
class IChainable(jsii.compat.Protocol):
    """Interface for objects that can be used in a Chain.

    stability
    :stability: experimental
    """
    @staticmethod
    def __jsii_proxy_class__():
        # Tells jsii which class to instantiate for values of this interface type.
        return _IChainableProxy
    @property
    @jsii.member(jsii_name="endStates")
    def end_states(self) -> typing.List["INextable"]:
        """The chainable end state(s) of this chainable.

        stability
        :stability: experimental
        """
        ...
    @property
    @jsii.member(jsii_name="id")
    def id(self) -> str:
        """Descriptive identifier for this chainable.

        stability
        :stability: experimental
        """
        ...
    @property
    @jsii.member(jsii_name="startState")
    def start_state(self) -> "State":
        """The start state of this chainable.

        stability
        :stability: experimental
        """
        ...
class _IChainableProxy():
    """Internal jsii proxy implementing IChainable by delegating to the JS object.

    stability
    :stability: experimental
    """
    __jsii_type__ = "@aws-cdk/aws-stepfunctions.IChainable"
    @property
    @jsii.member(jsii_name="endStates")
    def end_states(self) -> typing.List["INextable"]:
        """The chainable end state(s) of this chainable.

        stability
        :stability: experimental
        """
        return jsii.get(self, "endStates")
    @property
    @jsii.member(jsii_name="id")
    def id(self) -> str:
        """Descriptive identifier for this chainable.

        stability
        :stability: experimental
        """
        return jsii.get(self, "id")
    @property
    @jsii.member(jsii_name="startState")
    def start_state(self) -> "State":
        """The start state of this chainable.

        stability
        :stability: experimental
        """
        return jsii.get(self, "startState")
@jsii.implements(IChainable)
class Chain(metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-stepfunctions.Chain"):
    """A collection of states to chain onto.

    A Chain has a start and zero or more chainable ends. If there are
    zero ends, calling next() on the Chain will fail.

    All members delegate to the JavaScript implementation through jsii.

    stability
    :stability: experimental
    """
    @jsii.member(jsii_name="custom")
    @classmethod
    def custom(cls, start_state: "State", end_states: typing.List["INextable"], last_added: "IChainable") -> "Chain":
        """Make a Chain with specific start and end states, and a last-added Chainable.

        :param start_state: -
        :param end_states: -
        :param last_added: -

        stability
        :stability: experimental
        """
        return jsii.sinvoke(cls, "custom", [start_state, end_states, last_added])
    @jsii.member(jsii_name="sequence")
    @classmethod
    def sequence(cls, start: "IChainable", next: "IChainable") -> "Chain":
        """Make a Chain with the start from one chain and the ends from another.

        :param start: -
        :param next: -

        stability
        :stability: experimental
        """
        return jsii.sinvoke(cls, "sequence", [start, next])
    @jsii.member(jsii_name="start")
    @classmethod
    def start(cls, state: "IChainable") -> "Chain":
        """Begin a new Chain from one chainable.

        :param state: -

        stability
        :stability: experimental
        """
        return jsii.sinvoke(cls, "start", [state])
    @jsii.member(jsii_name="next")
    def next(self, next: "IChainable") -> "Chain":
        """Continue normal execution with the given state.

        :param next: -

        stability
        :stability: experimental
        """
        return jsii.invoke(self, "next", [next])
    @jsii.member(jsii_name="toSingleState")
    def to_single_state(self, id: str, *, comment: typing.Optional[str]=None, input_path: typing.Optional[str]=None, output_path: typing.Optional[str]=None, result_path: typing.Optional[str]=None) -> "Parallel":
        """Return a single state that encompasses all states in the chain.

        This can be used to add error handling to a sequence of states.

        Be aware that this changes the result of the inner state machine
        to be an array with the result of the state machine in it. Adjust
        your paths accordingly. For example, change 'outputPath' to
        '$[0]'.

        :param id: -
        :param props: -
        :param comment: An optional description for this state. Default: No comment
        :param input_path: JSONPath expression to select part of the state to be the input to this state. May also be the special value DISCARD, which will cause the effective input to be the empty object {}. Default: $
        :param output_path: JSONPath expression to select part of the state to be the output to this state. May also be the special value DISCARD, which will cause the effective output to be the empty object {}. Default: $
        :param result_path: JSONPath expression to indicate where to inject the state's output. May also be the special value DISCARD, which will cause the state's input to become its output. Default: $

        stability
        :stability: experimental
        """
        props = ParallelProps(comment=comment, input_path=input_path, output_path=output_path, result_path=result_path)
        return jsii.invoke(self, "toSingleState", [id, props])
    @property
    @jsii.member(jsii_name="endStates")
    def end_states(self) -> typing.List["INextable"]:
        """The chainable end state(s) of this chain.

        stability
        :stability: experimental
        """
        return jsii.get(self, "endStates")
    @property
    @jsii.member(jsii_name="id")
    def id(self) -> str:
        """Identify this Chain.

        stability
        :stability: experimental
        """
        return jsii.get(self, "id")
    @property
    @jsii.member(jsii_name="startState")
    def start_state(self) -> "State":
        """The start state of this chain.

        stability
        :stability: experimental
        """
        return jsii.get(self, "startState")
@jsii.interface(jsii_type="@aws-cdk/aws-stepfunctions.INextable")
class INextable(jsii.compat.Protocol):
    """Interface for states that can have 'next' states.

    stability
    :stability: experimental
    """
    @staticmethod
    def __jsii_proxy_class__():
        # Tells jsii which class to instantiate for values of this interface type.
        return _INextableProxy
    @jsii.member(jsii_name="next")
    def next(self, state: "IChainable") -> "Chain":
        """Go to the indicated state after this state.

        :param state: -

        return
        :return: The chain of states built up

        stability
        :stability: experimental
        """
        ...
class _INextableProxy():
    """Internal jsii proxy implementing INextable by delegating to the JS object.

    stability
    :stability: experimental
    """
    __jsii_type__ = "@aws-cdk/aws-stepfunctions.INextable"
    @jsii.member(jsii_name="next")
    def next(self, state: "IChainable") -> "Chain":
        """Go to the indicated state after this state.

        :param state: -

        return
        :return: The chain of states built up

        stability
        :stability: experimental
        """
        return jsii.invoke(self, "next", [state])
@jsii.interface(jsii_type="@aws-cdk/aws-stepfunctions.IStateMachine")
class IStateMachine(aws_cdk.core.IResource, jsii.compat.Protocol):
    """A State Machine.

    stability
    :stability: experimental
    """
    @staticmethod
    def __jsii_proxy_class__():
        # Tells jsii which class to instantiate for values of this interface type.
        return _IStateMachineProxy
    @property
    @jsii.member(jsii_name="stateMachineArn")
    def state_machine_arn(self) -> str:
        """The ARN of the state machine.

        stability
        :stability: experimental
        attribute:
        :attribute:: true
        """
        ...
class _IStateMachineProxy(jsii.proxy_for(aws_cdk.core.IResource)):
    """Internal jsii proxy implementing IStateMachine by delegating to the JS object.

    stability
    :stability: experimental
    """
    __jsii_type__ = "@aws-cdk/aws-stepfunctions.IStateMachine"
    @property
    @jsii.member(jsii_name="stateMachineArn")
    def state_machine_arn(self) -> str:
        """The ARN of the state machine.

        stability
        :stability: experimental
        attribute:
        :attribute:: true
        """
        return jsii.get(self, "stateMachineArn")
@jsii.interface(jsii_type="@aws-cdk/aws-stepfunctions.IStepFunctionsTask")
class IStepFunctionsTask(jsii.compat.Protocol):
    """Interface for resources that can be used as tasks.

    stability
    :stability: experimental
    """
    @staticmethod
    def __jsii_proxy_class__():
        # Tells jsii which class to instantiate for values of this interface type.
        return _IStepFunctionsTaskProxy
    @jsii.member(jsii_name="bind")
    def bind(self, task: "Task") -> "StepFunctionsTaskConfig":
        """Called when the task object is used in a workflow.

        :param task: -

        stability
        :stability: experimental
        """
        ...
class _IStepFunctionsTaskProxy():
    """Internal jsii proxy implementing IStepFunctionsTask by delegating to the JS object.

    stability
    :stability: experimental
    """
    __jsii_type__ = "@aws-cdk/aws-stepfunctions.IStepFunctionsTask"
    @jsii.member(jsii_name="bind")
    def bind(self, task: "Task") -> "StepFunctionsTaskConfig":
        """Called when the task object is used in a workflow.

        :param task: -

        stability
        :stability: experimental
        """
        return jsii.invoke(self, "bind", [task])
@jsii.enum(jsii_type="@aws-cdk/aws-stepfunctions.InputType")
class InputType(enum.Enum):
    """The type of task input.

    :stability: experimental
    """
    TEXT = "TEXT"
    """
    :stability: experimental
    """
    OBJECT = "OBJECT"
    """
    :stability: experimental
    """
@jsii.data_type(jsii_type="@aws-cdk/aws-stepfunctions.MapProps", jsii_struct_bases=[], name_mapping={'comment': 'comment', 'input_path': 'inputPath', 'items_path': 'itemsPath', 'max_concurrency': 'maxConcurrency', 'output_path': 'outputPath', 'parameters': 'parameters', 'result_path': 'resultPath'})
class MapProps():
    """Properties for defining a Map state.

    :stability: experimental
    """
    def __init__(self, *, comment: typing.Optional[str]=None, input_path: typing.Optional[str]=None, items_path: typing.Optional[str]=None, max_concurrency: typing.Optional[jsii.Number]=None, output_path: typing.Optional[str]=None, parameters: typing.Optional[typing.Mapping[str,typing.Any]]=None, result_path: typing.Optional[str]=None):
        """Properties for defining a Map state.

        :param comment: An optional description for this state. Default: No comment
        :param input_path: JSONPath expression to select part of the state to be the input to this state. May also be the special value DISCARD, which will cause the effective input to be the empty object {}. Default: $
        :param items_path: JSONPath expression to select the array to iterate over. Default: $
        :param max_concurrency: An upper bound on the number of iterations you want running at once. Default: - full concurrency
        :param output_path: JSONPath expression to select part of the state to be the output to this state. May also be the special value DISCARD, which will cause the effective output to be the empty object {}. Default: $
        :param parameters: The JSON that you want to override your default iteration input. Default: $
        :param result_path: JSONPath expression to indicate where to inject the state's output. May also be the special value DISCARD, which will cause the state's input to become its output. Default: $

        :stability: experimental
        """
        # Keep only the keys the caller actually supplied; insertion order
        # follows the keyword order above, matching the original behavior.
        provided = {
            'comment': comment,
            'input_path': input_path,
            'items_path': items_path,
            'max_concurrency': max_concurrency,
            'output_path': output_path,
            'parameters': parameters,
            'result_path': result_path,
        }
        self._values = {key: value for key, value in provided.items() if value is not None}

    @property
    def comment(self) -> typing.Optional[str]:
        """An optional description for this state.

        :default: No comment
        :stability: experimental
        """
        return self._values.get('comment')

    @property
    def input_path(self) -> typing.Optional[str]:
        """JSONPath expression selecting the part of the state that becomes this state's input.

        May also be the special value DISCARD, which will cause the effective
        input to be the empty object {}.

        :default: $
        :stability: experimental
        """
        return self._values.get('input_path')

    @property
    def items_path(self) -> typing.Optional[str]:
        """JSONPath expression selecting the array to iterate over.

        :default: $
        :stability: experimental
        """
        return self._values.get('items_path')

    @property
    def max_concurrency(self) -> typing.Optional[jsii.Number]:
        """An upper bound on the number of iterations you want running at once.

        :default: - full concurrency
        :stability: experimental
        """
        return self._values.get('max_concurrency')

    @property
    def output_path(self) -> typing.Optional[str]:
        """JSONPath expression selecting the part of the state that becomes this state's output.

        May also be the special value DISCARD, which will cause the effective
        output to be the empty object {}.

        :default: $
        :stability: experimental
        """
        return self._values.get('output_path')

    @property
    def parameters(self) -> typing.Optional[typing.Mapping[str,typing.Any]]:
        """The JSON that you want to override your default iteration input.

        :default: $
        :stability: experimental
        """
        return self._values.get('parameters')

    @property
    def result_path(self) -> typing.Optional[str]:
        """JSONPath expression indicating where to inject the state's output.

        May also be the special value DISCARD, which will cause the state's
        input to become its output.

        :default: $
        :stability: experimental
        """
        return self._values.get('result_path')

    def __eq__(self, rhs) -> bool:
        same_type = isinstance(rhs, self.__class__)
        return same_type and rhs._values == self._values

    def __ne__(self, rhs) -> bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        fields = ', '.join(k + '=' + repr(v) for k, v in self._values.items())
        return 'MapProps({})'.format(fields)
@jsii.data_type(jsii_type="@aws-cdk/aws-stepfunctions.ParallelProps", jsii_struct_bases=[], name_mapping={'comment': 'comment', 'input_path': 'inputPath', 'output_path': 'outputPath', 'result_path': 'resultPath'})
class ParallelProps():
    """Properties for defining a Parallel state.

    :stability: experimental
    """
    def __init__(self, *, comment: typing.Optional[str]=None, input_path: typing.Optional[str]=None, output_path: typing.Optional[str]=None, result_path: typing.Optional[str]=None):
        """Properties for defining a Parallel state.

        :param comment: An optional description for this state. Default: No comment
        :param input_path: JSONPath expression to select part of the state to be the input to this state. May also be the special value DISCARD, which will cause the effective input to be the empty object {}. Default: $
        :param output_path: JSONPath expression to select part of the state to be the output to this state. May also be the special value DISCARD, which will cause the effective output to be the empty object {}. Default: $
        :param result_path: JSONPath expression to indicate where to inject the state's output. May also be the special value DISCARD, which will cause the state's input to become its output. Default: $

        :stability: experimental
        """
        # Keep only the keys the caller actually supplied; insertion order
        # follows the keyword order above, matching the original behavior.
        provided = {
            'comment': comment,
            'input_path': input_path,
            'output_path': output_path,
            'result_path': result_path,
        }
        self._values = {key: value for key, value in provided.items() if value is not None}

    @property
    def comment(self) -> typing.Optional[str]:
        """An optional description for this state.

        :default: No comment
        :stability: experimental
        """
        return self._values.get('comment')

    @property
    def input_path(self) -> typing.Optional[str]:
        """JSONPath expression selecting the part of the state that becomes this state's input.

        May also be the special value DISCARD, which will cause the effective
        input to be the empty object {}.

        :default: $
        :stability: experimental
        """
        return self._values.get('input_path')

    @property
    def output_path(self) -> typing.Optional[str]:
        """JSONPath expression selecting the part of the state that becomes this state's output.

        May also be the special value DISCARD, which will cause the effective
        output to be the empty object {}.

        :default: $
        :stability: experimental
        """
        return self._values.get('output_path')

    @property
    def result_path(self) -> typing.Optional[str]:
        """JSONPath expression indicating where to inject the state's output.

        May also be the special value DISCARD, which will cause the state's
        input to become its output.

        :default: $
        :stability: experimental
        """
        return self._values.get('result_path')

    def __eq__(self, rhs) -> bool:
        same_type = isinstance(rhs, self.__class__)
        return same_type and rhs._values == self._values

    def __ne__(self, rhs) -> bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        fields = ', '.join(k + '=' + repr(v) for k, v in self._values.items())
        return 'ParallelProps({})'.format(fields)
@jsii.data_type(jsii_type="@aws-cdk/aws-stepfunctions.PassProps", jsii_struct_bases=[], name_mapping={'comment': 'comment', 'input_path': 'inputPath', 'output_path': 'outputPath', 'parameters': 'parameters', 'result': 'result', 'result_path': 'resultPath'})
class PassProps():
    """Properties for defining a Pass state.

    :stability: experimental
    """
    def __init__(self, *, comment: typing.Optional[str]=None, input_path: typing.Optional[str]=None, output_path: typing.Optional[str]=None, parameters: typing.Optional[typing.Mapping[str,typing.Any]]=None, result: typing.Optional["Result"]=None, result_path: typing.Optional[str]=None):
        """Properties for defining a Pass state.

        :param comment: An optional description for this state. Default: No comment
        :param input_path: JSONPath expression to select part of the state to be the input to this state. May also be the special value DISCARD, which will cause the effective input to be the empty object {}. Default: $
        :param output_path: JSONPath expression to select part of the state to be the output to this state. May also be the special value DISCARD, which will cause the effective output to be the empty object {}. Default: $
        :param parameters: Parameters pass a collection of key-value pairs, either static values or JSONPath expressions that select from the input. Default: No parameters
        :param result: If given, treat as the result of this operation. Can be used to inject or replace the current execution state. Default: No injected result
        :param result_path: JSONPath expression to indicate where to inject the state's output. May also be the special value DISCARD, which will cause the state's input to become its output. Default: $

        :stability: experimental
        """
        # Keep only the keys the caller actually supplied; insertion order
        # follows the keyword order above, matching the original behavior.
        provided = {
            'comment': comment,
            'input_path': input_path,
            'output_path': output_path,
            'parameters': parameters,
            'result': result,
            'result_path': result_path,
        }
        self._values = {key: value for key, value in provided.items() if value is not None}

    @property
    def comment(self) -> typing.Optional[str]:
        """An optional description for this state.

        :default: No comment
        :stability: experimental
        """
        return self._values.get('comment')

    @property
    def input_path(self) -> typing.Optional[str]:
        """JSONPath expression selecting the part of the state that becomes this state's input.

        May also be the special value DISCARD, which will cause the effective
        input to be the empty object {}.

        :default: $
        :stability: experimental
        """
        return self._values.get('input_path')

    @property
    def output_path(self) -> typing.Optional[str]:
        """JSONPath expression selecting the part of the state that becomes this state's output.

        May also be the special value DISCARD, which will cause the effective
        output to be the empty object {}.

        :default: $
        :stability: experimental
        """
        return self._values.get('output_path')

    @property
    def parameters(self) -> typing.Optional[typing.Mapping[str,typing.Any]]:
        """Parameters pass a collection of key-value pairs, either static values or JSONPath expressions that select from the input.

        :default: No parameters
        :see: https://docs.aws.amazon.com/step-functions/latest/dg/input-output-inputpath-params.html#input-output-parameters
        :stability: experimental
        """
        return self._values.get('parameters')

    @property
    def result(self) -> typing.Optional["Result"]:
        """If given, treat as the result of this operation.

        Can be used to inject or replace the current execution state.

        :default: No injected result
        :stability: experimental
        """
        return self._values.get('result')

    @property
    def result_path(self) -> typing.Optional[str]:
        """JSONPath expression indicating where to inject the state's output.

        May also be the special value DISCARD, which will cause the state's
        input to become its output.

        :default: $
        :stability: experimental
        """
        return self._values.get('result_path')

    def __eq__(self, rhs) -> bool:
        same_type = isinstance(rhs, self.__class__)
        return same_type and rhs._values == self._values

    def __ne__(self, rhs) -> bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        fields = ', '.join(k + '=' + repr(v) for k, v in self._values.items())
        return 'PassProps({})'.format(fields)
class Result(metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-stepfunctions.Result"):
    """The result of a Pass operation.

    :stability: experimental
    """
    def __init__(self, value: typing.Any) -> None:
        """
        :param value: the value to use as the result.

        :stability: experimental
        """
        jsii.create(Result, self, [value])

    @jsii.member(jsii_name="fromArray")
    @classmethod
    def from_array(cls, value: typing.List[typing.Any]) -> "Result":
        """The result of the operation is an array.

        :param value: the array to use as the result.

        :stability: experimental
        """
        return jsii.sinvoke(cls, "fromArray", [value])

    @jsii.member(jsii_name="fromBoolean")
    @classmethod
    def from_boolean(cls, value: bool) -> "Result":
        """The result of the operation is a boolean.

        :param value: the boolean to use as the result.

        :stability: experimental
        """
        return jsii.sinvoke(cls, "fromBoolean", [value])

    @jsii.member(jsii_name="fromNumber")
    @classmethod
    def from_number(cls, value: jsii.Number) -> "Result":
        """The result of the operation is a number.

        :param value: the number to use as the result.

        :stability: experimental
        """
        return jsii.sinvoke(cls, "fromNumber", [value])

    @jsii.member(jsii_name="fromObject")
    @classmethod
    def from_object(cls, value: typing.Mapping[str,typing.Any]) -> "Result":
        """The result of the operation is an object.

        :param value: the mapping to use as the result.

        :stability: experimental
        """
        return jsii.sinvoke(cls, "fromObject", [value])

    @jsii.member(jsii_name="fromString")
    @classmethod
    def from_string(cls, value: str) -> "Result":
        """The result of the operation is a string.

        :param value: the string to use as the result.

        :stability: experimental
        """
        return jsii.sinvoke(cls, "fromString", [value])

    @property
    @jsii.member(jsii_name="value")
    def value(self) -> typing.Any:
        """The value wrapped by this Result.

        :stability: experimental
        """
        return jsii.get(self, "value")
@jsii.data_type(jsii_type="@aws-cdk/aws-stepfunctions.RetryProps", jsii_struct_bases=[], name_mapping={'backoff_rate': 'backoffRate', 'errors': 'errors', 'interval': 'interval', 'max_attempts': 'maxAttempts'})
class RetryProps():
    """Retry details.

    :stability: experimental
    """
    def __init__(self, *, backoff_rate: typing.Optional[jsii.Number]=None, errors: typing.Optional[typing.List[str]]=None, interval: typing.Optional[aws_cdk.core.Duration]=None, max_attempts: typing.Optional[jsii.Number]=None):
        """Retry details.

        :param backoff_rate: Multiplication for how much longer the wait interval gets on every retry. Default: 2
        :param errors: Errors to retry. A list of error strings to retry, which can be either predefined errors (for example Errors.NoChoiceMatched) or a self-defined error. Default: All errors
        :param interval: How many seconds to wait initially before retrying. Default: Duration.seconds(1)
        :param max_attempts: How many times to retry this particular error. May be 0 to disable retry for specific errors (in case you have a catch-all retry policy). Default: 3

        :stability: experimental
        """
        # Keep only the keys the caller actually supplied; insertion order
        # follows the keyword order above, matching the original behavior.
        provided = {
            'backoff_rate': backoff_rate,
            'errors': errors,
            'interval': interval,
            'max_attempts': max_attempts,
        }
        self._values = {key: value for key, value in provided.items() if value is not None}

    @property
    def backoff_rate(self) -> typing.Optional[jsii.Number]:
        """Multiplication for how much longer the wait interval gets on every retry.

        :default: 2
        :stability: experimental
        """
        return self._values.get('backoff_rate')

    @property
    def errors(self) -> typing.Optional[typing.List[str]]:
        """Errors to retry.

        A list of error strings to retry, which can be either predefined errors
        (for example Errors.NoChoiceMatched) or a self-defined error.

        :default: All errors
        :stability: experimental
        """
        return self._values.get('errors')

    @property
    def interval(self) -> typing.Optional[aws_cdk.core.Duration]:
        """How many seconds to wait initially before retrying.

        :default: Duration.seconds(1)
        :stability: experimental
        """
        return self._values.get('interval')

    @property
    def max_attempts(self) -> typing.Optional[jsii.Number]:
        """How many times to retry this particular error.

        May be 0 to disable retry for specific errors (in case you have
        a catch-all retry policy).

        :default: 3
        :stability: experimental
        """
        return self._values.get('max_attempts')

    def __eq__(self, rhs) -> bool:
        same_type = isinstance(rhs, self.__class__)
        return same_type and rhs._values == self._values

    def __ne__(self, rhs) -> bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        fields = ', '.join(k + '=' + repr(v) for k, v in self._values.items())
        return 'RetryProps({})'.format(fields)
@jsii.enum(jsii_type="@aws-cdk/aws-stepfunctions.ServiceIntegrationPattern")
class ServiceIntegrationPattern(enum.Enum):
    """Three ways to call an integrated service: Request Response, Run a Job and Wait for a Callback with Task Token.

    Here, they are named FIRE_AND_FORGET, SYNC and WAIT_FOR_TASK_TOKEN
    respectively.

    :default: FIRE_AND_FORGET
    :see: https://docs.aws.amazon.com/step-functions/latest/dg/connect-to-resource.html
    :stability: experimental
    """
    FIRE_AND_FORGET = "FIRE_AND_FORGET"
    """Call a service and progress to the next state immediately after the API call completes.

    :stability: experimental
    """
    SYNC = "SYNC"
    """Call a service and wait for a job to complete.

    :stability: experimental
    """
    WAIT_FOR_TASK_TOKEN = "WAIT_FOR_TASK_TOKEN"
    """Call a service with a task token and wait until that token is returned by SendTaskSuccess/SendTaskFailure with payload.

    :stability: experimental
    """
@jsii.data_type(jsii_type="@aws-cdk/aws-stepfunctions.SingleStateOptions", jsii_struct_bases=[ParallelProps], name_mapping={'comment': 'comment', 'input_path': 'inputPath', 'output_path': 'outputPath', 'result_path': 'resultPath', 'prefix_states': 'prefixStates', 'state_id': 'stateId'})
class SingleStateOptions(ParallelProps):
    """Options for creating a single state.

    :stability: experimental
    """
    def __init__(self, *, comment: typing.Optional[str]=None, input_path: typing.Optional[str]=None, output_path: typing.Optional[str]=None, result_path: typing.Optional[str]=None, prefix_states: typing.Optional[str]=None, state_id: typing.Optional[str]=None):
        """Options for creating a single state.

        :param comment: An optional description for this state. Default: No comment
        :param input_path: JSONPath expression to select part of the state to be the input to this state. May also be the special value DISCARD, which will cause the effective input to be the empty object {}. Default: $
        :param output_path: JSONPath expression to select part of the state to be the output to this state. May also be the special value DISCARD, which will cause the effective output to be the empty object {}. Default: $
        :param result_path: JSONPath expression to indicate where to inject the state's output. May also be the special value DISCARD, which will cause the state's input to become its output. Default: $
        :param prefix_states: String to prefix all stateIds in the state machine with. Default: stateId
        :param state_id: ID of newly created containing state. Default: Construct ID of the StateMachineFragment

        :stability: experimental
        """
        # Keep only the keys the caller actually supplied; insertion order
        # follows the keyword order above, matching the original behavior.
        provided = {
            'comment': comment,
            'input_path': input_path,
            'output_path': output_path,
            'result_path': result_path,
            'prefix_states': prefix_states,
            'state_id': state_id,
        }
        self._values = {key: value for key, value in provided.items() if value is not None}

    @property
    def comment(self) -> typing.Optional[str]:
        """An optional description for this state.

        :default: No comment
        :stability: experimental
        """
        return self._values.get('comment')

    @property
    def input_path(self) -> typing.Optional[str]:
        """JSONPath expression selecting the part of the state that becomes this state's input.

        May also be the special value DISCARD, which will cause the effective
        input to be the empty object {}.

        :default: $
        :stability: experimental
        """
        return self._values.get('input_path')

    @property
    def output_path(self) -> typing.Optional[str]:
        """JSONPath expression selecting the part of the state that becomes this state's output.

        May also be the special value DISCARD, which will cause the effective
        output to be the empty object {}.

        :default: $
        :stability: experimental
        """
        return self._values.get('output_path')

    @property
    def result_path(self) -> typing.Optional[str]:
        """JSONPath expression indicating where to inject the state's output.

        May also be the special value DISCARD, which will cause the state's
        input to become its output.

        :default: $
        :stability: experimental
        """
        return self._values.get('result_path')

    @property
    def prefix_states(self) -> typing.Optional[str]:
        """String to prefix all stateIds in the state machine with.

        :default: stateId
        :stability: experimental
        """
        return self._values.get('prefix_states')

    @property
    def state_id(self) -> typing.Optional[str]:
        """ID of newly created containing state.

        :default: Construct ID of the StateMachineFragment
        :stability: experimental
        """
        return self._values.get('state_id')

    def __eq__(self, rhs) -> bool:
        same_type = isinstance(rhs, self.__class__)
        return same_type and rhs._values == self._values

    def __ne__(self, rhs) -> bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        fields = ', '.join(k + '=' + repr(v) for k, v in self._values.items())
        return 'SingleStateOptions({})'.format(fields)
@jsii.implements(IChainable)
class State(aws_cdk.core.Construct, metaclass=jsii.JSIIAbstractClass, jsii_type="@aws-cdk/aws-stepfunctions.State"):
    """Base class for all other state classes.

    :stability: experimental
    """
    @staticmethod
    def __jsii_proxy_class__():
        # Concrete proxy used by jsii for values of this abstract type.
        return _StateProxy

    def __init__(self, scope: aws_cdk.core.Construct, id: str, *, comment: typing.Optional[str]=None, input_path: typing.Optional[str]=None, output_path: typing.Optional[str]=None, parameters: typing.Optional[typing.Mapping[str,typing.Any]]=None, result_path: typing.Optional[str]=None) -> None:
        """
        :param scope: parent construct.
        :param id: construct identifier.
        :param comment: A comment describing this state. Default: No comment
        :param input_path: JSONPath expression to select part of the state to be the input to this state. May also be the special value DISCARD, which will cause the effective input to be the empty object {}. Default: $
        :param output_path: JSONPath expression to select part of the state to be the output to this state. May also be the special value DISCARD, which will cause the effective output to be the empty object {}. Default: $
        :param parameters: Parameters pass a collection of key-value pairs, either static values or JSONPath expressions that select from the input. Default: No parameters
        :param result_path: JSONPath expression to indicate where to inject the state's output. May also be the special value DISCARD, which will cause the state's input to become its output. Default: $

        :stability: experimental
        """
        props = StateProps(comment=comment, input_path=input_path, output_path=output_path, parameters=parameters, result_path=result_path)
        jsii.create(State, self, [scope, id, props])

    @jsii.member(jsii_name="filterNextables")
    @classmethod
    def filter_nextables(cls, states: typing.List["State"]) -> typing.List["INextable"]:
        """Return only the states that allow chaining from an array of states.

        :param states: the states to filter.

        :stability: experimental
        """
        return jsii.sinvoke(cls, "filterNextables", [states])

    @jsii.member(jsii_name="findReachableEndStates")
    @classmethod
    def find_reachable_end_states(cls, start: "State", *, include_error_handlers: typing.Optional[bool]=None) -> typing.List["State"]:
        """Find the set of end states reachable through transitions from the given start state.

        :param start: the state to start searching from.
        :param include_error_handlers: Whether or not to follow error-handling transitions. Default: false

        :stability: experimental
        """
        options = FindStateOptions(include_error_handlers=include_error_handlers)
        return jsii.sinvoke(cls, "findReachableEndStates", [start, options])

    @jsii.member(jsii_name="prefixStates")
    @classmethod
    def prefix_states(cls, root: aws_cdk.core.IConstruct, prefix: str) -> None:
        """Add a prefix to the stateId of all States found in a construct tree.

        :param root: root of the construct tree to walk.
        :param prefix: the prefix to add.

        :stability: experimental
        """
        return jsii.sinvoke(cls, "prefixStates", [root, prefix])

    @jsii.member(jsii_name="addBranch")
    def _add_branch(self, branch: "StateGraph") -> None:
        """Add a parallel branch to this state.

        :param branch: the branch graph to add.

        :stability: experimental
        """
        return jsii.invoke(self, "addBranch", [branch])

    @jsii.member(jsii_name="addChoice")
    def _add_choice(self, condition: "Condition", next: "State") -> None:
        """Add a choice branch to this state.

        :param condition: the condition guarding the branch.
        :param next: the state to transition to when the condition matches.

        :stability: experimental
        """
        return jsii.invoke(self, "addChoice", [condition, next])

    @jsii.member(jsii_name="addIterator")
    def _add_iterator(self, iteration: "StateGraph") -> None:
        """Add a map iterator to this state.

        :param iteration: the iterator graph to add.

        :stability: experimental
        """
        return jsii.invoke(self, "addIterator", [iteration])

    @jsii.member(jsii_name="addPrefix")
    def add_prefix(self, x: str) -> None:
        """Add a prefix to the stateId of this state.

        :param x: the prefix to add.

        :stability: experimental
        """
        return jsii.invoke(self, "addPrefix", [x])

    @jsii.member(jsii_name="bindToGraph")
    def bind_to_graph(self, graph: "StateGraph") -> None:
        """Register this state as part of the given graph.

        Don't call this. It will be called automatically when you work
        with states normally.

        :param graph: the graph to bind to.

        :stability: experimental
        """
        return jsii.invoke(self, "bindToGraph", [graph])

    @jsii.member(jsii_name="makeDefault")
    def _make_default(self, def_: "State") -> None:
        """Make the indicated state the default choice transition of this state.

        :param def_: the default state.

        :stability: experimental
        """
        return jsii.invoke(self, "makeDefault", [def_])

    @jsii.member(jsii_name="makeNext")
    def _make_next(self, next: "State") -> None:
        """Make the indicated state the default transition of this state.

        :param next: the next state.

        :stability: experimental
        """
        return jsii.invoke(self, "makeNext", [next])

    @jsii.member(jsii_name="renderBranches")
    def _render_branches(self) -> typing.Any:
        """Render parallel branches in ASL JSON format.

        :stability: experimental
        """
        return jsii.invoke(self, "renderBranches", [])

    @jsii.member(jsii_name="renderChoices")
    def _render_choices(self) -> typing.Any:
        """Render the choices in ASL JSON format.

        :stability: experimental
        """
        return jsii.invoke(self, "renderChoices", [])

    @jsii.member(jsii_name="renderInputOutput")
    def _render_input_output(self) -> typing.Any:
        """Render InputPath/Parameters/OutputPath in ASL JSON format.

        :stability: experimental
        """
        return jsii.invoke(self, "renderInputOutput", [])

    @jsii.member(jsii_name="renderIterator")
    def _render_iterator(self) -> typing.Any:
        """Render map iterator in ASL JSON format.

        :stability: experimental
        """
        return jsii.invoke(self, "renderIterator", [])

    @jsii.member(jsii_name="renderNextEnd")
    def _render_next_end(self) -> typing.Any:
        """Render the default next state in ASL JSON format.

        :stability: experimental
        """
        return jsii.invoke(self, "renderNextEnd", [])

    @jsii.member(jsii_name="renderRetryCatch")
    def _render_retry_catch(self) -> typing.Any:
        """Render error recovery options in ASL JSON format.

        :stability: experimental
        """
        return jsii.invoke(self, "renderRetryCatch", [])

    @jsii.member(jsii_name="toStateJson")
    @abc.abstractmethod
    def to_state_json(self) -> typing.Mapping[typing.Any, typing.Any]:
        """Render the state as JSON.

        :stability: experimental
        """
        ...

    @jsii.member(jsii_name="whenBoundToGraph")
    def _when_bound_to_graph(self, graph: "StateGraph") -> None:
        """Called whenever this state is bound to a graph.

        Can be overridden by subclasses.

        :param graph: the graph this state was bound to.

        :stability: experimental
        """
        return jsii.invoke(self, "whenBoundToGraph", [graph])

    @property
    @jsii.member(jsii_name="branches")
    def _branches(self) -> typing.List["StateGraph"]:
        """The parallel branches added to this state.

        :stability: experimental
        """
        return jsii.get(self, "branches")

    @property
    @jsii.member(jsii_name="endStates")
    @abc.abstractmethod
    def end_states(self) -> typing.List["INextable"]:
        """Continuable states of this Chainable.

        :stability: experimental
        """
        ...

    @property
    @jsii.member(jsii_name="id")
    def id(self) -> str:
        """Descriptive identifier for this chainable.

        :stability: experimental
        """
        return jsii.get(self, "id")

    @property
    @jsii.member(jsii_name="startState")
    def start_state(self) -> "State":
        """First state of this Chainable.

        :stability: experimental
        """
        return jsii.get(self, "startState")

    @property
    @jsii.member(jsii_name="stateId")
    def state_id(self) -> str:
        """Tokenized string that evaluates to the state's ID.

        :stability: experimental
        """
        return jsii.get(self, "stateId")

    @property
    @jsii.member(jsii_name="comment")
    def _comment(self) -> typing.Optional[str]:
        """The comment configured for this state, if any.

        :stability: experimental
        """
        return jsii.get(self, "comment")

    @property
    @jsii.member(jsii_name="inputPath")
    def _input_path(self) -> typing.Optional[str]:
        """The inputPath configured for this state, if any.

        :stability: experimental
        """
        return jsii.get(self, "inputPath")

    @property
    @jsii.member(jsii_name="outputPath")
    def _output_path(self) -> typing.Optional[str]:
        """The outputPath configured for this state, if any.

        :stability: experimental
        """
        return jsii.get(self, "outputPath")

    @property
    @jsii.member(jsii_name="parameters")
    def _parameters(self) -> typing.Optional[typing.Mapping[typing.Any, typing.Any]]:
        """The parameters configured for this state, if any.

        :stability: experimental
        """
        return jsii.get(self, "parameters")

    @property
    @jsii.member(jsii_name="resultPath")
    def _result_path(self) -> typing.Optional[str]:
        """The resultPath configured for this state, if any.

        :stability: experimental
        """
        return jsii.get(self, "resultPath")

    @property
    @jsii.member(jsii_name="defaultChoice")
    def _default_choice(self) -> typing.Optional["State"]:
        """The default choice transition of this state, if any.

        :stability: experimental
        """
        return jsii.get(self, "defaultChoice")

    @_default_choice.setter
    def _default_choice(self, value: typing.Optional["State"]):
        return jsii.set(self, "defaultChoice", value)

    @property
    @jsii.member(jsii_name="iteration")
    def _iteration(self) -> typing.Optional["StateGraph"]:
        """The map iterator graph of this state, if any.

        :stability: experimental
        """
        return jsii.get(self, "iteration")

    @_iteration.setter
    def _iteration(self, value: typing.Optional["StateGraph"]):
        return jsii.set(self, "iteration", value)
class _StateProxy(State):
    # Concrete proxy for the abstract State class; forwards the abstract
    # members to the underlying jsii (JavaScript) object.
    @jsii.member(jsii_name="toStateJson")
    def to_state_json(self) -> typing.Mapping[typing.Any, typing.Any]:
        """Render the state as JSON.

        :stability: experimental
        """
        return jsii.invoke(self, "toStateJson", [])

    @property
    @jsii.member(jsii_name="endStates")
    def end_states(self) -> typing.List["INextable"]:
        """Continuable states of this Chainable.

        :stability: experimental
        """
        return jsii.get(self, "endStates")
class Choice(State, metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-stepfunctions.Choice"):
    """Define a Choice in the state machine.

    A choice state can be used to make decisions based on the execution
    state.

    stability
    :stability: experimental
    """
    def __init__(self, scope: aws_cdk.core.Construct, id: str, *, comment: typing.Optional[str]=None, input_path: typing.Optional[str]=None, output_path: typing.Optional[str]=None) -> None:
        """
        :param scope: -
        :param id: -
        :param props: -
        :param comment: An optional description for this state. Default: No comment
        :param input_path: JSONPath expression to select part of the state to be the input to this state. May also be the special value DISCARD, which will cause the effective input to be the empty object {}. Default: $
        :param output_path: JSONPath expression to select part of the state to be the output to this state. May also be the special value DISCARD, which will cause the effective output to be the empty object {}. Default: $

        stability
        :stability: experimental
        """
        # Pack the keyword-only options into the generated props struct the
        # underlying jsii constructor expects.
        props = ChoiceProps(comment=comment, input_path=input_path, output_path=output_path)
        jsii.create(Choice, self, [scope, id, props])

    @jsii.member(jsii_name="afterwards")
    def afterwards(self, *, include_error_handlers: typing.Optional[bool]=None, include_otherwise: typing.Optional[bool]=None) -> "Chain":
        """Return a Chain that contains all reachable end states from this Choice.

        Use this to combine all possible choice paths back.

        :param options: -
        :param include_error_handlers: Whether to include error handling states. If this is true, all states which are error handlers (added through 'onError') and states reachable via error handlers will be included as well. Default: false
        :param include_otherwise: Whether to include the default/otherwise transition for the current Choice state. If this is true and the current Choice does not have a default outgoing transition, one will be added included when .next() is called on the chain. Default: false

        stability
        :stability: experimental
        """
        options = AfterwardsOptions(include_error_handlers=include_error_handlers, include_otherwise=include_otherwise)
        return jsii.invoke(self, "afterwards", [options])

    @jsii.member(jsii_name="otherwise")
    def otherwise(self, def_: "IChainable") -> "Choice":
        """If none of the given conditions match, continue execution with the given state.

        If no conditions match and no otherwise() has been given, an execution
        error will be raised.

        :param def_: -

        stability
        :stability: experimental
        """
        return jsii.invoke(self, "otherwise", [def_])

    @jsii.member(jsii_name="toStateJson")
    def to_state_json(self) -> typing.Mapping[typing.Any, typing.Any]:
        """Return the Amazon States Language object for this state.

        stability
        :stability: experimental
        """
        return jsii.invoke(self, "toStateJson", [])

    @jsii.member(jsii_name="when")
    def when(self, condition: "Condition", next: "IChainable") -> "Choice":
        """If the given condition matches, continue execution with the given state.

        :param condition: -
        :param next: -

        stability
        :stability: experimental
        """
        return jsii.invoke(self, "when", [condition, next])

    @property
    @jsii.member(jsii_name="endStates")
    def end_states(self) -> typing.List["INextable"]:
        """Continuable states of this Chainable.

        stability
        :stability: experimental
        """
        return jsii.get(self, "endStates")
class Fail(State, metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-stepfunctions.Fail"):
    """Define a Fail state in the state machine.

    Reaching a Fail state terminates the state execution in failure.

    stability
    :stability: experimental
    """
    def __init__(self, scope: aws_cdk.core.Construct, id: str, *, cause: typing.Optional[str]=None, comment: typing.Optional[str]=None, error: typing.Optional[str]=None) -> None:
        """
        :param scope: -
        :param id: -
        :param props: -
        :param cause: A description for the cause of the failure. Default: No description
        :param comment: An optional description for this state. Default: No comment
        :param error: Error code used to represent this failure. Default: No error code

        stability
        :stability: experimental
        """
        # Bundle the keyword options into the generated props struct and hand
        # everything to the jsii kernel in a single call.
        jsii.create(Fail, self, [scope, id, FailProps(cause=cause, comment=comment, error=error)])

    @jsii.member(jsii_name="toStateJson")
    def to_state_json(self) -> typing.Mapping[typing.Any, typing.Any]:
        """Return the Amazon States Language object for this state.

        stability
        :stability: experimental
        """
        state_json = jsii.invoke(self, "toStateJson", [])
        return state_json

    @property
    @jsii.member(jsii_name="endStates")
    def end_states(self) -> typing.List["INextable"]:
        """Continuable states of this Chainable.

        stability
        :stability: experimental
        """
        states = jsii.get(self, "endStates")
        return states
@jsii.implements(INextable)
class Map(State, metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-stepfunctions.Map"):
    """Define a Map state in the state machine.

    A Map state can be used to dynamically process elements of an array through sub state machines.
    The Result of a Map state is the transformed array after processing through the iterator state machines.

    stability
    :stability: experimental
    """
    def __init__(self, scope: aws_cdk.core.Construct, id: str, *, comment: typing.Optional[str]=None, input_path: typing.Optional[str]=None, items_path: typing.Optional[str]=None, max_concurrency: typing.Optional[jsii.Number]=None, output_path: typing.Optional[str]=None, parameters: typing.Optional[typing.Mapping[str,typing.Any]]=None, result_path: typing.Optional[str]=None) -> None:
        """
        :param scope: -
        :param id: -
        :param props: -
        :param comment: An optional description for this state. Default: No comment
        :param input_path: JSONPath expression to select part of the state to be the input to this state. May also be the special value DISCARD, which will cause the effective input to be the empty object {}. Default: $
        :param items_path: JSONPath expression to select the array to iterate over. Default: $
        :param max_concurrency: MaxConcurrency. An upper bound on the number of iterations you want running at once. Default: - full concurrency
        :param output_path: JSONPath expression to select part of the state to be the output to this state. May also be the special value DISCARD, which will cause the effective output to be the empty object {}. Default: $
        :param parameters: The JSON that you want to override your default iteration input. Default: $
        :param result_path: JSONPath expression to indicate where to inject the state's output. May also be the special value DISCARD, which will cause the state's input to become its output. Default: $

        stability
        :stability: experimental
        """
        # Pack the keyword-only options into the generated props struct the
        # underlying jsii constructor expects.
        props = MapProps(comment=comment, input_path=input_path, items_path=items_path, max_concurrency=max_concurrency, output_path=output_path, parameters=parameters, result_path=result_path)
        jsii.create(Map, self, [scope, id, props])

    @jsii.member(jsii_name="addCatch")
    def add_catch(self, handler: "IChainable", *, errors: typing.Optional[typing.List[str]]=None, result_path: typing.Optional[str]=None) -> "Map":
        """Add a recovery handler for this state.

        When a particular error occurs, execution will continue at the error
        handler instead of failing the state machine execution.

        :param handler: -
        :param props: -
        :param errors: Errors to recover from by going to the given state. A list of error strings to retry, which can be either predefined errors (for example Errors.NoChoiceMatched) or a self-defined error. Default: All errors
        :param result_path: JSONPath expression to indicate where to inject the error data. May also be the special value DISCARD, which will cause the error data to be discarded. Default: $

        stability
        :stability: experimental
        """
        props = CatchProps(errors=errors, result_path=result_path)
        return jsii.invoke(self, "addCatch", [handler, props])

    @jsii.member(jsii_name="addRetry")
    def add_retry(self, *, backoff_rate: typing.Optional[jsii.Number]=None, errors: typing.Optional[typing.List[str]]=None, interval: typing.Optional[aws_cdk.core.Duration]=None, max_attempts: typing.Optional[jsii.Number]=None) -> "Map":
        """Add retry configuration for this state.

        This controls if and how the execution will be retried if a particular
        error occurs.

        :param props: -
        :param backoff_rate: Multiplication for how much longer the wait interval gets on every retry. Default: 2
        :param errors: Errors to retry. A list of error strings to retry, which can be either predefined errors (for example Errors.NoChoiceMatched) or a self-defined error. Default: All errors
        :param interval: How many seconds to wait initially before retrying. Default: Duration.seconds(1)
        :param max_attempts: How many times to retry this particular error. May be 0 to disable retry for specific errors (in case you have a catch-all retry policy). Default: 3

        stability
        :stability: experimental
        """
        props = RetryProps(backoff_rate=backoff_rate, errors=errors, interval=interval, max_attempts=max_attempts)
        return jsii.invoke(self, "addRetry", [props])

    @jsii.member(jsii_name="iterator")
    def iterator(self, iterator: "IChainable") -> "Map":
        """Define iterator state machine in Map.

        :param iterator: -

        stability
        :stability: experimental
        """
        return jsii.invoke(self, "iterator", [iterator])

    @jsii.member(jsii_name="next")
    def next(self, next: "IChainable") -> "Chain":
        """Continue normal execution with the given state.

        :param next: -

        stability
        :stability: experimental
        """
        return jsii.invoke(self, "next", [next])

    @jsii.member(jsii_name="toStateJson")
    def to_state_json(self) -> typing.Mapping[typing.Any, typing.Any]:
        """Return the Amazon States Language object for this state.

        stability
        :stability: experimental
        """
        return jsii.invoke(self, "toStateJson", [])

    @jsii.member(jsii_name="validate")
    def _validate(self) -> typing.List[str]:
        """Validate this state.

        stability
        :stability: experimental
        """
        return jsii.invoke(self, "validate", [])

    @property
    @jsii.member(jsii_name="endStates")
    def end_states(self) -> typing.List["INextable"]:
        """Continuable states of this Chainable.

        stability
        :stability: experimental
        """
        return jsii.get(self, "endStates")
@jsii.implements(INextable)
class Parallel(State, metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-stepfunctions.Parallel"):
    """Define a Parallel state in the state machine.

    A Parallel state can be used to run one or more state machines at the same
    time.

    The Result of a Parallel state is an array of the results of its substatemachines.

    stability
    :stability: experimental
    """
    def __init__(self, scope: aws_cdk.core.Construct, id: str, *, comment: typing.Optional[str]=None, input_path: typing.Optional[str]=None, output_path: typing.Optional[str]=None, result_path: typing.Optional[str]=None) -> None:
        """
        :param scope: -
        :param id: -
        :param props: -
        :param comment: An optional description for this state. Default: No comment
        :param input_path: JSONPath expression to select part of the state to be the input to this state. May also be the special value DISCARD, which will cause the effective input to be the empty object {}. Default: $
        :param output_path: JSONPath expression to select part of the state to be the output to this state. May also be the special value DISCARD, which will cause the effective output to be the empty object {}. Default: $
        :param result_path: JSONPath expression to indicate where to inject the state's output. May also be the special value DISCARD, which will cause the state's input to become its output. Default: $

        stability
        :stability: experimental
        """
        # Pack the keyword-only options into the generated props struct the
        # underlying jsii constructor expects.
        props = ParallelProps(comment=comment, input_path=input_path, output_path=output_path, result_path=result_path)
        jsii.create(Parallel, self, [scope, id, props])

    @jsii.member(jsii_name="addCatch")
    def add_catch(self, handler: "IChainable", *, errors: typing.Optional[typing.List[str]]=None, result_path: typing.Optional[str]=None) -> "Parallel":
        """Add a recovery handler for this state.

        When a particular error occurs, execution will continue at the error
        handler instead of failing the state machine execution.

        :param handler: -
        :param props: -
        :param errors: Errors to recover from by going to the given state. A list of error strings to retry, which can be either predefined errors (for example Errors.NoChoiceMatched) or a self-defined error. Default: All errors
        :param result_path: JSONPath expression to indicate where to inject the error data. May also be the special value DISCARD, which will cause the error data to be discarded. Default: $

        stability
        :stability: experimental
        """
        props = CatchProps(errors=errors, result_path=result_path)
        return jsii.invoke(self, "addCatch", [handler, props])

    @jsii.member(jsii_name="addRetry")
    def add_retry(self, *, backoff_rate: typing.Optional[jsii.Number]=None, errors: typing.Optional[typing.List[str]]=None, interval: typing.Optional[aws_cdk.core.Duration]=None, max_attempts: typing.Optional[jsii.Number]=None) -> "Parallel":
        """Add retry configuration for this state.

        This controls if and how the execution will be retried if a particular
        error occurs.

        :param props: -
        :param backoff_rate: Multiplication for how much longer the wait interval gets on every retry. Default: 2
        :param errors: Errors to retry. A list of error strings to retry, which can be either predefined errors (for example Errors.NoChoiceMatched) or a self-defined error. Default: All errors
        :param interval: How many seconds to wait initially before retrying. Default: Duration.seconds(1)
        :param max_attempts: How many times to retry this particular error. May be 0 to disable retry for specific errors (in case you have a catch-all retry policy). Default: 3

        stability
        :stability: experimental
        """
        props = RetryProps(backoff_rate=backoff_rate, errors=errors, interval=interval, max_attempts=max_attempts)
        return jsii.invoke(self, "addRetry", [props])

    @jsii.member(jsii_name="branch")
    def branch(self, *branches: "IChainable") -> "Parallel":
        """Define one or more branches to run in parallel.

        :param branches: -

        stability
        :stability: experimental
        """
        # Variadic branches are re-spread into the positional argument list
        # expected by the jsii kernel.
        return jsii.invoke(self, "branch", [*branches])

    @jsii.member(jsii_name="next")
    def next(self, next: "IChainable") -> "Chain":
        """Continue normal execution with the given state.

        :param next: -

        stability
        :stability: experimental
        """
        return jsii.invoke(self, "next", [next])

    @jsii.member(jsii_name="toStateJson")
    def to_state_json(self) -> typing.Mapping[typing.Any, typing.Any]:
        """Return the Amazon States Language object for this state.

        stability
        :stability: experimental
        """
        return jsii.invoke(self, "toStateJson", [])

    @jsii.member(jsii_name="validate")
    def _validate(self) -> typing.List[str]:
        """Validate this state.

        stability
        :stability: experimental
        """
        return jsii.invoke(self, "validate", [])

    @property
    @jsii.member(jsii_name="endStates")
    def end_states(self) -> typing.List["INextable"]:
        """Continuable states of this Chainable.

        stability
        :stability: experimental
        """
        return jsii.get(self, "endStates")
@jsii.implements(INextable)
class Pass(State, metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-stepfunctions.Pass"):
    """Define a Pass in the state machine.

    A Pass state can be used to transform the current execution's state.

    stability
    :stability: experimental
    """
    def __init__(self, scope: aws_cdk.core.Construct, id: str, *, comment: typing.Optional[str]=None, input_path: typing.Optional[str]=None, output_path: typing.Optional[str]=None, parameters: typing.Optional[typing.Mapping[str,typing.Any]]=None, result: typing.Optional["Result"]=None, result_path: typing.Optional[str]=None) -> None:
        """
        :param scope: -
        :param id: -
        :param props: -
        :param comment: An optional description for this state. Default: No comment
        :param input_path: JSONPath expression to select part of the state to be the input to this state. May also be the special value DISCARD, which will cause the effective input to be the empty object {}. Default: $
        :param output_path: JSONPath expression to select part of the state to be the output to this state. May also be the special value DISCARD, which will cause the effective output to be the empty object {}. Default: $
        :param parameters: Parameters pass a collection of key-value pairs, either static values or JSONPath expressions that select from the input. Default: No parameters
        :param result: If given, treat as the result of this operation. Can be used to inject or replace the current execution state. Default: No injected result
        :param result_path: JSONPath expression to indicate where to inject the state's output. May also be the special value DISCARD, which will cause the state's input to become its output. Default: $

        stability
        :stability: experimental
        """
        # Pack the keyword-only options into the generated props struct the
        # underlying jsii constructor expects.
        props = PassProps(comment=comment, input_path=input_path, output_path=output_path, parameters=parameters, result=result, result_path=result_path)
        jsii.create(Pass, self, [scope, id, props])

    @jsii.member(jsii_name="next")
    def next(self, next: "IChainable") -> "Chain":
        """Continue normal execution with the given state.

        :param next: -

        stability
        :stability: experimental
        """
        return jsii.invoke(self, "next", [next])

    @jsii.member(jsii_name="toStateJson")
    def to_state_json(self) -> typing.Mapping[typing.Any, typing.Any]:
        """Return the Amazon States Language object for this state.

        stability
        :stability: experimental
        """
        return jsii.invoke(self, "toStateJson", [])

    @property
    @jsii.member(jsii_name="endStates")
    def end_states(self) -> typing.List["INextable"]:
        """Continuable states of this Chainable.

        stability
        :stability: experimental
        """
        return jsii.get(self, "endStates")
class StateGraph(metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-stepfunctions.StateGraph"):
    """A collection of connected states.

    A StateGraph is used to keep track of all states that are connected (have
    transitions between them). It does not include the substatemachines in
    a Parallel's branches: those are their own StateGraphs, but the graphs
    themselves have a hierarchical relationship as well.

    By assigning states to a definitive StateGraph, we verify that no state
    machines are constructed. In particular:

    - Every state object can only ever be in 1 StateGraph, and not inadvertently
      be used in two graphs.
    - Every stateId must be unique across all states in the entire state
      machine.

    All policy statements in all states in all substatemachines are bubbled so
    that the top-level StateMachine instantiation can read them all and add
    them to the IAM Role.

    You do not need to instantiate this class; it is used internally.

    stability
    :stability: experimental
    """
    def __init__(self, start_state: "State", graph_description: str) -> None:
        """
        :param start_state: -
        :param graph_description: -

        stability
        :stability: experimental
        """
        jsii.create(StateGraph, self, [start_state, graph_description])

    @jsii.member(jsii_name="registerPolicyStatement")
    def register_policy_statement(self, statement: aws_cdk.aws_iam.PolicyStatement) -> None:
        """Register a Policy Statement used by states in this graph.

        :param statement: -

        stability
        :stability: experimental
        """
        return jsii.invoke(self, "registerPolicyStatement", [statement])

    @jsii.member(jsii_name="registerState")
    def register_state(self, state: "State") -> None:
        """Register a state as part of this graph.

        Called by State.bindToGraph().

        :param state: -

        stability
        :stability: experimental
        """
        return jsii.invoke(self, "registerState", [state])

    @jsii.member(jsii_name="registerSuperGraph")
    def register_super_graph(self, graph: "StateGraph") -> None:
        """Register this graph as a child of the given graph.

        Resource changes will be bubbled up to the given graph.

        :param graph: -

        stability
        :stability: experimental
        """
        return jsii.invoke(self, "registerSuperGraph", [graph])

    @jsii.member(jsii_name="toGraphJson")
    def to_graph_json(self) -> typing.Mapping[typing.Any, typing.Any]:
        """Return the Amazon States Language JSON for this graph.

        stability
        :stability: experimental
        """
        return jsii.invoke(self, "toGraphJson", [])

    @jsii.member(jsii_name="toString")
    def to_string(self) -> str:
        """Return a string description of this graph.

        stability
        :stability: experimental
        """
        return jsii.invoke(self, "toString", [])

    @property
    @jsii.member(jsii_name="policyStatements")
    def policy_statements(self) -> typing.List[aws_cdk.aws_iam.PolicyStatement]:
        """The accumulated policy statements.

        stability
        :stability: experimental
        """
        return jsii.get(self, "policyStatements")

    @property
    @jsii.member(jsii_name="startState")
    def start_state(self) -> "State":
        """The entry state of this graph.

        stability
        :stability: experimental
        """
        return jsii.get(self, "startState")

    @property
    @jsii.member(jsii_name="timeout")
    def timeout(self) -> typing.Optional[aws_cdk.core.Duration]:
        """Set a timeout to render into the graph JSON.

        Read/write. Only makes sense on the top-level graph, subgraphs
        do not support this feature.

        default
        :default: No timeout

        stability
        :stability: experimental
        """
        return jsii.get(self, "timeout")
    @timeout.setter
    def timeout(self, value: typing.Optional[aws_cdk.core.Duration]):
        # Write-through to the jsii kernel; jsii.set returns None, the
        # `return` is kept only because the generator emits it.
        return jsii.set(self, "timeout", value)
@jsii.implements(IStateMachine)
class StateMachine(aws_cdk.core.Resource, metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-stepfunctions.StateMachine"):
"""Define a StepFunctions State Machine.
stability
:stability: experimental
"""
def __init__(self, scope: aws_cdk.core.Construct, id: str, *, definition: "IChainable", role: typing.Optional[aws_cdk.aws_iam.IRole]=None, state_machine_name: typing.Optional[str]=None, timeout: typing.Optional[aws_cdk.core.Duration]=None) -> None:
"""
:param scope: -
:param id: -
:param props: -
:param definition: Definition for this state machine.
:param role: The execution role for the state machine service. Default: A role is automatically created
:param state_machine_name: A name for the state machine. Default: A name is automatically generated
:param timeout: Maximum run time for this state machine. Default: No timeout
stability
:stability: experimental
"""
props = StateMachineProps(definition=definition, role=role, state_machine_name=state_machine_name, timeout=timeout)
jsii.create(StateMachine, self, [scope, id, props])
@jsii.member(jsii_name="fromStateMachineArn")
@classmethod
def from_state_machine_arn(cls, scope: aws_cdk.core.Construct, id: str, state_machine_arn: str) -> "IStateMachine":
"""Import a state machine.
:param scope: -
:param id: -
:param state_machine_arn: -
stability
:stability: experimental
"""
return jsii.sinvoke(cls, "fromStateMachineArn", [scope, id, state_machine_arn])
@jsii.member(jsii_name="addToRolePolicy")
def add_to_role_policy(self, statement: aws_cdk.aws_iam.PolicyStatement) -> None:
"""Add the given statement to the role's policy.
:param statement: -
stability
:stability: experimental
"""
return jsii.invoke(self, "addToRolePolicy", [statement])
@jsii.member(jsii_name="grantStartExecution")
def grant_start_execution(self, identity: aws_cdk.aws_iam.IGrantable) -> aws_cdk.aws_iam.Grant:
"""Grant the given identity permissions to start an execution of this state machine.
:param identity: -
stability
:stability: experimental
"""
return jsii.invoke(self, "grantStartExecution", [identity])
@jsii.member(jsii_name="metric")
def metric(self, metric_name: str, *, color: typing.Optional[str]=None, dimensions: typing.Optional[typing.Mapping[str,typing.Any]]=None, label: typing.Optional[str]=None, period: typing.Optional[aws_cdk.core.Duration]=None, statistic: typing.Optional[str]=None, unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit]=None) -> aws_cdk.aws_cloudwatch.Metric:
"""Return the given named metric for this State Machine's executions.
:param metric_name: -
:param props: -
:param color: Color for this metric when added to a Graph in a Dashboard.
:param dimensions: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard.
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit for the metric that is associated with the alarm.
default
:default: sum over 5 minutes
stability
:stability: experimental
"""
props = aws_cdk.aws_cloudwatch.MetricOptions(color=color, dimensions=dimensions, label=label, period=period, statistic=statistic, unit=unit)
return jsii.invoke(self, "metric", [metric_name, props])
@jsii.member(jsii_name="metricAborted")
def metric_aborted(self, *, color: typing.Optional[str]=None, dimensions: typing.Optional[typing.Mapping[str,typing.Any]]=None, label: typing.Optional[str]=None, period: typing.Optional[aws_cdk.core.Duration]=None, statistic: typing.Optional[str]=None, unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit]=None) -> aws_cdk.aws_cloudwatch.Metric:
"""Metric for the number of executions that were aborted.
:param props: -
:param color: Color for this metric when added to a Graph in a Dashboard.
:param dimensions: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard.
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit for the metric that is associated with the alarm.
default
:default: sum over 5 minutes
stability
:stability: experimental
"""
props = aws_cdk.aws_cloudwatch.MetricOptions(color=color, dimensions=dimensions, label=label, period=period, statistic=statistic, unit=unit)
return jsii.invoke(self, "metricAborted", [props])
@jsii.member(jsii_name="metricFailed")
def metric_failed(self, *, color: typing.Optional[str]=None, dimensions: typing.Optional[typing.Mapping[str,typing.Any]]=None, label: typing.Optional[str]=None, period: typing.Optional[aws_cdk.core.Duration]=None, statistic: typing.Optional[str]=None, unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit]=None) -> aws_cdk.aws_cloudwatch.Metric:
"""Metric for the number of executions that failed.
:param props: -
:param color: Color for this metric when added to a Graph in a Dashboard.
:param dimensions: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard.
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit for the metric that is associated with the alarm.
default
:default: sum over 5 minutes
stability
:stability: experimental
"""
props = aws_cdk.aws_cloudwatch.MetricOptions(color=color, dimensions=dimensions, label=label, period=period, statistic=statistic, unit=unit)
return jsii.invoke(self, "metricFailed", [props])
@jsii.member(jsii_name="metricStarted")
def metric_started(self, *, color: typing.Optional[str]=None, dimensions: typing.Optional[typing.Mapping[str,typing.Any]]=None, label: typing.Optional[str]=None, period: typing.Optional[aws_cdk.core.Duration]=None, statistic: typing.Optional[str]=None, unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit]=None) -> aws_cdk.aws_cloudwatch.Metric:
"""Metric for the number of executions that were started.
:param props: -
:param color: Color for this metric when added to a Graph in a Dashboard.
:param dimensions: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard.
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit for the metric that is associated with the alarm.
default
:default: sum over 5 minutes
stability
:stability: experimental
"""
props = aws_cdk.aws_cloudwatch.MetricOptions(color=color, dimensions=dimensions, label=label, period=period, statistic=statistic, unit=unit)
return jsii.invoke(self, "metricStarted", [props])
@jsii.member(jsii_name="metricSucceeded")
def metric_succeeded(self, *, color: typing.Optional[str]=None, dimensions: typing.Optional[typing.Mapping[str,typing.Any]]=None, label: typing.Optional[str]=None, period: typing.Optional[aws_cdk.core.Duration]=None, statistic: typing.Optional[str]=None, unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit]=None) -> aws_cdk.aws_cloudwatch.Metric:
"""Metric for the number of executions that succeeded.
:param props: -
:param color: Color for this metric when added to a Graph in a Dashboard.
:param dimensions: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard.
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit for the metric that is associated with the alarm.
default
:default: sum over 5 minutes
stability
:stability: experimental
"""
props = aws_cdk.aws_cloudwatch.MetricOptions(color=color, dimensions=dimensions, label=label, period=period, statistic=statistic, unit=unit)
return jsii.invoke(self, "metricSucceeded", [props])
@jsii.member(jsii_name="metricThrottled")
def metric_throttled(self, *, color: typing.Optional[str]=None, dimensions: typing.Optional[typing.Mapping[str,typing.Any]]=None, label: typing.Optional[str]=None, period: typing.Optional[aws_cdk.core.Duration]=None, statistic: typing.Optional[str]=None, unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit]=None) -> aws_cdk.aws_cloudwatch.Metric:
"""Metric for the number of executions that were throttled.
:param props: -
:param color: Color for this metric when added to a Graph in a Dashboard.
:param dimensions: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard.
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit for the metric that is associated with the alarm.
default
:default: sum over 5 minutes
stability
:stability: experimental
"""
props = aws_cdk.aws_cloudwatch.MetricOptions(color=color, dimensions=dimensions, label=label, period=period, statistic=statistic, unit=unit)
return jsii.invoke(self, "metricThrottled", [props])
@jsii.member(jsii_name="metricTime")
def metric_time(self, *, color: typing.Optional[str]=None, dimensions: typing.Optional[typing.Mapping[str,typing.Any]]=None, label: typing.Optional[str]=None, period: typing.Optional[aws_cdk.core.Duration]=None, statistic: typing.Optional[str]=None, unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit]=None) -> aws_cdk.aws_cloudwatch.Metric:
"""Metric for the interval, in milliseconds, between the time the execution starts and the time it closes.
:param props: -
:param color: Color for this metric when added to a Graph in a Dashboard.
:param dimensions: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard.
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit for the metric that is associated with the alarm.
default
:default: sum over 5 minutes
stability
:stability: experimental
"""
props = aws_cdk.aws_cloudwatch.MetricOptions(color=color, dimensions=dimensions, label=label, period=period, statistic=statistic, unit=unit)
return jsii.invoke(self, "metricTime", [props])
@jsii.member(jsii_name="metricTimedOut")
def metric_timed_out(self, *, color: typing.Optional[str]=None, dimensions: typing.Optional[typing.Mapping[str,typing.Any]]=None, label: typing.Optional[str]=None, period: typing.Optional[aws_cdk.core.Duration]=None, statistic: typing.Optional[str]=None, unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit]=None) -> aws_cdk.aws_cloudwatch.Metric:
"""Metric for the number of executions that succeeded.
:param props: -
:param color: Color for this metric when added to a Graph in a Dashboard.
:param dimensions: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard.
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit for the metric that is associated with the alarm.
default
:default: sum over 5 minutes
stability
:stability: experimental
"""
props = aws_cdk.aws_cloudwatch.MetricOptions(color=color, dimensions=dimensions, label=label, period=period, statistic=statistic, unit=unit)
return jsii.invoke(self, "metricTimedOut", [props])
@property
@jsii.member(jsii_name="role")
def role(self) -> aws_cdk.aws_iam.IRole:
    """The IAM role this state machine executes under.

    stability
    :stability: experimental
    """
    execution_role = jsii.get(self, "role")
    return execution_role
@property
@jsii.member(jsii_name="stateMachineArn")
def state_machine_arn(self) -> str:
    """The Amazon Resource Name (ARN) identifying this state machine.

    stability
    :stability: experimental
    """
    arn = jsii.get(self, "stateMachineArn")
    return arn
@property
@jsii.member(jsii_name="stateMachineName")
def state_machine_name(self) -> str:
    """The name of this state machine.

    stability
    :stability: experimental

    attribute:
    :attribute:: true
    """
    name = jsii.get(self, "stateMachineName")
    return name
@jsii.implements(IChainable)
class StateMachineFragment(aws_cdk.core.Construct, metaclass=jsii.JSIIAbstractClass, jsii_type="@aws-cdk/aws-stepfunctions.StateMachineFragment"):
    """Base class for reusable state machine fragments.

    A fragment groups several states so the group can be reused; use
    ``prefix_states`` to disambiguate state IDs between copies and
    ``to_single_state`` to collapse the fragment into one Parallel state.

    stability
    :stability: experimental
    """
    @staticmethod
    def __jsii_proxy_class__():
        # jsii abstract classes are materialized through a concrete proxy
        # when instances cross the JavaScript/Python boundary.
        return _StateMachineFragmentProxy

    def __init__(self, scope: aws_cdk.core.Construct, id: str) -> None:
        """Creates a new construct node.

        :param scope: The scope in which to define this construct.
        :param id: The scoped construct ID. Must be unique amongst siblings. If the ID includes a path separator (``/``), then it will be replaced by double dash ``--``.
        """
        jsii.create(StateMachineFragment, self, [scope, id])

    @jsii.member(jsii_name="next")
    def next(self, next: "IChainable") -> "Chain":
        """Continue normal execution with the given state.

        :param next: the state (or chainable) to run after this fragment.

        stability
        :stability: experimental
        """
        return jsii.invoke(self, "next", [next])

    @jsii.member(jsii_name="prefixStates")
    def prefix_states(self, prefix: typing.Optional[str]=None) -> "StateMachineFragment":
        """Prefix the IDs of all states in this state machine fragment.

        Use this to avoid multiple copies of the state machine all having the
        same state IDs.

        :param prefix: The prefix to add. Will use construct ID by default.

        stability
        :stability: experimental
        """
        return jsii.invoke(self, "prefixStates", [prefix])

    @jsii.member(jsii_name="toSingleState")
    def to_single_state(self, *, prefix_states: typing.Optional[str]=None, state_id: typing.Optional[str]=None, comment: typing.Optional[str]=None, input_path: typing.Optional[str]=None, output_path: typing.Optional[str]=None, result_path: typing.Optional[str]=None) -> "Parallel":
        """Wrap all states in this state machine fragment up into a single state.

        This can be used to add retry or error handling onto this state
        machine fragment.

        Be aware that this changes the result of the inner state machine
        to be an array with the result of the state machine in it. Adjust
        your paths accordingly. For example, change 'outputPath' to
        '$[0]'.

        :param options: -
        :param prefix_states: String to prefix all stateIds in the state machine with. Default: stateId
        :param state_id: ID of newly created containing state. Default: Construct ID of the StateMachineFragment
        :param comment: An optional description for this state. Default: No comment
        :param input_path: JSONPath expression to select part of the state to be the input to this state. May also be the special value DISCARD, which will cause the effective input to be the empty object {}. Default: $
        :param output_path: JSONPath expression to select part of the state to be the output to this state. May also be the special value DISCARD, which will cause the effective output to be the empty object {}. Default: $
        :param result_path: JSONPath expression to indicate where to inject the state's output. May also be the special value DISCARD, which will cause the state's input to become its output. Default: $

        stability
        :stability: experimental
        """
        options = SingleStateOptions(prefix_states=prefix_states, state_id=state_id, comment=comment, input_path=input_path, output_path=output_path, result_path=result_path)
        return jsii.invoke(self, "toSingleState", [options])

    @property
    @jsii.member(jsii_name="endStates")
    @abc.abstractmethod
    def end_states(self) -> typing.List["INextable"]:
        """The states to chain onto if this fragment is used.

        stability
        :stability: experimental
        """
        ...

    @property
    @jsii.member(jsii_name="id")
    def id(self) -> str:
        """Descriptive identifier for this chainable.

        stability
        :stability: experimental
        """
        return jsii.get(self, "id")

    @property
    @jsii.member(jsii_name="startState")
    @abc.abstractmethod
    def start_state(self) -> "State":
        """The start state of this state machine fragment.

        stability
        :stability: experimental
        """
        ...
class _StateMachineFragmentProxy(StateMachineFragment):
    """Concrete jsii proxy for the abstract ``StateMachineFragment``.

    Returned by ``__jsii_proxy_class__``; forwards the abstract properties to
    the underlying jsii object.
    """
    @property
    @jsii.member(jsii_name="endStates")
    def end_states(self) -> typing.List["INextable"]:
        """The states to chain onto if this fragment is used.

        stability
        :stability: experimental
        """
        return jsii.get(self, "endStates")

    @property
    @jsii.member(jsii_name="startState")
    def start_state(self) -> "State":
        """The start state of this state machine fragment.

        stability
        :stability: experimental
        """
        return jsii.get(self, "startState")
@jsii.data_type(jsii_type="@aws-cdk/aws-stepfunctions.StateMachineProps", jsii_struct_bases=[], name_mapping={'definition': 'definition', 'role': 'role', 'state_machine_name': 'stateMachineName', 'timeout': 'timeout'})
class StateMachineProps():
    def __init__(self, *, definition: "IChainable", role: typing.Optional[aws_cdk.aws_iam.IRole]=None, state_machine_name: typing.Optional[str]=None, timeout: typing.Optional[aws_cdk.core.Duration]=None):
        """Properties for defining a State Machine.

        :param definition: Definition for this state machine.
        :param role: The execution role for the state machine service. Default: A role is automatically created
        :param state_machine_name: A name for the state machine. Default: A name is automatically generated
        :param timeout: Maximum run time for this state machine. Default: No timeout

        stability
        :stability: experimental
        """
        # Only values that were actually supplied are stored, so that unset
        # optional properties stay absent from the struct.
        self._values = {'definition': definition}
        for key, value in (('role', role), ('state_machine_name', state_machine_name), ('timeout', timeout)):
            if value is not None:
                self._values[key] = value

    @property
    def definition(self) -> "IChainable":
        """Definition for this state machine.

        stability
        :stability: experimental
        """
        return self._values['definition']

    @property
    def role(self) -> typing.Optional[aws_cdk.aws_iam.IRole]:
        """The execution role for the state machine service.

        default
        :default: A role is automatically created

        stability
        :stability: experimental
        """
        return self._values.get('role', None)

    @property
    def state_machine_name(self) -> typing.Optional[str]:
        """A name for the state machine.

        default
        :default: A name is automatically generated

        stability
        :stability: experimental
        """
        return self._values.get('state_machine_name', None)

    @property
    def timeout(self) -> typing.Optional[aws_cdk.core.Duration]:
        """Maximum run time for this state machine.

        default
        :default: No timeout

        stability
        :stability: experimental
        """
        return self._values.get('timeout', None)

    def __eq__(self, rhs) -> bool:
        if not isinstance(rhs, self.__class__):
            return False
        return rhs._values == self._values

    def __ne__(self, rhs) -> bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        rendered = ', '.join('{}={!r}'.format(k, v) for k, v in self._values.items())
        return 'StateMachineProps({})'.format(rendered)
@jsii.data_type(jsii_type="@aws-cdk/aws-stepfunctions.StateProps", jsii_struct_bases=[], name_mapping={'comment': 'comment', 'input_path': 'inputPath', 'output_path': 'outputPath', 'parameters': 'parameters', 'result_path': 'resultPath'})
class StateProps():
    def __init__(self, *, comment: typing.Optional[str]=None, input_path: typing.Optional[str]=None, output_path: typing.Optional[str]=None, parameters: typing.Optional[typing.Mapping[str,typing.Any]]=None, result_path: typing.Optional[str]=None):
        """Properties shared by all states.

        :param comment: A comment describing this state. Default: No comment
        :param input_path: JSONPath expression to select part of the state to be the input to this state. May also be the special value DISCARD, which will cause the effective input to be the empty object {}. Default: $
        :param output_path: JSONPath expression to select part of the state to be the output to this state. May also be the special value DISCARD, which will cause the effective output to be the empty object {}. Default: $
        :param parameters: Parameters pass a collection of key-value pairs, either static values or JSONPath expressions that select from the input. Default: No parameters
        :param result_path: JSONPath expression to indicate where to inject the state's output. May also be the special value DISCARD, which will cause the state's input to become its output. Default: $

        stability
        :stability: experimental
        """
        # Every property is optional; keep only the ones actually supplied.
        self._values = {}
        supplied = (
            ('comment', comment),
            ('input_path', input_path),
            ('output_path', output_path),
            ('parameters', parameters),
            ('result_path', result_path),
        )
        for key, value in supplied:
            if value is not None:
                self._values[key] = value

    @property
    def comment(self) -> typing.Optional[str]:
        """A comment describing this state.

        default
        :default: No comment

        stability
        :stability: experimental
        """
        return self._values.get('comment', None)

    @property
    def input_path(self) -> typing.Optional[str]:
        """JSONPath expression to select part of the state to be the input to this state.

        May also be the special value DISCARD, which will cause the effective
        input to be the empty object {}.

        default
        :default: $

        stability
        :stability: experimental
        """
        return self._values.get('input_path', None)

    @property
    def output_path(self) -> typing.Optional[str]:
        """JSONPath expression to select part of the state to be the output to this state.

        May also be the special value DISCARD, which will cause the effective
        output to be the empty object {}.

        default
        :default: $

        stability
        :stability: experimental
        """
        return self._values.get('output_path', None)

    @property
    def parameters(self) -> typing.Optional[typing.Mapping[str,typing.Any]]:
        """Parameters pass a collection of key-value pairs, either static values or JSONPath expressions that select from the input.

        default
        :default: No parameters

        see
        :see: https://docs.aws.amazon.com/step-functions/latest/dg/input-output-inputpath-params.html#input-output-parameters

        stability
        :stability: experimental
        """
        return self._values.get('parameters', None)

    @property
    def result_path(self) -> typing.Optional[str]:
        """JSONPath expression to indicate where to inject the state's output.

        May also be the special value DISCARD, which will cause the state's
        input to become its output.

        default
        :default: $

        stability
        :stability: experimental
        """
        return self._values.get('result_path', None)

    def __eq__(self, rhs) -> bool:
        if not isinstance(rhs, self.__class__):
            return False
        return rhs._values == self._values

    def __ne__(self, rhs) -> bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        rendered = ', '.join('{}={!r}'.format(k, v) for k, v in self._values.items())
        return 'StateProps({})'.format(rendered)
class StateTransitionMetric(metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-stepfunctions.StateTransitionMetric"):
    """Metrics on the rate limiting performed on state machine execution.

    These rate limits are shared across all state machines.

    stability
    :stability: experimental
    """
    def __init__(self) -> None:
        jsii.create(StateTransitionMetric, self, [])

    @jsii.member(jsii_name="metric")
    @classmethod
    def metric(cls, metric_name: str, *, color: typing.Optional[str]=None, dimensions: typing.Optional[typing.Mapping[str,typing.Any]]=None, label: typing.Optional[str]=None, period: typing.Optional[aws_cdk.core.Duration]=None, statistic: typing.Optional[str]=None, unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit]=None) -> aws_cdk.aws_cloudwatch.Metric:
        """Return the given named metric for the service's state transition metrics.

        :param metric_name: -
        :param props: -
        :param color: Color for this metric when added to a Graph in a Dashboard.
        :param dimensions: Dimensions of the metric. Default: - No dimensions.
        :param label: Label for this metric when added to a Graph in a Dashboard.
        :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
        :param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
        :param unit: Unit for the metric that is associated with the alarm.

        default
        :default: average over 5 minutes

        stability
        :stability: experimental
        """
        metric_props = aws_cdk.aws_cloudwatch.MetricOptions(
            color=color, dimensions=dimensions, label=label,
            period=period, statistic=statistic, unit=unit)
        return jsii.sinvoke(cls, "metric", [metric_name, metric_props])

    @jsii.member(jsii_name="metricConsumedCapacity")
    @classmethod
    def metric_consumed_capacity(cls, *, color: typing.Optional[str]=None, dimensions: typing.Optional[typing.Mapping[str,typing.Any]]=None, label: typing.Optional[str]=None, period: typing.Optional[aws_cdk.core.Duration]=None, statistic: typing.Optional[str]=None, unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit]=None) -> aws_cdk.aws_cloudwatch.Metric:
        """Metric for the number of available state transitions per second.

        :param props: -
        :param color: Color for this metric when added to a Graph in a Dashboard.
        :param dimensions: Dimensions of the metric. Default: - No dimensions.
        :param label: Label for this metric when added to a Graph in a Dashboard.
        :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
        :param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
        :param unit: Unit for the metric that is associated with the alarm.

        default
        :default: average over 5 minutes

        stability
        :stability: experimental
        """
        metric_props = aws_cdk.aws_cloudwatch.MetricOptions(
            color=color, dimensions=dimensions, label=label,
            period=period, statistic=statistic, unit=unit)
        return jsii.sinvoke(cls, "metricConsumedCapacity", [metric_props])

    @jsii.member(jsii_name="metricProvisionedBucketSize")
    @classmethod
    def metric_provisioned_bucket_size(cls, *, color: typing.Optional[str]=None, dimensions: typing.Optional[typing.Mapping[str,typing.Any]]=None, label: typing.Optional[str]=None, period: typing.Optional[aws_cdk.core.Duration]=None, statistic: typing.Optional[str]=None, unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit]=None) -> aws_cdk.aws_cloudwatch.Metric:
        """Metric for the number of available state transitions.

        :param props: -
        :param color: Color for this metric when added to a Graph in a Dashboard.
        :param dimensions: Dimensions of the metric. Default: - No dimensions.
        :param label: Label for this metric when added to a Graph in a Dashboard.
        :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
        :param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
        :param unit: Unit for the metric that is associated with the alarm.

        default
        :default: average over 5 minutes

        stability
        :stability: experimental
        """
        metric_props = aws_cdk.aws_cloudwatch.MetricOptions(
            color=color, dimensions=dimensions, label=label,
            period=period, statistic=statistic, unit=unit)
        return jsii.sinvoke(cls, "metricProvisionedBucketSize", [metric_props])

    @jsii.member(jsii_name="metricProvisionedRefillRate")
    @classmethod
    def metric_provisioned_refill_rate(cls, *, color: typing.Optional[str]=None, dimensions: typing.Optional[typing.Mapping[str,typing.Any]]=None, label: typing.Optional[str]=None, period: typing.Optional[aws_cdk.core.Duration]=None, statistic: typing.Optional[str]=None, unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit]=None) -> aws_cdk.aws_cloudwatch.Metric:
        """Metric for the provisioned steady-state execution rate.

        :param props: -
        :param color: Color for this metric when added to a Graph in a Dashboard.
        :param dimensions: Dimensions of the metric. Default: - No dimensions.
        :param label: Label for this metric when added to a Graph in a Dashboard.
        :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
        :param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
        :param unit: Unit for the metric that is associated with the alarm.

        default
        :default: average over 5 minutes

        stability
        :stability: experimental
        """
        metric_props = aws_cdk.aws_cloudwatch.MetricOptions(
            color=color, dimensions=dimensions, label=label,
            period=period, statistic=statistic, unit=unit)
        return jsii.sinvoke(cls, "metricProvisionedRefillRate", [metric_props])

    @jsii.member(jsii_name="metricThrottledEvents")
    @classmethod
    def metric_throttled_events(cls, *, color: typing.Optional[str]=None, dimensions: typing.Optional[typing.Mapping[str,typing.Any]]=None, label: typing.Optional[str]=None, period: typing.Optional[aws_cdk.core.Duration]=None, statistic: typing.Optional[str]=None, unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit]=None) -> aws_cdk.aws_cloudwatch.Metric:
        """Metric for the number of throttled state transitions.

        :param props: -
        :param color: Color for this metric when added to a Graph in a Dashboard.
        :param dimensions: Dimensions of the metric. Default: - No dimensions.
        :param label: Label for this metric when added to a Graph in a Dashboard.
        :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
        :param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
        :param unit: Unit for the metric that is associated with the alarm.

        default
        :default: sum over 5 minutes

        stability
        :stability: experimental
        """
        metric_props = aws_cdk.aws_cloudwatch.MetricOptions(
            color=color, dimensions=dimensions, label=label,
            period=period, statistic=statistic, unit=unit)
        return jsii.sinvoke(cls, "metricThrottledEvents", [metric_props])
@jsii.data_type(jsii_type="@aws-cdk/aws-stepfunctions.StepFunctionsTaskConfig", jsii_struct_bases=[], name_mapping={'resource_arn': 'resourceArn', 'heartbeat': 'heartbeat', 'metric_dimensions': 'metricDimensions', 'metric_prefix_plural': 'metricPrefixPlural', 'metric_prefix_singular': 'metricPrefixSingular', 'parameters': 'parameters', 'policy_statements': 'policyStatements'})
class StepFunctionsTaskConfig():
    def __init__(self, *, resource_arn: str, heartbeat: typing.Optional[aws_cdk.core.Duration]=None, metric_dimensions: typing.Optional[typing.Mapping[str,typing.Any]]=None, metric_prefix_plural: typing.Optional[str]=None, metric_prefix_singular: typing.Optional[str]=None, parameters: typing.Optional[typing.Mapping[str,typing.Any]]=None, policy_statements: typing.Optional[typing.List[aws_cdk.aws_iam.PolicyStatement]]=None):
        """Properties that define what kind of task should be created.

        :param resource_arn: The resource that represents the work to be executed. Either the ARN of a Lambda Function or Activity, or a special ARN.
        :param heartbeat: Maximum time between heart beats. If the time between heart beats takes longer than this, a 'Timeout' error is raised. This is only relevant when using an Activity type as resource. Default: No heart beat timeout
        :param metric_dimensions: The dimensions to attach to metrics. Default: No metrics
        :param metric_prefix_plural: Prefix for plural metric names of activity actions. Default: No such metrics
        :param metric_prefix_singular: Prefix for singular metric names of activity actions. Default: No such metrics
        :param parameters: Parameters pass a collection of key-value pairs, either static values or JSONPath expressions that select from the input. What is passed here will be merged with any default parameters configured by the ``resource``. For example, a DynamoDB table target will Default: No parameters
        :param policy_statements: Additional policy statements to add to the execution role. Default: No policy roles

        stability
        :stability: experimental
        """
        # resource_arn is required; all other fields are stored only when
        # explicitly supplied so unset options stay absent from the struct.
        self._values = {'resource_arn': resource_arn}
        optionals = (
            ('heartbeat', heartbeat),
            ('metric_dimensions', metric_dimensions),
            ('metric_prefix_plural', metric_prefix_plural),
            ('metric_prefix_singular', metric_prefix_singular),
            ('parameters', parameters),
            ('policy_statements', policy_statements),
        )
        for key, value in optionals:
            if value is not None:
                self._values[key] = value

    @property
    def resource_arn(self) -> str:
        """The resource that represents the work to be executed.

        Either the ARN of a Lambda Function or Activity, or a special
        ARN.

        stability
        :stability: experimental
        """
        return self._values['resource_arn']

    @property
    def heartbeat(self) -> typing.Optional[aws_cdk.core.Duration]:
        """Maximum time between heart beats.

        If the time between heart beats takes longer than this, a 'Timeout' error is raised.

        This is only relevant when using an Activity type as resource.

        default
        :default: No heart beat timeout

        stability
        :stability: experimental
        """
        return self._values.get('heartbeat', None)

    @property
    def metric_dimensions(self) -> typing.Optional[typing.Mapping[str,typing.Any]]:
        """The dimensions to attach to metrics.

        default
        :default: No metrics

        stability
        :stability: experimental
        """
        return self._values.get('metric_dimensions', None)

    @property
    def metric_prefix_plural(self) -> typing.Optional[str]:
        """Prefix for plural metric names of activity actions.

        default
        :default: No such metrics

        stability
        :stability: experimental
        """
        return self._values.get('metric_prefix_plural', None)

    @property
    def metric_prefix_singular(self) -> typing.Optional[str]:
        """Prefix for singular metric names of activity actions.

        default
        :default: No such metrics

        stability
        :stability: experimental
        """
        return self._values.get('metric_prefix_singular', None)

    @property
    def parameters(self) -> typing.Optional[typing.Mapping[str,typing.Any]]:
        """Parameters pass a collection of key-value pairs, either static values or JSONPath expressions that select from the input.

        What is passed here will be merged with any default parameters
        configured by the ``resource``. For example, a DynamoDB table target
        will

        default
        :default: No parameters

        see
        :see: https://docs.aws.amazon.com/step-functions/latest/dg/input-output-inputpath-params.html#input-output-parameters

        stability
        :stability: experimental
        """
        return self._values.get('parameters', None)

    @property
    def policy_statements(self) -> typing.Optional[typing.List[aws_cdk.aws_iam.PolicyStatement]]:
        """Additional policy statements to add to the execution role.

        default
        :default: No policy roles

        stability
        :stability: experimental
        """
        return self._values.get('policy_statements', None)

    def __eq__(self, rhs) -> bool:
        if not isinstance(rhs, self.__class__):
            return False
        return rhs._values == self._values

    def __ne__(self, rhs) -> bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        rendered = ', '.join('{}={!r}'.format(k, v) for k, v in self._values.items())
        return 'StepFunctionsTaskConfig({})'.format(rendered)
class Succeed(State, metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-stepfunctions.Succeed"):
    """Define a Succeed state in the state machine.

    Reaching a Succeed state terminates the state execution in success.

    stability
    :stability: experimental
    """
    def __init__(self, scope: aws_cdk.core.Construct, id: str, *, comment: typing.Optional[str]=None, input_path: typing.Optional[str]=None, output_path: typing.Optional[str]=None) -> None:
        """
        :param scope: -
        :param id: -
        :param props: -
        :param comment: An optional description for this state. Default: No comment
        :param input_path: JSONPath expression to select part of the state to be the input to this state. May also be the special value DISCARD, which will cause the effective input to be the empty object {}. Default: $
        :param output_path: JSONPath expression to select part of the state to be the output to this state. May also be the special value DISCARD, which will cause the effective output to be the empty object {}. Default: $

        stability
        :stability: experimental
        """
        succeed_props = SucceedProps(
            comment=comment, input_path=input_path, output_path=output_path)
        jsii.create(Succeed, self, [scope, id, succeed_props])

    @jsii.member(jsii_name="toStateJson")
    def to_state_json(self) -> typing.Mapping[typing.Any, typing.Any]:
        """Return the Amazon States Language object for this state.

        stability
        :stability: experimental
        """
        state_json = jsii.invoke(self, "toStateJson", [])
        return state_json

    @property
    @jsii.member(jsii_name="endStates")
    def end_states(self) -> typing.List["INextable"]:
        """Continuable states of this Chainable.

        stability
        :stability: experimental
        """
        return jsii.get(self, "endStates")
@jsii.data_type(jsii_type="@aws-cdk/aws-stepfunctions.SucceedProps", jsii_struct_bases=[], name_mapping={'comment': 'comment', 'input_path': 'inputPath', 'output_path': 'outputPath'})
class SucceedProps():
    def __init__(self, *, comment: typing.Optional[str]=None, input_path: typing.Optional[str]=None, output_path: typing.Optional[str]=None):
        """Properties for defining a Succeed state.

        :param comment: An optional description for this state. Default: No comment
        :param input_path: JSONPath expression to select part of the state to be the input to this state. May also be the special value DISCARD, which will cause the effective input to be the empty object {}. Default: $
        :param output_path: JSONPath expression to select part of the state to be the output to this state. May also be the special value DISCARD, which will cause the effective output to be the empty object {}. Default: $

        stability
        :stability: experimental
        """
        # All fields are optional; only supplied values are recorded.
        self._values = {}
        for key, value in (('comment', comment), ('input_path', input_path), ('output_path', output_path)):
            if value is not None:
                self._values[key] = value

    @property
    def comment(self) -> typing.Optional[str]:
        """An optional description for this state.

        default
        :default: No comment

        stability
        :stability: experimental
        """
        return self._values.get('comment', None)

    @property
    def input_path(self) -> typing.Optional[str]:
        """JSONPath expression to select part of the state to be the input to this state.

        May also be the special value DISCARD, which will cause the effective
        input to be the empty object {}.

        default
        :default: $

        stability
        :stability: experimental
        """
        return self._values.get('input_path', None)

    @property
    def output_path(self) -> typing.Optional[str]:
        """JSONPath expression to select part of the state to be the output to this state.

        May also be the special value DISCARD, which will cause the effective
        output to be the empty object {}.

        default
        :default: $

        stability
        :stability: experimental
        """
        return self._values.get('output_path', None)

    def __eq__(self, rhs) -> bool:
        if not isinstance(rhs, self.__class__):
            return False
        return rhs._values == self._values

    def __ne__(self, rhs) -> bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        rendered = ', '.join('{}={!r}'.format(k, v) for k, v in self._values.items())
        return 'SucceedProps({})'.format(rendered)
@jsii.implements(INextable)
class Task(State, metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-stepfunctions.Task"):
"""Define a Task state in the state machine.
Reaching a Task state causes some work to be executed, represented by the
Task's resource property. Task constructs represent a generic Amazon
States Language Task.
For some resource types, more specific subclasses of Task may be available
which are more convenient to use.
stability
:stability: experimental
"""
def __init__(self, scope: aws_cdk.core.Construct, id: str, *, task: "IStepFunctionsTask", comment: typing.Optional[str]=None, input_path: typing.Optional[str]=None, output_path: typing.Optional[str]=None, parameters: typing.Optional[typing.Mapping[str,typing.Any]]=None, result_path: typing.Optional[str]=None, timeout: typing.Optional[aws_cdk.core.Duration]=None) -> None:
"""
:param scope: -
:param id: -
:param props: -
:param task: Actual task to be invoked in this workflow.
:param comment: An optional description for this state. Default: No comment
:param input_path: JSONPath expression to select part of the state to be the input to this state. May also be the special value DISCARD, which will cause the effective input to be the empty object {}. Default: $
:param output_path: JSONPath expression to select part of the state to be the output to this state. May also be the special value DISCARD, which will cause the effective output to be the empty object {}. Default: $
:param parameters: Parameters pass a collection of key-value pairs, either static values or JSONPath expressions that select from the input. Default: No parameters
:param result_path: JSONPath expression to indicate where to inject the state's output. May also be the special value DISCARD, which will cause the state's input to become its output. Default: $
:param timeout: Maximum run time of this state. If the state takes longer than this amount of time to complete, a 'Timeout' error is raised. Default: 60
stability
:stability: experimental
"""
props = TaskProps(task=task, comment=comment, input_path=input_path, output_path=output_path, parameters=parameters, result_path=result_path, timeout=timeout)
jsii.create(Task, self, [scope, id, props])
@jsii.member(jsii_name="addCatch")
def add_catch(self, handler: "IChainable", *, errors: typing.Optional[typing.List[str]]=None, result_path: typing.Optional[str]=None) -> "Task":
"""Add a recovery handler for this state.
When a particular error occurs, execution will continue at the error
handler instead of failing the state machine execution.
:param handler: -
:param props: -
:param errors: Errors to recover from by going to the given state. A list of error strings to retry, which can be either predefined errors (for example Errors.NoChoiceMatched) or a self-defined error. Default: All errors
:param result_path: JSONPath expression to indicate where to inject the error data. May also be the special value DISCARD, which will cause the error data to be discarded. Default: $
stability
:stability: experimental
"""
props = CatchProps(errors=errors, result_path=result_path)
return jsii.invoke(self, "addCatch", [handler, props])
@jsii.member(jsii_name="addRetry")
def add_retry(self, *, backoff_rate: typing.Optional[jsii.Number]=None, errors: typing.Optional[typing.List[str]]=None, interval: typing.Optional[aws_cdk.core.Duration]=None, max_attempts: typing.Optional[jsii.Number]=None) -> "Task":
"""Add retry configuration for this state.
This controls if and how the execution will be retried if a particular
error occurs.
:param props: -
:param backoff_rate: Multiplication for how much longer the wait interval gets on every retry. Default: 2
:param errors: Errors to retry. A list of error strings to retry, which can be either predefined errors (for example Errors.NoChoiceMatched) or a self-defined error. Default: All errors
:param interval: How many seconds to wait initially before retrying. Default: Duration.seconds(1)
:param max_attempts: How many times to retry this particular error. May be 0 to disable retry for specific errors (in case you have a catch-all retry policy). Default: 3
stability
:stability: experimental
"""
props = RetryProps(backoff_rate=backoff_rate, errors=errors, interval=interval, max_attempts=max_attempts)
return jsii.invoke(self, "addRetry", [props])
@jsii.member(jsii_name="metric")
def metric(self, metric_name: str, *, color: typing.Optional[str]=None, dimensions: typing.Optional[typing.Mapping[str,typing.Any]]=None, label: typing.Optional[str]=None, period: typing.Optional[aws_cdk.core.Duration]=None, statistic: typing.Optional[str]=None, unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit]=None) -> aws_cdk.aws_cloudwatch.Metric:
    """Return the given named metric for this Task.

    :param metric_name: name of the Task metric to return.
    :param color: Color for this metric when added to a Graph in a Dashboard.
    :param dimensions: Dimensions of the metric. Default: - No dimensions.
    :param label: Label for this metric when added to a Graph in a Dashboard.
    :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
    :param statistic: Aggregating function: "Minimum"/"min", "Maximum"/"max", "Average"/"avg", "Sum"/"sum", "SampleCount"/"n" or "pNN.NN". Default: Average
    :param unit: Unit for the metric that is associated with the alarm.
    :default: sum over 5 minutes
    :stability: experimental
    """
    # Collect the keyword-only options into a MetricOptions struct for the JS layer.
    options = aws_cdk.aws_cloudwatch.MetricOptions(
        color=color,
        dimensions=dimensions,
        label=label,
        period=period,
        statistic=statistic,
        unit=unit,
    )
    return jsii.invoke(self, "metric", [metric_name, options])
@jsii.member(jsii_name="metricFailed")
def metric_failed(self, *, color: typing.Optional[str]=None, dimensions: typing.Optional[typing.Mapping[str,typing.Any]]=None, label: typing.Optional[str]=None, period: typing.Optional[aws_cdk.core.Duration]=None, statistic: typing.Optional[str]=None, unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit]=None) -> aws_cdk.aws_cloudwatch.Metric:
    """Metric for the number of times this activity fails.

    :param color: Color for this metric when added to a Graph in a Dashboard.
    :param dimensions: Dimensions of the metric. Default: - No dimensions.
    :param label: Label for this metric when added to a Graph in a Dashboard.
    :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
    :param statistic: Aggregating function: "Minimum"/"min", "Maximum"/"max", "Average"/"avg", "Sum"/"sum", "SampleCount"/"n" or "pNN.NN". Default: Average
    :param unit: Unit for the metric that is associated with the alarm.
    :default: sum over 5 minutes
    :stability: experimental
    """
    # Collect the keyword-only options into a MetricOptions struct for the JS layer.
    options = aws_cdk.aws_cloudwatch.MetricOptions(
        color=color,
        dimensions=dimensions,
        label=label,
        period=period,
        statistic=statistic,
        unit=unit,
    )
    return jsii.invoke(self, "metricFailed", [options])
@jsii.member(jsii_name="metricHeartbeatTimedOut")
def metric_heartbeat_timed_out(self, *, color: typing.Optional[str]=None, dimensions: typing.Optional[typing.Mapping[str,typing.Any]]=None, label: typing.Optional[str]=None, period: typing.Optional[aws_cdk.core.Duration]=None, statistic: typing.Optional[str]=None, unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit]=None) -> aws_cdk.aws_cloudwatch.Metric:
    """Metric for the number of times the heartbeat times out for this activity.

    :param color: Color for this metric when added to a Graph in a Dashboard.
    :param dimensions: Dimensions of the metric. Default: - No dimensions.
    :param label: Label for this metric when added to a Graph in a Dashboard.
    :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
    :param statistic: Aggregating function: "Minimum"/"min", "Maximum"/"max", "Average"/"avg", "Sum"/"sum", "SampleCount"/"n" or "pNN.NN". Default: Average
    :param unit: Unit for the metric that is associated with the alarm.
    :default: sum over 5 minutes
    :stability: experimental
    """
    # Collect the keyword-only options into a MetricOptions struct for the JS layer.
    options = aws_cdk.aws_cloudwatch.MetricOptions(
        color=color,
        dimensions=dimensions,
        label=label,
        period=period,
        statistic=statistic,
        unit=unit,
    )
    return jsii.invoke(self, "metricHeartbeatTimedOut", [options])
@jsii.member(jsii_name="metricRunTime")
def metric_run_time(self, *, color: typing.Optional[str]=None, dimensions: typing.Optional[typing.Mapping[str,typing.Any]]=None, label: typing.Optional[str]=None, period: typing.Optional[aws_cdk.core.Duration]=None, statistic: typing.Optional[str]=None, unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit]=None) -> aws_cdk.aws_cloudwatch.Metric:
    """The interval, in milliseconds, between the time the Task starts and the time it closes.

    :param color: Color for this metric when added to a Graph in a Dashboard.
    :param dimensions: Dimensions of the metric. Default: - No dimensions.
    :param label: Label for this metric when added to a Graph in a Dashboard.
    :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
    :param statistic: Aggregating function: "Minimum"/"min", "Maximum"/"max", "Average"/"avg", "Sum"/"sum", "SampleCount"/"n" or "pNN.NN". Default: Average
    :param unit: Unit for the metric that is associated with the alarm.
    :default: average over 5 minutes
    :stability: experimental
    """
    # Collect the keyword-only options into a MetricOptions struct for the JS layer.
    options = aws_cdk.aws_cloudwatch.MetricOptions(
        color=color,
        dimensions=dimensions,
        label=label,
        period=period,
        statistic=statistic,
        unit=unit,
    )
    return jsii.invoke(self, "metricRunTime", [options])
@jsii.member(jsii_name="metricScheduled")
def metric_scheduled(self, *, color: typing.Optional[str]=None, dimensions: typing.Optional[typing.Mapping[str,typing.Any]]=None, label: typing.Optional[str]=None, period: typing.Optional[aws_cdk.core.Duration]=None, statistic: typing.Optional[str]=None, unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit]=None) -> aws_cdk.aws_cloudwatch.Metric:
    """Metric for the number of times this activity is scheduled.

    :param color: Color for this metric when added to a Graph in a Dashboard.
    :param dimensions: Dimensions of the metric. Default: - No dimensions.
    :param label: Label for this metric when added to a Graph in a Dashboard.
    :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
    :param statistic: Aggregating function: "Minimum"/"min", "Maximum"/"max", "Average"/"avg", "Sum"/"sum", "SampleCount"/"n" or "pNN.NN". Default: Average
    :param unit: Unit for the metric that is associated with the alarm.
    :default: sum over 5 minutes
    :stability: experimental
    """
    # Collect the keyword-only options into a MetricOptions struct for the JS layer.
    options = aws_cdk.aws_cloudwatch.MetricOptions(
        color=color,
        dimensions=dimensions,
        label=label,
        period=period,
        statistic=statistic,
        unit=unit,
    )
    return jsii.invoke(self, "metricScheduled", [options])
@jsii.member(jsii_name="metricScheduleTime")
def metric_schedule_time(self, *, color: typing.Optional[str]=None, dimensions: typing.Optional[typing.Mapping[str,typing.Any]]=None, label: typing.Optional[str]=None, period: typing.Optional[aws_cdk.core.Duration]=None, statistic: typing.Optional[str]=None, unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit]=None) -> aws_cdk.aws_cloudwatch.Metric:
    """The interval, in milliseconds, for which the activity stays in the schedule state.

    :param color: Color for this metric when added to a Graph in a Dashboard.
    :param dimensions: Dimensions of the metric. Default: - No dimensions.
    :param label: Label for this metric when added to a Graph in a Dashboard.
    :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
    :param statistic: Aggregating function: "Minimum"/"min", "Maximum"/"max", "Average"/"avg", "Sum"/"sum", "SampleCount"/"n" or "pNN.NN". Default: Average
    :param unit: Unit for the metric that is associated with the alarm.
    :default: average over 5 minutes
    :stability: experimental
    """
    # Collect the keyword-only options into a MetricOptions struct for the JS layer.
    options = aws_cdk.aws_cloudwatch.MetricOptions(
        color=color,
        dimensions=dimensions,
        label=label,
        period=period,
        statistic=statistic,
        unit=unit,
    )
    return jsii.invoke(self, "metricScheduleTime", [options])
@jsii.member(jsii_name="metricStarted")
def metric_started(self, *, color: typing.Optional[str]=None, dimensions: typing.Optional[typing.Mapping[str,typing.Any]]=None, label: typing.Optional[str]=None, period: typing.Optional[aws_cdk.core.Duration]=None, statistic: typing.Optional[str]=None, unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit]=None) -> aws_cdk.aws_cloudwatch.Metric:
    """Metric for the number of times this activity is started.

    :param color: Color for this metric when added to a Graph in a Dashboard.
    :param dimensions: Dimensions of the metric. Default: - No dimensions.
    :param label: Label for this metric when added to a Graph in a Dashboard.
    :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
    :param statistic: Aggregating function: "Minimum"/"min", "Maximum"/"max", "Average"/"avg", "Sum"/"sum", "SampleCount"/"n" or "pNN.NN". Default: Average
    :param unit: Unit for the metric that is associated with the alarm.
    :default: sum over 5 minutes
    :stability: experimental
    """
    # Collect the keyword-only options into a MetricOptions struct for the JS layer.
    options = aws_cdk.aws_cloudwatch.MetricOptions(
        color=color,
        dimensions=dimensions,
        label=label,
        period=period,
        statistic=statistic,
        unit=unit,
    )
    return jsii.invoke(self, "metricStarted", [options])
@jsii.member(jsii_name="metricSucceeded")
def metric_succeeded(self, *, color: typing.Optional[str]=None, dimensions: typing.Optional[typing.Mapping[str,typing.Any]]=None, label: typing.Optional[str]=None, period: typing.Optional[aws_cdk.core.Duration]=None, statistic: typing.Optional[str]=None, unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit]=None) -> aws_cdk.aws_cloudwatch.Metric:
    """Metric for the number of times this activity succeeds.

    :param color: Color for this metric when added to a Graph in a Dashboard.
    :param dimensions: Dimensions of the metric. Default: - No dimensions.
    :param label: Label for this metric when added to a Graph in a Dashboard.
    :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
    :param statistic: Aggregating function: "Minimum"/"min", "Maximum"/"max", "Average"/"avg", "Sum"/"sum", "SampleCount"/"n" or "pNN.NN". Default: Average
    :param unit: Unit for the metric that is associated with the alarm.
    :default: sum over 5 minutes
    :stability: experimental
    """
    # Collect the keyword-only options into a MetricOptions struct for the JS layer.
    options = aws_cdk.aws_cloudwatch.MetricOptions(
        color=color,
        dimensions=dimensions,
        label=label,
        period=period,
        statistic=statistic,
        unit=unit,
    )
    return jsii.invoke(self, "metricSucceeded", [options])
@jsii.member(jsii_name="metricTime")
def metric_time(self, *, color: typing.Optional[str]=None, dimensions: typing.Optional[typing.Mapping[str,typing.Any]]=None, label: typing.Optional[str]=None, period: typing.Optional[aws_cdk.core.Duration]=None, statistic: typing.Optional[str]=None, unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit]=None) -> aws_cdk.aws_cloudwatch.Metric:
    """The interval, in milliseconds, between the time the activity is scheduled and the time it closes.

    :param color: Color for this metric when added to a Graph in a Dashboard.
    :param dimensions: Dimensions of the metric. Default: - No dimensions.
    :param label: Label for this metric when added to a Graph in a Dashboard.
    :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
    :param statistic: Aggregating function: "Minimum"/"min", "Maximum"/"max", "Average"/"avg", "Sum"/"sum", "SampleCount"/"n" or "pNN.NN". Default: Average
    :param unit: Unit for the metric that is associated with the alarm.
    :default: average over 5 minutes
    :stability: experimental
    """
    # Collect the keyword-only options into a MetricOptions struct for the JS layer.
    options = aws_cdk.aws_cloudwatch.MetricOptions(
        color=color,
        dimensions=dimensions,
        label=label,
        period=period,
        statistic=statistic,
        unit=unit,
    )
    return jsii.invoke(self, "metricTime", [options])
@jsii.member(jsii_name="metricTimedOut")
def metric_timed_out(self, *, color: typing.Optional[str]=None, dimensions: typing.Optional[typing.Mapping[str,typing.Any]]=None, label: typing.Optional[str]=None, period: typing.Optional[aws_cdk.core.Duration]=None, statistic: typing.Optional[str]=None, unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit]=None) -> aws_cdk.aws_cloudwatch.Metric:
    """Metric for the number of times this activity times out.

    :param color: Color for this metric when added to a Graph in a Dashboard.
    :param dimensions: Dimensions of the metric. Default: - No dimensions.
    :param label: Label for this metric when added to a Graph in a Dashboard.
    :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
    :param statistic: Aggregating function: "Minimum"/"min", "Maximum"/"max", "Average"/"avg", "Sum"/"sum", "SampleCount"/"n" or "pNN.NN". Default: Average
    :param unit: Unit for the metric that is associated with the alarm.
    :default: sum over 5 minutes
    :stability: experimental
    """
    # Collect the keyword-only options into a MetricOptions struct for the JS layer.
    options = aws_cdk.aws_cloudwatch.MetricOptions(
        color=color,
        dimensions=dimensions,
        label=label,
        period=period,
        statistic=statistic,
        unit=unit,
    )
    return jsii.invoke(self, "metricTimedOut", [options])
@jsii.member(jsii_name="next")
def next(self, next: "IChainable") -> "Chain":
    """Continue normal execution with the given state.

    :param next: the state (or chain of states) to transition to after this one.
    :return: the resulting Chain.
    :stability: experimental
    """
    return jsii.invoke(self, "next", [next])
@jsii.member(jsii_name="toStateJson")
def to_state_json(self) -> typing.Mapping[typing.Any, typing.Any]:
    """Return the Amazon States Language object for this state.

    :return: a mapping describing this state in ASL form.
    :stability: experimental
    """
    return jsii.invoke(self, "toStateJson", [])
@jsii.member(jsii_name="whenBoundToGraph")
def _when_bound_to_graph(self, graph: "StateGraph") -> None:
    """Called whenever this state is bound to a graph.

    Can be overridden by subclasses.

    :param graph: the StateGraph this state is being bound to.
    :stability: experimental
    """
    return jsii.invoke(self, "whenBoundToGraph", [graph])
@property
@jsii.member(jsii_name="endStates")
def end_states(self) -> typing.List["INextable"]:
    """Continuable states of this Chainable.

    :stability: experimental
    """
    return jsii.get(self, "endStates")
class TaskInput(metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-stepfunctions.TaskInput"):
    """Type union for task classes that accept multiple types of payload.

    Instances are created through the ``from_*`` factory class methods below.

    :stability: experimental
    """

    @jsii.member(jsii_name="fromContextAt")
    @classmethod
    def from_context_at(cls, path: str) -> "TaskInput":
        """Use a part of the task context as task input.

        Use this when you want to use a subobject or string from
        the current task context as complete payload
        to a task.

        :param path: JSONPath into the task context.
        :stability: experimental
        """
        return jsii.sinvoke(cls, "fromContextAt", [path])

    @jsii.member(jsii_name="fromDataAt")
    @classmethod
    def from_data_at(cls, path: str) -> "TaskInput":
        """Use a part of the execution data as task input.

        Use this when you want to use a subobject or string from
        the current state machine execution as complete payload
        to a task.

        :param path: JSONPath into the execution data.
        :stability: experimental
        """
        return jsii.sinvoke(cls, "fromDataAt", [path])

    @jsii.member(jsii_name="fromObject")
    @classmethod
    def from_object(cls, obj: typing.Mapping[str,typing.Any]) -> "TaskInput":
        """Use an object as task input.

        This object may contain Data and Context fields
        as object values, if desired.

        :param obj: the mapping to use as payload.
        :stability: experimental
        """
        return jsii.sinvoke(cls, "fromObject", [obj])

    @jsii.member(jsii_name="fromText")
    @classmethod
    def from_text(cls, text: str) -> "TaskInput":
        """Use a literal string as task input.

        This might be a JSON-encoded object, or just a text.

        :param text: the literal payload string.
        :stability: experimental
        """
        return jsii.sinvoke(cls, "fromText", [text])

    @property
    @jsii.member(jsii_name="type")
    def type(self) -> "InputType":
        """The type of this task input (see InputType).

        :stability: experimental
        """
        return jsii.get(self, "type")

    @property
    @jsii.member(jsii_name="value")
    def value(self) -> typing.Any:
        """The underlying payload value of this task input.

        :stability: experimental
        """
        return jsii.get(self, "value")
@jsii.data_type(jsii_type="@aws-cdk/aws-stepfunctions.TaskProps", jsii_struct_bases=[], name_mapping={'task': 'task', 'comment': 'comment', 'input_path': 'inputPath', 'output_path': 'outputPath', 'parameters': 'parameters', 'result_path': 'resultPath', 'timeout': 'timeout'})
class TaskProps():
    def __init__(self, *, task: "IStepFunctionsTask", comment: typing.Optional[str]=None, input_path: typing.Optional[str]=None, output_path: typing.Optional[str]=None, parameters: typing.Optional[typing.Mapping[str,typing.Any]]=None, result_path: typing.Optional[str]=None, timeout: typing.Optional[aws_cdk.core.Duration]=None):
        """Props that are common to all tasks.

        :param task: Actual task to be invoked in this workflow.
        :param comment: An optional description for this state. Default: No comment
        :param input_path: JSONPath expression to select part of the state to be the input to this state. May also be the special value DISCARD, which will cause the effective input to be the empty object {}. Default: $
        :param output_path: JSONPath expression to select part of the state to be the output to this state. May also be the special value DISCARD, which will cause the effective output to be the empty object {}. Default: $
        :param parameters: Parameters pass a collection of key-value pairs, either static values or JSONPath expressions that select from the input. Default: No parameters
        :param result_path: JSONPath expression to indicate where to inject the state's output. May also be the special value DISCARD, which will cause the state's input to become its output. Default: $
        :param timeout: Maximum run time of this state. If the state takes longer than this amount of time to complete, a 'Timeout' error is raised. Default: 60
        :stability: experimental
        """
        self._values = {'task': task}
        # Optional members are recorded only when explicitly supplied,
        # so downstream consumers can distinguish "unset" from "set to None".
        optional_members = {
            'comment': comment,
            'input_path': input_path,
            'output_path': output_path,
            'parameters': parameters,
            'result_path': result_path,
            'timeout': timeout,
        }
        self._values.update({key: value for key, value in optional_members.items() if value is not None})

    @property
    def task(self) -> "IStepFunctionsTask":
        """Actual task to be invoked in this workflow.

        :stability: experimental
        """
        return self._values.get('task')

    @property
    def comment(self) -> typing.Optional[str]:
        """An optional description for this state.

        :default: No comment
        :stability: experimental
        """
        return self._values.get('comment')

    @property
    def input_path(self) -> typing.Optional[str]:
        """JSONPath expression to select part of the state to be the input to this state.

        May also be the special value DISCARD, which will cause the effective
        input to be the empty object {}.

        :default: $
        :stability: experimental
        """
        return self._values.get('input_path')

    @property
    def output_path(self) -> typing.Optional[str]:
        """JSONPath expression to select part of the state to be the output to this state.

        May also be the special value DISCARD, which will cause the effective
        output to be the empty object {}.

        :default: $
        :stability: experimental
        """
        return self._values.get('output_path')

    @property
    def parameters(self) -> typing.Optional[typing.Mapping[str,typing.Any]]:
        """Parameters pass a collection of key-value pairs, either static values or JSONPath expressions that select from the input.

        :default: No parameters
        :see: https://docs.aws.amazon.com/step-functions/latest/dg/input-output-inputpath-params.html#input-output-parameters
        :stability: experimental
        """
        return self._values.get('parameters')

    @property
    def result_path(self) -> typing.Optional[str]:
        """JSONPath expression to indicate where to inject the state's output.

        May also be the special value DISCARD, which will cause the state's
        input to become its output.

        :default: $
        :stability: experimental
        """
        return self._values.get('result_path')

    @property
    def timeout(self) -> typing.Optional[aws_cdk.core.Duration]:
        """Maximum run time of this state.

        If the state takes longer than this amount of time to complete, a 'Timeout' error is raised.

        :default: 60
        :stability: experimental
        """
        return self._values.get('timeout')

    def __eq__(self, rhs) -> bool:
        return isinstance(rhs, self.__class__) and rhs._values == self._values

    def __ne__(self, rhs) -> bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        rendered = ', '.join('%s=%r' % (key, value) for key, value in self._values.items())
        return 'TaskProps(%s)' % rendered
@jsii.implements(INextable)
class Wait(State, metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-stepfunctions.Wait"):
    """Define a Wait state in the state machine.

    A Wait state can be used to delay execution of the state machine for a while.

    :stability: experimental
    """

    def __init__(self, scope: aws_cdk.core.Construct, id: str, *, time: "WaitTime", comment: typing.Optional[str]=None) -> None:
        """Create a new Wait state.

        :param scope: scope in which this construct is defined.
        :param id: construct identifier.
        :param time: Wait duration.
        :param comment: An optional description for this state. Default: No comment
        :stability: experimental
        """
        wait_props = WaitProps(time=time, comment=comment)
        jsii.create(Wait, self, [scope, id, wait_props])

    @jsii.member(jsii_name="next")
    def next(self, next: "IChainable") -> "Chain":
        """Continue normal execution with the given state.

        :param next: the state to transition to after the wait.
        :stability: experimental
        """
        return jsii.invoke(self, "next", [next])

    @jsii.member(jsii_name="toStateJson")
    def to_state_json(self) -> typing.Mapping[typing.Any, typing.Any]:
        """Return the Amazon States Language object for this state.

        :stability: experimental
        """
        return jsii.invoke(self, "toStateJson", [])

    @property
    @jsii.member(jsii_name="endStates")
    def end_states(self) -> typing.List["INextable"]:
        """Continuable states of this Chainable.

        :stability: experimental
        """
        return jsii.get(self, "endStates")
@jsii.data_type(jsii_type="@aws-cdk/aws-stepfunctions.WaitProps", jsii_struct_bases=[], name_mapping={'time': 'time', 'comment': 'comment'})
class WaitProps():
    def __init__(self, *, time: "WaitTime", comment: typing.Optional[str]=None):
        """Properties for defining a Wait state.

        :param time: Wait duration.
        :param comment: An optional description for this state. Default: No comment
        :stability: experimental
        """
        self._values = {'time': time}
        # Record the optional comment only when explicitly supplied.
        if comment is not None:
            self._values["comment"] = comment

    @property
    def time(self) -> "WaitTime":
        """Wait duration.

        :stability: experimental
        """
        return self._values.get('time')

    @property
    def comment(self) -> typing.Optional[str]:
        """An optional description for this state.

        :default: No comment
        :stability: experimental
        """
        return self._values.get('comment')

    def __eq__(self, rhs) -> bool:
        return isinstance(rhs, self.__class__) and rhs._values == self._values

    def __ne__(self, rhs) -> bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        rendered = ', '.join('%s=%r' % (key, value) for key, value in self._values.items())
        return 'WaitProps(%s)' % rendered
class WaitTime(metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-stepfunctions.WaitTime"):
    """Source of the wait duration for a Wait state: a fixed duration, a timestamp, or a path into the state object.

    :stability: experimental
    """

    @jsii.member(jsii_name="duration")
    @classmethod
    def duration(cls, duration: aws_cdk.core.Duration) -> "WaitTime":
        """Wait a fixed amount of time.

        :param duration: the amount of time to wait.
        :stability: experimental
        """
        return jsii.sinvoke(cls, "duration", [duration])

    @jsii.member(jsii_name="secondsPath")
    @classmethod
    def seconds_path(cls, path: str) -> "WaitTime":
        """Wait for a number of seconds stored in the state object.

        :param path: JSONPath to the number of seconds, e.g. ``$.waitSeconds``.
        :stability: experimental
        """
        return jsii.sinvoke(cls, "secondsPath", [path])

    @jsii.member(jsii_name="timestamp")
    @classmethod
    def timestamp(cls, timestamp: str) -> "WaitTime":
        """Wait until the given ISO8601 timestamp.

        :param timestamp: an ISO8601 timestamp string, e.g. ``2016-03-14T01:59:00Z``.
        :stability: experimental
        """
        return jsii.sinvoke(cls, "timestamp", [timestamp])

    @jsii.member(jsii_name="timestampPath")
    @classmethod
    def timestamp_path(cls, path: str) -> "WaitTime":
        """Wait until a timestamp found in the state object.

        :param path: JSONPath to the timestamp, e.g. ``$.waitTimestamp``.
        :stability: experimental
        """
        return jsii.sinvoke(cls, "timestampPath", [path])
# Names re-exported as the public API of this generated module.
__all__ = ["Activity", "ActivityProps", "AfterwardsOptions", "CatchProps", "CfnActivity", "CfnActivityProps", "CfnStateMachine", "CfnStateMachineProps", "Chain", "Choice", "ChoiceProps", "Condition", "Context", "Data", "Errors", "Fail", "FailProps", "FieldUtils", "FindStateOptions", "IActivity", "IChainable", "INextable", "IStateMachine", "IStepFunctionsTask", "InputType", "Map", "MapProps", "Parallel", "ParallelProps", "Pass", "PassProps", "Result", "RetryProps", "ServiceIntegrationPattern", "SingleStateOptions", "State", "StateGraph", "StateMachine", "StateMachineFragment", "StateMachineProps", "StateProps", "StateTransitionMetric", "StepFunctionsTaskConfig", "Succeed", "SucceedProps", "Task", "TaskInput", "TaskProps", "Wait", "WaitProps", "WaitTime", "__jsii_assembly__"]
# Finalize publication of the jsii assembly.
publication.publish()
| 39.537896
| 784
| 0.670059
| 28,295
| 231,099
| 5.378936
| 0.041315
| 0.039002
| 0.068004
| 0.026255
| 0.809306
| 0.783793
| 0.768596
| 0.741316
| 0.725566
| 0.697175
| 0
| 0.001543
| 0.223259
| 231,099
| 5,844
| 785
| 39.544661
| 0.846332
| 0.485311
| 0
| 0.588988
| 0
| 0
| 0.127027
| 0.035853
| 0
| 0
| 0
| 0
| 0
| 1
| 0.266965
| false
| 0.004481
| 0.007682
| 0.051216
| 0.553137
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f30ef9e18ce9668283bd3f4add58359284f857f3
| 120
|
py
|
Python
|
nnmnkwii/functions/__init__.py
|
mateuszroszkowski/nnmnkwii
|
64f8e0771688e1d0c537b79aa402a6f04e107d56
|
[
"MIT"
] | null | null | null |
nnmnkwii/functions/__init__.py
|
mateuszroszkowski/nnmnkwii
|
64f8e0771688e1d0c537b79aa402a6f04e107d56
|
[
"MIT"
] | 2
|
2018-12-29T14:56:35.000Z
|
2019-01-02T07:46:36.000Z
|
nnmnkwii/functions/__init__.py
|
mateuszroszkowski/nnmnkwii
|
64f8e0771688e1d0c537b79aa402a6f04e107d56
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from nnmnkwii.preprocessing import *
from nnmnkwii.paramgen import *
from nnmnkwii.postfilters import *
| 24
| 36
| 0.8
| 15
| 120
| 6.4
| 0.6
| 0.375
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009524
| 0.125
| 120
| 4
| 37
| 30
| 0.904762
| 0.108333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
f310bb81f5800f59d6056a0ed5b0085ba421ec23
| 172
|
py
|
Python
|
functions.py
|
mandreas-public/wsb-stock-app
|
21cace2cf1e68e65ae7e1443a04c1a098a81fe9d
|
[
"MIT"
] | 1
|
2021-02-04T22:43:24.000Z
|
2021-02-04T22:43:24.000Z
|
functions.py
|
mandreas-public/wsb-stock-app
|
21cace2cf1e68e65ae7e1443a04c1a098a81fe9d
|
[
"MIT"
] | null | null | null |
functions.py
|
mandreas-public/wsb-stock-app
|
21cace2cf1e68e65ae7e1443a04c1a098a81fe9d
|
[
"MIT"
] | null | null | null |
# TODO: function to get currently owned stock data
# TODO: function to graph currently owned stock data
# TODO: function to see recent mentions of an owned stock
# TODO: function to analyze sentiment
| 43
| 50
| 0.819767
| 27
| 172
| 5.222222
| 0.555556
| 0.283688
| 0.269504
| 0.326241
| 0.468085
| 0.468085
| 0
| 0
| 0
| 0
| 0
| 0
| 0.156977
| 172
| 4
| 51
| 43
| 0.972414
| 0.953488
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b85c196106e33d74213463242082d8baef1e24c0
| 62,169
|
py
|
Python
|
api_test/api/automationCase.py
|
RockyLiys/api_auto_test_platform
|
7e6aec23b6f54d20c534b77a2679daf37b65c960
|
[
"MIT"
] | null | null | null |
api_test/api/automationCase.py
|
RockyLiys/api_auto_test_platform
|
7e6aec23b6f54d20c534b77a2679daf37b65c960
|
[
"MIT"
] | 10
|
2019-12-04T23:30:37.000Z
|
2022-02-10T12:05:45.000Z
|
api_test/api/automationCase.py
|
RockyLiys/api_auto_test_platform
|
7e6aec23b6f54d20c534b77a2679daf37b65c960
|
[
"MIT"
] | 3
|
2019-07-01T10:18:40.000Z
|
2019-07-06T00:59:19.000Z
|
import json
import logging
import platform
from datetime import datetime
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.db import transaction
from django.db.models import Q
from rest_framework.authentication import TokenAuthentication
from rest_framework.parsers import JSONParser
from rest_framework.views import APIView
from api_test.common.WriteExcel import Write
from api_test.common.addTask import add
from api_test.common.api_response import JsonResponse
from api_test.common.common import record_dynamic, create_json, del_task_crontab
from api_test.common.confighttp import test_api
from api_test.models import Project, AutomationGroupLevelFirst, \
AutomationTestCase, AutomationCaseApi, AutomationParameter, GlobalHost, AutomationHead, AutomationTestTask, \
AutomationTestResult, ApiInfo, AutomationParameterRaw, AutomationResponseJson
from api_test.serializers import AutomationGroupLevelFirstSerializer, AutomationTestCaseSerializer, \
AutomationCaseApiSerializer, AutomationCaseApiListSerializer, AutomationTestTaskSerializer, \
AutomationTestResultSerializer, ApiInfoSerializer, CorrelationDataSerializer, AutomationTestReportSerializer, \
AutomationTestCaseDeserializer, AutomationCaseApiDeserializer, AutomationHeadDeserializer, \
AutomationParameterDeserializer, AutomationTestTaskDeserializer, ProjectSerializer, \
AutomationCaseDownSerializer
logger = logging.getLogger(__name__)  # Using __name__ lets the logging config resolve this module's logger through the logger hierarchy.
class Group(APIView):
    authentication_classes = (TokenAuthentication,)
    permission_classes = ()

    def get(self, request):
        """List the first-level case groups of a project.

        :param request: expects a ``project_id`` query parameter.
        :return: JsonResponse with the serialized groups on success, or an error code.
        """
        project_id = request.GET.get("project_id")
        # Reject a missing or non-numeric project id up front.
        if not project_id or not project_id.isdecimal():
            return JsonResponse(code="999996", msg="参数有误!")
        try:
            project = Project.objects.get(id=project_id)
        except ObjectDoesNotExist:
            return JsonResponse(code="999995", msg="项目不存在!")
        # Disabled projects cannot be queried.
        if not ProjectSerializer(project).data["status"]:
            return JsonResponse(code="999985", msg="该项目已禁用")
        groups = AutomationGroupLevelFirst.objects.filter(project=project_id)
        serializer = AutomationGroupLevelFirstSerializer(groups, many=True)
        return JsonResponse(data=serializer.data, code="999999", msg="成功!")
class AddGroup(APIView):
    authentication_classes = (TokenAuthentication,)
    permission_classes = ()

    def parameter_check(self, data):
        """Validate the payload for creating a case group.

        :param data: parsed JSON body; requires an int ``project_id`` and a non-empty ``name``.
        :return: an error JsonResponse when invalid, otherwise None.
        """
        try:
            # project_id must be an int and name must be present and truthy.
            if not isinstance(data["project_id"], int) or not data["name"]:
                return JsonResponse(code="999996", msg="参数有误!")
        except KeyError:
            return JsonResponse(code="999996", msg="参数有误!")

    def post(self, request):
        """Create a new case group under a project.

        :param request: JSON body with ``project_id`` and ``name``.
        :return: JsonResponse carrying the new group id on success, or an error code.
        """
        data = JSONParser().parse(request)
        invalid = self.parameter_check(data)
        if invalid:
            return invalid
        try:
            project = Project.objects.get(id=data["project_id"])
            # Ordinary users may not modify projects owned by a superuser.
            if not request.user.is_superuser and project.user.is_superuser:
                return JsonResponse(code="999983", msg="无操作权限!")
        except ObjectDoesNotExist:
            return JsonResponse(code="999995", msg="项目不存在!")
        if not ProjectSerializer(project).data["status"]:
            return JsonResponse(code="999985", msg="该项目已禁用")
        serializer = AutomationGroupLevelFirstSerializer(data=data)
        if not serializer.is_valid():
            return JsonResponse(code="999998", msg="失败!")
        serializer.save(project=project)
        record_dynamic(project=serializer.data.get("id"),
                       _type="添加", operationObject="用例分组", user=request.user.pk,
                       data="新增用例分组“%s”" % data["name"])
        return JsonResponse(data={
            "group_id": serializer.data.get("id")
        }, code="999999", msg="成功!")
class DelGroup(APIView):
    authentication_classes = (TokenAuthentication,)
    permission_classes = ()

    def parameter_check(self, data):
        """
        Validate the request payload: both ``project_id`` and ``id`` must be ints.
        :return: JsonResponse on failure, None when valid
        """
        try:
            if not isinstance(data["project_id"], int) or not isinstance(data["id"], int):
                return JsonResponse(code="999996", msg="参数有误!")
        except KeyError:
            return JsonResponse(code="999996", msg="参数有误!")

    def post(self, request):
        """
        Delete a case group.
        :param request: JSON body validated by parameter_check
        :return: JsonResponse status
        """
        data = JSONParser().parse(request)
        check = self.parameter_check(data)
        if check:
            return check
        try:
            project = Project.objects.get(id=data["project_id"])
            # non-superusers may not touch projects owned by a superuser
            if not request.user.is_superuser and project.user.is_superuser:
                return JsonResponse(code="999983", msg="无操作权限!")
        except ObjectDoesNotExist:
            return JsonResponse(code="999995", msg="项目不存在!")
        if not ProjectSerializer(project).data["status"]:
            return JsonResponse(code="999985", msg="该项目已禁用")
        groups = AutomationGroupLevelFirst.objects.filter(id=data["id"], project=data["project_id"])
        if not groups:
            return JsonResponse(code="999991", msg="分组不存在!")
        name = groups[0].name
        groups.delete()
        record_dynamic(project=data["project_id"],
                       _type="删除", operationObject="用例分组", user=request.user.pk, data="删除用例分组“%s”" % name)
        return JsonResponse(code="999999", msg="成功!")
class UpdateNameGroup(APIView):
    authentication_classes = (TokenAuthentication,)
    permission_classes = ()

    def parameter_check(self, data):
        """
        Validate the payload for renaming a group.
        :param data: parsed JSON body; requires int project_id and id, non-empty name
        :return: JsonResponse on failure, None when valid
        """
        try:
            if not isinstance(data["project_id"], int) or not isinstance(data["id"], int):
                return JsonResponse(code="999996", msg="参数有误!")
            if not data["name"]:
                return JsonResponse(code="999996", msg="参数有误!")
        except KeyError:
            return JsonResponse(code="999996", msg="参数有误!")

    def post(self, request):
        """
        Rename a case group.
        :param request: JSON body validated by parameter_check
        :return: JsonResponse status
        """
        data = JSONParser().parse(request)
        result = self.parameter_check(data)
        if result:
            return result
        try:
            pro_data = Project.objects.get(id=data["project_id"])
            if not request.user.is_superuser and pro_data.user.is_superuser:
                return JsonResponse(code="999983", msg="无操作权限!")
        except ObjectDoesNotExist:
            return JsonResponse(code="999995", msg="项目不存在!")
        pro_data = ProjectSerializer(pro_data)
        if not pro_data.data["status"]:
            return JsonResponse(code="999985", msg="该项目已禁用")
        try:
            obj = AutomationGroupLevelFirst.objects.get(id=data["id"], project=data["project_id"])
        except ObjectDoesNotExist:
            return JsonResponse(code="999991", msg="分组不存在!")
        serializer = AutomationGroupLevelFirstSerializer(data=data)
        if serializer.is_valid():
            serializer.update(instance=obj, validated_data=data)
        else:
            return JsonResponse(code="999998", msg="失败!")
        # BUG FIX: record the audit entry against the project id; previously
        # serializer.data.get("id") (a group id, possibly None) was passed as
        # project, unlike sibling views which pass data["project_id"].
        record_dynamic(project=data["project_id"],
                       _type="修改", operationObject="用例分组", user=request.user.pk,
                       data="修改用例分组“%s”" % data["name"])
        return JsonResponse(code="999999", msg="成功!")
class UpdateGroup(APIView):
    authentication_classes = (TokenAuthentication,)
    permission_classes = ()

    def parameter_check(self, data):
        """
        Validate the payload: project_id and automationGroupLevelFirst_id must
        be ints, ids a non-empty list of ints.
        :return: JsonResponse on failure, None when valid
        """
        try:
            if not data["project_id"] or not data["ids"] or not data["automationGroupLevelFirst_id"]:
                return JsonResponse(code="999996", msg="参数有误!")
            if not isinstance(data["project_id"], int) \
                    or not isinstance(data["ids"], list) \
                    or not isinstance(data["automationGroupLevelFirst_id"], int):
                return JsonResponse(code="999996", msg="参数有误!")
            if any(not isinstance(case_id, int) for case_id in data["ids"]):
                return JsonResponse(code="999996", msg="参数有误!")
        except KeyError:
            return JsonResponse(code="999996", msg="参数有误!")

    def post(self, request):
        """
        Move a batch of test cases into another first-level group.
        :param request: JSON body validated by parameter_check
        :return: JsonResponse status
        """
        data = JSONParser().parse(request)
        check = self.parameter_check(data)
        if check:
            return check
        try:
            project = Project.objects.get(id=data["project_id"])
            if not request.user.is_superuser and project.user.is_superuser:
                return JsonResponse(code="999983", msg="无操作权限!")
        except ObjectDoesNotExist:
            return JsonResponse(code="999995", msg="项目不存在!")
        if not ProjectSerializer(project).data["status"]:
            return JsonResponse(code="999985", msg="该项目已禁用")
        try:
            group = AutomationGroupLevelFirst.objects.get(id=data["automationGroupLevelFirst_id"])
        except ObjectDoesNotExist:
            return JsonResponse(code="999991", msg="分组不存在!")
        # OR together all requested case ids into one queryset filter
        id_filter = Q()
        for case_id in data["ids"]:
            id_filter |= Q(id=case_id)
        case_list = AutomationTestCase.objects.filter(id_filter, project=data["project_id"])
        with transaction.atomic():
            case_list.update(automationGroupLevelFirst=group)
        name_list = [str(case.caseName) for case in case_list]
        record_dynamic(project=data["project_id"],
                       _type="修改", operationObject="用例", user=request.user.pk, data="修改用例分组,列表“%s”" % name_list)
        return JsonResponse(code="999999", msg="成功!")
class CaseList(APIView):
    authentication_classes = (TokenAuthentication,)
    permission_classes = ()

    def get(self, request):
        """
        Paginated list of test cases, optionally filtered by first-level group
        and case-name substring.
        Query params: project_id (required), first_group_id, name, page, page_size.
        :return: JsonResponse with data / page / total
        """
        try:
            page_size = int(request.GET.get("page_size", 20))
            page = int(request.GET.get("page", 1))
        except (TypeError, ValueError):
            return JsonResponse(code="999985", msg="page and page_size must be integer!")
        project_id = request.GET.get("project_id")
        first_group_id = request.GET.get("first_group_id")
        name = request.GET.get("name")
        if not project_id or not project_id.isdecimal():
            return JsonResponse(code="999996", msg="参数有误!")
        try:
            project = Project.objects.get(id=project_id)
        except ObjectDoesNotExist:
            return JsonResponse(code="999995", msg="项目不存在!")
        if not ProjectSerializer(project).data["status"]:
            return JsonResponse(code="999985", msg="该项目已禁用")
        # build the queryset filter from whichever optional params are present
        filters = {"project": project_id}
        if first_group_id:
            if not first_group_id.isdecimal():
                return JsonResponse(code="999996", msg="参数有误!")
            filters["automationGroupLevelFirst"] = first_group_id
        if name:
            filters["caseName__contains"] = name
        cases = AutomationTestCase.objects.filter(**filters).order_by("id")
        paginator = Paginator(cases, page_size)
        total = paginator.num_pages  # total number of pages
        try:
            page_obj = paginator.page(page)
        except PageNotAnInteger:
            page_obj = paginator.page(1)
        except EmptyPage:
            page_obj = paginator.page(paginator.num_pages)
        serialize = AutomationTestCaseSerializer(page_obj, many=True)
        return JsonResponse(data={"data": serialize.data,
                                  "page": page,
                                  "total": total
                                  }, code="999999", msg="成功!")
class AddCase(APIView):
    authentication_classes = (TokenAuthentication,)
    permission_classes = ()

    def parameter_check(self, data):
        """
        Validate the payload for creating a case.
        :param data: parsed JSON body; needs project_id (int), caseName and
                     automationGroupLevelFirst_id (int)
        :return: JsonResponse on failure, None when valid
        """
        try:
            if not data["project_id"] or not data["caseName"] or not data["automationGroupLevelFirst_id"]:
                return JsonResponse(code="999996", msg="参数有误!")
            if not isinstance(data["project_id"], int) or not isinstance(data["automationGroupLevelFirst_id"], int):
                return JsonResponse(code="999996", msg="参数有误!")
        except KeyError:
            return JsonResponse(code="999996", msg="参数有误!")

    def post(self, request):
        """
        Create a test case.
        :param request: JSON body validated by parameter_check
        :return: JsonResponse with the new case id on success
        """
        data = JSONParser().parse(request)
        result = self.parameter_check(data)
        if result:
            return result
        data["user"] = request.user.pk
        try:
            obj = Project.objects.get(id=data["project_id"])
            if not request.user.is_superuser and obj.user.is_superuser:
                return JsonResponse(code="999983", msg="无操作权限!")
        except ObjectDoesNotExist:
            return JsonResponse(code="999995", msg="项目不存在!")
        pro_data = ProjectSerializer(obj)
        if not pro_data.data["status"]:
            return JsonResponse(code="999985", msg="该项目已禁用")
        # case names must be unique within a project
        case_name = AutomationTestCase.objects.filter(caseName=data["caseName"], project=data["project_id"])
        if len(case_name):
            return JsonResponse(code="999997", msg="存在相同名称!")
        else:
            with transaction.atomic():
                try:
                    serialize = AutomationTestCaseDeserializer(data=data)
                    if serialize.is_valid():
                        try:
                            if not isinstance(data["automationGroupLevelFirst_id"], int):
                                return JsonResponse(code="999996", msg="参数有误!")
                            obi = AutomationGroupLevelFirst.objects.get(id=data["automationGroupLevelFirst_id"], project=data["project_id"])
                            serialize.save(project=obj, automationGroupLevelFirst=obi, user=User.objects.get(id=data["user"]))
                        except KeyError:
                            # no group supplied: save the case without a group
                            serialize.save(project=obj, user=User.objects.get(id=data["user"]))
                        record_dynamic(project=data["project_id"],
                                       _type="新增", operationObject="用例", user=request.user.pk,
                                       data="新增用例\"%s\"" % data["caseName"])
                        return JsonResponse(data={"case_id": serialize.data.get("id")},
                                            code="999999", msg="成功!")
                    return JsonResponse(code="999996", msg="参数有误!")
                except Exception as e:
                    # BUG FIX: was a bare `except:` that silently swallowed every
                    # error (even SystemExit/KeyboardInterrupt); narrow to Exception
                    # and log it, matching the file's convention elsewhere.
                    logging.exception(e)
                    return JsonResponse(code="999998", msg="失败!")
class UpdateCase(APIView):
    authentication_classes = (TokenAuthentication,)
    permission_classes = ()

    def parameter_check(self, data):
        """
        Validate the payload for updating a case.
        :return: JsonResponse on failure, None when valid
        """
        try:
            if not all((data["project_id"], data["caseName"], data["id"],
                        data["automationGroupLevelFirst_id"])):
                return JsonResponse(code="999996", msg="参数有误!")
            int_fields = (data["project_id"], data["id"], data["automationGroupLevelFirst_id"])
            if not all(isinstance(field, int) for field in int_fields):
                return JsonResponse(code="999996", msg="参数有误!")
        except KeyError:
            return JsonResponse(code="999996", msg="参数有误!")

    def post(self, request):
        """
        Update an existing test case.
        :param request: JSON body validated by parameter_check
        :return: JsonResponse status
        """
        data = JSONParser().parse(request)
        check = self.parameter_check(data)
        if check:
            return check
        try:
            project = Project.objects.get(id=data["project_id"])
            if not request.user.is_superuser and project.user.is_superuser:
                return JsonResponse(code="999983", msg="无操作权限!")
        except ObjectDoesNotExist:
            return JsonResponse(code="999995", msg="项目不存在!")
        if not ProjectSerializer(project).data["status"]:
            return JsonResponse(code="999985", msg="该项目已禁用")
        try:
            case = AutomationTestCase.objects.get(id=data["id"], project=data["project_id"])
        except ObjectDoesNotExist:
            return JsonResponse(code="999987", msg="用例不存在!")
        try:
            AutomationGroupLevelFirst.objects.get(id=data["automationGroupLevelFirst_id"], project=data["project_id"])
        except ObjectDoesNotExist:
            return JsonResponse(code="999991", msg="分组不存在!")
        # a different case in the same project must not already hold this name
        duplicates = AutomationTestCase.objects.filter(caseName=data["caseName"],
                                                       project=data["project_id"]).exclude(id=data["id"])
        if duplicates:
            return JsonResponse(code="999997", msg="存在相同名称!")
        serialize = AutomationTestCaseDeserializer(data=data)
        if serialize.is_valid():
            serialize.update(instance=case, validated_data=data)
            return JsonResponse(code="999999", msg="成功!")
        return JsonResponse(code="999998", msg="失败!")
class DelCase(APIView):
    # FIX: this view previously inherited AddCase even though it overrides both
    # of its methods and uses none of its state; inherit APIView directly like
    # every other view in this module to remove the accidental coupling.
    authentication_classes = (TokenAuthentication,)
    permission_classes = ()

    def parameter_check(self, data):
        """
        Validate the payload: project_id (int) and ids (list of ints) required.
        :return: JsonResponse on failure, None when valid
        """
        try:
            if not data["project_id"] or not data["ids"]:
                return JsonResponse(code="999996", msg="参数有误!")
            if not isinstance(data["project_id"], int) or not isinstance(data["ids"], list):
                return JsonResponse(code="999996", msg="参数有误!")
            for i in data["ids"]:
                if not isinstance(i, int):
                    return JsonResponse(code="999996", msg="参数有误!")
        except KeyError:
            return JsonResponse(code="999996", msg="参数有误!")

    def post(self, request):
        """
        Delete test cases by id list; ids not found are silently skipped.
        :param request: JSON body validated by parameter_check
        :return: JsonResponse status
        """
        data = JSONParser().parse(request)
        result = self.parameter_check(data)
        if result:
            return result
        try:
            pro_data = Project.objects.get(id=data["project_id"])
            if not request.user.is_superuser and pro_data.user.is_superuser:
                return JsonResponse(code="999983", msg="无操作权限!")
        except ObjectDoesNotExist:
            return JsonResponse(code="999995", msg="项目不存在!")
        pro_data = ProjectSerializer(pro_data)
        if not pro_data.data["status"]:
            return JsonResponse(code="999985", msg="该项目已禁用")
        for j in data["ids"]:
            obi = AutomationTestCase.objects.filter(id=j, project=data['project_id'])
            if len(obi) != 0:
                name = obi[0].caseName
                obi.delete()
                record_dynamic(project=data["project_id"],
                               _type="删除", operationObject="用例", user=request.user.pk, data="删除用例\"%s\"" % name)
        return JsonResponse(code="999999", msg="成功!")
class ApiList(APIView):
    authentication_classes = (TokenAuthentication,)
    permission_classes = ()

    def get(self, request):
        """
        Paginated list of the APIs attached to a test case.
        Query params: project_id, case_id (required), page, page_size.
        :return: JsonResponse with data / page / total
        """
        try:
            page_size = int(request.GET.get("page_size", 20))
            page = int(request.GET.get("page", 1))
        except (TypeError, ValueError):
            return JsonResponse(code="999985", msg="page and page_size must be integer!")
        project_id = request.GET.get("project_id")
        case_id = request.GET.get("case_id")
        # BUG FIX: a missing query parameter is None and used to raise
        # AttributeError on .isdecimal() (HTTP 500); report a parameter error.
        if not project_id or not case_id or not project_id.isdecimal() or not case_id.isdecimal():
            return JsonResponse(code="999996", msg="参数有误!")
        try:
            pro_data = Project.objects.get(id=project_id)
        except ObjectDoesNotExist:
            return JsonResponse(code="999995", msg="项目不存在!")
        pro_data = ProjectSerializer(pro_data)
        if not pro_data.data["status"]:
            return JsonResponse(code="999985", msg="该项目已禁用")
        try:
            AutomationTestCase.objects.get(id=case_id, project=project_id)
        except ObjectDoesNotExist:
            return JsonResponse(code="999987", msg="用例不存在!")
        data = AutomationCaseApi.objects.filter(automationTestCase=case_id).order_by("id")
        paginator = Paginator(data, page_size)
        total = paginator.num_pages  # total number of pages
        try:
            obm = paginator.page(page)
        except PageNotAnInteger:
            obm = paginator.page(1)
        except EmptyPage:
            obm = paginator.page(paginator.num_pages)
        serialize = AutomationCaseApiListSerializer(obm, many=True)
        # BUG FIX: range(0, len-1) skipped the last row, leaving its testStatus
        # untouched; reset the flag on every serialized API.
        for i in range(len(serialize.data)):
            serialize.data[i]["testStatus"] = False
        return JsonResponse(data={"data": serialize.data,
                                  "page": page,
                                  "total": total
                                  }, code="999999", msg="成功!")
class CaseApiInfo(APIView):
    authentication_classes = (TokenAuthentication,)
    permission_classes = ()

    def get(self, request):
        """
        Detail view of one API under a test case.
        Query params: project_id, case_id, api_id (all required decimal strings).
        :return: JsonResponse with the serialized API, plus RegularParam when a
                 regular-expression extraction is configured for it
        """
        project_id = request.GET.get("project_id")
        case_id = request.GET.get("case_id")
        api_id = request.GET.get("api_id")
        # BUG FIX: a missing query parameter is None and used to raise
        # AttributeError on .isdecimal() (HTTP 500); report a parameter error.
        if not all(p and p.isdecimal() for p in (project_id, api_id, case_id)):
            return JsonResponse(code="999996", msg="参数有误!")
        try:
            pro_data = Project.objects.get(id=project_id)
        except ObjectDoesNotExist:
            return JsonResponse(code="999995", msg="项目不存在!")
        pro_data = ProjectSerializer(pro_data)
        if not pro_data.data["status"]:
            return JsonResponse(code="999985", msg="该项目已禁用")
        try:
            AutomationTestCase.objects.get(id=case_id, project=project_id)
        except ObjectDoesNotExist:
            return JsonResponse(code="999987", msg="用例不存在!")
        try:
            obm = AutomationCaseApi.objects.get(id=api_id, automationTestCase=case_id)
        except ObjectDoesNotExist:
            return JsonResponse(code="999990", msg="接口不存在!")
        data = AutomationCaseApiSerializer(obm).data
        try:
            # expose the name of the Regular extraction for this API, if any
            name = AutomationResponseJson.objects.get(automationCaseApi=api_id, type="Regular")
            data["RegularParam"] = name.name
        except ObjectDoesNotExist:
            pass
        return JsonResponse(data=data, code="999999", msg="成功!")
class AddOldApi(APIView):
    authentication_classes = (TokenAuthentication,)
    permission_classes = ()

    def parameter_check(self, data):
        """
        Validate the request payload.
        :param data: parsed JSON body; needs project_id (int), case_id (int)
                     and api_ids (list of ints)
        :return: JsonResponse on failure, None when valid
        """
        try:
            if not data["project_id"] or not data["case_id"] or not data["api_ids"]:
                return JsonResponse(code="999996", msg="参数有误!")
            if not isinstance(data["project_id"], int) or \
                    not isinstance(data["api_ids"], list) or not isinstance(data["case_id"], int):
                return JsonResponse(code="999996", msg="参数有误!")
            for i in data["api_ids"]:
                if not isinstance(i, int):
                    return JsonResponse(code="999996", msg="参数有误!")
        except KeyError:
            return JsonResponse(code="999996", msg="参数有误!")

    def post(self, request):
        """
        Copy existing ApiInfo definitions into a test case: for each id the API
        record, its form-data/raw parameters and its headers are duplicated as
        automation-case rows inside one transaction per id.
        :param request: JSON body validated by parameter_check
        :return: JsonResponse status
        """
        data = JSONParser().parse(request)
        result = self.parameter_check(data)
        if result:
            return result
        try:
            pro_data = Project.objects.get(id=data["project_id"])
            if not request.user.is_superuser and pro_data.user.is_superuser:
                return JsonResponse(code="999983", msg="无操作权限!")
        except ObjectDoesNotExist:
            return JsonResponse(code="999995", msg="项目不存在!")
        pro_data = ProjectSerializer(pro_data)
        if not pro_data.data["status"]:
            return JsonResponse(code="999985", msg="该项目已禁用")
        try:
            obj = AutomationTestCase.objects.get(id=data["case_id"], project=data["project_id"])
        except ObjectDoesNotExist:
            return JsonResponse(code="999987", msg="用例不存在!")
        for i in data["api_ids"]:
            try:
                api_data = ApiInfoSerializer(ApiInfo.objects.get(id=i, project=data["project_id"])).data
            except ObjectDoesNotExist:
                continue  # ids that are not APIs of this project are silently skipped
            with transaction.atomic():
                api_data["automationTestCase_id"] = obj.pk
                api_serialize = AutomationCaseApiDeserializer(data=api_data)
                if api_serialize.is_valid():
                    api_serialize.save(automationTestCase=obj)
                    case_api = api_serialize.data.get("id")
                    # copy the request parameters: form-data as individual rows,
                    # anything else as a single raw JSON blob
                    if api_data["requestParameterType"] == "form-data":
                        if api_data["requestParameter"]:
                            for j in api_data["requestParameter"]:
                                if j["name"]:
                                    AutomationParameter(automationCaseApi=AutomationCaseApi.objects.get(id=case_api),
                                                        name=j["name"], value=j["value"], interrelate=False).save()
                    else:
                        if api_data["requestParameterRaw"]:
                            AutomationParameterRaw(automationCaseApi=AutomationCaseApi.objects.get(id=case_api),
                                                   data=json.loads(api_data["requestParameterRaw"]["data"])).save()
                    # copy the request headers
                    if api_data.get("headers"):
                        for n in api_data["headers"]:
                            if n["name"]:
                                AutomationHead(automationCaseApi=AutomationCaseApi.objects.get(id=case_api),
                                               name=n["name"], value=n["value"], interrelate=False).save()
                    case_name = AutomationTestCaseSerializer(obj).data["caseName"]
                    record_dynamic(project=data["project_id"],
                                   _type="新增", operationObject="用例接口", user=request.user.pk,
                                   data="用例“%s”新增接口\"%s\"" % (case_name, api_serialize.data.get("name")))
        return JsonResponse(code="999999", msg="成功!")
class AddNewApi(APIView):
    authentication_classes = (TokenAuthentication,)
    permission_classes = ()

    def parameter_check(self, data):
        """
        Validate the payload for creating a new API under a case.
        :param data: parsed JSON body
        :return: JsonResponse on failure, None when valid
        """
        try:
            # all required fields must be present and non-empty
            if not data["project_id"] or not data["automationTestCase_id"] or not data["name"] or not data["httpType"]\
                    or not data["requestType"] or not data["apiAddress"] or not data["requestParameterType"]\
                    or not data["examineType"]:
                return JsonResponse(code="999996", msg="参数有误!")
            if not isinstance(data["project_id"], int) or not isinstance(data["automationTestCase_id"], int):
                return JsonResponse(code="999996", msg="参数有误!")
            # enum-style fields must hold one of the known values
            if data["httpType"] not in ["HTTP", "HTTPS"]:
                return JsonResponse(code="999996", msg="参数有误!")
            if data["requestType"] not in ["POST", "GET", "PUT", "DELETE"]:
                return JsonResponse(code="999996", msg="参数有误!")
            if data["requestParameterType"] not in ["form-data", "raw", "Restful"]:
                return JsonResponse(code="999996", msg="参数有误!")
            if data["examineType"] not in ["no_check", "only_check_status", "json", "entirely_check", "Regular_check"]:
                return JsonResponse(code="999996", msg="参数有误!")
            if data["httpCode"]:
                if data["httpCode"] not in ["200", "404", "400", "502", "500", "302"]:
                    return JsonResponse(code="999996", msg="参数有误!")
            if not isinstance(data['formatRaw'], bool):
                return JsonResponse(code="999996", msg="参数有误!")
        except KeyError:
            return JsonResponse(code="999996", msg="参数有误!")

    def post(self, request):
        """
        Create a brand-new API under a test case, together with its headers,
        request parameters and response-check configuration, in one transaction.
        :param request: JSON body validated by parameter_check
        :return: JsonResponse carrying the new api_id on success
        """
        data = JSONParser().parse(request)
        result = self.parameter_check(data)
        if result:
            return result
        try:
            pro_data = Project.objects.get(id=data["project_id"])
            if not request.user.is_superuser and pro_data.user.is_superuser:
                return JsonResponse(code="999983", msg="无操作权限!")
        except ObjectDoesNotExist:
            return JsonResponse(code="999995", msg="项目不存在!")
        pro_data = ProjectSerializer(pro_data)
        if not pro_data.data["status"]:
            return JsonResponse(code="999985", msg="该项目已禁用")
        try:
            obj = AutomationTestCase.objects.get(id=data["automationTestCase_id"], project=data["project_id"])
        except ObjectDoesNotExist:
            return JsonResponse(code="999987", msg="用例不存在!")
        # API names must be unique within a case
        api_name = AutomationCaseApi.objects.filter(name=data["name"], automationTestCase=data["automationTestCase_id"])
        if len(api_name):
            return JsonResponse(code="999997", msg="存在相同名称!")
        with transaction.atomic():
            serialize = AutomationCaseApiDeserializer(data=data)
            if serialize.is_valid():
                serialize.save(automationTestCase=obj)
                api_id = serialize.data.get("id")
                # persist the request headers
                if len(data.get("headDict")):
                    for i in data["headDict"]:
                        if i["name"]:
                            i["automationCaseApi_id"] = api_id
                            head_serialize = AutomationHeadDeserializer(data=i)
                            if head_serialize.is_valid():
                                head_serialize.save(automationCaseApi=AutomationCaseApi.objects.get(id=api_id))
                # persist the request parameters: rows for form-data, one raw blob otherwise
                if data["requestParameterType"] == "form-data":
                    if len(data.get("requestList")):
                        for i in data.get("requestList"):
                            if i.get("name"):
                                i["automationCaseApi_id"] = api_id
                                param_serialize = AutomationParameterDeserializer(data=i)
                                if param_serialize.is_valid():
                                    param_serialize.save(automationCaseApi=AutomationCaseApi.objects.get(id=api_id))
                else:
                    if len(data.get("requestList")):
                        AutomationParameterRaw(automationCaseApi=AutomationCaseApi.objects.get(id=api_id),
                                               data=data["requestList"]).save()
                api_ids = AutomationCaseApi.objects.get(id=api_id)
                if data.get("examineType") == "json":
                    try:
                        # SECURITY NOTE(review): eval() on client-supplied responseData can
                        # execute arbitrary code; json.loads would be the safe equivalent —
                        # confirm inputs and replace.
                        response = eval(data["responseData"].replace("true", "True").replace("false", "False").replace("null", "None"))
                        api = "<response[JSON][%s]>" % api_id
                        create_json(api_ids, api, response)
                    except KeyError:
                        return JsonResponse(code="999998", msg="失败!")
                    except AttributeError:
                        return JsonResponse(code="999998", msg="校验内容不能为空!")
                elif data.get("examineType") == 'Regular_check':
                    if data.get("RegularParam"):
                        # store the regular-expression extraction descriptor
                        AutomationResponseJson(automationCaseApi=api_ids,
                                               name=data["RegularParam"],
                                               tier='<response[Regular][%s]["%s"]' % (api_id, data["responseData"]),
                                               type='Regular').save()
                return JsonResponse(data={"api_id": api_id}, code="999999", msg="成功!")
            return JsonResponse(code="999998", msg="失败!")
class GetCorrelationResponse(APIView):
    authentication_classes = (TokenAuthentication,)
    permission_classes = ()

    def get(self, request):
        """
        Return the correlation data exposed by APIs of a case.
        Query params: project_id, case_id (required); api_id optional — when
        given, only APIs with a smaller id are returned.
        :return: JsonResponse with the serialized correlation data
        """
        project_id = request.GET.get("project_id")
        case_id = request.GET.get("case_id")
        api_id = request.GET.get("api_id")
        # BUG FIX: a missing query parameter is None and used to raise
        # AttributeError on .isdecimal() (HTTP 500); report a parameter error.
        if not project_id or not case_id or not project_id.isdecimal() or not case_id.isdecimal():
            return JsonResponse(code="999996", msg="参数有误!")
        try:
            pro_data = Project.objects.get(id=project_id)
        except ObjectDoesNotExist:
            return JsonResponse(code="999995", msg="项目不存在!")
        pro_data = ProjectSerializer(pro_data)
        if not pro_data.data["status"]:
            return JsonResponse(code="999985", msg="该项目已禁用")
        try:
            AutomationTestCase.objects.get(id=case_id, project=project_id)
        except ObjectDoesNotExist:
            return JsonResponse(code="999987", msg="用例不存在!")
        if api_id:
            data = CorrelationDataSerializer(AutomationCaseApi.objects.filter(automationTestCase=case_id,
                                                                              id__lt=api_id), many=True).data
        else:
            data = CorrelationDataSerializer(AutomationCaseApi.objects.filter(automationTestCase=case_id),
                                             many=True).data
        return JsonResponse(code="999999", msg="成功!", data=data)
class UpdateApi(APIView):
    authentication_classes = (TokenAuthentication,)
    permission_classes = ()

    def parameter_check(self, data):
        """
        Validate the payload for updating a case API.
        :param data: parsed JSON body
        :return: JsonResponse on failure, None when valid
        """
        try:
            # all required fields must be present and non-empty
            if not data["project_id"] or not data["automationTestCase_id"] or not data["name"] or not data["httpType"]\
                    or not data["requestType"] or not data["apiAddress"] or not data["requestParameterType"]\
                    or not data["examineType"] or not data["id"]:
                return JsonResponse(code="999996", msg="参数有误!")
            if not isinstance(data["project_id"], int) or not isinstance(data["automationTestCase_id"], int):
                return JsonResponse(code="999996", msg="参数有误!")
            # enum-style fields must hold one of the known values
            if data["httpType"] not in ["HTTP", "HTTPS"]:
                return JsonResponse(code="999996", msg="参数有误!")
            if data["requestType"] not in ["POST", "GET", "PUT", "DELETE"]:
                return JsonResponse(code="999996", msg="参数有误!")
            if data["requestParameterType"] not in ["form-data", "raw", "Restful"]:
                return JsonResponse(code="999996", msg="参数有误!")
            if data["examineType"] not in ["no_check", "only_check_status", "json", "entirely_check", "Regular_check"]:
                return JsonResponse(code="999996", msg="参数有误!")
            if data["httpCode"]:
                if data["httpCode"] not in ["200", "404", "400", "502", "500", "302"]:
                    return JsonResponse(code="999996", msg="参数有误!")
            if not isinstance(data['formatRaw'], bool):
                return JsonResponse(code="999996", msg="参数有误!")
        except KeyError:
            return JsonResponse(code="999996", msg="参数有误!")

    def post(self, request):
        """
        Update an API under a test case: the API record itself plus its headers,
        parameters (form-data rows or one raw body) and response checks. Rows
        present in the payload are updated or created; rows no longer referenced
        are deleted afterwards via the accumulated Q filters.
        :param request: JSON body validated by parameter_check
        :return: JsonResponse status
        """
        data = JSONParser().parse(request)
        result = self.parameter_check(data)
        if result:
            return result
        try:
            pro_data = Project.objects.get(id=data["project_id"])
            if not request.user.is_superuser and pro_data.user.is_superuser:
                return JsonResponse(code="999983", msg="无操作权限!")
        except ObjectDoesNotExist:
            return JsonResponse(code="999995", msg="项目不存在!")
        pro_data = ProjectSerializer(pro_data)
        if not pro_data.data["status"]:
            return JsonResponse(code="999985", msg="该项目已禁用")
        try:
            obi = AutomationTestCase.objects.get(id=data["automationTestCase_id"], project=data["project_id"])
        except ObjectDoesNotExist:
            return JsonResponse(code="999987", msg="用例不存在!")
        try:
            obj = AutomationCaseApi.objects.get(id=data["id"], automationTestCase=data["automationTestCase_id"])
        except ObjectDoesNotExist:
            return JsonResponse(code="999990", msg="接口不存在!")
        # a different API of the same case must not already hold this name
        api_name = AutomationCaseApi.objects.filter(name=data["name"], automationTestCase=data["automationTestCase_id"]).exclude(id=data["id"])
        if len(api_name):
            return JsonResponse(code="999997", msg="存在相同名称!")
        with transaction.atomic():
            serialize = AutomationCaseApiDeserializer(data=data)
            if serialize.is_valid():
                serialize.update(instance=obj, validated_data=data)
                # `header` collects the ids of headers that must survive;
                # everything else belonging to this API is deleted below
                header = Q()
                if len(data.get("headDict")):
                    for i in data["headDict"]:
                        if i.get("automationCaseApi") and i.get("id"):
                            # existing header row: keep it and update in place
                            header = header | Q(id=i["id"])
                            if i["name"]:
                                head_serialize = AutomationHeadDeserializer(data=i)
                                if head_serialize.is_valid():
                                    i["automationCaseApi"] = AutomationCaseApi.objects.get(id=i["automationCaseApi"])
                                    head_serialize.update(instance=AutomationHead.objects.get(id=i["id"]), validated_data=i)
                        else:
                            # new header row: create it and remember its new id
                            if i.get("name"):
                                i["automationCaseApi"] = data['id']
                                head_serialize = AutomationHeadDeserializer(data=i)
                                if head_serialize.is_valid():
                                    head_serialize.save(automationCaseApi=AutomationCaseApi.objects.get(id=data["id"]))
                                    header = header | Q(id=head_serialize.data.get("id"))
                AutomationHead.objects.exclude(header).filter(automationCaseApi=data["id"]).delete()
                api_param = Q()
                api_param_raw = Q()
                if len(data.get("requestList")):
                    if data["requestParameterType"] == "form-data":
                        # switching to form-data makes any raw body obsolete
                        AutomationParameterRaw.objects.filter(automationCaseApi=data["id"]).delete()
                        for i in data["requestList"]:
                            if i.get("automationCaseApi") and i.get("id"):
                                # existing parameter row: keep and update in place
                                api_param = api_param | Q(id=i["id"])
                                if i["name"]:
                                    param_serialize = AutomationParameterDeserializer(data=i)
                                    if param_serialize.is_valid():
                                        i["automationCaseApi"] = AutomationCaseApi.objects.get(id=i["automationCaseApi"])
                                        param_serialize.update(instance=AutomationParameter.objects.get(id=i["id"]),
                                                               validated_data=i)
                            else:
                                # new parameter row: create it and remember its id
                                if i.get("name"):
                                    i["automationCaseApi"] = data['id']
                                    param_serialize = AutomationParameterDeserializer(data=i)
                                    if param_serialize.is_valid():
                                        param_serialize.save(automationCaseApi=AutomationCaseApi.objects.get(id=data["id"]))
                                        api_param = api_param | Q(id=param_serialize.data.get("id"))
                    else:
                        # raw body: update the existing record or create a new one
                        try:
                            obj = AutomationParameterRaw.objects.get(automationCaseApi=data["id"])
                            obj.data = data["requestList"]
                            obj.save()
                        except ObjectDoesNotExist:
                            obj = AutomationParameterRaw(automationCaseApi=AutomationCaseApi.objects.get(id=data['id']), data=data["requestList"])
                            obj.save()
                        api_param_raw = api_param_raw | Q(id=obj.id)
                AutomationParameter.objects.exclude(api_param).filter(automationCaseApi=data["id"]).delete()
                AutomationParameterRaw.objects.exclude(api_param_raw).filter(automationCaseApi=data["id"]).delete()
                api_id = AutomationCaseApi.objects.get(id=data["id"])
                # NOTE(review): the second .filter(automationCaseApi=data["id"]) is
                # redundant — it repeats the first filter with the same API.
                AutomationResponseJson.objects.filter(automationCaseApi=api_id).filter(automationCaseApi=data["id"]).delete()
                if data.get("examineType") == "json":
                    try:
                        # SECURITY NOTE(review): eval() on client-supplied responseData can
                        # execute arbitrary code; json.loads would be the safe equivalent —
                        # confirm inputs and replace.
                        response = eval(data["responseData"].replace("true", "True").replace("false", "False").replace("null", "None"))
                        api = "<response[JSON][%s]>" % api_id
                        create_json(api_id, api, response)
                    except KeyError:
                        return JsonResponse(code="999998", msg="失败!")
                    except AttributeError:
                        return JsonResponse(code="999998", msg="校验内容不能为空!")
                elif data.get("examineType") == 'Regular_check':
                    if data.get("RegularParam"):
                        # store the regular-expression extraction descriptor
                        AutomationResponseJson(automationCaseApi=api_id,
                                               name=data["RegularParam"],
                                               tier='<response[Regular][%s]["%s"]' % (api_id, data["responseData"]),
                                               type='Regular').save()
                record_dynamic(project=data["project_id"],
                               _type="修改", operationObject="用例接口", user=request.user.pk,
                               data="用例“%s”修改接口\"%s\"" % (obi.caseName, data["name"]))
                return JsonResponse(code="999999", msg="成功!")
            return JsonResponse(code="999998", msg="失败!")
class DelApi(APIView):
    authentication_classes = (TokenAuthentication,)
    permission_classes = ()

    def parameter_check(self, data):
        """
        Validate the payload: project_id / case_id ints, ids a list of ints.
        :return: JsonResponse on failure, None when valid
        """
        try:
            if not data["project_id"] or not data["case_id"] or not data["ids"]:
                return JsonResponse(code="999996", msg="参数有误!")
            if not isinstance(data["project_id"], int) or not isinstance(data["case_id"], int) \
                    or not isinstance(data["ids"], list):
                return JsonResponse(code="999996", msg="参数有误!")
            if any(not isinstance(api_pk, int) for api_pk in data["ids"]):
                return JsonResponse(code="999996", msg="参数有误!")
        except KeyError:
            return JsonResponse(code="999996", msg="参数有误!")

    def post(self, request):
        """
        Delete APIs from a test case; ids not found are silently skipped.
        :param request: JSON body validated by parameter_check
        :return: JsonResponse status
        """
        data = JSONParser().parse(request)
        check = self.parameter_check(data)
        if check:
            return check
        try:
            project = Project.objects.get(id=data["project_id"])
            if not request.user.is_superuser and project.user.is_superuser:
                return JsonResponse(code="999983", msg="无操作权限!")
        except ObjectDoesNotExist:
            return JsonResponse(code="999995", msg="项目不存在!")
        if not ProjectSerializer(project).data["status"]:
            return JsonResponse(code="999985", msg="该项目已禁用")
        try:
            case = AutomationTestCase.objects.get(id=data["case_id"], project=data["project_id"])
        except ObjectDoesNotExist:
            return JsonResponse(code="999987", msg="用例不存在!")
        for api_pk in data["ids"]:
            apis = AutomationCaseApi.objects.filter(id=api_pk, automationTestCase=data["case_id"])
            if apis:
                name = apis[0].name
                apis.delete()
                record_dynamic(project=data["project_id"],
                               _type="删除", operationObject="用例接口",
                               user=request.user.pk, data="删除用例\"%s\"的接口\"%s\"" % (case.caseName, name))
        return JsonResponse(code="999999", msg="成功!")
class StartTest(APIView):
    authentication_classes = (TokenAuthentication,)
    permission_classes = ()

    def parameter_check(self, data):
        """
        Validate the payload: project_id, case_id, id and host_id must all be
        present, truthy ints.
        :return: JsonResponse on failure, None when valid
        """
        try:
            for key in ("project_id", "case_id", "id", "host_id"):
                if not data[key] or not isinstance(data[key], int):
                    return JsonResponse(code="999996", msg="参数有误!")
        except KeyError:
            return JsonResponse(code="999996", msg="参数有误!")

    def post(self, request):
        """
        Run a single case API against the chosen host and store the result.
        :param request: JSON body validated by parameter_check
        :return: JsonResponse carrying the test result
        """
        data = JSONParser().parse(request)
        check = self.parameter_check(data)
        if check:
            return check
        try:
            project = Project.objects.get(id=data["project_id"])
        except ObjectDoesNotExist:
            return JsonResponse(code="999995", msg="项目不存在!")
        if not ProjectSerializer(project).data["status"]:
            return JsonResponse(code="999985", msg="该项目已禁用")
        try:
            case = AutomationTestCase.objects.get(id=data["case_id"], project=data["project_id"])
        except ObjectDoesNotExist:
            return JsonResponse(code="999987", msg="用例不存在!")
        try:
            GlobalHost.objects.get(id=data["host_id"], project=data["project_id"])
        except ObjectDoesNotExist:
            return JsonResponse(code="999992", msg="host不存在!")
        try:
            api = AutomationCaseApi.objects.get(id=data["id"], automationTestCase=data["case_id"])
        except ObjectDoesNotExist:
            return JsonResponse(code="999990", msg="接口不存在!")
        # drop any stale result rows before re-running this API
        AutomationTestResult.objects.filter(automationCaseApi=data["id"]).delete()
        try:
            result = test_api(host_id=data["host_id"], case_id=data["case_id"],
                              _id=data["id"], project_id=data["project_id"])
        except Exception as e:
            logging.exception(e)
            return JsonResponse(code="999998", msg="失败!")
        record_dynamic(project=data["project_id"],
                       _type="测试", operationObject="用例接口",
                       user=request.user.pk, data="测试用例“%s”接口\"%s\"" % (case.caseName, api.name))
        return JsonResponse(data={
            "result": result
        }, code="999999", msg="成功!")
class AddTimeTask(APIView):
    authentication_classes = (TokenAuthentication,)
    permission_classes = ()

    def parameter_check(self, data):
        """
        Validate the common task parameters.

        :param data: parsed JSON body
        :return: JsonResponse on invalid input, None when valid
        """
        try:
            if not data["project_id"] or not data["name"] or not data["type"] or \
                    not data["Host_id"] or not data["startTime"] or not data["endTime"]:
                return JsonResponse(code="999996", msg="参数有误!")
            if not isinstance(data["project_id"], int) or not isinstance(data["Host_id"], int):
                return JsonResponse(code="999996", msg="参数有误!")
            if data["type"] not in ["circulation", "timing"]:
                return JsonResponse(code="999996", msg="参数有误!")
            try:
                start_time = datetime.strptime(data["startTime"], "%Y-%m-%d %H:%M:%S")
                end_time = datetime.strptime(data["endTime"], "%Y-%m-%d %H:%M:%S")
                # A task must not end before it starts.
                if start_time > end_time:
                    return JsonResponse(code="999996", msg="参数有误!")
            except ValueError:
                return JsonResponse(code="999996", msg="参数有误!")
        except KeyError:
            return JsonResponse(code="999996", msg="参数有误!")

    def _save_task(self, data, project, host):
        """
        Create or update the single task of a project (both branches of
        ``post`` previously duplicated this block verbatim).

        :param data: validated request payload
        :param project: Project instance the task belongs to
        :param host: GlobalHost instance the task runs against
        :return: (task_id, None) on success, (None, JsonResponse) on failure
        """
        same_name = AutomationTestTask.objects.filter(name=data["name"]).exclude(project=data["project_id"])
        if len(same_name):
            return None, JsonResponse(code="999997", msg="存在相同名称!")
        try:
            existing = AutomationTestTask.objects.get(project=data["project_id"])
            serialize = AutomationTestTaskDeserializer(data=data)
            if not serialize.is_valid():
                return None, JsonResponse(code="999996", msg="参数有误!")
            serialize.update(instance=existing, validated_data=data)
        except ObjectDoesNotExist:
            serialize = AutomationTestTaskDeserializer(data=data)
            if not serialize.is_valid():
                return None, JsonResponse(code="999996", msg="参数有误!")
            serialize.save(project=project, Host=host)
        return serialize.data.get("id"), None

    def post(self, request):
        """
        Add a timed ("timing") or looping ("circulation") test task.
        Only runs on Linux because scheduling is done through crontab.

        :param request: request whose JSON body holds the task definition
        :return: JsonResponse with the task id or an error code
        """
        sys_name = platform.system()
        if sys_name == "Windows" or sys_name == "Darwin":
            return JsonResponse(code="999998", msg="该操作只能在Linux系统下进行!")
        data = JSONParser().parse(request)
        result = self.parameter_check(data)
        if result:
            return result
        try:
            pro_id = Project.objects.get(id=data["project_id"])
            if not request.user.is_superuser and pro_id.user.is_superuser:
                return JsonResponse(code="999983", msg="无操作权限!")
        except ObjectDoesNotExist:
            return JsonResponse(code="999995", msg="项目不存在!")
        pro_data = ProjectSerializer(pro_id)
        # Keep the raw strings for the crontab helper; the deserializer needs
        # real datetime objects.
        start_time = data["startTime"]
        end_time = data["endTime"]
        if not pro_data.data["status"]:
            return JsonResponse(code="999985", msg="该项目已禁用")
        data["startTime"] = datetime.strptime(data["startTime"], "%Y-%m-%d %H:%M:%S")
        data["endTime"] = datetime.strptime(data["endTime"], "%Y-%m-%d %H:%M:%S")
        try:
            host_data = GlobalHost.objects.get(id=data["Host_id"], project=data["project_id"])
        except ObjectDoesNotExist:
            return JsonResponse(code="999992", msg="host不存在!")
        if data["type"] == "circulation":
            # FIX: a payload without "frequency"/"unit" used to raise an
            # unhandled KeyError (HTTP 500) instead of a parameter error.
            try:
                frequency = data["frequency"]
                unit = data["unit"]
            except KeyError:
                return JsonResponse(code="999996", msg="参数有误!")
            if not frequency or not isinstance(frequency, int):
                return JsonResponse(code="999996", msg="参数有误!")
            if unit not in ["m", "h", "d", "w"]:
                return JsonResponse(code="999996", msg="参数有误!")
            task_id, error = self._save_task(data, pro_id, host_data)
            if error:
                return error
            record_dynamic(project=data["project_id"],
                           _type="新增", operationObject="任务",
                           user=request.user.pk, data="新增循环任务\"%s\"" % data["name"])
            add(host_id=data["Host_id"], _type=data["type"], project=str(data["project_id"]),
                start_time=start_time, end_time=end_time, frequency=frequency, unit=unit)
        else:
            task_id, error = self._save_task(data, pro_id, host_data)
            if error:
                return error
            record_dynamic(project=data["project_id"],
                           _type="新增", operationObject="任务",
                           user=request.user.pk, data="新增定时任务\"%s\"" % data["name"])
            add(host_id=data["Host_id"], _type=data["type"], project=str(data["project_id"]),
                start_time=start_time, end_time=end_time)
        return JsonResponse(data={"task_id": task_id}, code="999999", msg="成功!")
class GetTask(APIView):
    authentication_classes = (TokenAuthentication,)
    permission_classes = ()

    def get(self, request):
        """
        Fetch the test-execution task configured for a project.

        :param request: query string must carry a decimal ``project_id``
        :return: JsonResponse with the task data (or empty success if no task)
        """
        project_id = request.GET.get("project_id")
        # FIX: a request without project_id used to call None.isdecimal() and
        # surface as an AttributeError / HTTP 500 instead of a parameter error.
        if not project_id or not project_id.isdecimal():
            return JsonResponse(code="999996", msg="参数有误!")
        try:
            pro_data = Project.objects.get(id=project_id)
        except ObjectDoesNotExist:
            return JsonResponse(code="999995", msg="项目不存在!")
        pro_data = ProjectSerializer(pro_data)
        if not pro_data.data["status"]:
            return JsonResponse(code="999985", msg="该项目已禁用")
        try:
            obj = AutomationTestTaskSerializer(AutomationTestTask.objects.get(project=project_id)).data
            return JsonResponse(code="999999", msg="成功!", data=obj)
        except ObjectDoesNotExist:
            # No task configured yet is not an error.
            return JsonResponse(code="999999", msg="成功!")
class DelTask(APIView):
    authentication_classes = (TokenAuthentication,)
    permission_classes = ()

    def parameter_check(self, data):
        """
        Validate the request payload.

        :param data: parsed JSON body
        :return: JsonResponse on invalid input, None when valid
        """
        try:
            project_id = data["project_id"]
        except KeyError:
            return JsonResponse(code="999996", msg="参数有误!")
        if not project_id or not isinstance(project_id, int):
            return JsonResponse(code="999996", msg="参数有误!")

    def post(self, request):
        """
        Delete the test task of a project together with its crontab entry.

        :param request: request whose JSON body holds project_id
        :return: JsonResponse describing success or the failure reason
        """
        data = JSONParser().parse(request)
        fail = self.parameter_check(data)
        if fail:
            return fail
        try:
            project = Project.objects.get(id=data["project_id"])
            # An ordinary user may not modify a project owned by a superuser.
            if project.user.is_superuser and not request.user.is_superuser:
                return JsonResponse(code="999983", msg="无操作权限!")
        except ObjectDoesNotExist:
            return JsonResponse(code="999995", msg="项目不存在!")
        if not ProjectSerializer(project).data["status"]:
            return JsonResponse(code="999985", msg="该项目已禁用")
        tasks = AutomationTestTask.objects.filter(project=data["project_id"])
        if not tasks:
            return JsonResponse(code="999986", msg="任务不存在!")
        # Remove the task row and its crontab entry atomically.
        with transaction.atomic():
            tasks.delete()
            del_task_crontab(str(data["project_id"]))
            record_dynamic(project=data["project_id"],
                           _type="删除", operationObject="任务",
                           user=request.user.pk, data="删除任务")
            return JsonResponse(code="999999", msg="成功!")
class LookResult(APIView):
    authentication_classes = (TokenAuthentication,)
    permission_classes = ()

    def get(self, request):
        """
        Return the detailed test result of one case api.

        :param request: query string must carry decimal project_id, case_id, api_id
        :return: JsonResponse with the result data (or empty success if none)
        """
        project_id = request.GET.get("project_id")
        case_id = request.GET.get("case_id")
        api_id = request.GET.get("api_id")
        # FIX: case_id was never validated, and a missing parameter used to
        # raise AttributeError/ValueError (HTTP 500) instead of returning the
        # 999996 parameter error like the sibling views.
        if not project_id or not case_id or not api_id:
            return JsonResponse(code="999996", msg="参数有误!")
        if not project_id.isdecimal() or not case_id.isdecimal() or not api_id.isdecimal():
            return JsonResponse(code="999996", msg="参数有误!")
        try:
            pro_data = Project.objects.get(id=project_id)
        except ObjectDoesNotExist:
            return JsonResponse(code="999995", msg="项目不存在!")
        pro_data = ProjectSerializer(pro_data)
        if not pro_data.data["status"]:
            return JsonResponse(code="999985", msg="该项目已禁用")
        try:
            AutomationTestCase.objects.get(id=case_id, project=project_id)
        except ObjectDoesNotExist:
            return JsonResponse(code="999987", msg="用例不存在!")
        try:
            AutomationCaseApi.objects.get(id=api_id, automationTestCase=case_id)
        except ObjectDoesNotExist:
            return JsonResponse(code="999990", msg="接口不存在!")
        try:
            data = AutomationTestResult.objects.get(automationCaseApi=api_id)
            serialize = AutomationTestResultSerializer(data)
            return JsonResponse(data=serialize.data, code="999999", msg="成功!")
        except ObjectDoesNotExist:
            # The api has simply not been executed yet.
            return JsonResponse(code="999999", msg="成功!")
class TestReport(APIView):
    authentication_classes = (TokenAuthentication,)
    permission_classes = ()

    def get(self, request):
        """
        Build the test report of a project: per-api result rows plus
        pass/fail/error/not-run counters.

        :param request: query string must carry a decimal ``project_id``
        :return: JsonResponse with the report or an error code
        """
        project_id = request.GET.get("project_id")
        # FIX: a request without project_id used to call None.isdecimal() and
        # surface as an AttributeError / HTTP 500 instead of a parameter error.
        if not project_id or not project_id.isdecimal():
            return JsonResponse(code="999996", msg="参数有误!")
        try:
            pro_data = Project.objects.get(id=project_id)
        except ObjectDoesNotExist:
            return JsonResponse(code="999995", msg="项目不存在!")
        pro_data = ProjectSerializer(pro_data)
        if not pro_data.data["status"]:
            return JsonResponse(code="999985", msg="该项目已禁用")
        obj = AutomationTestCase.objects.filter(project=project_id)
        if not obj:
            return JsonResponse(code="999987", msg="用例不存在!")
        # Collect every api belonging to any case of the project.
        case = Q()
        for i in obj:
            case = case | Q(automationTestCase=i.pk)
        data = AutomationTestReportSerializer(
            AutomationCaseApi.objects.filter(case), many=True).data
        # Tally results; anything that is not PASS/FAIL/ERROR counts as not run.
        success = fail = error = not_run = 0
        for row in data:
            if row["result"] == "PASS":
                success += 1
            elif row["result"] == "FAIL":
                fail += 1
            elif row["result"] == "ERROR":
                error += 1
            else:
                not_run += 1
        return JsonResponse(code="999999", msg="成功!", data={"data": data,
                                                            "total": len(data),
                                                            "pass": success,
                                                            "fail": fail,
                                                            "error": error,
                                                            "NotRun": not_run
                                                            })
class DownLoadCase(APIView):
    authentication_classes = (TokenAuthentication,)
    permission_classes = ()

    def get(self, request):
        """
        Write the project's cases into an xlsx document and return its path.

        :param request: query string must carry a decimal ``project_id``
        :return: JsonResponse with the document path or an error code
        """
        project_id = request.GET.get("project_id")
        # project_id may be absent (None) — treat that as a parameter error too.
        try:
            valid = project_id.isdecimal()
        except AttributeError:
            return JsonResponse(code="999996", msg="参数有误!")
        if not valid:
            return JsonResponse(code="999996", msg="参数有误!")
        try:
            project = Project.objects.get(id=project_id)
        except ObjectDoesNotExist:
            return JsonResponse(code="999995", msg="项目不存在!")
        if not ProjectSerializer(project).data["status"]:
            return JsonResponse(code="999985", msg="该项目已禁用")
        groups = AutomationGroupLevelFirst.objects.filter(project=project_id).order_by("id")
        sheet_rows = AutomationCaseDownSerializer(groups, many=True).data
        path = "./api_test/ApiDoc/%s.xlsx" % str(project.name)
        if Write(path).write_case(sheet_rows):
            return JsonResponse(code="999999", msg="成功!", data=path)
        return JsonResponse(code="999998", msg="失败")
| 44.565591
| 146
| 0.558494
| 6,042
| 62,169
| 5.633069
| 0.056604
| 0.110533
| 0.128632
| 0.06746
| 0.840899
| 0.820538
| 0.796827
| 0.767475
| 0.737476
| 0.705245
| 0
| 0.031107
| 0.32106
| 62,169
| 1,394
| 147
| 44.597561
| 0.775238
| 0.026396
| 0
| 0.733212
| 0
| 0
| 0.107855
| 0.009667
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033575
| false
| 0.002722
| 0.017241
| 0
| 0.315789
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b863f090d7c90cb832568e75045e8c36250eab76
| 31,871
|
py
|
Python
|
vmd_tools/vmd_defs.py
|
thatrandomlurker-divamoddingtools/PyD_Tool
|
def5877edbd48921c555b6e125483ebb9efe83cb
|
[
"MIT"
] | null | null | null |
vmd_tools/vmd_defs.py
|
thatrandomlurker-divamoddingtools/PyD_Tool
|
def5877edbd48921c555b6e125483ebb9efe83cb
|
[
"MIT"
] | null | null | null |
vmd_tools/vmd_defs.py
|
thatrandomlurker-divamoddingtools/PyD_Tool
|
def5877edbd48921c555b6e125483ebb9efe83cb
|
[
"MIT"
] | 1
|
2021-01-04T18:55:50.000Z
|
2021-01-04T18:55:50.000Z
|
import struct, diva_tools.dex, json, misc.ulsr
class VMD_Morph(object):
    """One VMD face-morph keyframe: morph name, frame index and weight value."""

    def __init__(self):
        # BUG FIX: the initializer was named ``VMD_Morph`` (C++/Java
        # constructor style), so ``VMD_Morph()`` never ran it and instances
        # started with no Name/Frame/Value attributes at all.
        self.Name = b''     # placeholder; readers overwrite with a decoded str
        self.Frame = int()  # keyframe number (readers unpack an unsigned int)
        self.Value = int()  # morph weight (readers unpack a float)
def Read_Main_Morphs_to_EXP(dex_main_f, dex_eyes_f, json_file):
# I hate this code, and i know everyone else will hate this code
# But i need some way to load all the possible expressions into their own lists
# And to do that i need to make the lists
# If i knew how to do this on the fly i would, but i don't
kanashii_frames = []
warai_frames = []
pikkuri_frames = []
kantan_frames = []
eyesmile_frames = []
mabushii_frames = []
tsuyoi_frames = []
meikakunisuru_frames = []
yasashii_frames = []
nagashi_frames = []
kiri_frames = []
utsuro_frames = []
kangaeru_frames = []
setsuna_frames = []
genki_frames = []
yaru_frames = []
mabataki_frames = []
cool_frames = []
kumon_frames = []
kutsuu_frames = []
naki_frames = []
nayami_frames = []
pikkuri2_frames = []
wink2_frames = []
wink2r_frames = []
wink_frames = []
winkr_frames = []
diva_frames = []
file_version = dex_main_f.read(30).decode("Shift-JIS")
model_name = dex_main_f.read(20).decode("Shift-JIS")
bone_keyframes = struct.unpack("I", dex_main_f.read(4))[0]
# Won't bother to write bone reading code yet
face_keyframes = struct.unpack("I", dex_main_f.read(4))[0]
for i in range(0, face_keyframes):
morph = VMD_Morph()
try:
name = dex_main_f.read(15).decode('Shift-JIS').split('\x00')[0]
except UnicodeDecodeError:
print('Ok, this is weird... this should not happen... oh well')
name = "broken"
pass
morph.Name = str(name)
morph.Frame = struct.unpack("I", dex_main_f.read(4))[0]
morph.Value = struct.unpack("f", dex_main_f.read(4))[0]
if morph.Name == "悲しい":
kanashii_frames.append(morph.__dict__)
elif morph.Name == "笑い":
warai_frames.append(morph.__dict__)
elif morph.Name == "ぴっくり":
pikkuri_frames.append(morph.__dict__)
elif morph.Name == "感嘆":
kantan_frames.append(morph.__dict__)
elif morph.Name == "アイスマイル":
eyesmile_frames.append(morph.__dict__)
elif morph.Name == "眩しい":
mabushii_frames.append(morph.__dict__)
elif morph.Name == "強い":
tsuyoi_frames.append(morph.__dict__)
elif morph.Name == "明確にする":
meikakunisuru_frames.append(morph.__dict__)
elif morph.Name == "優しい":
yasashii_frames.append(morph.__dict__)
elif morph.Name == "ながし":
nagashi_frames.append(morph.__dict__)
elif morph.Name == "キリッ":
kiri_frames.append(morph.__dict__)
elif morph.Name == "ウツロ":
utsuro_frames.append(morph.__dict__)
elif morph.Name == "考える":
kangaeru_frames.append(morph.__dict__)
elif morph.Name == "せつな":
setsuna_frames.append(morph.__dict__)
elif morph.Name == "元気":
genki_frames.append(morph.__dict__)
elif morph.Name == "ヤル":
yaru_frames.append(morph.__dict__)
elif morph.Name == "まばたき":
mabataki_frames.append(morph.__dict__)
elif morph.Name == "クール":
cool_frames.append(morph.__dict__)
elif morph.Name == "くもん":
kumon_frames.append(morph.__dict__)
elif morph.Name == "くつう":
kutsuu_frames.append(morph.__dict__)
elif morph.Name == "なき":
naki_frames.append(morph.__dict__)
elif morph.Name == "なやみ":
nayami_frames.append(morph.__dict__)
elif morph.Name == "ぴっくり2":
pikkuri2_frames.append(morph.__dict__)
elif morph.Name == "ウィンク2":
wink2_frames.append(morph.__dict__)
elif morph.Name == "ウィンク2右":
wink2r_frames.append(morph.__dict__)
elif morph.Name == "ウィンク":
wink_frames.append(morph.__dict__)
elif morph.Name == "ウィンク右":
winkr_frames.append(morph.__dict__)
# Lets begin adding
# SAD
internal_tracker = int(0)
while True:
if internal_tracker >= len(kanashii_frames):
break
src1 = kanashii_frames[internal_tracker]
internal_tracker += 1
if src1["Frame"] == 0 and src1["Value"] == 0:
continue
elif src1["Frame"] == 0 and src1["Value"] == 1:
df = diva_tools.dex.EXPFrame()
df.F = src1["Frame"]
df.B = 0
df.I = 0
df.V = src1["Value"]
df.T = 0
else:
src2 = kanashii_frames[internal_tracker]
internal_tracker += 1
df = diva_tools.dex.EXPFrame()
df.F = src1["Frame"]
df.B = 0
df.I = 0
df.V = src2["Value"]
df.T = int(src2["Frame"] - src1["Frame"])
diva_frames.append(df.__dict__)
# LAUGH
internal_tracker = int(0)
while True:
if internal_tracker >= len(warai_frames):
break
src1 = warai_frames[internal_tracker]
internal_tracker += 1
if src1["Frame"] == 0 and src1["Value"] == 0:
continue
elif src1["Frame"] == 0 and src1["Value"] == 1:
df = diva_tools.dex.EXPFrame()
df.F = src1["Frame"]
df.B = 0
df.I = 1
df.V = src1["Value"]
df.T = 0
else:
src2 = warai_frames[internal_tracker]
internal_tracker += 1
df = diva_tools.dex.EXPFrame()
df.F = src1["Frame"]
df.B = 0
df.I = 1
df.V = src2["Value"]
df.T = int(src2["Frame"] - src1["Frame"])
diva_frames.append(df.__dict__)
# pikkuri
internal_tracker = int(0)
while True:
if internal_tracker >= len(pikkuri_frames):
break
src1 = pikkuri_frames[internal_tracker]
internal_tracker += 1
if src1["Frame"] == 0 and src1["Value"] == 0:
continue
elif src1["Frame"] == 0 and src1["Value"] == 1:
df = diva_tools.dex.EXPFrame()
df.F = src1["Frame"]
df.B = 0
df.I = 3
df.V = src1["Value"]
df.T = 0
else:
src2 = pikkuri_frames[internal_tracker]
internal_tracker += 1
df = diva_tools.dex.EXPFrame()
df.F = src1["Frame"]
df.B = 0
df.I = 3
df.V = src2["Value"]
df.T = int(src2["Frame"] - src1["Frame"])
diva_frames.append(df.__dict__)
# ADMIRATION
internal_tracker = int(0)
while True:
if internal_tracker >= len(kantan_frames):
break
src1 = kantan_frames[internal_tracker]
internal_tracker += 1
if src1["Frame"] == 0 and src1["Value"] == 0:
continue
elif src1["Frame"] == 0 and src1["Value"] == 1:
df = diva_tools.dex.EXPFrame()
df.F = src1["Frame"]
df.B = 0
df.I = 5
df.V = src1["Value"]
df.T = 0
else:
src2 = kantan_frames[internal_tracker]
internal_tracker += 1
df = diva_tools.dex.EXPFrame()
df.F = src1["Frame"]
df.B = 0
df.I = 5
df.V = src2["Value"]
df.T = int(src2["Frame"] - src1["Frame"])
diva_frames.append(df.__dict__)
# eyesmile
internal_tracker = int(0)
while True:
if internal_tracker >= len(eyesmile_frames):
break
src1 = eyesmile_frames[internal_tracker]
internal_tracker += 1
if src1["Frame"] == 0 and src1["Value"] == 0:
continue
elif src1["Frame"] == 0 and src1["Value"] == 1:
df = diva_tools.dex.EXPFrame()
df.F = src1["Frame"]
df.B = 0
df.I = 6
df.V = src1["Value"]
df.T = 0
else:
src2 = eyesmile_frames[internal_tracker]
internal_tracker += 1
df = diva_tools.dex.EXPFrame()
df.F = src1["Frame"]
df.B = 0
df.I = 6
df.V = src2["Value"]
df.T = int(src2["Frame"] - src1["Frame"])
diva_frames.append(df.__dict__)
# mabushii
internal_tracker = int(0)
while True:
if internal_tracker >= len(mabushii_frames):
break
src1 = mabushii_frames[internal_tracker]
internal_tracker += 1
if src1["Frame"] == 0 and src1["Value"] == 0:
continue
elif src1["Frame"] == 0 and src1["Value"] == 1:
df = diva_tools.dex.EXPFrame()
df.F = src1["Frame"]
df.B = 0
df.I = 8
df.V = src1["Value"]
df.T = 0
else:
src2 = mabushii_frames[internal_tracker]
internal_tracker += 1
df = diva_tools.dex.EXPFrame()
df.F = src1["Frame"]
df.B = 0
df.I = 8
df.V = src2["Value"]
df.T = int(src2["Frame"] - src1["Frame"])
diva_frames.append(df.__dict__)
# tsuyoi
internal_tracker = int(0)
while True:
if internal_tracker >= len(tsuyoi_frames):
break
src1 = tsuyoi_frames[internal_tracker]
internal_tracker += 1
if src1["Frame"] == 0 and src1["Value"] == 0:
continue
elif src1["Frame"] == 0 and src1["Value"] == 1:
df = diva_tools.dex.EXPFrame()
df.F = src1["Frame"]
df.B = 0
df.I = 10
df.V = src1["Value"]
df.T = 0
else:
src2 = tsuyoi_frames[internal_tracker]
internal_tracker += 1
df = diva_tools.dex.EXPFrame()
df.F = src1["Frame"]
df.B = 0
df.I = 10
df.V = src2["Value"]
df.T = int(src2["Frame"] - src1["Frame"])
diva_frames.append(df.__dict__)
# meikakunisuru
internal_tracker = int(0)
while True:
if internal_tracker >= len(meikakunisuru_frames):
break
src1 = meikakunisuru_frames[internal_tracker]
internal_tracker += 1
if src1["Frame"] == 0 and src1["Value"] == 0:
continue
elif src1["Frame"] == 0 and src1["Value"] == 1:
df = diva_tools.dex.EXPFrame()
df.F = src1["Frame"]
df.B = 0
df.I = 11
df.V = src1["Value"]
df.T = 0
else:
src2 = meikakunisuru_frames[internal_tracker]
internal_tracker += 1
df = diva_tools.dex.EXPFrame()
df.F = src1["Frame"]
df.B = 0
df.I = 11
df.V = src2["Value"]
df.T = int(src2["Frame"] - src1["Frame"])
diva_frames.append(df.__dict__)
# yasashii
internal_tracker = int(0)
while True:
if internal_tracker >= len(yasashii_frames):
break
src1 = yasashii_frames[internal_tracker]
internal_tracker += 1
if src1["Frame"] == 0 and src1["Value"] == 0:
continue
elif src1["Frame"] == 0 and src1["Value"] == 1:
df = diva_tools.dex.EXPFrame()
df.F = src1["Frame"]
df.B = 0
df.I = 12
df.V = src1["Value"]
df.T = 0
else:
src2 = yasashii_frames[internal_tracker]
internal_tracker += 1
df = diva_tools.dex.EXPFrame()
df.F = src1["Frame"]
df.B = 0
df.I = 12
df.V = src2["Value"]
df.T = int(src2["Frame"] - src1["Frame"])
diva_frames.append(df.__dict__)
# nagashi
internal_tracker = int(0)
while True:
if internal_tracker >= len(nagashi_frames):
break
src1 = nagashi_frames[internal_tracker]
internal_tracker += 1
if src1["Frame"] == 0 and src1["Value"] == 0:
continue
elif src1["Frame"] == 0 and src1["Value"] == 1:
df = diva_tools.dex.EXPFrame()
df.F = src1["Frame"]
df.B = 0
df.I = 13
df.V = src1["Value"]
df.T = 0
else:
src2 = nagashi_frames[internal_tracker]
internal_tracker += 1
df = diva_tools.dex.EXPFrame()
df.F = src1["Frame"]
df.B = 0
df.I = 13
df.V = src2["Value"]
df.T = int(src2["Frame"] - src1["Frame"])
diva_frames.append(df.__dict__)
# kiri
internal_tracker = int(0)
while True:
if internal_tracker >= len(kiri_frames):
break
src1 = kiri_frames[internal_tracker]
internal_tracker += 1
if src1["Frame"] == 0 and src1["Value"] == 0:
continue
elif src1["Frame"] == 0 and src1["Value"] == 1:
df = diva_tools.dex.EXPFrame()
df.F = src1["Frame"]
df.B = 0
df.I = 15
df.V = src1["Value"]
df.T = 0
else:
src2 = kiri_frames[internal_tracker]
internal_tracker += 1
df = diva_tools.dex.EXPFrame()
df.F = src1["Frame"]
df.B = 0
df.I = 15
df.V = src2["Value"]
df.T = int(src2["Frame"] - src1["Frame"])
diva_frames.append(df.__dict__)
# utsuro
internal_tracker = int(0)
while True:
if internal_tracker >= len(utsuro_frames):
break
src1 = utsuro_frames[internal_tracker]
internal_tracker += 1
if src1["Frame"] == 0 and src1["Value"] == 0:
continue
elif src1["Frame"] == 0 and src1["Value"] == 1:
df = diva_tools.dex.EXPFrame()
df.F = src1["Frame"]
df.B = 0
df.I = 16
df.V = src1["Value"]
df.T = 0
else:
src2 = utsuro_frames[internal_tracker]
internal_tracker += 1
df = diva_tools.dex.EXPFrame()
df.F = src1["Frame"]
df.B = 0
df.I = 16
df.V = src2["Value"]
df.T = int(src2["Frame"] - src1["Frame"])
diva_frames.append(df.__dict__)
# kangaeru
internal_tracker = int(0)
while True:
if internal_tracker >= len(kangaeru_frames):
break
src1 = kangaeru_frames[internal_tracker]
internal_tracker += 1
if src1["Frame"] == 0 and src1["Value"] == 0:
continue
elif src1["Frame"] == 0 and src1["Value"] == 1:
df = diva_tools.dex.EXPFrame()
df.F = src1["Frame"]
df.B = 0
df.I = 17
df.V = src1["Value"]
df.T = 0
else:
src2 = kangaeru_frames[internal_tracker]
internal_tracker += 1
df = diva_tools.dex.EXPFrame()
df.F = src1["Frame"]
df.B = 0
df.I = 17
df.V = src2["Value"]
df.T = int(src2["Frame"] - src1["Frame"])
diva_frames.append(df.__dict__)
# setsuna
internal_tracker = int(0)
while True:
if internal_tracker >= len(setsuna_frames):
break
src1 = setsuna_frames[internal_tracker]
internal_tracker += 1
if src1["Frame"] == 0 and src1["Value"] == 0:
continue
elif src1["Frame"] == 0 and src1["Value"] == 1:
df = diva_tools.dex.EXPFrame()
df.F = src1["Frame"]
df.B = 0
df.I = 18
df.V = src1["Value"]
df.T = 0
else:
src2 = setsuna_frames[internal_tracker]
internal_tracker += 1
df = diva_tools.dex.EXPFrame()
df.F = src1["Frame"]
df.B = 0
df.I = 18
df.V = src2["Value"]
df.T = int(src2["Frame"] - src1["Frame"])
diva_frames.append(df.__dict__)
# genki
internal_tracker = int(0)
while True:
if internal_tracker >= len(genki_frames):
break
src1 = genki_frames[internal_tracker]
internal_tracker += 1
if src1["Frame"] == 0 and src1["Value"] == 0:
continue
elif src1["Frame"] == 0 and src1["Value"] == 1:
df = diva_tools.dex.EXPFrame()
df.F = src1["Frame"]
df.B = 0
df.I = 19
df.V = src1["Value"]
df.T = 0
else:
src2 = genki_frames[internal_tracker]
internal_tracker += 1
df = diva_tools.dex.EXPFrame()
df.F = src1["Frame"]
df.B = 0
df.I = 19
df.V = src2["Value"]
df.T = int(src2["Frame"] - src1["Frame"])
diva_frames.append(df.__dict__)
# yaru
internal_tracker = int(0)
while True:
if internal_tracker >= len(yaru_frames):
break
src1 = yaru_frames[internal_tracker]
internal_tracker += 1
if src1["Frame"] == 0 and src1["Value"] == 0:
continue
elif src1["Frame"] == 0 and src1["Value"] == 1:
df = diva_tools.dex.EXPFrame()
df.F = src1["Frame"]
df.B = 0
df.I = 20
df.V = src1["Value"]
df.T = 0
else:
src2 = yaru_frames[internal_tracker]
internal_tracker += 1
df = diva_tools.dex.EXPFrame()
df.F = src1["Frame"]
df.B = 0
df.I = 20
df.V = src2["Value"]
df.T = int(src2["Frame"] - src1["Frame"])
diva_frames.append(df.__dict__)
# mabataki
internal_tracker = int(0)
while True:
if internal_tracker >= len(mabataki_frames):
break
src1 = mabataki_frames[internal_tracker]
internal_tracker += 1
if src1["Frame"] == 0 and src1["Value"] == 0:
continue
elif src1["Frame"] == 0 and src1["Value"] == 1:
df = diva_tools.dex.EXPFrame()
df.F = src1["Frame"]
df.B = 0
df.I = 22
df.V = src1["Value"]
df.T = 0
else:
src2 = mabataki_frames[internal_tracker]
internal_tracker += 1
df = diva_tools.dex.EXPFrame()
df.F = src1["Frame"]
df.B = 0
df.I = 22
df.V = src2["Value"]
df.T = int(src2["Frame"] - src1["Frame"])
diva_frames.append(df.__dict__)
# cool
internal_tracker = int(0)
while True:
if internal_tracker >= len(cool_frames):
break
src1 = cool_frames[internal_tracker]
internal_tracker += 1
if src1["Frame"] == 0 and src1["Value"] == 0:
continue
elif src1["Frame"] == 0 and src1["Value"] == 1:
df = diva_tools.dex.EXPFrame()
df.F = src1["Frame"]
df.B = 0
df.I = 34
df.V = src1["Value"]
df.T = 0
else:
src2 = cool_frames[internal_tracker]
internal_tracker += 1
df = diva_tools.dex.EXPFrame()
df.F = src1["Frame"]
df.B = 0
df.I = 34
df.V = src2["Value"]
df.T = int(src2["Frame"] - src1["Frame"])
diva_frames.append(df.__dict__)
# kumon
internal_tracker = int(0)
while True:
if internal_tracker >= len(kumon_frames):
break
src1 = kumon_frames[internal_tracker]
internal_tracker += 1
if src1["Frame"] == 0 and src1["Value"] == 0:
continue
elif src1["Frame"] == 0 and src1["Value"] == 1:
df = diva_tools.dex.EXPFrame()
df.F = src1["Frame"]
df.B = 0
df.I = 36
df.V = src1["Value"]
df.T = 0
else:
src2 = kumon_frames[internal_tracker]
internal_tracker += 1
df = diva_tools.dex.EXPFrame()
df.F = src1["Frame"]
df.B = 0
df.I = 36
df.V = src2["Value"]
df.T = int(src2["Frame"] - src1["Frame"])
diva_frames.append(df.__dict__)
# kutsuu
internal_tracker = int(0)
while True:
if internal_tracker >= len(kutsuu_frames):
break
src1 = kutsuu_frames[internal_tracker]
internal_tracker += 1
if src1["Frame"] == 0 and src1["Value"] == 0:
continue
elif src1["Frame"] == 0 and src1["Value"] == 1:
df = diva_tools.dex.EXPFrame()
df.F = src1["Frame"]
df.B = 0
df.I = 37
df.V = src1["Value"]
df.T = 0
else:
src2 = kutsuu_frames[internal_tracker]
internal_tracker += 1
df = diva_tools.dex.EXPFrame()
df.F = src1["Frame"]
df.B = 0
df.I = 37
df.V = src2["Value"]
df.T = int(src2["Frame"] - src1["Frame"])
diva_frames.append(df.__dict__)
# naki
internal_tracker = int(0)
while True:
if internal_tracker >= len(naki_frames):
break
src1 = naki_frames[internal_tracker]
internal_tracker += 1
if src1["Frame"] == 0 and src1["Value"] == 0:
continue
elif src1["Frame"] == 0 and src1["Value"] == 1:
df = diva_tools.dex.EXPFrame()
df.F = src1["Frame"]
df.B = 0
df.I = 38
df.V = src1["Value"]
df.T = 0
else:
src2 = naki_frames[internal_tracker]
internal_tracker += 1
df = diva_tools.dex.EXPFrame()
df.F = src1["Frame"]
df.B = 0
df.I = 38
df.V = src2["Value"]
df.T = int(src2["Frame"] - src1["Frame"])
diva_frames.append(df.__dict__)
# nayami
internal_tracker = int(0)
while True:
if internal_tracker >= len(nayami_frames):
break
src1 = nayami_frames[internal_tracker]
internal_tracker += 1
if src1["Frame"] == 0 and src1["Value"] == 0:
continue
elif src1["Frame"] == 0 and src1["Value"] == 1:
df = diva_tools.dex.EXPFrame()
df.F = src1["Frame"]
df.B = 0
df.I = 39
df.V = src1["Value"]
df.T = 0
else:
src2 = nayami_frames[internal_tracker]
internal_tracker += 1
df = diva_tools.dex.EXPFrame()
df.F = src1["Frame"]
df.B = 0
df.I = 39
df.V = src2["Value"]
df.T = int(src2["Frame"] - src1["Frame"])
diva_frames.append(df.__dict__)
# pikkuri2
internal_tracker = int(0)
while True:
if internal_tracker >= len(pikkuri2_frames):
break
src1 = pikkuri2_frames[internal_tracker]
internal_tracker += 1
if src1["Frame"] == 0 and src1["Value"] == 0:
continue
elif src1["Frame"] == 0 and src1["Value"] == 1:
df = diva_tools.dex.EXPFrame()
df.F = src1["Frame"]
df.B = 0
df.I = 40
df.V = src1["Value"]
df.T = 0
else:
src2 = pikkuri2_frames[internal_tracker]
internal_tracker += 1
df = diva_tools.dex.EXPFrame()
df.F = src1["Frame"]
df.B = 0
df.I = 40
df.V = src2["Value"]
df.T = int(src2["Frame"] - src1["Frame"])
diva_frames.append(df.__dict__)
# wink2
internal_tracker = int(0)
while True:
if internal_tracker >= len(wink2_frames):
break
src1 = wink2_frames[internal_tracker]
internal_tracker += 1
if src1["Frame"] == 0 and src1["Value"] == 0:
continue
elif src1["Frame"] == 0 and src1["Value"] == 1:
df = diva_tools.dex.EXPFrame()
df.F = src1["Frame"]
df.B = 0
df.I = 42
df.V = src1["Value"]
df.T = 0
else:
src2 = wink2_frames[internal_tracker]
internal_tracker += 1
df = diva_tools.dex.EXPFrame()
df.F = src1["Frame"]
df.B = 0
df.I = 42
df.V = src2["Value"]
df.T = int(src2["Frame"] - src1["Frame"])
diva_frames.append(df.__dict__)
# wink2r
internal_tracker = int(0)
while True:
if internal_tracker >= len(wink2r_frames):
break
src1 = wink2r_frames[internal_tracker]
internal_tracker += 1
if src1["Frame"] == 0 and src1["Value"] == 0:
continue
elif src1["Frame"] == 0 and src1["Value"] == 1:
df = diva_tools.dex.EXPFrame()
df.F = src1["Frame"]
df.B = 0
df.I = 43
df.V = src1["Value"]
df.T = 0
else:
src2 = wink2r_frames[internal_tracker]
internal_tracker += 1
df = diva_tools.dex.EXPFrame()
df.F = src1["Frame"]
df.B = 0
df.I = 43
df.V = src2["Value"]
df.T = int(src2["Frame"] - src1["Frame"])
diva_frames.append(df.__dict__)
# wink
internal_tracker = int(0)
while True:
if internal_tracker >= len(wink_frames):
break
src1 = wink_frames[internal_tracker]
internal_tracker += 1
if src1["Frame"] == 0 and src1["Value"] == 0:
continue
elif src1["Frame"] == 0 and src1["Value"] == 1:
df = diva_tools.dex.EXPFrame()
df.F = src1["Frame"]
df.B = 0
df.I = 44
df.V = src1["Value"]
df.T = 0
else:
src2 = wink_frames[internal_tracker]
internal_tracker += 1
df = diva_tools.dex.EXPFrame()
df.F = src1["Frame"]
df.B = 0
df.I = 44
df.V = src2["Value"]
df.T = int(src2["Frame"] - src1["Frame"])
diva_frames.append(df.__dict__)
# winkr
internal_tracker = int(0)
while True:
if internal_tracker >= len(winkr_frames):
break
src1 = winkr_frames[internal_tracker]
internal_tracker += 1
if src1["Frame"] == 0 and src1["Value"] == 0:
continue
elif src1["Frame"] == 0 and src1["Value"] == 1:
df = diva_tools.dex.EXPFrame()
df.F = src1["Frame"]
df.B = 0
df.I = 45
df.V = src1["Value"]
df.T = 0
else:
src2 = winkr_frames[internal_tracker]
internal_tracker += 1
df = diva_tools.dex.EXPFrame()
df.F = src1["Frame"]
df.B = 0
df.I = 45
df.V = src2["Value"]
df.T = int(src2["Frame"] - src1["Frame"])
diva_frames.append(df.__dict__)
# diva_frames sorting code
# Selection sort, descending by "F": repeatedly pull the frame with the
# highest "F" out of diva_frames into frame_sorter; the result is reversed
# into ascending order further below.
frame_sorter = []
while True:
    if len(diva_frames) == 0:
        break
    highest = int(0)
    highest_index = int(0)
    for frame in diva_frames:
        if frame["F"] == highest:
            # A frame tying the current maximum replaces the most recently
            # sorted entry (dedupes frames sharing the same "F").
            # NOTE(review): on the very first comparison `highest` is 0, so a
            # frame with F == 0 pops from a possibly empty frame_sorter
            # (IndexError) — presumably the converters above never emit a
            # kept frame with F == 0; verify.
            frame_sorter.pop(-1)
            frame_sorter.append(frame)
        elif frame["F"] > highest:
            highest = frame["F"]
            # list.index returns the FIRST dict equal to `frame`; only the
            # first occurrence can satisfy the strict `>`, so this is the
            # triggering element.
            highest_index = int(diva_frames.index(frame))
    frame_sorter.append(diva_frames[highest_index])
    diva_frames.pop(highest_index)
# and last but not least, let's reverse the list
frames = []
finish_tracker = int(len(frame_sorter) - 1)
while finish_tracker >= 0:
    # Walk frame_sorter back-to-front so `frames` ends up ascending by "F".
    frames.append(frame_sorter[finish_tracker])
    finish_tracker -= 1
# Now to perform operations on the eyes section
blink_frames = []
eyes_frames = []
# Header: 30-byte version string followed by a 20-byte model name.
file_version = dex_eyes_f.read(30).decode("Shift-JIS")
model_name = dex_eyes_f.read(20).decode("Shift-JIS")
bone_keyframes = struct.unpack("I", dex_eyes_f.read(4))[0]
# Won't bother to write bone reading code yet
# NOTE(review): the bone keyframe payload is not skipped before the face
# keyframe count is read — presumably bone_keyframes is always 0 here; verify.
face_keyframes = struct.unpack("I", dex_eyes_f.read(4))[0]
for keyframe_index in range(face_keyframes):
    morph = VMD_Morph()
    raw_name = dex_eyes_f.read(15)
    try:
        name = raw_name.decode('Shift-JIS').split('\x00')[0]
    except UnicodeDecodeError:
        print('Ok, this is weird... this should not happen... oh well')
        name = "broken"
    morph.Name = str(name)
    (morph.Frame,) = struct.unpack("I", dex_eyes_f.read(4))
    (morph.Value,) = struct.unpack("f", dex_eyes_f.read(4))
    # Only the blink morph ("まばたき") feeds the eyes track.
    if morph.Name == "まばたき":
        blink_frames.append(morph.__dict__)
# blinks
internal_tracker = int(0)
while internal_tracker < len(blink_frames):
    src1 = blink_frames[internal_tracker]
    internal_tracker += 1
    # Skip the inert (frame 0, value 0) keyframe.
    if src1["Frame"] == 0 and src1["Value"] == 0:
        continue
    df = diva_tools.dex.EXPFrame()
    df.F = src1["Frame"]
    df.B = 1
    df.I = 3
    if src1["Frame"] == 0 and src1["Value"] == 1:
        # Blink already fully applied at the start of the motion.
        df.V = src1["Value"]
        df.T = 0
    else:
        # Pair with the next keyframe: its value plus the frame distance
        # becomes the transition.
        src2 = blink_frames[internal_tracker]
        internal_tracker += 1
        df.V = src2["Value"]
        df.T = int(src2["Frame"] - src1["Frame"])
    eyes_frames.append(df.__dict__)
# diva_frames sorting code
# Selection sort, descending by "F": move the current maximum from
# eyes_frames into eye_frame_sorter until the source list is drained.
eye_frame_sorter = []
while eyes_frames:
    highest = int(0)
    highest_index = int(0)
    for position, frame in enumerate(eyes_frames):
        # Strict `>` means only the first occurrence of the maximum wins,
        # matching list.index() semantics on duplicate dicts.
        if frame["F"] > highest:
            highest = frame["F"]
            highest_index = position
    eye_frame_sorter.append(eyes_frames.pop(highest_index))
# and last but not least, let's reverse the list
# eye_frame_sorter is descending by "F"; flip it to ascending order.
eyeframes = list(reversed(eye_frame_sorter))
# Finally, let's write the file
# Single top-level "Dex" entry holding the motion name plus both tracks.
meta = {"Name": f"{file_version}", "Main": frames, "Eyes": eyeframes}
dex_main = {"Dex": [meta]}
json.dump(dex_main, json_file, indent=2)
| 33.407757
| 84
| 0.485677
| 3,699
| 31,871
| 3.990808
| 0.060557
| 0.170709
| 0.046335
| 0.110012
| 0.845211
| 0.842162
| 0.838233
| 0.746918
| 0.742853
| 0.695299
| 0
| 0.04061
| 0.393493
| 31,871
| 953
| 85
| 33.442812
| 0.723073
| 0.024066
| 0
| 0.745977
| 0
| 0
| 0.057227
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.002299
| false
| 0.002299
| 0.001149
| 0
| 0.004598
| 0.002299
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b878315197814e35b0ebf560388e1facfc014771
| 82,920
|
py
|
Python
|
pynetdicom3/tests/test_assoc.py
|
rdebroiz/pynetdicom3
|
0baea8310b9d3fd0a67df0c2e90f2607463f73c7
|
[
"MIT"
] | null | null | null |
pynetdicom3/tests/test_assoc.py
|
rdebroiz/pynetdicom3
|
0baea8310b9d3fd0a67df0c2e90f2607463f73c7
|
[
"MIT"
] | null | null | null |
pynetdicom3/tests/test_assoc.py
|
rdebroiz/pynetdicom3
|
0baea8310b9d3fd0a67df0c2e90f2607463f73c7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Association testing
TODO: Add tests to check raise NotImplemented if no user implementation
of the DIMSE-C service callbacks
"""
from io import BytesIO
import logging
import os
import select
import socket
from struct import pack
import time
import threading
import unittest
from pydicom import read_file
from pydicom.dataset import Dataset
from pydicom.uid import UID, ImplicitVRLittleEndian, ExplicitVRLittleEndian
from pynetdicom3 import AE, VerificationSOPClass
from pynetdicom3.association import Association
from pynetdicom3.dimse_primitives import C_STORE, C_FIND, C_GET, C_MOVE
from pynetdicom3.dsutils import encode, decode
from pynetdicom3.pdu_primitives import (
UserIdentityNegotiation, SOPClassExtendedNegotiation,
SOPClassCommonExtendedNegotiation
)
from pynetdicom3.sop_class import (
CTImageStorage, MRImageStorage, RTImageStorage,
PatientRootQueryRetrieveInformationModelFind,
StudyRootQueryRetrieveInformationModelFind,
ModalityWorklistInformationFind,
PatientStudyOnlyQueryRetrieveInformationModelFind,
PatientRootQueryRetrieveInformationModelGet,
PatientStudyOnlyQueryRetrieveInformationModelGet,
StudyRootQueryRetrieveInformationModelGet,
PatientRootQueryRetrieveInformationModelMove,
PatientStudyOnlyQueryRetrieveInformationModelMove,
StudyRootQueryRetrieveInformationModelMove
)
from .dummy_c_scp import (
DummyVerificationSCP, DummyStorageSCP, DummyFindSCP, DummyGetSCP,
DummyMoveSCP, DummyBaseSCP
)
# Package-wide logger, silenced so test output stays clean; switch the
# commented line on when diagnosing a failure.
LOGGER = logging.getLogger('pynetdicom3')
LOGGER.setLevel(logging.CRITICAL)
#LOGGER.setLevel(logging.DEBUG)
# Shared fixture datasets, read once at import time from ./dicom_files.
TEST_DS_DIR = os.path.join(os.path.dirname(__file__), 'dicom_files')
BIG_DATASET = read_file(os.path.join(TEST_DS_DIR, 'RTImageStorage.dcm')) # 2.1 M
DATASET = read_file(os.path.join(TEST_DS_DIR, 'CTImageStorage.dcm'))
COMP_DATASET = read_file(os.path.join(TEST_DS_DIR, 'MRImageStorage_JPG2000_Lossless.dcm'))
class DummyDIMSE(object):
    """Stand-in DIMSE provider that records the last response status.

    Tests assign an instance to ``assoc.dimse`` so the status a service
    would have sent to the peer can be asserted instead of transmitted.
    """
    def __init__(self):
        # No response captured yet.
        self.status = None

    def send_msg(self, rsp, context_id):
        """Capture the response's Status rather than sending it."""
        self.status = rsp.Status
class TestCStoreSCP(unittest.TestCase):
    """Tests for Association._c_store_scp.

    Each test starts a dummy Storage SCP in a background thread, associates
    with it, swaps the association's DIMSE provider for a DummyDIMSE, then
    drives Association._c_store_scp directly with a hand-built C-STORE
    request so the status that would be sent to the peer can be asserted.
    """
    def setUp(self):
        """Run prior to each test"""
        # Created by each test as needed; tearDown stops whatever is left.
        self.scp = None

    def tearDown(self):
        """Clear any active threads"""
        if self.scp:
            self.scp.abort()
        # Give the aborted association a moment to wind down before sweeping
        # any remaining dummy SCP threads.
        time.sleep(0.1)
        for thread in threading.enumerate():
            if isinstance(thread, DummyBaseSCP):
                thread.abort()
                thread.stop()

    def test_no_presentation_context(self):
        """Test correct status is returned if no valid presentation context."""
        self.scp = DummyStorageSCP()
        self.scp.raise_exception = True
        self.scp.start()
        # The SCU only proposes RT Image Storage, so the CT dataset sent
        # below has no matching accepted presentation context.
        ae = AE(scu_sop_class=[RTImageStorage])
        ae.on_c_store = self.scp.on_c_store
        assoc = ae.associate('localhost', 11112)
        assoc.dimse = DummyDIMSE()
        self.assertTrue(assoc.is_established)
        req = C_STORE()
        req.MessageID = 1
        req.AffectedSOPClassUID = DATASET.SOPClassUID
        req.AffectedSOPInstanceUID = DATASET.SOPInstanceUID
        req.Priority = 1
        bytestream = encode(DATASET, True, True)
        req.DataSet = BytesIO(bytestream)
        assoc._c_store_scp(req)
        # 0x0122 — presumably "SOP class not supported"; confirm against
        # the service class status definitions.
        assert assoc.dimse.status == 0x0122
        assoc.release()
        self.scp.stop()

    def test_dataset_decode_failure(self):
        """Test correct status returned if unable to decode dataset."""
        # Not sure how to test this
        pass

    def test_on_c_store_callback_exception(self):
        """Test correct status returned if exception raised in callback."""
        self.scp = DummyStorageSCP()
        self.scp.raise_exception = True
        self.scp.start()
        ae = AE(scu_sop_class=[CTImageStorage, RTImageStorage])
        ae.on_c_store = self.scp.on_c_store
        assoc = ae.associate('localhost', 11112)
        assoc.dimse = DummyDIMSE()
        self.assertTrue(assoc.is_established)
        req = C_STORE()
        req.MessageID = 1
        req.AffectedSOPClassUID = DATASET.SOPClassUID
        req.AffectedSOPInstanceUID = DATASET.SOPInstanceUID
        req.Priority = 1
        bytestream = encode(DATASET, True, True)
        req.DataSet = BytesIO(bytestream)
        assoc._c_store_scp(req)
        assert assoc.dimse.status == 0xC211
        assoc.release()
        self.scp.stop()

    def test_callback_status_ds_no_status(self):
        """Test correct status returned if status Dataset has no status."""
        self.scp = DummyStorageSCP()
        # A status Dataset missing the Status element is invalid.
        self.scp.status = Dataset()
        self.scp.status.PatientName = 'ABCD'
        self.scp.start()
        ae = AE(scu_sop_class=[CTImageStorage, RTImageStorage])
        ae.on_c_store = self.scp.on_c_store
        assoc = ae.associate('localhost', 11112)
        assoc.dimse = DummyDIMSE()
        self.assertTrue(assoc.is_established)
        req = C_STORE()
        req.MessageID = 1
        req.AffectedSOPClassUID = DATASET.SOPClassUID
        req.AffectedSOPInstanceUID = DATASET.SOPInstanceUID
        req.Priority = 1
        bytestream = encode(DATASET, True, True)
        req.DataSet = BytesIO(bytestream)
        assoc._c_store_scp(req)
        assert assoc.dimse.status == 0xC001
        assoc.release()
        self.scp.stop()

    def test_callback_status_ds_unknown_elem(self):
        """Test returning a status Dataset with an unknown element."""
        self.scp = DummyStorageSCP()
        # Extra non-status elements should be tolerated and Status used.
        self.scp.status = Dataset()
        self.scp.status.Status = 0x0000
        self.scp.status.PatientName = 'ABCD'
        self.scp.start()
        ae = AE(scu_sop_class=[CTImageStorage, RTImageStorage])
        ae.on_c_store = self.scp.on_c_store
        assoc = ae.associate('localhost', 11112)
        assoc.dimse = DummyDIMSE()
        self.assertTrue(assoc.is_established)
        req = C_STORE()
        req.MessageID = 1
        req.AffectedSOPClassUID = DATASET.SOPClassUID
        req.AffectedSOPInstanceUID = DATASET.SOPInstanceUID
        req.Priority = 1
        bytestream = encode(DATASET, True, True)
        req.DataSet = BytesIO(bytestream)
        assoc._c_store_scp(req)
        assert assoc.dimse.status == 0x0000
        assoc.release()
        self.scp.stop()

    def test_callback_invalid_status(self):
        """Test returning a status Dataset with an invalid status type."""
        self.scp = DummyStorageSCP()
        # A str is not a valid status type.
        self.scp.status = 'abcd'
        self.scp.start()
        ae = AE(scu_sop_class=[CTImageStorage, RTImageStorage])
        ae.on_c_store = self.scp.on_c_store
        assoc = ae.associate('localhost', 11112)
        assoc.dimse = DummyDIMSE()
        self.assertTrue(assoc.is_established)
        req = C_STORE()
        req.MessageID = 1
        req.AffectedSOPClassUID = DATASET.SOPClassUID
        req.AffectedSOPInstanceUID = DATASET.SOPInstanceUID
        req.Priority = 1
        bytestream = encode(DATASET, True, True)
        req.DataSet = BytesIO(bytestream)
        assoc._c_store_scp(req)
        assert assoc.dimse.status == 0xC002
        assoc.release()
        self.scp.stop()

    def test_callback_unknown_status(self):
        """Test returning a status Dataset with an unknown status value."""
        self.scp = DummyStorageSCP()
        # Unknown int statuses are passed through unchanged.
        self.scp.status = 0xDEFA
        self.scp.start()
        ae = AE(scu_sop_class=[CTImageStorage, RTImageStorage])
        ae.on_c_store = self.scp.on_c_store
        assoc = ae.associate('localhost', 11112)
        assoc.dimse = DummyDIMSE()
        self.assertTrue(assoc.is_established)
        req = C_STORE()
        req.MessageID = 1
        req.AffectedSOPClassUID = DATASET.SOPClassUID
        req.AffectedSOPInstanceUID = DATASET.SOPInstanceUID
        req.Priority = 1
        bytestream = encode(DATASET, True, True)
        req.DataSet = BytesIO(bytestream)
        assoc._c_store_scp(req)
        assert assoc.dimse.status == 0xDEFA
        assoc.release()
        self.scp.stop()
class TestAssociation(unittest.TestCase):
    """Run tests on Association."""
    # Association(local_ae, client_socket, peer_ae, acse_timeout,
    #             dimse_timout, max_pdu, ext_neg)
    def setUp(self):
        """This function runs prior to all test methods"""
        # A bound-but-unused socket handed to Association in test_init_errors.
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.socket.bind(('', 0))
        self.socket.listen(1)
        self.peer = {'AET' : 'PEER_AET',
                     'Port' : 11112,
                     'Address' : 'localhost'}
        self.ext_neg = []
        self.scp = None

    def tearDown(self):
        """This function runs after all test methods"""
        self.socket.close()
        if self.scp:
            self.scp.abort()
        # Allow the abort to propagate before sweeping leftover SCP threads.
        time.sleep(0.1)
        for thread in threading.enumerate():
            if isinstance(thread, DummyBaseSCP):
                thread.abort()
                thread.stop()

    def test_scp_assoc_a_abort_reply(self):
        """Test the SCP sending an A-ABORT instead of an A-ASSOCIATE response"""
        class DummyAE(threading.Thread, AE):
            """Dummy AE used for testing"""
            def __init__(self, scp_sop_class, port):
                """Initialise the class"""
                AE.__init__(self, scp_sop_class=scp_sop_class, port=port)
                threading.Thread.__init__(self)
                self.daemon = True

            def run(self):
                """The thread run method"""
                self.start_scp()

            def start_scp(self):
                """new runner"""
                self._bind_socket()
                while True:
                    try:
                        if self._quit:
                            break
                        self._monitor_socket()
                        self.cleanup_associations()
                    except KeyboardInterrupt:
                        self.stop()

            def _monitor_socket(self):
                """Override the normal method"""
                try:
                    read_list, _, _ = select.select([self.local_socket], [], [], 0)
                except (socket.error, ValueError):
                    return
                # If theres a connection
                if read_list:
                    client_socket, _ = self.local_socket.accept()
                    client_socket.setsockopt(socket.SOL_SOCKET,
                                             socket.SO_RCVTIMEO,
                                             pack('ll', 10, 0))
                    # Create a new Association
                    # Association(local_ae, local_socket=None, max_pdu=16382)
                    assoc = Association(self,
                                        client_socket,
                                        max_pdu=self.maximum_pdu_size,
                                        acse_timeout=self.acse_timeout,
                                        dimse_timeout=self.dimse_timeout)
                    # Set the ACSE to abort association requests
                    assoc._a_abort_assoc_rq = True
                    assoc.start()
                    self.active_associations.append(assoc)

        scp = DummyAE(scp_sop_class=[VerificationSOPClass], port=11112)
        scp.start()
        ae = AE(scu_sop_class=[VerificationSOPClass])
        assoc = ae.associate('localhost', 11112)
        self.assertFalse(assoc.is_established)
        scp.stop()

    def test_scp_assoc_ap_abort_reply(self):
        """Test the SCP sending an A-ABORT instead of an A-ASSOCIATE response"""
        class DummyAE(threading.Thread, AE):
            """Dummy AE used for testing"""
            def __init__(self, scp_sop_class, port):
                """Initialise the class"""
                AE.__init__(self, scp_sop_class=scp_sop_class, port=port)
                threading.Thread.__init__(self)
                self.daemon = True

            def run(self):
                """The thread run method"""
                self.start_scp()

            def start_scp(self):
                """new runner"""
                self._bind_socket()
                while True:
                    try:
                        if self._quit:
                            break
                        self._monitor_socket()
                        self.cleanup_associations()
                    except KeyboardInterrupt:
                        self.stop()

            def _monitor_socket(self):
                """Override the normal method"""
                try:
                    # CONSISTENCY FIX: also swallow socket.error, matching the
                    # DummyAE in test_scp_assoc_a_abort_reply above.
                    read_list, _, _ = select.select([self.local_socket], [], [], 0)
                except (socket.error, ValueError):
                    return
                # If theres a connection
                if read_list:
                    client_socket, _ = self.local_socket.accept()
                    client_socket.setsockopt(socket.SOL_SOCKET,
                                             socket.SO_RCVTIMEO,
                                             pack('ll', 10, 0))
                    # Create a new Association
                    # Association(local_ae, local_socket=None, max_pdu=16382)
                    assoc = Association(self,
                                        client_socket,
                                        max_pdu=self.maximum_pdu_size,
                                        acse_timeout=self.acse_timeout,
                                        dimse_timeout=self.dimse_timeout)
                    # Set the ACSE to abort association requests
                    assoc._a_p_abort_assoc_rq = True
                    assoc.start()
                    self.active_associations.append(assoc)

        scp = DummyAE(scp_sop_class=[VerificationSOPClass], port=11112)
        scp.start()
        ae = AE(scu_sop_class=[VerificationSOPClass])
        assoc = ae.associate('localhost', 11112)
        self.assertFalse(assoc.is_established)
        scp.stop()

    @staticmethod
    def test_bad_connection():
        """Test connect to non-AE"""
        # NOTE: only verifies that associate() returns without raising when
        # the peer is not a DICOM application entity.
        ae = AE(scu_sop_class=[VerificationSOPClass])
        assoc = ae.associate('localhost', 22)

    @staticmethod
    def test_connection_refused():
        """Test connection refused"""
        # NOTE: only verifies that associate() returns without raising when
        # nothing is listening on the port.
        ae = AE(scu_sop_class=[VerificationSOPClass])
        assoc = ae.associate('localhost', 11120)

    def test_init_errors(self):
        """Test bad parameters on init raise errors"""
        ae = AE(scu_sop_class=[VerificationSOPClass])
        with self.assertRaises(TypeError, msg="must have client_socket or peer_ae"):
            Association(ae)
        with self.assertRaises(TypeError, msg="must have client_socket or peer_ae"):
            Association(ae, client_socket=self.socket, peer_ae=self.peer)
        with self.assertRaises(TypeError, msg="wrong client_socket type"):
            Association(ae, client_socket=123)
        with self.assertRaises(TypeError, msg="wrong peer_ae type"):
            Association(ae, peer_ae=123)
        with self.assertRaises(KeyError, msg="missing keys in peer_ae"):
            Association(ae, peer_ae={})
        with self.assertRaises(TypeError, msg="wrong local_ae type"):
            Association(12345, peer_ae=self.peer)
        with self.assertRaises(TypeError, msg="wrong dimse_timeout type"):
            Association(ae, peer_ae=self.peer, dimse_timeout='a')
        with self.assertRaises(TypeError, msg="wrong acse_timeout type"):
            Association(ae, peer_ae=self.peer, acse_timeout='a')
        with self.assertRaises(TypeError, msg="wrong max_pdu type"):
            Association(ae, peer_ae=self.peer, max_pdu='a')
        with self.assertRaises(TypeError, msg="wrong ext_neg type"):
            Association(ae, peer_ae=self.peer, ext_neg='a')

    def test_run_acceptor(self):
        """Test running as an Association acceptor (SCP)"""
        pass

    def test_run_requestor(self):
        """Test running as an Association requestor (SCU)"""
        # No presentation contexts proposed -> association must fail.
        scp = DummyVerificationSCP()
        scp.start()
        ae = AE(scu_sop_class=[VerificationSOPClass])
        ae.presentation_contexts_scu = []
        assoc = ae.associate('localhost', 11112)
        self.assertFalse(assoc.is_established)
        #self.assertRaises(SystemExit, ae.quit)
        scp.stop()
        # Test good request and assoc accepted by peer
        scp = DummyVerificationSCP()
        scp.start()
        ae = AE(scu_sop_class=[VerificationSOPClass])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        assoc.release()
        self.assertFalse(assoc.is_established)
        #self.assertRaises(SystemExit, ae.quit)
        scp.stop()

    def test_req_no_presentation_context(self):
        """Test rejection due to no acceptable presentation contexts"""
        scp = DummyVerificationSCP()
        scp.start()
        # Storage proposed against a Verification-only SCP.
        ae = AE(scu_sop_class=[CTImageStorage])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_aborted)
        self.assertFalse(assoc.is_established)
        #self.assertRaises(SystemExit, ae.quit)
        scp.stop()

    def test_peer_releases_assoc(self):
        """Test peer releases assoc"""
        scp = DummyVerificationSCP()
        scp.start()
        ae = AE(scu_sop_class=[VerificationSOPClass])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        scp.release()
        self.assertFalse(assoc.is_established)
        self.assertTrue(assoc.is_released)
        #self.assertRaises(SystemExit, ae.quit)
        scp.stop() # Important!

    def test_peer_aborts_assoc(self):
        """Test peer aborts assoc"""
        self.scp = DummyVerificationSCP()
        self.scp.start()
        ae = AE(scu_sop_class=[VerificationSOPClass])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        self.scp.abort()
        # Give the abort time to reach the requestor.
        time.sleep(0.1)
        self.assertFalse(assoc.is_established)
        self.assertTrue(assoc.is_aborted)
        self.scp.stop()

    def test_peer_rejects_assoc(self):
        """Test peer rejects assoc"""
        scp = DummyVerificationSCP()
        scp.ae.require_calling_aet = b'HAHA NOPE'
        scp.start()
        ae = AE(scu_sop_class=[VerificationSOPClass])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_rejected)
        self.assertFalse(assoc.is_established)
        #self.assertRaises(SystemExit, ae.quit)
        scp.stop() # Important!

    def test_kill(self):
        """Test killing the association"""
        pass

    def test_assoc_release(self):
        """Test Association release"""
        # Simple release
        scp = DummyVerificationSCP()
        scp.start()
        ae = AE(scu_sop_class=[VerificationSOPClass])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        assoc.release()
        self.assertFalse(assoc.is_established)
        scp.stop()
        # Simple release, then release again
        scp = DummyVerificationSCP()
        scp.start()
        ae = AE(scu_sop_class=[VerificationSOPClass])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        assoc.release()
        self.assertFalse(assoc.is_established)
        self.assertTrue(assoc.is_released)
        assoc.release()
        scp.stop()
        # Simple release, then abort
        scp = DummyVerificationSCP()
        scp.start()
        ae = AE(scu_sop_class=[VerificationSOPClass])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        assoc.release()
        self.assertTrue(assoc.is_released)
        self.assertFalse(assoc.is_established)
        # Abort after release should be a no-op.
        assoc.abort()
        self.assertFalse(assoc.is_aborted)
        scp.stop()

    def test_assoc_abort(self):
        """Test Association abort"""
        # Simple abort
        scp = DummyVerificationSCP()
        scp.start()
        ae = AE(scu_sop_class=[VerificationSOPClass])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        assoc.abort()
        self.assertFalse(assoc.is_established)
        self.assertTrue(assoc.is_aborted)
        scp.stop()
        # Simple abort, then release
        scp = DummyVerificationSCP()
        scp.start()
        ae = AE(scu_sop_class=[VerificationSOPClass])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        assoc.abort()
        self.assertFalse(assoc.is_established)
        self.assertTrue(assoc.is_aborted)
        # Release after abort should be a no-op.
        assoc.release()
        self.assertFalse(assoc.is_released)
        scp.stop()
        # Simple abort, then abort again
        scp = DummyVerificationSCP()
        scp.start()
        ae = AE(scu_sop_class=[VerificationSOPClass])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        assoc.abort()
        self.assertTrue(assoc.is_aborted)
        self.assertFalse(assoc.is_established)
        assoc.abort()
        scp.stop()

    def test_scp_removed_ui(self):
        """Test SCP removes UI negotiation"""
        scp = DummyVerificationSCP()
        scp.start()
        ui = UserIdentityNegotiation()
        ui.user_identity_type = 0x01
        ui.primary_field = b'pynetdicom'
        ae = AE(scu_sop_class=[VerificationSOPClass])
        assoc = ae.associate('localhost', 11112, ext_neg=[ui])
        self.assertTrue(assoc.is_established)
        assoc.release()
        scp.stop()

    def test_scp_removed_ext_neg(self):
        """Test SCP removes ex negotiation"""
        scp = DummyVerificationSCP()
        scp.start()
        ext = SOPClassExtendedNegotiation()
        ext.sop_class_uid = '1.1.1.1'
        ext.service_class_application_information = b'\x01\x02'
        ae = AE(scu_sop_class=[VerificationSOPClass])
        assoc = ae.associate('localhost', 11112, ext_neg=[ext])
        self.assertTrue(assoc.is_established)
        assoc.release()
        scp.stop()

    def test_scp_removed_com_ext_neg(self):
        """Test SCP removes common ext negotiation"""
        scp = DummyVerificationSCP()
        scp.start()
        ext = SOPClassCommonExtendedNegotiation()
        # BUG FIX: this was assigned to ``self`` (the test case), leaving the
        # negotiation item's field unset and adding a stray attribute to the
        # TestCase instead.
        ext.related_general_sop_class_identification = ['1.2.1']
        ext.sop_class_uid = '1.1.1.1'
        ext.service_class_uid = '1.1.3'
        ae = AE(scu_sop_class=[VerificationSOPClass])
        assoc = ae.associate('localhost', 11112, ext_neg=[ext])
        self.assertTrue(assoc.is_established)
        assoc.release()
        scp.stop()

    def test_scp_assoc_limit(self):
        """Test SCP limits associations"""
        scp = DummyVerificationSCP()
        scp.ae.maximum_associations = 1
        scp.start()
        ae = AE(scu_sop_class=[VerificationSOPClass])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        # Second association exceeds the limit and must be refused.
        assoc_2 = ae.associate('localhost', 11112)
        self.assertFalse(assoc_2.is_established)
        assoc.release()
        scp.stop()

    def test_require_called_aet(self):
        """SCP requires matching called AET"""
        scp = DummyVerificationSCP()
        scp.ae.require_called_aet = b'TESTSCU'
        scp.start()
        ae = AE(scu_sop_class=[VerificationSOPClass])
        assoc = ae.associate('localhost', 11112)
        self.assertFalse(assoc.is_established)
        self.assertTrue(assoc.is_rejected)
        scp.stop()

    def test_require_calling_aet(self):
        """SCP requires matching calling AET"""
        scp = DummyVerificationSCP()
        scp.ae.require_calling_aet = b'TESTSCP'
        scp.start()
        ae = AE(scu_sop_class=[VerificationSOPClass])
        assoc = ae.associate('localhost', 11112)
        self.assertFalse(assoc.is_established)
        self.assertTrue(assoc.is_rejected)
        scp.stop()

    def test_acse_timeout(self):
        """Test that the ACSE timeout works"""
        pass

    def test_dimse_timeout(self):
        """Test that the DIMSE timeout works"""
        scp = DummyVerificationSCP()
        # SCP delays longer than the SCU's DIMSE timeout below.
        scp.delay = 0.2
        scp.start()
        ae = AE(scu_sop_class=[VerificationSOPClass])
        ae.dimse_timeout = 0.1
        assoc = ae.associate('localhost', 11112)
        self.assertEqual(assoc.dimse_timeout, 0.1)
        self.assertEqual(assoc.dimse.dimse_timeout, 0.1)
        self.assertTrue(assoc.is_established)
        assoc.send_c_echo()
        assoc.release()
        self.assertTrue(assoc.is_aborted)
        scp.stop()

    def test_dul_timeout(self):
        """Test that the DUL timeout (ARTIM) works"""
        pass
class TestAssociationSendCEcho(unittest.TestCase):
    """Run tests on Association send_c_echo.

    Each test starts a dummy Verification SCP thread, associates with it and
    exercises send_c_echo, asserting either the raised exception or the
    Status of the returned response.
    """
    def setUp(self):
        """Run prior to each test"""
        # Created by each test as needed; tearDown stops whatever is left.
        self.scp = None

    def tearDown(self):
        """Clear any active threads"""
        if self.scp:
            self.scp.abort()
        # Let the abort propagate before sweeping remaining SCP threads.
        time.sleep(0.1)
        for thread in threading.enumerate():
            if isinstance(thread, DummyBaseSCP):
                thread.abort()
                thread.stop()

    def test_must_be_associated(self):
        """Test can't send without association."""
        # Test raise if assoc not established
        self.scp = DummyVerificationSCP()
        self.scp.start()
        ae = AE(scu_sop_class=[VerificationSOPClass])
        assoc = ae.associate('localhost', 11112)
        assoc.release()
        self.assertFalse(assoc.is_established)
        with self.assertRaises(RuntimeError):
            assoc.send_c_echo()
        self.scp.stop()

    def test_no_abstract_syntax_match(self):
        """Test SCU when no accepted abstract syntax"""
        self.scp = DummyStorageSCP()
        self.scp.start()
        # Verification was never proposed, so sending an echo must fail.
        ae = AE(scu_sop_class=[CTImageStorage])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        self.assertRaises(ValueError, assoc.send_c_echo)
        assoc.release()
        self.scp.stop()

    def test_rsp_none(self):
        """Test no response from peer"""
        self.scp = DummyVerificationSCP()
        self.scp.start()
        ae = AE(scu_sop_class=[VerificationSOPClass])
        assoc = ae.associate('localhost', 11112)
        # DIMSE stub that reports no response was received.
        class DummyDIMSE():
            def send_msg(*args, **kwargs): return
            def receive_msg(*args, **kwargs): return None, None
        assoc.dimse = DummyDIMSE()
        if assoc.is_established:
            assoc.send_c_echo()
        # A missing response must abort the association.
        self.assertTrue(assoc.is_aborted)
        self.scp.stop()

    def test_rsp_invalid(self):
        """Test invalid response received from peer"""
        self.scp = DummyVerificationSCP()
        self.scp.start()
        ae = AE(scu_sop_class=[VerificationSOPClass])
        assoc = ae.associate('localhost', 11112)
        # DIMSE stub that returns a response flagged as invalid.
        class DummyResponse():
            is_valid_response = False
        class DummyDIMSE():
            def send_msg(*args, **kwargs): return
            def receive_msg(*args, **kwargs): return DummyResponse(), None
        assoc.dimse = DummyDIMSE()
        if assoc.is_established:
            assoc.send_c_echo()
        self.assertTrue(assoc.is_aborted)
        self.scp.stop()

    def test_rsp_success(self):
        """Test receiving a success response from the peer"""
        self.scp = DummyVerificationSCP()
        self.scp.start()
        ae = AE(scu_sop_class=[VerificationSOPClass])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        result = assoc.send_c_echo()
        self.assertEqual(result.Status, 0x0000)
        assoc.release()
        self.scp.stop()

    def test_rsp_failure(self):
        """Test receiving a failure response from the peer"""
        self.scp = DummyVerificationSCP()
        self.scp.status = 0x0210
        self.scp.start()
        ae = AE(scu_sop_class=[VerificationSOPClass])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        result = assoc.send_c_echo()
        self.assertEqual(result.Status, 0x0210)
        assoc.release()
        self.scp.stop()

    def test_rsp_unknown_status(self):
        """Test unknown status value returned by peer"""
        self.scp = DummyVerificationSCP()
        # 0xFFF0 is not a defined C-ECHO status; it must pass through as-is.
        self.scp.status = 0xFFF0
        self.scp.start()
        ae = AE(scu_sop_class=[VerificationSOPClass])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        result = assoc.send_c_echo()
        self.assertEqual(result.Status, 0xFFF0)
        assoc.release()
        self.scp.stop()

    def test_rsp_multi_status(self):
        """Test receiving a status with extra elements"""
        # Callback returning Status plus an extra ErrorComment element.
        def on_c_echo():
            ds = Dataset()
            ds.Status = 0x0122
            ds.ErrorComment = 'Some comment'
            return ds

        self.scp = DummyVerificationSCP()
        self.scp.ae.on_c_echo = on_c_echo
        self.scp.start()
        ae = AE(scu_sop_class=[VerificationSOPClass])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        result = assoc.send_c_echo()
        self.assertEqual(result.Status, 0x0122)
        self.assertEqual(result.ErrorComment, 'Some comment')
        assoc.release()
        self.scp.stop()

    def test_abort_during(self):
        """Test aborting the association during message exchange"""
        self.scp = DummyVerificationSCP()
        self.scp.send_abort = True
        self.scp.start()
        ae = AE(scu_sop_class=[VerificationSOPClass])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        result = assoc.send_c_echo()
        # An abort mid-exchange yields an empty Dataset as the result.
        self.assertEqual(result, Dataset())
        self.assertTrue(assoc.is_aborted)
        self.scp.stop()
class TestAssociationSendCStore(unittest.TestCase):
    """Run tests on Association send_c_store.

    Each test starts a dummy Storage SCP thread, associates with it and
    exercises send_c_store with the module-level fixture datasets, asserting
    either the raised exception or the Status of the returned response.
    """
    def setUp(self):
        """Run prior to each test"""
        # Created by each test as needed; tearDown stops whatever is left.
        self.scp = None

    def tearDown(self):
        """Clear any active threads"""
        if self.scp:
            self.scp.abort()
        # Let the abort propagate before sweeping remaining SCP threads.
        time.sleep(0.1)
        for thread in threading.enumerate():
            if isinstance(thread, DummyBaseSCP):
                thread.abort()
                thread.stop()

    def test_must_be_associated(self):
        """Test SCU can't send without association."""
        # Test raise if assoc not established
        self.scp = DummyStorageSCP()
        self.scp.start()
        ae = AE(scu_sop_class=[CTImageStorage])
        assoc = ae.associate('localhost', 11112)
        assoc.release()
        self.assertFalse(assoc.is_established)
        with self.assertRaises(RuntimeError):
            assoc.send_c_store(DATASET)
        self.scp.stop()

    def test_no_abstract_syntax_match(self):
        """Test SCU when no accepted abstract syntax"""
        self.scp = DummyVerificationSCP()
        self.scp.start()
        # CT Image Storage was never proposed, so the store must fail.
        ae = AE(scu_sop_class=[VerificationSOPClass])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        self.assertRaises(ValueError, assoc.send_c_store, DATASET)
        assoc.release()
        self.scp.stop()

    def test_bad_priority(self):
        """Test bad priority raises exception"""
        self.scp = DummyStorageSCP()
        self.scp.start()
        ae = AE(scu_sop_class=[CTImageStorage])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        # 0x0003 is outside the defined priority values.
        self.assertRaises(ValueError, assoc.send_c_store, DATASET, priority=0x0003)
        assoc.release()
        self.scp.stop()

    def test_fail_encode_dataset(self):
        """Test failure if unable to encode dataset"""
        self.scp = DummyStorageSCP()
        self.scp.start()
        ae = AE(scu_sop_class=[CTImageStorage],
                transfer_syntax=[ExplicitVRLittleEndian])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        # PerimeterValue's VR is unknown so the dataset can't be encoded
        # explicitly.
        DATASET.PerimeterValue = b'\x00\x01'
        try:
            self.assertRaises(ValueError, assoc.send_c_store, DATASET)
            assoc.release()
        finally:
            # ROBUSTNESS FIX: always undo the mutation of the shared
            # module-level DATASET, even if the assertion fails, so later
            # tests see a pristine dataset.
            del DATASET.PerimeterValue # Fix up our changes
        self.scp.stop()

    def test_encode_compressed_dataset(self):
        """Test sending a dataset with a compressed transfer syntax"""
        self.scp = DummyStorageSCP()
        self.scp.start()
        ae = AE(scu_sop_class=[MRImageStorage])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        result = assoc.send_c_store(COMP_DATASET)
        self.assertEqual(result.Status, 0x0000)
        assoc.release()
        self.scp.stop()

    def test_rsp_none(self):
        """Test no response from peer"""
        self.scp = DummyStorageSCP()
        self.scp.start()
        ae = AE(scu_sop_class=[CTImageStorage])
        assoc = ae.associate('localhost', 11112)
        # DIMSE stub that reports no response was received.
        class DummyDIMSE():
            def send_msg(*args, **kwargs): return
            def receive_msg(*args, **kwargs): return None, None
        assoc.dimse = DummyDIMSE()
        self.assertTrue(assoc.is_established)
        assoc.send_c_store(DATASET)
        # A missing response must abort the association.
        self.assertTrue(assoc.is_aborted)
        self.scp.stop()

    def test_rsp_invalid(self):
        """Test invalid DIMSE message received from peer"""
        self.scp = DummyStorageSCP()
        self.scp.start()
        ae = AE(scu_sop_class=[CTImageStorage])
        assoc = ae.associate('localhost', 11112)
        # DIMSE stub that returns a response flagged as invalid.
        class DummyResponse():
            is_valid_response = False
        class DummyDIMSE():
            def send_msg(*args, **kwargs): return
            def receive_msg(*args, **kwargs): return DummyResponse(), None
        assoc.dimse = DummyDIMSE()
        self.assertTrue(assoc.is_established)
        assoc.send_c_store(DATASET)
        self.assertTrue(assoc.is_aborted)
        self.scp.stop()

    def test_rsp_failure(self):
        """Test receiving a failure response from the peer"""
        self.scp = DummyStorageSCP()
        self.scp.status = 0xC000
        self.scp.start()
        ae = AE(scu_sop_class=[CTImageStorage, RTImageStorage])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        result = assoc.send_c_store(DATASET)
        self.assertEqual(result.Status, 0xC000)
        assoc.release()
        self.scp.stop()

    def test_rsp_warning(self):
        """Test receiving a warning response from the peer"""
        self.scp = DummyStorageSCP()
        self.scp.status = 0xB000
        self.scp.start()
        ae = AE(scu_sop_class=[CTImageStorage, RTImageStorage])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        result = assoc.send_c_store(DATASET)
        self.assertEqual(result.Status, 0xB000)
        assoc.release()
        self.scp.stop()

    def test_rsp_success(self):
        """Test receiving a success response from the peer"""
        self.scp = DummyStorageSCP()
        self.scp.start()
        ae = AE(scu_sop_class=[CTImageStorage, RTImageStorage])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        result = assoc.send_c_store(DATASET)
        self.assertEqual(result.Status, 0x0000)
        assoc.release()
        self.scp.stop()

    def test_rsp_unknown_status(self):
        """Test unknown status value returned by peer"""
        self.scp = DummyStorageSCP()
        # 0xFFF0 is not a defined C-STORE status; it must pass through as-is.
        self.scp.status = 0xFFF0
        self.scp.start()
        ae = AE(scu_sop_class=[CTImageStorage])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        result = assoc.send_c_store(DATASET)
        self.assertEqual(result.Status, 0xFFF0)
        assoc.release()
        self.scp.stop()
class TestAssociationSendCFind(unittest.TestCase):
    """Run tests on Association send_c_find."""
    def setUp(self):
        """Run prior to each test"""
        # Minimal C-FIND query Identifier used by every test.
        self.ds = Dataset()
        self.ds.PatientName = '*'
        self.ds.QueryRetrieveLevel = "PATIENT"
        self.scp = None
    def tearDown(self):
        """Clear any active threads"""
        if self.scp:
            self.scp.abort()
        # Give the SCP network thread a moment to shut down.
        time.sleep(0.1)
        for thread in threading.enumerate():
            if isinstance(thread, DummyBaseSCP):
                thread.abort()
                thread.stop()
    def test_must_be_associated(self):
        """Test can't send without association."""
        # Test raise if assoc not established
        scp = DummyFindSCP()
        scp.start()
        ae = AE(scu_sop_class=[PatientRootQueryRetrieveInformationModelFind])
        assoc = ae.associate('localhost', 11112)
        assoc.release()
        self.assertFalse(assoc.is_established)
        with self.assertRaises(RuntimeError):
            next(assoc.send_c_find(self.ds))
        scp.stop()
    def test_no_abstract_syntax_match(self):
        """Test when no accepted abstract syntax"""
        scp = DummyVerificationSCP()
        scp.start()
        # Only Verification is proposed, so the Find model isn't negotiated.
        ae = AE(scu_sop_class=[VerificationSOPClass])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        def test():
            next(assoc.send_c_find(self.ds))
        self.assertRaises(ValueError, test)
        assoc.release()
        scp.stop()
    def test_bad_query_model(self):
        """Test invalid query_model value"""
        scp = DummyFindSCP()
        scp.start()
        ae = AE(scu_sop_class=[PatientRootQueryRetrieveInformationModelFind])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        with self.assertRaises(ValueError):
            next(assoc.send_c_find(self.ds, query_model='X'))
        assoc.release()
        scp.stop()
    def test_good_query_model(self):
        """Test good query_model values"""
        scp = DummyFindSCP()
        scp.statuses = [0x0000]
        scp.start()
        ae = AE(scu_sop_class=[PatientRootQueryRetrieveInformationModelFind,
                        StudyRootQueryRetrieveInformationModelFind,
                        PatientStudyOnlyQueryRetrieveInformationModelFind,
                        ModalityWorklistInformationFind])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        # 'P'atient, 'S'tudy, patient/study 'O'nly and 'W'orklist models.
        for (status, ds) in assoc.send_c_find(self.ds, query_model='P'):
            self.assertEqual(status.Status, 0x0000)
        for (status, ds) in assoc.send_c_find(self.ds, query_model='S'):
            self.assertEqual(status.Status, 0x0000)
        for (status, ds) in assoc.send_c_find(self.ds, query_model='O'):
            self.assertEqual(status.Status, 0x0000)
        for (status, ds) in assoc.send_c_find(self.ds, query_model='W'):
            self.assertEqual(status.Status, 0x0000)
        assoc.release()
        scp.stop()
    def test_fail_encode_identifier(self):
        """Test a failure in encoding the Identifier dataset"""
        self.scp = DummyFindSCP()
        self.scp.start()
        ae = AE(scu_sop_class=[PatientRootQueryRetrieveInformationModelFind],
                transfer_syntax=[ExplicitVRLittleEndian])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        # An element value that can't be encoded makes send_c_find raise.
        DATASET.PerimeterValue = b'\x00\x01'
        def test():
            next(assoc.send_c_find(DATASET, query_model='P'))
        self.assertRaises(ValueError, test)
        assoc.release()
        del DATASET.PerimeterValue # Fix up our changes
        self.scp.stop()
    def test_rsp_failure(self):
        """Test receiving a failure response from the peer"""
        scp = DummyFindSCP()
        scp.statuses = [0xA700]
        scp.start()
        ae = AE(scu_sop_class=[PatientRootQueryRetrieveInformationModelFind])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        for (status, ds) in assoc.send_c_find(self.ds, query_model='P'):
            self.assertEqual(status.Status, 0xA700)
            self.assertEqual(ds, None)
        assoc.release()
        scp.stop()
    def test_rsp_pending(self):
        """Test receiving a pending response from the peer"""
        scp = DummyFindSCP()
        scp.statuses = [0xFF00]
        scp.start()
        ae = AE(scu_sop_class=[PatientRootQueryRetrieveInformationModelFind])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        result = assoc.send_c_find(self.ds, query_model='P')
        # The pending response carries a matching Identifier dataset...
        (status, ds) = next(result)
        self.assertEqual(status.Status, 0xFF00)
        self.assertTrue('PatientName' in ds)
        # ...followed by the final success response with no dataset.
        (status, ds) = next(result)
        self.assertEqual(status.Status, 0x0000)
        self.assertTrue(ds is None)
        assoc.release()
        scp.stop()
    def test_rsp_success(self):
        """Test receiving a success response from the peer"""
        scp = DummyFindSCP()
        scp.statuses = [0x0000]
        scp.start()
        ae = AE(scu_sop_class=[PatientRootQueryRetrieveInformationModelFind])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        for (status, ds) in assoc.send_c_find(self.ds, query_model='P'):
            self.assertEqual(status.Status, 0x0000)
            self.assertEqual(ds, None)
        assoc.release()
        scp.stop()
    def test_rsp_cancel(self):
        """Test receiving a cancel response from the peer"""
        scp = DummyFindSCP()
        scp.statuses = [0xFE00]
        scp.start()
        ae = AE(scu_sop_class=[PatientRootQueryRetrieveInformationModelFind])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        for (status, ds) in assoc.send_c_find(self.ds, query_model='P'):
            self.assertEqual(status.Status, 0xFE00)
            self.assertEqual(ds, None)
        assoc.release()
        scp.stop()
    def test_rsp_invalid(self):
        """Test invalid DIMSE message response received from peer"""
        self.scp = DummyFindSCP()
        self.scp.start()
        ae = AE(scu_sop_class=[PatientRootQueryRetrieveInformationModelFind])
        assoc = ae.associate('localhost', 11112)
        # Stub DIMSE layer that hands back a response flagged as invalid.
        class DummyResponse():
            is_valid_response = False
        class DummyDIMSE():
            def send_msg(*args, **kwargs): return
            def receive_msg(*args, **kwargs): return DummyResponse(), None
        assoc.dimse = DummyDIMSE()
        self.assertTrue(assoc.is_established)
        for (_, _) in assoc.send_c_find(self.ds, query_model='P'):
            pass
        # An invalid response aborts the association.
        self.assertTrue(assoc.is_aborted)
        self.scp.stop()
    def test_rsp_unknown_status(self):
        """Test unknown status value returned by peer"""
        self.scp = DummyFindSCP()
        self.scp.statuses = [0xFFF0]
        self.scp.start()
        ae = AE(scu_sop_class=[PatientRootQueryRetrieveInformationModelFind])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        for (status, ds) in assoc.send_c_find(self.ds, query_model='P'):
            self.assertEqual(status.Status, 0xFFF0)
        assoc.release()
        self.scp.stop()
    def test_rsp_bad_dataset(self):
        """Test bad dataset returned by on_c_find"""
        self.scp = DummyFindSCP()
        # on_c_find yields a non-Dataset; the service is expected to report
        # a failure status in the 0xC000-0xCFFF range.
        def on_c_find(ds):
            def test(): pass
            yield 0xFF00, test
        self.scp.ae.on_c_find = on_c_find
        self.scp.start()
        ae = AE(scu_sop_class=[PatientRootQueryRetrieveInformationModelFind],
                transfer_syntax=[ExplicitVRLittleEndian])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        for (status, ds) in assoc.send_c_find(self.ds, query_model='P'):
            self.assertTrue(status.Status in range(0xC000, 0xD000))
        assoc.release()
        self.scp.stop()
class TestAssociationSendCCancelFind(unittest.TestCase):
    """Run tests on Association send_c_cancel_find."""
    def setUp(self):
        """Run prior to each test"""
        self.scp = None
    def tearDown(self):
        """Clear any active threads"""
        if self.scp:
            self.scp.abort()
        # Give the SCP network thread a moment to shut down.
        time.sleep(0.1)
        for thread in threading.enumerate():
            if isinstance(thread, DummyBaseSCP):
                thread.abort()
                thread.stop()
    def test_must_be_associated(self):
        """Test can't send without association."""
        # Test raise if assoc not established
        self.scp = DummyFindSCP()
        self.scp.start()
        ae = AE(scu_sop_class=[PatientRootQueryRetrieveInformationModelFind])
        assoc = ae.associate('localhost', 11112)
        assoc.release()
        self.assertFalse(assoc.is_established)
        with self.assertRaises(RuntimeError):
            assoc.send_c_cancel_find(1)
        self.scp.stop()
    def test_good_send(self):
        """Test send_c_cancel_find with a valid message ID."""
        self.scp = DummyFindSCP()
        self.scp.start()
        ae = AE(scu_sop_class=[PatientRootQueryRetrieveInformationModelFind])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        assoc.send_c_cancel_find(1)
        # Release before stopping, consistent with the other tests here.
        assoc.release()
        self.scp.stop()
    def test_bad_send(self):
        """Test send_c_cancel_find raises TypeError for a non-int message ID."""
        self.scp = DummyFindSCP()
        self.scp.start()
        ae = AE(scu_sop_class=[PatientRootQueryRetrieveInformationModelFind])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        with self.assertRaises(TypeError):
            assoc.send_c_cancel_find('a')
        assoc.release()
        self.scp.stop()
class TestAssociationSendCGet(unittest.TestCase):
    """Run tests on Association send_c_get."""
    def setUp(self):
        """Run prior to each test"""
        # C-GET query Identifier used by every test.
        self.ds = Dataset()
        self.ds.PatientName = '*'
        self.ds.QueryRetrieveLevel = "PATIENT"
        # Dataset returned by the SCP's C-STORE sub-operations.
        self.good = Dataset()
        self.good.SOPClassUID = CTImageStorage().UID
        self.good.SOPInstanceUID = '1.1.1'
        self.good.PatientName = 'Test'
        self.scp = None
    def tearDown(self):
        """Clear any active threads"""
        if self.scp:
            self.scp.abort()
        # Give the SCP network thread a moment to shut down.
        time.sleep(0.1)
        for thread in threading.enumerate():
            if isinstance(thread, DummyBaseSCP):
                thread.abort()
                thread.stop()
    def test_must_be_associated(self):
        """Test can't send without association."""
        # Test raise if assoc not established
        self.scp = DummyGetSCP()
        self.scp.start()
        ae = AE(scu_sop_class=[PatientRootQueryRetrieveInformationModelGet])
        assoc = ae.associate('localhost', 11112)
        assoc.release()
        self.assertFalse(assoc.is_established)
        with self.assertRaises(RuntimeError):
            next(assoc.send_c_get(self.ds))
        self.scp.stop()
    def test_no_abstract_syntax_match(self):
        """Test when no accepted abstract syntax"""
        self.scp = DummyStorageSCP()
        self.scp.datasets = [self.good]
        self.scp.start()
        # Only CTImageStorage is proposed, so the Get model isn't negotiated.
        ae = AE(scu_sop_class=[CTImageStorage])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        def test():
            next(assoc.send_c_get(self.ds))
        self.assertRaises(ValueError, test)
        assoc.release()
        self.scp.stop()
    def test_bad_query_model(self):
        """Test bad query model parameter"""
        self.scp = DummyGetSCP()
        self.scp.start()
        ae = AE(scu_sop_class=[PatientRootQueryRetrieveInformationModelGet])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        with self.assertRaises(ValueError):
            next(assoc.send_c_get(self.ds, query_model='X'))
        assoc.release()
        self.scp.stop()
    def test_good_query_model(self):
        """Test all the query models"""
        self.scp = DummyGetSCP()
        self.scp.start()
        ae = AE(scu_sop_class=[PatientRootQueryRetrieveInformationModelGet,
                        StudyRootQueryRetrieveInformationModelGet,
                        PatientStudyOnlyQueryRetrieveInformationModelGet])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        for (status, ds) in assoc.send_c_get(self.ds, query_model='P'):
            self.assertEqual(status.Status, 0x0000)
        for (status, ds) in assoc.send_c_get(self.ds, query_model='S'):
            self.assertEqual(status.Status, 0x0000)
        for (status, ds) in assoc.send_c_get(self.ds, query_model='O'):
            self.assertEqual(status.Status, 0x0000)
        assoc.release()
        self.scp.stop()
    def test_fail_encode_identifier(self):
        """Test a failure in encoding the Identifier dataset"""
        self.scp = DummyGetSCP()
        self.scp.start()
        ae = AE(scu_sop_class=[PatientRootQueryRetrieveInformationModelGet],
                transfer_syntax=[ExplicitVRLittleEndian])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        # An element value that can't be encoded makes send_c_get raise.
        DATASET.PerimeterValue = b'\x00\x01'
        def test():
            next(assoc.send_c_get(DATASET, query_model='P'))
        self.assertRaises(ValueError, test)
        assoc.release()
        del DATASET.PerimeterValue # Fix up our changes
        self.scp.stop()
    def test_rsp_failure(self):
        """Test receiving a failure response"""
        self.scp = DummyGetSCP()
        self.scp.statuses = [0xA701]
        self.scp.start()
        ae = AE(scu_sop_class=[PatientRootQueryRetrieveInformationModelGet])
        # An immediate failure status means no C-STORE sub-operations occur,
        # so no on_c_store handler is needed.
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        for (status, ds) in assoc.send_c_get(self.ds, query_model='P'):
            self.assertEqual(status.Status, 0xA701)
        assoc.release()
        self.scp.stop()
    def test_rsp_success(self):
        """Test good send"""
        self.scp = DummyGetSCP()
        self.scp.no_suboperations = 2
        self.scp.statuses = [0xFF00, 0xFF00]
        self.scp.datasets = [self.good, self.good]
        def on_c_store(ds):
            self.assertTrue('PatientName' in ds)
            return 0x0000
        self.scp.start()
        ae = AE(scu_sop_class=[PatientRootQueryRetrieveInformationModelGet,
                        CTImageStorage],
                scp_sop_class=[PatientRootQueryRetrieveInformationModelGet,
                        CTImageStorage],
                transfer_syntax=[ExplicitVRLittleEndian])
        ae.on_c_store = on_c_store
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        result = assoc.send_c_get(self.ds, query_model='P')
        # Two pending responses (one per sub-operation), then success.
        (status, ds) = next(result)
        self.assertEqual(status.Status, 0xff00)
        self.assertEqual(ds, None)
        (status, ds) = next(result)
        self.assertEqual(status.Status, 0xff00)
        self.assertEqual(ds, None)
        (status, ds) = next(result)
        self.assertEqual(status.Status, 0x0000)
        self.assertEqual(ds, None)
        assoc.release()
        self.scp.stop()
    def test_rsp_pending_send_success(self):
        """Test receiving a pending response and sending success"""
        self.scp = DummyGetSCP()
        self.scp.no_suboperations = 3
        self.scp.statuses = [0xFF00, 0xFF00, 0xB000]
        self.scp.datasets = [self.good, self.good]
        self.scp.start()
        ae = AE(scu_sop_class=[PatientRootQueryRetrieveInformationModelGet,
                        CTImageStorage],
                scp_sop_class=[CTImageStorage])
        def on_c_store(ds): return 0x0000
        ae.on_c_store = on_c_store
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        result = assoc.send_c_get(self.ds, query_model='P')
        # We have 2 status, ds and 1 success
        (status, ds) = next(result)
        self.assertEqual(status.Status, 0xFF00)
        self.assertTrue(ds is None)
        (status, ds) = next(result)
        self.assertEqual(status.Status, 0xFF00)
        self.assertTrue(ds is None)
        (status, ds) = next(result)
        self.assertEqual(status.Status, 0x0000)
        self.assertTrue(ds is None)
        self.assertRaises(StopIteration, next, result)
        assoc.release()
        self.scp.stop()
    def test_rsp_pending_send_failure(self):
        """Test receiving a pending response and sending a failure"""
        self.scp = DummyGetSCP()
        self.scp.no_suboperations = 3
        self.scp.statuses = [0xFF00, 0xFF00, 0x0000]
        self.scp.datasets = [self.good, self.good, None]
        self.scp.start()
        ae = AE(scu_sop_class=[PatientRootQueryRetrieveInformationModelGet,
                        CTImageStorage],
                scp_sop_class=[CTImageStorage])
        # Our C-STORE handler fails every sub-operation.
        def on_c_store(ds): return 0xA700
        ae.on_c_store = on_c_store
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        result = assoc.send_c_get(self.ds, query_model='P')
        # We have 2 status, ds and 1 success
        (status, ds) = next(result)
        self.assertEqual(status.Status, 0xFF00)
        self.assertTrue(ds is None)
        (status, ds) = next(result)
        self.assertEqual(status.Status, 0xFF00)
        self.assertTrue(ds is None)
        # Failed stores produce a warning final status with a UID list.
        (status, ds) = next(result)
        self.assertEqual(status.Status, 0xB000)
        self.assertTrue('FailedSOPInstanceUIDList' in ds)
        self.assertRaises(StopIteration, next, result)
        assoc.release()
        self.scp.stop()
    def test_rsp_pending_send_warning(self):
        """Test receiving a pending response and sending a warning"""
        self.scp = DummyGetSCP()
        self.scp.no_suboperations = 3
        self.scp.statuses = [0xFF00, 0xFF00, 0xB000]
        self.scp.datasets = [self.good, self.good, None]
        self.scp.start()
        ae = AE(scu_sop_class=[PatientRootQueryRetrieveInformationModelGet,
                        CTImageStorage],
                scp_sop_class=[CTImageStorage])
        # Our C-STORE handler warns on every sub-operation.
        def on_c_store(ds): return 0xB007
        ae.on_c_store = on_c_store
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        result = assoc.send_c_get(self.ds, query_model='P')
        # We have 2 status, ds and 1 success
        (status, ds) = next(result)
        self.assertEqual(status.Status, 0xFF00)
        self.assertTrue(ds is None)
        (status, ds) = next(result)
        self.assertEqual(status.Status, 0xFF00)
        self.assertTrue(ds is None)
        (status, ds) = next(result)
        self.assertEqual(status.Status, 0xB000)
        self.assertTrue('FailedSOPInstanceUIDList' in ds)
        self.assertRaises(StopIteration, next, result)
        assoc.release()
        self.scp.stop()
    def test_rsp_cancel(self):
        """Test receiving a cancel response"""
        self.scp = DummyGetSCP()
        self.scp.statuses = [0xFE00]
        self.scp.start()
        ae = AE(scu_sop_class=[PatientRootQueryRetrieveInformationModelGet])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        for (status, ds) in assoc.send_c_get(self.ds, query_model='P'):
            self.assertEqual(status.Status, 0xFE00)
        assoc.release()
        self.scp.stop()
    def test_rsp_warning(self):
        """Test receiving a warning response"""
        self.scp = DummyGetSCP()
        self.scp.no_suboperations = 3
        self.scp.statuses = [0xFF00, 0xFF00, 0xB000]
        self.scp.datasets = [self.good, self.good, None]
        self.scp.start()
        ae = AE(scu_sop_class=[PatientRootQueryRetrieveInformationModelGet,
                        CTImageStorage],
                scp_sop_class=[CTImageStorage])
        def on_c_store(ds): return 0xB007
        ae.on_c_store = on_c_store
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        result = assoc.send_c_get(self.ds, query_model='P')
        (status, ds) = next(result)
        self.assertEqual(status.Status, 0xff00)
        self.assertTrue(ds is None)
        (status, ds) = next(result)
        self.assertEqual(status.Status, 0xff00)
        self.assertTrue(ds is None)
        (status, ds) = next(result)
        self.assertEqual(status.Status, 0xb000)
        self.assertTrue('FailedSOPInstanceUIDList' in ds)
        self.assertRaises(StopIteration, next, result)
        assoc.release()
        self.scp.stop()
    def test_rsp_unknown_status(self):
        """Test unknown status value returned by peer"""
        self.scp = DummyGetSCP()
        self.scp.statuses = [0xFFF0]
        self.scp.start()
        ae = AE(scu_sop_class=[PatientRootQueryRetrieveInformationModelGet,
                        CTImageStorage],
                scp_sop_class=[CTImageStorage])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        for (status, ds) in assoc.send_c_get(self.ds, query_model='P'):
            self.assertEqual(status.Status, 0xFFF0)
        assoc.release()
        self.scp.stop()
class TestAssociationSendCCancelGet(unittest.TestCase):
    """Run tests on Association send_c_cancel_get."""
    def setUp(self):
        """Run prior to each test"""
        self.scp = None
    def tearDown(self):
        """Clear any active threads"""
        if self.scp:
            self.scp.abort()
        # Give the SCP network thread a moment to shut down.
        time.sleep(0.1)
        for thread in threading.enumerate():
            if isinstance(thread, DummyBaseSCP):
                thread.abort()
                thread.stop()
    def test_must_be_associated(self):
        """Test can't send without association."""
        # Test raise if assoc not established
        self.scp = DummyGetSCP()
        self.scp.start()
        ae = AE(scu_sop_class=[PatientRootQueryRetrieveInformationModelGet])
        assoc = ae.associate('localhost', 11112)
        assoc.release()
        self.assertFalse(assoc.is_established)
        with self.assertRaises(RuntimeError):
            assoc.send_c_cancel_get(1)
        self.scp.stop()
class TestAssociationSendCMove(unittest.TestCase):
    """Run tests on Association send_c_move."""
    def setUp(self):
        """Run prior to each test"""
        # C-MOVE query Identifier used by every test.
        self.ds = Dataset()
        self.ds.PatientName = '*'
        self.ds.QueryRetrieveLevel = "PATIENT"
        # Dataset sent to the move destination via C-STORE sub-operations.
        self.good = Dataset()
        self.good.SOPClassUID = CTImageStorage().UID
        self.good.SOPInstanceUID = '1.1.1'
        self.good.PatientName = 'Test'
        # scp is the move SCP, scp2 the storage SCP acting as destination.
        self.scp = None
        self.scp2 = None
    def tearDown(self):
        """Clear any active threads"""
        if self.scp:
            self.scp.abort()
        if self.scp2:
            self.scp2.abort()
        # Give the SCP network threads a moment to shut down.
        time.sleep(0.1)
        for thread in threading.enumerate():
            if isinstance(thread, DummyBaseSCP):
                thread.abort()
                thread.stop()
    def test_must_be_associated(self):
        """Test can't send without association."""
        # Test raise if assoc not established
        self.scp = DummyVerificationSCP()
        self.scp.start()
        ae = AE(scu_sop_class=[VerificationSOPClass])
        assoc = ae.associate('localhost', 11112)
        assoc.release()
        self.assertFalse(assoc.is_established)
        with self.assertRaises(RuntimeError):
            next(assoc.send_c_move(self.ds, b'TESTMOVE'))
        self.scp.stop()
    def test_no_abstract_syntax_match(self):
        """Test when no accepted abstract syntax"""
        self.scp = DummyStorageSCP()
        self.scp.start()
        # Only CTImageStorage is proposed, so the Move model isn't negotiated.
        ae = AE(scu_sop_class=[CTImageStorage])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        def test():
            next(assoc.send_c_move(self.ds, b'TESTMOVE'))
        self.assertRaises(ValueError, test)
        assoc.release()
        self.scp.stop()
    def test_bad_query_model(self):
        """Test bad query model parameter"""
        self.scp = DummyMoveSCP()
        self.scp.start()
        ae = AE(scu_sop_class=[PatientRootQueryRetrieveInformationModelMove])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        with self.assertRaises(ValueError):
            next(assoc.send_c_move(self.ds, b'TESTMOVE', query_model='X'))
        assoc.release()
        self.scp.stop()
    def test_good_query_model(self):
        """Test all the query models"""
        # Storage SCP on a second port acts as the move destination.
        self.scp2 = DummyStorageSCP(11113)
        self.scp2.start()
        self.scp = DummyMoveSCP()
        self.scp.no_suboperations = 2
        self.scp.statuses = [0xFF00, 0xFF00]
        self.scp.datasets = [self.good, self.good]
        self.scp.start()
        ae = AE(scu_sop_class=[PatientRootQueryRetrieveInformationModelMove,
                        StudyRootQueryRetrieveInformationModelMove,
                        PatientStudyOnlyQueryRetrieveInformationModelMove])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        result = assoc.send_c_move(self.ds, b'TESTMOVE', query_model='P')
        (status, ds) = next(result)
        self.assertEqual(status.Status, 0xFF00)
        (status, ds) = next(result)
        self.assertEqual(status.Status, 0xFF00)
        (status, ds) = next(result)
        self.assertEqual(status.Status, 0x0000)
        self.assertRaises(StopIteration, next, result)
        result = assoc.send_c_move(self.ds, b'TESTMOVE', query_model='S')
        (status, ds) = next(result)
        self.assertEqual(status.Status, 0xFF00)
        (status, ds) = next(result)
        self.assertEqual(status.Status, 0xFF00)
        (status, ds) = next(result)
        self.assertEqual(status.Status, 0x0000)
        self.assertRaises(StopIteration, next, result)
        result = assoc.send_c_move(self.ds, b'TESTMOVE', query_model='O')
        (status, ds) = next(result)
        self.assertEqual(status.Status, 0xFF00)
        (status, ds) = next(result)
        self.assertEqual(status.Status, 0xFF00)
        (status, ds) = next(result)
        self.assertEqual(status.Status, 0x0000)
        self.assertRaises(StopIteration, next, result)
        assoc.release()
        self.scp.stop()
        self.scp2.stop()
    def test_fail_encode_identifier(self):
        """Test a failure in encoding the Identifier dataset"""
        self.scp = DummyMoveSCP()
        self.scp.start()
        ae = AE(scu_sop_class=[PatientRootQueryRetrieveInformationModelMove],
                transfer_syntax=[ExplicitVRLittleEndian])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        # An element value that can't be encoded makes send_c_move raise.
        DATASET.PerimeterValue = b'\x00\x01'
        def test():
            next(assoc.send_c_move(DATASET, b'SOMEPLACE', query_model='P'))
        self.assertRaises(ValueError, test)
        assoc.release()
        del DATASET.PerimeterValue # Fix up our changes
        self.scp.stop()
    def test_move_destination_no_assoc(self):
        """Test move destination failed to assoc"""
        # No SCP is listening on 11113, so the sub-association fails (0xA801).
        self.scp = DummyMoveSCP()
        self.scp.destination_ae = ('localhost', 11113)
        self.scp.start()
        ae = AE(scu_sop_class=[PatientRootQueryRetrieveInformationModelMove,
                        StudyRootQueryRetrieveInformationModelMove,
                        PatientStudyOnlyQueryRetrieveInformationModelMove])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        for (status, ds) in assoc.send_c_move(self.ds, b'TESTMOVE', query_model='P'):
            self.assertEqual(status.Status, 0xa801)
        assoc.release()
        self.scp.stop()
    def test_move_destination_unknown(self):
        """Test unknown move destination"""
        self.scp = DummyMoveSCP()
        self.scp.destination_ae = ('localhost', 11113)
        self.scp.start()
        ae = AE(scu_sop_class=[PatientRootQueryRetrieveInformationModelMove,
                        StudyRootQueryRetrieveInformationModelMove,
                        PatientStudyOnlyQueryRetrieveInformationModelMove])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        for (status, ds) in assoc.send_c_move(self.ds, b'UNKNOWN', query_model='P'):
            self.assertEqual(status.Status, 0xa801)
        assoc.release()
        self.scp.stop()
    def test_move_destination_failed_store(self):
        """Test the destination AE returning failed status"""
        self.scp2 = DummyStorageSCP(11113)
        self.scp2.status = 0xA700
        self.scp2.start()
        self.scp = DummyMoveSCP()
        self.scp.destination_ae = ('localhost', 11113)
        self.scp.no_suboperations = 2
        self.scp.statuses = [0xFF00, 0xFF00]
        self.scp.datasets = [self.good, self.good]
        self.scp.start()
        ae = AE(scu_sop_class=[PatientRootQueryRetrieveInformationModelMove,
                        StudyRootQueryRetrieveInformationModelMove,
                        PatientStudyOnlyQueryRetrieveInformationModelMove])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        result = assoc.send_c_move(self.ds, b'TESTMOVE', query_model='P')
        (status, ds) = next(result)
        self.assertEqual(status.Status, 0xFF00)
        (status, ds) = next(result)
        self.assertEqual(status.Status, 0xFF00)
        # Failed sub-operations produce a warning final status.
        (status, ds) = next(result)
        self.assertEqual(status.Status, 0xB000)
        self.assertRaises(StopIteration, next, result)
        assoc.release()
        self.scp.stop()
        self.scp2.stop()
    def test_move_destination_warning_store(self):
        """Test the destination AE returning warning status"""
        self.scp2 = DummyStorageSCP(11113)
        self.scp2.status = 0xB000
        self.scp2.start()
        self.scp = DummyMoveSCP()
        self.scp.destination_ae = ('localhost', 11113)
        self.scp.no_suboperations = 2
        self.scp.statuses = [0xFF00, 0xFF00]
        self.scp.datasets = [self.good, self.good]
        self.scp.start()
        ae = AE(scu_sop_class=[PatientRootQueryRetrieveInformationModelMove,
                        StudyRootQueryRetrieveInformationModelMove,
                        PatientStudyOnlyQueryRetrieveInformationModelMove])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        result = assoc.send_c_move(self.ds, b'TESTMOVE', query_model='P')
        (status, ds) = next(result)
        self.assertEqual(status.Status, 0xFF00)
        (status, ds) = next(result)
        self.assertEqual(status.Status, 0xFF00)
        (status, ds) = next(result)
        self.assertEqual(status.Status, 0xB000)
        assoc.release()
        self.scp.stop()
        self.scp2.stop()
    def test_rsp_failure(self):
        """Test the user on_c_move returning failure status"""
        self.scp2 = DummyStorageSCP(11113)
        self.scp2.start()
        self.scp = DummyMoveSCP()
        self.scp.no_suboperations = 1
        self.scp.destination_ae = ('localhost', 11113)
        self.scp.statuses = [0xC000]
        self.scp.datasets = [None]
        self.scp.start()
        ae = AE(scu_sop_class=[PatientRootQueryRetrieveInformationModelMove,
                        StudyRootQueryRetrieveInformationModelMove,
                        PatientStudyOnlyQueryRetrieveInformationModelMove])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        result = assoc.send_c_move(self.ds, b'TESTMOVE', query_model='P')
        (status, ds) = next(result)
        self.assertEqual(status.Status, 0xC000)
        self.assertTrue('FailedSOPInstanceUIDList' in ds)
        self.assertRaises(StopIteration, next, result)
        assoc.release()
        self.scp.stop()
        # Was a duplicated self.scp.stop(); the storage SCP on 11113 must
        # also be stopped or its thread is left running.
        self.scp2.stop()
    def test_rsp_warning(self):
        """Test receiving a warning response from the peer"""
        self.scp2 = DummyStorageSCP(11113)
        self.scp2.status = 0xB007
        self.scp2.start()
        self.scp = DummyMoveSCP()
        self.scp.destination_ae = ('localhost', 11113)
        self.scp.no_suboperations = 2
        self.scp.statuses = [0xFF00, 0xFF00]
        self.scp.datasets = [self.good, self.good]
        self.scp.start()
        ae = AE(scu_sop_class=[PatientRootQueryRetrieveInformationModelMove,
                        StudyRootQueryRetrieveInformationModelMove,
                        PatientStudyOnlyQueryRetrieveInformationModelMove])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        result = assoc.send_c_move(self.ds, b'TESTMOVE', query_model='P')
        (status, ds) = next(result)
        self.assertEqual(status.Status, 0xFF00)
        self.assertEqual(ds, None)
        (status, ds) = next(result)
        self.assertEqual(status.Status, 0xFF00)
        self.assertEqual(ds, None)
        (status, ds) = next(result)
        self.assertEqual(status.Status, 0xB000)
        self.assertTrue('FailedSOPInstanceUIDList' in ds)
        self.assertRaises(StopIteration, next, result)
        assoc.release()
        self.scp.stop()
        self.scp2.stop()
    def test_rsp_cancel(self):
        """Test the user on_c_move returning cancel status"""
        self.scp2 = DummyStorageSCP(11113)
        self.scp2.start()
        self.scp = DummyMoveSCP()
        self.scp.destination_ae = ('localhost', 11113)
        self.scp.no_suboperations = 2
        self.scp.statuses = [0xFE00, 0xFF00]
        self.scp.datasets = [None, self.good]
        self.scp.start()
        ae = AE(scu_sop_class=[PatientRootQueryRetrieveInformationModelMove,
                        StudyRootQueryRetrieveInformationModelMove,
                        PatientStudyOnlyQueryRetrieveInformationModelMove])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        result = assoc.send_c_move(self.ds, b'TESTMOVE', query_model='P')
        (status, ds) = next(result)
        self.assertEqual(status.Status, 0xFE00)
        assoc.release()
        self.scp.stop()
        self.scp2.stop()
    def test_rsp_success(self):
        """Test the user on_c_move returning success status"""
        self.scp2 = DummyStorageSCP(11113)
        self.scp2.start()
        self.scp = DummyMoveSCP()
        self.scp.destination_ae = ('localhost', 11113)
        self.scp.no_suboperations = 2
        self.scp.statuses = [0xFF00, 0x0000]
        self.scp.datasets = [self.good, None]
        self.scp.start()
        ae = AE(scu_sop_class=[PatientRootQueryRetrieveInformationModelMove,
                        StudyRootQueryRetrieveInformationModelMove,
                        PatientStudyOnlyQueryRetrieveInformationModelMove])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        result = assoc.send_c_move(self.ds, b'TESTMOVE', query_model='P')
        (status, ds) = next(result)
        self.assertEqual(status.Status, 0xFF00)
        self.assertEqual(ds, None)
        (status, ds) = next(result)
        self.assertEqual(status.Status, 0x0000)
        self.assertEqual(ds, None)
        self.assertRaises(StopIteration, next, result)
        assoc.release()
        self.scp.stop()
        self.scp2.stop()
    def test_rsp_unknown_status(self):
        """Test unknown status value returned by peer"""
        self.scp2 = DummyStorageSCP(11113)
        self.scp2.start()
        self.scp = DummyMoveSCP()
        self.scp.destination_ae = ('localhost', 11113)
        self.scp.statuses = [0xFFF0]
        self.scp.start()
        ae = AE(scu_sop_class=[PatientRootQueryRetrieveInformationModelMove,
                        CTImageStorage],
                scp_sop_class=[CTImageStorage])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        for (status, ds) in assoc.send_c_move(self.ds, b'TESTMOVE', query_model='P'):
            self.assertEqual(status.Status, 0xFFF0)
        assoc.release()
        self.scp.stop()
        self.scp2.stop()
class TestAssociationSendCCancelMove(unittest.TestCase):
    """Run tests on Association send_c_cancel_move."""
    def setUp(self):
        """Run prior to each test"""
        self.scp = None
    def tearDown(self):
        """Clear any active threads"""
        if self.scp:
            self.scp.abort()
        # Give the SCP network thread a moment to shut down.
        time.sleep(0.1)
        for thread in threading.enumerate():
            if isinstance(thread, DummyBaseSCP):
                thread.abort()
                thread.stop()
    def test_must_be_associated(self):
        """Test can't send without association."""
        # Test raise if assoc not established
        self.scp = DummyMoveSCP()
        self.scp.start()
        ae = AE(scu_sop_class=[PatientRootQueryRetrieveInformationModelMove])
        assoc = ae.associate('localhost', 11112)
        assoc.release()
        self.assertFalse(assoc.is_established)
        with self.assertRaises(RuntimeError):
            assoc.send_c_cancel_move(1)
        self.scp.stop()
class TestAssociationSendNEventReport(unittest.TestCase):
    """Run tests on Association send_n_event_report."""
    def setUp(self):
        """Run prior to each test"""
        self.scp = None
    def tearDown(self):
        """Clear any active threads"""
        if self.scp:
            self.scp.abort()
        # Give the SCP network thread a moment to shut down.
        time.sleep(0.1)
        for thread in threading.enumerate():
            if isinstance(thread, DummyBaseSCP):
                thread.abort()
                thread.stop()
    def test_must_be_associated(self):
        """Test can't send without association."""
        # Test raise if assoc not established
        self.scp = DummyVerificationSCP()
        self.scp.start()
        ae = AE(scu_sop_class=[VerificationSOPClass])
        assoc = ae.associate('localhost', 11112)
        assoc.release()
        self.assertFalse(assoc.is_established)
        with self.assertRaises(RuntimeError):
            assoc.send_n_event_report()
        self.scp.stop()
    def test_not_implemented(self):
        """Test NotImplementedError raised - service not yet implemented."""
        # Raises even on an established association
        self.scp = DummyVerificationSCP()
        self.scp.start()
        ae = AE(scu_sop_class=[VerificationSOPClass])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        with self.assertRaises(NotImplementedError):
            assoc.send_n_event_report()
        assoc.release()
        self.scp.stop()
class TestAssociationSendNGet(unittest.TestCase):
    """Run tests on Association send_n_get."""
    def setUp(self):
        """Run prior to each test"""
        self.scp = None
    def tearDown(self):
        """Clear any active threads"""
        if self.scp:
            self.scp.abort()
        # Give the SCP network thread a moment to shut down.
        time.sleep(0.1)
        for thread in threading.enumerate():
            if isinstance(thread, DummyBaseSCP):
                thread.abort()
                thread.stop()
    def test_must_be_associated(self):
        """Test can't send without association."""
        # Test raise if assoc not established
        self.scp = DummyVerificationSCP()
        self.scp.start()
        ae = AE(scu_sop_class=[VerificationSOPClass])
        assoc = ae.associate('localhost', 11112)
        assoc.release()
        self.assertFalse(assoc.is_established)
        with self.assertRaises(RuntimeError):
            assoc.send_n_get()
        self.scp.stop()
    def test_not_implemented(self):
        """Test NotImplementedError raised - service not yet implemented."""
        # Raises even on an established association
        self.scp = DummyVerificationSCP()
        self.scp.start()
        ae = AE(scu_sop_class=[VerificationSOPClass])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        with self.assertRaises(NotImplementedError):
            assoc.send_n_get()
        assoc.release()
        self.scp.stop()
class TestAssociationSendNSet(unittest.TestCase):
    """Run tests on Association send_n_set."""
    def setUp(self):
        """Run prior to each test"""
        self.scp = None
    def tearDown(self):
        """Clear any active threads"""
        if self.scp:
            self.scp.abort()
        # Give the SCP network thread a moment to shut down.
        time.sleep(0.1)
        for thread in threading.enumerate():
            if isinstance(thread, DummyBaseSCP):
                thread.abort()
                thread.stop()
    def test_must_be_associated(self):
        """Test can't send without association."""
        # Test raise if assoc not established
        self.scp = DummyVerificationSCP()
        self.scp.start()
        ae = AE(scu_sop_class=[VerificationSOPClass])
        assoc = ae.associate('localhost', 11112)
        assoc.release()
        self.assertFalse(assoc.is_established)
        with self.assertRaises(RuntimeError):
            assoc.send_n_set()
        self.scp.stop()
    def test_not_implemented(self):
        """Test NotImplementedError raised - service not yet implemented."""
        # Raises even on an established association
        self.scp = DummyVerificationSCP()
        self.scp.start()
        ae = AE(scu_sop_class=[VerificationSOPClass])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established)
        with self.assertRaises(NotImplementedError):
            assoc.send_n_set()
        assoc.release()
        self.scp.stop()
class TestAssociationSendNAction(unittest.TestCase):
    """Tests for Association.send_n_action."""

    def setUp(self):
        # No SCP is running until a test starts one.
        self.scp = None

    def tearDown(self):
        """Clear any active threads."""
        if self.scp:
            self.scp.abort()

        time.sleep(0.1)

        # Ensure no dummy SCP threads survive into the next test.
        for thread in threading.enumerate():
            if isinstance(thread, DummyBaseSCP):
                thread.abort()
                thread.stop()

    def test_must_be_associated(self):
        """send_n_action raises RuntimeError once the association is released."""
        self.scp = DummyVerificationSCP()
        self.scp.start()

        app_entity = AE(scu_sop_class=[VerificationSOPClass])
        association = app_entity.associate('localhost', 11112)
        association.release()
        self.assertFalse(association.is_established)

        with self.assertRaises(RuntimeError):
            association.send_n_action()

        self.scp.stop()

    def test_not_implemented(self):
        """send_n_action raises NotImplementedError while associated."""
        self.scp = DummyVerificationSCP()
        self.scp.start()

        app_entity = AE(scu_sop_class=[VerificationSOPClass])
        association = app_entity.associate('localhost', 11112)
        self.assertTrue(association.is_established)

        with self.assertRaises(NotImplementedError):
            association.send_n_action()

        association.release()
        self.scp.stop()
class TestAssociationSendNCreate(unittest.TestCase):
    """Tests for Association.send_n_create."""

    def setUp(self):
        # No SCP is running until a test starts one.
        self.scp = None

    def tearDown(self):
        """Clear any active threads."""
        if self.scp:
            self.scp.abort()

        time.sleep(0.1)

        # Ensure no dummy SCP threads survive into the next test.
        for thread in threading.enumerate():
            if isinstance(thread, DummyBaseSCP):
                thread.abort()
                thread.stop()

    def test_must_be_associated(self):
        """send_n_create raises RuntimeError once the association is released."""
        self.scp = DummyVerificationSCP()
        self.scp.start()

        app_entity = AE(scu_sop_class=[VerificationSOPClass])
        association = app_entity.associate('localhost', 11112)
        association.release()
        self.assertFalse(association.is_established)

        with self.assertRaises(RuntimeError):
            association.send_n_create()

        self.scp.stop()

    def test_not_implemented(self):
        """send_n_create raises NotImplementedError while associated."""
        self.scp = DummyVerificationSCP()
        self.scp.start()

        app_entity = AE(scu_sop_class=[VerificationSOPClass])
        association = app_entity.associate('localhost', 11112)
        self.assertTrue(association.is_established)

        with self.assertRaises(NotImplementedError):
            association.send_n_create()

        association.release()
        self.scp.stop()
class TestAssociationSendNDelete(unittest.TestCase):
    """Tests for Association.send_n_delete."""

    def setUp(self):
        # No SCP is running until a test starts one.
        self.scp = None

    def tearDown(self):
        """Clear any active threads."""
        if self.scp:
            self.scp.abort()

        time.sleep(0.1)

        # Ensure no dummy SCP threads survive into the next test.
        for thread in threading.enumerate():
            if isinstance(thread, DummyBaseSCP):
                thread.abort()
                thread.stop()

    def test_must_be_associated(self):
        """send_n_delete raises RuntimeError once the association is released."""
        self.scp = DummyVerificationSCP()
        self.scp.start()

        app_entity = AE(scu_sop_class=[VerificationSOPClass])
        association = app_entity.associate('localhost', 11112)
        association.release()
        self.assertFalse(association.is_established)

        with self.assertRaises(RuntimeError):
            association.send_n_delete()

        self.scp.stop()

    def test_not_implemented(self):
        """send_n_delete raises NotImplementedError while associated."""
        self.scp = DummyVerificationSCP()
        self.scp.start()

        app_entity = AE(scu_sop_class=[VerificationSOPClass])
        association = app_entity.associate('localhost', 11112)
        self.assertTrue(association.is_established)

        with self.assertRaises(NotImplementedError):
            association.send_n_delete()

        association.release()
        self.scp.stop()
class TestAssociationCallbacks(unittest.TestCase):
    """Tests for Association debug callbacks."""

    def setUp(self):
        # No SCP is running until a test starts one.
        self.scp = None

    def tearDown(self):
        """Clear any active threads."""
        if self.scp:
            self.scp.abort()

        time.sleep(0.1)

        # Ensure no dummy SCP threads survive into the next test.
        for thread in threading.enumerate():
            if isinstance(thread, DummyBaseSCP):
                thread.abort()
                thread.stop()

    def test_debug_assoc_rq(self):
        """The association-requested debug callback runs without error."""
        self.scp = DummyVerificationSCP()
        self.scp.start()

        app_entity = AE(scu_sop_class=[VerificationSOPClass])
        association = app_entity.associate('localhost', 11112)
        association.debug_association_requested(None)
        association.release()

        self.scp.stop()
# Allow running this test module directly from the command line.
if __name__ == "__main__":
unittest.main()
| 35.834054
| 90
| 0.615388
| 8,933
| 82,920
| 5.566439
| 0.050935
| 0.05082
| 0.040543
| 0.021518
| 0.888949
| 0.873464
| 0.851845
| 0.835636
| 0.809673
| 0.791554
| 0
| 0.021758
| 0.278895
| 82,920
| 2,313
| 91
| 35.849546
| 0.809847
| 0.09473
| 0
| 0.85513
| 0
| 0
| 0.025673
| 0.002089
| 0
| 0
| 0.010485
| 0.000432
| 0.162909
| 1
| 0.099211
| false
| 0.003946
| 0.01071
| 0.008455
| 0.127959
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b87cc149c00e3d48c1757a6d7e934ce3e4401085
| 10,576
|
py
|
Python
|
Dashboard/views.py
|
Poornartha/ProductSocial
|
b2bc1fcf81090cb1edfdce4e76d7345e37d968e4
|
[
"MIT"
] | 8
|
2020-07-05T17:06:40.000Z
|
2022-02-05T19:44:53.000Z
|
Dashboard/views.py
|
Poornartha/ProductSocial
|
b2bc1fcf81090cb1edfdce4e76d7345e37d968e4
|
[
"MIT"
] | 13
|
2020-11-13T18:56:58.000Z
|
2022-03-12T00:38:59.000Z
|
Dashboard/views.py
|
Poornartha/ProductSocial
|
b2bc1fcf81090cb1edfdce4e76d7345e37d968e4
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from .operations import scrape, scrape_limeroad, scrape_zobello
from django.contrib.auth.models import User
from accounts.models import Customer
# Create your views here.
def dashboard(request):
    """Render the recent-searches dashboard.

    Scrapes listings for up to the user's three most recent search terms
    and trims each group depending on how many terms exist.

    Bug fix: the third-term branch was an ``elif`` chained after the
    second-term check. Since ``len - 3 >= 0`` implies ``len - 2 >= 0``,
    that branch could never run, so ``listings3``/``search_now3`` were
    always empty. It is now an independent ``if``.
    """
    search_terms = []
    search_now1 = ''
    search_now2 = ''
    search_now3 = ''
    listings1 = []
    listings2 = []
    listings3 = []
    if request.user.is_active:
        search_terms = request.user.search_set.all()
        print(search_terms)
        if search_terms:
            # Most recent term.
            search_now = search_terms[len(search_terms) - 1].search_term
            # 'tee' products come from the primary scraper; everything
            # else from Limeroad.
            if 'tee' in search_now.lower():
                listings1 = scrape(search_now)
            else:
                listings1 = scrape_limeroad(search_now)
            search_now1 = search_now
            if len(search_terms) - 2 >= 0:
                search_now = search_terms[len(search_terms) - 2].search_term
                if 'tee' in search_now.lower():
                    listings2 = scrape(search_now)
                else:
                    listings2 = scrape_limeroad(search_now)
                search_now2 = search_now
            # Was ``elif`` — unreachable, since the branch above always
            # fires first whenever three or more terms exist.
            if len(search_terms) - 3 >= 0:
                search_now = search_terms[len(search_terms) - 3].search_term
                if 'tee' in search_now.lower():
                    listings3 = scrape(search_now)
                else:
                    listings3 = scrape_limeroad(search_now)
                search_now3 = search_now
            # Trim each group so the page stays balanced.
            if len(search_terms) >= 3:
                listings1 = listings1[:3]
                listings2 = listings2[:3]
                listings3 = listings3[:3]
            elif len(search_terms) >= 2:
                listings1 = listings1[:6]
                listings2 = listings2[:6]
            else:
                listings1 = []
                listings2 = []
                listings3 = []
    else:
        listings1 = []
        listings2 = []
        listings3 = []
    context = {
        'listings1': listings1,
        'listings2': listings2,
        'listings3': listings3,
        'search1': search_now1,
        'search2': search_now2,
        'search3': search_now3,
    }
    return render(request, 'dashboard-recent.html', context)
def shirt(request):
    """Scrape shirt listings matched to the user's profile preferences."""
    search_term = ''
    if request.user.is_active:
        customer = request.user.customer
        colour = customer.favourite_color
        product = 'shirt'
        search_term = request.user.customer.gender + ' ' + colour + " " + product
        print(search_term)
        # Primary scraper first, then the two fallback sites.
        listings = scrape(search_term)
        listings += scrape_limeroad(search_term)
        listings += scrape_zobello(search_term)
    else:
        # Anonymous users see an empty page.
        listings = []
    context = {
        'listings': listings,
        'search': search_term,
    }
    return render(request, 'dashboard-product.html', context)
def shoes(request):
    """Scrape shoe listings matched to the user's profile preferences.

    Fix: removed a dead ``search_term`` assignment that was immediately
    overwritten by the gender-prefixed version.
    """
    search_term = ''
    if request.user.is_active:
        customer = request.user.customer
        search_color = customer.favourite_color
        search_prod = 'shoes'
        search_term = request.user.customer.gender + ' ' + search_color + " " + search_prod
        print(search_term)
        # Limeroad first, then Zobello, then the primary scraper.
        listings = scrape_limeroad(search_term)
        listings += scrape_zobello(search_term)
        listings += scrape(search_term)
    else:
        # Anonymous users see an empty page.
        listings = []
    context = {
        'listings': listings,
        'search': search_term,
    }
    return render(request, 'dashboard-product.html', context)
def shorts(request):
    """Scrape shorts listings matched to the user's profile preferences.

    Fix: removed a dead ``search_term`` assignment that was immediately
    overwritten by the gender-prefixed version.
    """
    search_term = ''
    if request.user.is_active:
        customer = request.user.customer
        search_color = customer.favourite_color
        search_prod = 'shorts'
        search_term = request.user.customer.gender + ' ' + search_color + " " + search_prod
        print(search_term)
        # Limeroad first, then Zobello, then the primary scraper.
        listings = scrape_limeroad(search_term)
        listings += scrape_zobello(search_term)
        listings += scrape(search_term)
    else:
        # Anonymous users see an empty page.
        listings = []
    context = {
        'listings': listings,
        'search': search_term,
    }
    return render(request, 'dashboard-product.html', context)
def jeans(request):
    """Scrape jeans listings matched to the user's profile preferences.

    Fix: removed a dead ``search_term`` assignment that was immediately
    overwritten, and an unused ``search_prod`` pre-initialisation.
    """
    search_term = ''
    if request.user.is_active:
        customer = request.user.customer
        search_color = customer.favourite_color
        search_prod = 'jeans'
        search_term = request.user.customer.gender + ' ' + search_color + " " + search_prod
        print(search_term)
        # Limeroad first, then Zobello, then the primary scraper.
        listings = scrape_limeroad(search_term)
        listings += scrape_zobello(search_term)
        listings += scrape(search_term)
    else:
        # Anonymous users see an empty page.
        listings = []
    context = {
        'listings': listings,
        'search': search_term,
    }
    return render(request, 'dashboard-product.html', context)
###########################################################
def blue(request):
    """Scrape blue listings of the user's favourite item type.

    Fixes: removed a dead ``search_term`` assignment that was immediately
    overwritten; the debug print now logs the full search term for
    consistency with the other colour views (it printed only the product).
    """
    search_prod = ''
    search_term = ''
    if request.user.is_active:
        customer = request.user.customer
        search_color = 'blue'
        search_prod = customer.favourite_item
        search_term = request.user.customer.gender + ' ' + search_color + " " + search_prod
        print(search_term)
        # Scraper order differs by gender profile.
        if request.user.customer.gender == 'male':
            listings = scrape(search_term)
            listings += scrape_zobello(search_term)
            listings += scrape_limeroad(search_term)
        else:
            listings = scrape_limeroad(search_term)
            listings += scrape_zobello(search_term)
            listings += scrape(search_term)
    else:
        listings = []
    context = {
        'listings': listings,
        'search': search_prod,
    }
    return render(request, 'dashboard.html', context)
def red(request):
    """Scrape red listings of the user's favourite item type.

    Fix: removed a dead ``search_term`` assignment that was immediately
    overwritten by the gender-prefixed version.
    """
    search_prod = ''
    search_term = ''
    if request.user.is_active:
        customer = request.user.customer
        search_color = 'red'
        search_prod = customer.favourite_item
        search_term = request.user.customer.gender + ' ' + search_color + " " + search_prod
        print(search_term)
        # Scraper order differs by gender profile.
        if request.user.customer.gender == 'male':
            listings = scrape(search_term)
            listings += scrape_zobello(search_term)
            listings += scrape_limeroad(search_term)
        else:
            listings = scrape_limeroad(search_term)
            listings += scrape_zobello(search_term)
            listings += scrape(search_term)
    else:
        listings = []
    context = {
        'listings': listings,
        'search': search_prod,
    }
    return render(request, 'dashboard.html', context)
def green(request):
    """Scrape green listings of the user's favourite item type.

    Fix: removed a dead ``search_term`` assignment that was immediately
    overwritten by the gender-prefixed version.
    """
    search_prod = ''
    search_term = ''
    if request.user.is_active:
        customer = request.user.customer
        search_color = 'green'
        search_prod = customer.favourite_item
        search_term = request.user.customer.gender + ' ' + search_color + " " + search_prod
        print(search_term)
        # Scraper order differs by gender profile.
        if request.user.customer.gender == 'male':
            listings = scrape(search_term)
            listings += scrape_zobello(search_term)
            listings += scrape_limeroad(search_term)
        else:
            listings = scrape_limeroad(search_term)
            listings += scrape_zobello(search_term)
            listings += scrape(search_term)
    else:
        listings = []
    context = {
        'listings': listings,
        'search': search_prod,
    }
    return render(request, 'dashboard.html', context)
def black(request):
    """Scrape black listings of the user's favourite item type.

    Fix: removed a dead ``search_term`` assignment that was immediately
    overwritten by the gender-prefixed version.
    """
    search_prod = ''
    search_term = ''
    if request.user.is_active:
        customer = request.user.customer
        search_color = 'black'
        search_prod = customer.favourite_item
        search_term = request.user.customer.gender + ' ' + search_color + " " + search_prod
        print(search_term)
        # Scraper order differs by gender profile.
        if request.user.customer.gender == 'male':
            listings = scrape(search_term)
            listings += scrape_zobello(search_term)
            listings += scrape_limeroad(search_term)
        else:
            listings = scrape_limeroad(search_term)
            listings += scrape_zobello(search_term)
            listings += scrape(search_term)
    else:
        listings = []
    context = {
        'listings': listings,
        'search': search_prod,
    }
    return render(request, 'dashboard.html', context)
def yellow(request):
    """Scrape yellow listings of the user's favourite item type.

    Fix: removed a dead ``search_term`` assignment that was immediately
    overwritten by the gender-prefixed version.
    """
    search_prod = ''
    search_term = ''
    if request.user.is_active:
        customer = request.user.customer
        search_color = 'yellow'
        search_prod = customer.favourite_item
        search_term = request.user.customer.gender + ' ' + search_color + " " + search_prod
        print(search_term)
        # Scraper order differs by gender profile.
        if request.user.customer.gender == 'male':
            listings = scrape(search_term)
            listings += scrape_zobello(search_term)
            listings += scrape_limeroad(search_term)
        else:
            listings = scrape_limeroad(search_term)
            listings += scrape_zobello(search_term)
            listings += scrape(search_term)
    else:
        listings = []
    context = {
        'listings': listings,
        'search': search_prod,
    }
    return render(request, 'dashboard.html', context)
def white(request):
    """Scrape white listings of the user's favourite item type.

    Fix: removed a dead ``search_term`` assignment that was immediately
    overwritten by the gender-prefixed version.
    """
    search_prod = ''
    search_term = ''
    if request.user.is_active:
        customer = request.user.customer
        search_color = 'white'
        search_prod = customer.favourite_item
        search_term = request.user.customer.gender + ' ' + search_color + " " + search_prod
        print(search_term)
        # Scraper order differs by gender profile.
        if request.user.customer.gender == 'male':
            listings = scrape(search_term)
            listings += scrape_zobello(search_term)
            listings += scrape_limeroad(search_term)
        else:
            listings = scrape_limeroad(search_term)
            listings += scrape_zobello(search_term)
            listings += scrape(search_term)
    else:
        listings = []
    context = {
        'listings': listings,
        'search': search_prod,
    }
    return render(request, 'dashboard.html', context)
def suggesions(request):
    """Render the suggestions page (name typo kept for URL compatibility)."""
    # No context is supplied; the template renders an empty product page.
    template_name = 'dashboard-product.html'
    return render(request, template_name)
| 31.664671
| 91
| 0.594554
| 1,058
| 10,576
| 5.694707
| 0.070888
| 0.154357
| 0.107552
| 0.143402
| 0.852116
| 0.825726
| 0.810954
| 0.805311
| 0.778257
| 0.778257
| 0
| 0.007707
| 0.300681
| 10,576
| 333
| 92
| 31.75976
| 0.806923
| 0.002175
| 0
| 0.740484
| 0
| 0
| 0.04899
| 0.012486
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041522
| false
| 0
| 0.013841
| 0.00346
| 0.096886
| 0.038062
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b880f04bd99991e1af418bfe1c4f6ff1a1b79135
| 1,993
|
py
|
Python
|
karp/tests/unit/test_entry_query.py
|
spraakbanken/karp-backend-v6-tmp
|
e5b78157bd999df18c188973ae2a337015b6f35d
|
[
"MIT"
] | 1
|
2021-12-08T15:33:42.000Z
|
2021-12-08T15:33:42.000Z
|
karp/tests/unit/test_entry_query.py
|
spraakbanken/karp-backend-v6-tmp
|
e5b78157bd999df18c188973ae2a337015b6f35d
|
[
"MIT"
] | null | null | null |
karp/tests/unit/test_entry_query.py
|
spraakbanken/karp-backend-v6-tmp
|
e5b78157bd999df18c188973ae2a337015b6f35d
|
[
"MIT"
] | null | null | null |
import pytest
from karp.domain import index, errors
from karp.services import entry_query
from karp.tests import random_refs
from .adapters import bootstrap_test_app
class TestSearchIds:
    """search_ids must reject unknown and unpublished resources."""

    def test_cannot_search_non_existent_resource(self):
        message_bus = bootstrap_test_app()
        with pytest.raises(errors.ResourceNotFound):
            entry_query.search_ids("non_existing", "entry", message_bus.ctx)

    def test_cannot_search_non_published_resource(self):
        message_bus = bootstrap_test_app()
        # Create the resource but never publish it.
        message_bus.handle(random_refs.make_create_resource_command("existing"))
        with pytest.raises(errors.ResourceNotPublished):
            entry_query.search_ids("existing", "entry", message_bus.ctx)
class TestQuery:
    """query must reject unknown and unpublished resources."""

    def test_cannot_search_non_existent_resource(self):
        message_bus = bootstrap_test_app()
        with pytest.raises(errors.ResourceNotFound):
            request = index.QueryRequest(resource_ids="non_existing")
            entry_query.query(request, message_bus.ctx)

    def test_cannot_search_non_published_resource(self):
        message_bus = bootstrap_test_app()
        # Create the resource but never publish it.
        message_bus.handle(random_refs.make_create_resource_command("existing"))
        with pytest.raises(errors.ResourceNotPublished):
            request = index.QueryRequest(resource_ids="existing")
            entry_query.query(request, message_bus.ctx)
class TestQuerySplit:
    """query_split must reject unknown and unpublished resources."""

    def test_cannot_search_non_existent_resource(self):
        message_bus = bootstrap_test_app()
        with pytest.raises(errors.ResourceNotFound):
            request = index.QueryRequest(resource_ids="non_existing")
            entry_query.query_split(request, message_bus.ctx)

    def test_cannot_search_non_published_resource(self):
        message_bus = bootstrap_test_app()
        # Create the resource but never publish it.
        message_bus.handle(random_refs.make_create_resource_command("existing"))
        with pytest.raises(errors.ResourceNotPublished):
            request = index.QueryRequest(resource_ids="existing")
            entry_query.query_split(request, message_bus.ctx)
| 39.078431
| 75
| 0.730557
| 238
| 1,993
| 5.768908
| 0.184874
| 0.06992
| 0.081573
| 0.08303
| 0.809177
| 0.809177
| 0.809177
| 0.809177
| 0.796067
| 0.775674
| 0
| 0
| 0.186152
| 1,993
| 50
| 76
| 39.86
| 0.846486
| 0
| 0
| 0.74359
| 0
| 0
| 0.047165
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0
| 0.128205
| 0
| 0.358974
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b8a2c1768ba10966aebc16d9be918a33c64f937e
| 30,995
|
py
|
Python
|
exp/dev/nerf_inr/models/discriminator_v15.py
|
zihangJiang/CIPS-3D
|
5d092688b19393033f444b43c88585a6169e9c7b
|
[
"MIT"
] | 308
|
2021-10-19T17:29:14.000Z
|
2022-03-31T11:54:45.000Z
|
exp/dev/nerf_inr/models/discriminator_v15.py
|
zihangJiang/CIPS-3D
|
5d092688b19393033f444b43c88585a6169e9c7b
|
[
"MIT"
] | 28
|
2021-10-31T22:49:00.000Z
|
2022-03-25T05:49:47.000Z
|
exp/dev/nerf_inr/models/discriminator_v15.py
|
zihangJiang/CIPS-3D
|
5d092688b19393033f444b43c88585a6169e9c7b
|
[
"MIT"
] | 44
|
2021-10-21T10:08:23.000Z
|
2022-03-16T10:05:08.000Z
|
import logging
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from tl2.proj.fvcore import MODEL_REGISTRY
from tl2.proj.pytorch.pytorch_hook import VerboseModel
from tl2.launch.launch_utils import global_cfg
from tl2.proj.pytorch import torch_utils, init_func
from exp.comm.models import mod_conv_fc
class AddCoords(nn.Module):
    """Append normalised x/y coordinate channels to a feature map.

    Source: https://github.com/mkocabas/CoordConv-pytorch/blob/master/CoordConv.py
    """

    def __init__(self, with_r=False):
        super().__init__()
        # Optionally append a radial-distance channel as well.
        self.with_r = with_r

    def forward(self, input_tensor):
        """
        Args:
          input_tensor: shape(batch, channel, x_dim, y_dim)
        """
        batch, _, x_dim, y_dim = input_tensor.size()

        # Ramps over each spatial axis, rescaled from [0, dim-1] to [-1, 1].
        xx = torch.arange(x_dim).repeat(1, y_dim, 1)
        yy = torch.arange(y_dim).repeat(1, x_dim, 1).transpose(1, 2)
        xx = xx.float() / (x_dim - 1) * 2 - 1
        yy = yy.float() / (y_dim - 1) * 2 - 1

        # Broadcast over the batch and move to (batch, 1, x_dim, y_dim).
        xx = xx.repeat(batch, 1, 1, 1).transpose(2, 3)
        yy = yy.repeat(batch, 1, 1, 1).transpose(2, 3)

        out = torch.cat(
            [input_tensor,
             xx.type_as(input_tensor),
             yy.type_as(input_tensor)], dim=1)

        if self.with_r:
            # Radial distance measured from the (0.5, 0.5) offset origin.
            rr = torch.sqrt(
                torch.pow(xx.type_as(input_tensor) - 0.5, 2)
                + torch.pow(yy.type_as(input_tensor) - 0.5, 2))
            out = torch.cat([out, rr], dim=1)
        return out
class CoordConv(nn.Module):
    """Conv2d over an input augmented with coordinate channels.

    Source: https://github.com/mkocabas/CoordConv-pytorch/blob/master/CoordConv.py
    """

    def __init__(self, in_channels, out_channels, with_r=False, **kwargs):
        super().__init__()
        self.addcoords = AddCoords(with_r=with_r)
        # AddCoords appends 2 channels, plus one more when with_r is set.
        extra_channels = 3 if with_r else 2
        self.conv = nn.Conv2d(in_channels + extra_channels, out_channels, **kwargs)

    def forward(self, x):
        return self.conv(self.addcoords(x))
def kaiming_leaky_init(m):
    """He-initialise Linear/Conv2d weights for LeakyReLU(0.2) activations."""
    cls_name = m.__class__.__name__
    # Only touch modules whose class name mentions Linear or Conv2d.
    if 'Linear' in cls_name or 'Conv2d' in cls_name:
        torch.nn.init.kaiming_normal_(m.weight, a=0.2, mode='fan_in', nonlinearity='leaky_relu')
class ResidualCCBlock(nn.Module):
    """Two CoordConv layers (second strided by 2) with a strided 1x1 skip."""

    def __init__(self, inplanes, planes, kernel_size=3):
        super().__init__()
        pad = kernel_size // 2
        self.network = nn.Sequential(
            CoordConv(inplanes, planes, kernel_size=kernel_size, padding=pad),
            nn.LeakyReLU(0.2, inplace=True),
            CoordConv(planes, planes, kernel_size=kernel_size, stride=2, padding=pad),
            nn.LeakyReLU(0.2, inplace=True))
        self.network.apply(kaiming_leaky_init)
        # 1x1 projection so the skip matches the downsampled shape.
        self.proj = nn.Conv2d(inplanes, planes, 1, stride=2)

    def forward(self, input):
        identity = self.proj(input)
        out = self.network(input)
        # Scale the residual sum to keep variance roughly constant.
        return (out + identity) / math.sqrt(2)
class AdapterBlock(nn.Module):
    """Project a 3-channel RGB image to *output_channels* feature maps."""

    def __init__(self, output_channels):
        super().__init__()
        self.model = nn.Sequential(
            nn.Conv2d(3, output_channels, 1, padding=0),
            nn.LeakyReLU(0.2, inplace=True))

    def forward(self, input):
        return self.model(input)
@MODEL_REGISTRY.register(name_prefix=__name__)
class CCSEncoderDiscriminator(nn.Module):
    """Progressive CoordConv discriminator returning (score, latent, position)."""

    def __repr__(self):
        return f"{self.__class__.__name__}({self.repr})"

    def __init__(self,
                 dim_z=0,
                 **kwargs):  # from 4 * 2^0 to 4 * 2^7 4 -> 512
        super().__init__()
        self.repr = f"dim_z={dim_z}"

        self.epoch = 0
        self.step = 0
        self.dim_z = dim_z

        self.layers = nn.ModuleList([
            ResidualCCBlock(32, 64),     # 6 256x256 -> 128x128
            ResidualCCBlock(64, 128),    # 5 128x128 -> 64x64
            ResidualCCBlock(128, 256),   # 4 64x64 -> 32x32
            ResidualCCBlock(256, 400),   # 3 32x32 -> 16x16
            ResidualCCBlock(400, 400),   # 2 16x16 -> 8x8
            ResidualCCBlock(400, 400),   # 1 8x8 -> 4x4
            ResidualCCBlock(400, 400),   # 7 4x4 -> 2x2
        ])
        self.fromRGB = nn.ModuleList([
            AdapterBlock(32),
            AdapterBlock(64),
            AdapterBlock(128),
            AdapterBlock(256),
            AdapterBlock(400),
            AdapterBlock(400),
            AdapterBlock(400),
            AdapterBlock(400),
        ])
        # Output channels: 1 score + dim_z latent + 2 position values.
        self.final_layer = nn.Conv2d(400, 1 + self.dim_z + 2, 2)
        # Input resolution -> index of the first residual layer to apply
        # (256 -> 0, 128 -> 1, ..., 2 -> 7).
        self.img_size_to_layer = {2 ** (8 - idx): idx for idx in range(8)}

        torch_utils.print_number_params(models_dict={
            'layers': self.layers,
            'fromRGB': self.fromRGB,
            'final_layer': self.final_layer,
            'D': self,
        })
        logging.getLogger('tl').info(self)
        pass

    def forward(self,
                input,
                alpha,
                options=None,
                **kwargs):
        start = self.img_size_to_layer[input.shape[-1]]

        x = self.fromRGB[start](input)

        if kwargs.get('instance_noise', 0) > 0:
            x = x + torch.randn_like(x) * kwargs['instance_noise']

        for idx, layer in enumerate(self.layers[start:]):
            if idx == 1 and alpha < 1:
                # Fade in the higher-resolution pathway during growth.
                x = alpha * x + (1 - alpha) * self.fromRGB[start + 1](
                    F.interpolate(input, scale_factor=0.5, mode='nearest'))
            x = layer(x)

        x = self.final_layer(x).reshape(x.shape[0], -1)

        prediction = x[..., 0:1]
        latent = x[..., 1:1 + self.dim_z]
        position = x[..., 1 + self.dim_z:]
        return prediction, latent, position
class ResidualCCBlock_FirstDown(nn.Module):
    """Two CoordConv layers (first strided by 2) with a strided 1x1 skip."""

    def __init__(self, inplanes, planes, kernel_size=3):
        super().__init__()
        pad = kernel_size // 2
        self.network = nn.Sequential(
            CoordConv(inplanes, planes, kernel_size=kernel_size, stride=2, padding=pad),
            nn.LeakyReLU(0.2, inplace=True),
            CoordConv(planes, planes, kernel_size=kernel_size, stride=1, padding=pad),
            nn.LeakyReLU(0.2, inplace=True))
        self.network.apply(kaiming_leaky_init)
        # 1x1 projection so the skip matches the downsampled shape.
        self.proj = nn.Conv2d(inplanes, planes, 1, stride=2)

    def forward(self, input):
        identity = self.proj(input)
        out = self.network(input)
        # Scale the residual sum to keep variance roughly constant.
        return (out + identity) / math.sqrt(2)
@MODEL_REGISTRY.register(name_prefix=__name__)
class CCSEncoderDiscriminator_FirstDown(nn.Module):
    """CCSEncoderDiscriminator variant using first-strided residual blocks."""

    def __repr__(self):
        return f"{self.__class__.__name__}({self.repr})"

    def __init__(self,
                 dim_z=0,
                 **kwargs):  # from 4 * 2^0 to 4 * 2^7 4 -> 512
        super().__init__()
        self.repr = f"dim_z={dim_z}"

        self.epoch = 0
        self.step = 0
        self.dim_z = dim_z

        self.layers = nn.ModuleList([
            ResidualCCBlock_FirstDown(32, 64),     # 6 256x256 -> 128x128
            ResidualCCBlock_FirstDown(64, 128),    # 5 128x128 -> 64x64
            ResidualCCBlock_FirstDown(128, 256),   # 4 64x64 -> 32x32
            ResidualCCBlock_FirstDown(256, 400),   # 3 32x32 -> 16x16
            ResidualCCBlock_FirstDown(400, 400),   # 2 16x16 -> 8x8
            ResidualCCBlock_FirstDown(400, 400),   # 1 8x8 -> 4x4
            ResidualCCBlock_FirstDown(400, 400),   # 7 4x4 -> 2x2
        ])
        self.fromRGB = nn.ModuleList([
            AdapterBlock(32),
            AdapterBlock(64),
            AdapterBlock(128),
            AdapterBlock(256),
            AdapterBlock(400),
            AdapterBlock(400),
            AdapterBlock(400),
            AdapterBlock(400),
        ])
        # Output channels: 1 score + dim_z latent + 2 position values.
        self.final_layer = nn.Conv2d(400, 1 + self.dim_z + 2, 2)
        # Input resolution -> index of the first residual layer to apply.
        self.img_size_to_layer = {2 ** (8 - idx): idx for idx in range(8)}

        torch_utils.print_number_params(models_dict={
            'layers': self.layers,
            'fromRGB': self.fromRGB,
            'final_layer': self.final_layer,
            'D': self,
        })
        logging.getLogger('tl').info(self)
        pass

    def forward(self,
                input,
                alpha,
                options=None,
                **kwargs):
        start = self.img_size_to_layer[input.shape[-1]]

        if global_cfg.tl_debug:
            VerboseModel.forward_verbose(self.fromRGB[start],
                                         inputs_args=(input, ),
                                         submodels=['model', ],
                                         name_prefix=f"fromRGB[{start}].")
        x = self.fromRGB[start](input)

        if kwargs.get('instance_noise', 0) > 0:
            x = x + torch.randn_like(x) * kwargs['instance_noise']

        for idx, layer in enumerate(self.layers[start:]):
            if idx == 1 and alpha < 1:
                # Fade in the higher-resolution pathway during growth.
                x = alpha * x + (1 - alpha) * self.fromRGB[start + 1](
                    F.interpolate(input, scale_factor=0.5, mode='nearest'))
            if global_cfg.tl_debug:
                VerboseModel.forward_verbose(layer,
                                             inputs_args=(x, ),
                                             submodels=['network', 'network.0', 'network.2'],
                                             name_prefix=f"layers[{start + idx}].")
            x = layer(x)

        if global_cfg.tl_debug:
            VerboseModel.forward_verbose(self.final_layer,
                                         inputs_args=(x, ),
                                         name_prefix=f"final_layer.")
        x = self.final_layer(x).reshape(x.shape[0], -1)

        prediction = x[..., 0:1]
        latent = x[..., 1:1 + self.dim_z]
        position = x[..., 1 + self.dim_z:]
        return prediction, latent, position
class SinAct(nn.Module):
    """Sine activation: forward(x) = sin(x)."""

    def __init__(self):
        super(SinAct, self).__init__()

    def forward(self, x):
        return torch.sin(x)
class ResidualCCBlock_FirstDown_SinAct(nn.Module):
    """ResidualCCBlock_FirstDown with sine activations instead of LeakyReLU."""

    def __init__(self, inplanes, planes, kernel_size=3):
        super().__init__()
        pad = kernel_size // 2
        self.network = nn.Sequential(
            CoordConv(inplanes, planes, kernel_size=kernel_size, stride=2, padding=pad),
            SinAct(),
            CoordConv(planes, planes, kernel_size=kernel_size, stride=1, padding=pad),
            SinAct())
        self.network.apply(kaiming_leaky_init)
        # 1x1 projection so the skip matches the downsampled shape.
        self.proj = nn.Conv2d(inplanes, planes, 1, stride=2)

    def forward(self, input):
        identity = self.proj(input)
        out = self.network(input)
        # Scale the residual sum to keep variance roughly constant.
        return (out + identity) / math.sqrt(2)
@MODEL_REGISTRY.register(name_prefix=__name__)
class CCSEncoderDiscriminator_FirstDown_SinAct(nn.Module):
    """CCSEncoderDiscriminator_FirstDown variant with sine-activated blocks."""

    def __repr__(self):
        return f"{self.__class__.__name__}({self.repr})"

    def __init__(self,
                 dim_z=0,
                 **kwargs):  # from 4 * 2^0 to 4 * 2^7 4 -> 512
        super().__init__()
        self.repr = f"dim_z={dim_z}"

        self.epoch = 0
        self.step = 0
        self.dim_z = dim_z

        self.layers = nn.ModuleList([
            ResidualCCBlock_FirstDown_SinAct(32, 64),     # 6 256x256 -> 128x128
            ResidualCCBlock_FirstDown_SinAct(64, 128),    # 5 128x128 -> 64x64
            ResidualCCBlock_FirstDown_SinAct(128, 256),   # 4 64x64 -> 32x32
            ResidualCCBlock_FirstDown_SinAct(256, 400),   # 3 32x32 -> 16x16
            ResidualCCBlock_FirstDown_SinAct(400, 400),   # 2 16x16 -> 8x8
            ResidualCCBlock_FirstDown_SinAct(400, 400),   # 1 8x8 -> 4x4
            ResidualCCBlock_FirstDown_SinAct(400, 400),   # 7 4x4 -> 2x2
        ])
        self.fromRGB = nn.ModuleList([
            AdapterBlock(32),
            AdapterBlock(64),
            AdapterBlock(128),
            AdapterBlock(256),
            AdapterBlock(400),
            AdapterBlock(400),
            AdapterBlock(400),
            AdapterBlock(400),
        ])
        # Output channels: 1 score + dim_z latent + 2 position values.
        self.final_layer = nn.Conv2d(400, 1 + self.dim_z + 2, 2)
        # Input resolution -> index of the first residual layer to apply.
        self.img_size_to_layer = {2 ** (8 - idx): idx for idx in range(8)}

        torch_utils.print_number_params(models_dict={
            'layers': self.layers,
            'fromRGB': self.fromRGB,
            'final_layer': self.final_layer,
            'D': self,
        })
        logging.getLogger('tl').info(self)
        pass

    def forward(self,
                input,
                alpha,
                options=None,
                **kwargs):
        start = self.img_size_to_layer[input.shape[-1]]

        if global_cfg.tl_debug:
            VerboseModel.forward_verbose(self.fromRGB[start],
                                         inputs_args=(input, ),
                                         submodels=['model', ],
                                         name_prefix=f"fromRGB[{start}].")
        x = self.fromRGB[start](input)

        if kwargs.get('instance_noise', 0) > 0:
            x = x + torch.randn_like(x) * kwargs['instance_noise']

        for idx, layer in enumerate(self.layers[start:]):
            if idx == 1 and alpha < 1:
                # Fade in the higher-resolution pathway during growth.
                x = alpha * x + (1 - alpha) * self.fromRGB[start + 1](
                    F.interpolate(input, scale_factor=0.5, mode='nearest'))
            if global_cfg.tl_debug:
                VerboseModel.forward_verbose(layer,
                                             inputs_args=(x, ),
                                             submodels=['network', 'network.0', 'network.2'],
                                             name_prefix=f"layers[{start + idx}].")
            x = layer(x)

        if global_cfg.tl_debug:
            VerboseModel.forward_verbose(self.final_layer,
                                         inputs_args=(x, ),
                                         name_prefix=f"final_layer.")
        x = self.final_layer(x).reshape(x.shape[0], -1)

        prediction = x[..., 0:1]
        latent = x[..., 1:1 + self.dim_z]
        position = x[..., 1 + self.dim_z:]
        return prediction, latent, position
class CoordConvSinAct(nn.Module):
    """CoordConv variant: a sine-activated conv over an xy grid is added to
    a plain conv over the input.

    Source: https://github.com/mkocabas/CoordConv-pytorch/blob/master/CoordConv.py
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 channels_per_group=16,
                 **kwargs):
        super().__init__()
        # Separate conv for the 2-channel coordinate grid.
        self.coord_conv = nn.Conv2d(2, out_channels, **kwargs)
        self.sin_act = SinAct()
        self.conv = nn.Conv2d(in_channels, out_channels, **kwargs)
        pass

    def forward(self, input):
        batch, _, H, W = input.shape
        # Coordinate grid in [-1, 1]; .T restores (H, W) ordering.
        x, y = torch.meshgrid(torch.linspace(-1, 1, W, device=input.device),
                              torch.linspace(-1, 1, H, device=input.device))
        xy = torch.stack((x.T, y.T), dim=0)
        xy = xy.expand((batch, -1, -1, -1))

        coord_fea = self.sin_act(self.coord_conv(xy))
        out = self.conv(input)
        return coord_fea + out
class ResidualCCBlock_FirstDown_CoordConvSinAct(nn.Module):
    """First-strided residual block built from CoordConvSinAct layers."""

    def __init__(self, inplanes, planes, kernel_size=3):
        super().__init__()
        pad = kernel_size // 2
        self.network = nn.Sequential(
            CoordConvSinAct(inplanes, planes, kernel_size=kernel_size, stride=2, padding=pad),
            nn.LeakyReLU(0.2, inplace=True),
            CoordConvSinAct(planes, planes, kernel_size=kernel_size, stride=1, padding=pad),
            nn.LeakyReLU(0.2, inplace=True))
        self.network.apply(kaiming_leaky_init)
        # 1x1 projection so the skip matches the downsampled shape.
        self.proj = nn.Conv2d(inplanes, planes, 1, stride=2)

    def forward(self, input):
        identity = self.proj(input)
        out = self.network(input)
        # Scale the residual sum to keep variance roughly constant.
        return (out + identity) / math.sqrt(2)
@MODEL_REGISTRY.register(name_prefix=__name__)
class CCSEncoderDiscriminator_FirstDown_CoordConvSinAct(nn.Module):
    """
    Coord_Conv_Sin (good);
    """

    def __repr__(self):
        return f"{self.__class__.__name__}({self.repr})"

    def __init__(self,
                 dim_z=0,
                 **kwargs):  # from 4 * 2^0 to 4 * 2^7 4 -> 512
        super().__init__()
        self.repr = f"dim_z={dim_z}"

        self.epoch = 0
        self.step = 0
        self.dim_z = dim_z

        max_channel = 400
        self.layers = nn.ModuleList([
            ResidualCCBlock_FirstDown_CoordConvSinAct(32, 64),                      # 6 256x256 -> 128x128
            ResidualCCBlock_FirstDown_CoordConvSinAct(64, 128),                     # 5 128x128 -> 64x64
            ResidualCCBlock_FirstDown_CoordConvSinAct(128, 256),                    # 4 64x64 -> 32x32
            ResidualCCBlock_FirstDown_CoordConvSinAct(256, max_channel),            # 3 32x32 -> 16x16
            ResidualCCBlock_FirstDown_CoordConvSinAct(max_channel, max_channel),    # 2 16x16 -> 8x8
            ResidualCCBlock_FirstDown_CoordConvSinAct(max_channel, max_channel),    # 1 8x8 -> 4x4
            ResidualCCBlock_FirstDown_CoordConvSinAct(max_channel, max_channel),    # 7 4x4 -> 2x2
        ])
        self.fromRGB = nn.ModuleList([
            AdapterBlock(32),
            AdapterBlock(64),
            AdapterBlock(128),
            AdapterBlock(256),
            AdapterBlock(max_channel),
            AdapterBlock(max_channel),
            AdapterBlock(max_channel),
            AdapterBlock(max_channel),
        ])
        # Output channels: 1 score + dim_z latent + 2 position values.
        self.final_layer = nn.Conv2d(max_channel, 1 + self.dim_z + 2, 2)
        # Input resolution -> index of the first residual layer to apply.
        self.img_size_to_layer = {2 ** (8 - idx): idx for idx in range(8)}

        torch_utils.print_number_params(models_dict={
            'layers': self.layers,
            'fromRGB': self.fromRGB,
            'final_layer': self.final_layer,
            'D': self,
        })
        logging.getLogger('tl').info(self)
        pass

    def forward(self,
                input,
                alpha,
                options=None,
                **kwargs):
        start = self.img_size_to_layer[input.shape[-1]]

        if global_cfg.tl_debug:
            VerboseModel.forward_verbose(self.fromRGB[start],
                                         inputs_args=(input, ),
                                         submodels=['model', ],
                                         name_prefix=f"fromRGB[{start}].")
        x = self.fromRGB[start](input)

        if kwargs.get('instance_noise', 0) > 0:
            x = x + torch.randn_like(x) * kwargs['instance_noise']

        for idx, layer in enumerate(self.layers[start:]):
            if idx == 1 and alpha < 1:
                # Fade in the higher-resolution pathway during growth.
                x = alpha * x + (1 - alpha) * self.fromRGB[start + 1](
                    F.interpolate(input, scale_factor=0.5, mode='nearest'))
            if global_cfg.tl_debug:
                VerboseModel.forward_verbose(layer,
                                             inputs_args=(x, ),
                                             submodels=['network', 'network.0', 'network.2'],
                                             input_padding=50,
                                             name_prefix=f"layers[{start + idx}].")
            x = layer(x)

        if global_cfg.tl_debug:
            VerboseModel.forward_verbose(self.final_layer,
                                         inputs_args=(x, ),
                                         name_prefix=f"final_layer.")
        x = self.final_layer(x).reshape(x.shape[0], -1)

        prediction = x[..., 0:1]
        latent = x[..., 1:1 + self.dim_z]
        position = x[..., 1 + self.dim_z:]
        return prediction, latent, position
@MODEL_REGISTRY.register(name_prefix=__name__)
class Discriminator_CoordConvSinAct(nn.Module):
    """
    Progressive-growing discriminator built from CoordConvSinAct residual blocks.

    NOTE(review): this class is byte-for-byte identical to
    CCSEncoderDiscriminator_FirstDown_CoordConvSinAct. The original docstring
    claimed "Support 512 and 1024", but img_size_to_layer only maps sizes up
    to 256 — larger inputs would raise KeyError; confirm against callers.
    """
    def __repr__(self): return f"{self.__class__.__name__}({self.repr})"

    def __init__(self,
                 dim_z=0,
                 **kwargs):  # from 4 * 2^0 to 4 * 2^7 4 -> 512
        """
        :param dim_z: number of latent channels predicted by the final head.
        """
        super().__init__()
        self.repr = f"dim_z={dim_z}"
        # Training-progress counters; presumably updated by the trainer — TODO confirm.
        self.epoch = 0
        self.step = 0
        self.dim_z = dim_z
        max_channel = 400
        # One stride-2 residual block per resolution step (spatial sizes in comments).
        self.layers = nn.ModuleList(
            [
                ResidualCCBlock_FirstDown_CoordConvSinAct(32, 64),  # 6 256x256 -> 128x128
                ResidualCCBlock_FirstDown_CoordConvSinAct(64, 128),  # 5 128x128 -> 64x64
                ResidualCCBlock_FirstDown_CoordConvSinAct(128, 256),  # 4 64x64 -> 32x32
                ResidualCCBlock_FirstDown_CoordConvSinAct(256, max_channel),  # 3 32x32 -> 16x16
                ResidualCCBlock_FirstDown_CoordConvSinAct(max_channel, max_channel),  # 2 16x16 -> 8x8
                ResidualCCBlock_FirstDown_CoordConvSinAct(max_channel, max_channel),  # 1 8x8 -> 4x4
                ResidualCCBlock_FirstDown_CoordConvSinAct(max_channel, max_channel),  # 7 4x4 -> 2x2
            ])
        # RGB -> feature adapters, one per entry resolution.
        self.fromRGB = nn.ModuleList(
            [
                AdapterBlock(32),
                AdapterBlock(64),
                AdapterBlock(128),
                AdapterBlock(256),
                AdapterBlock(max_channel),
                AdapterBlock(max_channel),
                AdapterBlock(max_channel),
                AdapterBlock(max_channel)
            ])
        # Head: 1 score channel + dim_z latent channels + 2 position channels.
        self.final_layer = nn.Conv2d(max_channel, 1 + self.dim_z + 2, 2)
        # Maps input resolution to the index of the first layer to run.
        self.img_size_to_layer = {
            2: 7,
            4: 6,
            8: 5,
            16: 4,
            32: 3,
            64: 2,
            128: 1,
            256: 0
        }
        torch_utils.print_number_params(models_dict={
            'layers': self.layers,
            'fromRGB': self.fromRGB,
            'final_layer': self.final_layer,
            'D': self,
        })
        logging.getLogger('tl').info(self)
        pass

    def forward(self,
                input,
                alpha,
                options=None,
                **kwargs):
        """
        :param input: image batch; its spatial size selects the entry layer.
        :param alpha: progressive-growing fade-in weight in [0, 1].
        :return: (prediction, latent, position).
        """
        start = self.img_size_to_layer[input.shape[-1]]
        if global_cfg.tl_debug:
            VerboseModel.forward_verbose(self.fromRGB[start],
                                         inputs_args=(input, ),
                                         submodels=['model', ],
                                         name_prefix=f"fromRGB[{start}].")
        x = self.fromRGB[start](input)
        # Optional instance noise for discriminator regularization.
        if kwargs.get('instance_noise', 0) > 0:
            x = x + torch.randn_like(x) * kwargs['instance_noise']
        for i, layer in enumerate(self.layers[start:]):
            # Fade-in: blend with the next-coarser adapter on a 2x-downsampled input.
            if i == 1 and alpha < 1:
                x = alpha * x + (1 - alpha) * self.fromRGB[start + 1](F.interpolate(input, scale_factor=0.5, mode='nearest'))
            if global_cfg.tl_debug:
                VerboseModel.forward_verbose(layer,
                                             inputs_args=(x,),
                                             submodels=['network', 'network.0', 'network.2'],
                                             input_padding=50,
                                             name_prefix=f"layers[{start + i}].")
            x = layer(x)
        if global_cfg.tl_debug:
            VerboseModel.forward_verbose(self.final_layer,
                                         inputs_args=(x, ),
                                         name_prefix=f"final_layer.")
        x = self.final_layer(x).reshape(x.shape[0], -1)
        # Split the flattened head output into its three predictions.
        prediction = x[..., 0:1]
        latent = x[..., 1:1 + self.dim_z]
        position = x[..., 1 + self.dim_z:]
        return prediction, latent, position
class CoordConvSinAct_EqualLR(nn.Module):
    """
    CoordConv layer with a sin-activated coordinate branch, using
    equalized-learning-rate convolutions for both branches.

    Source: https://github.com/mkocabas/CoordConv-pytorch/blob/master/CoordConv.py
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 **kwargs):
        super().__init__()
        # Branch over the 2-channel (x, y) coordinate grid.
        self.coord_conv = mod_conv_fc.EqualConv2d(2, out_channels, kernel_size, stride, padding=padding)
        self.sin_act = SinAct()
        # Branch over the actual input features.
        self.conv = mod_conv_fc.EqualConv2d(in_channels, out_channels, kernel_size, stride, padding=padding)
        pass

    def forward(self, input):
        batch, _, H, W = input.shape
        # meshgrid here uses the default 'ij' indexing, so the grids come out
        # (W, H); the transposes below bring them to (H, W).
        # NOTE(review): newer torch warns unless indexing= is passed explicitly.
        x, y = torch.meshgrid(torch.linspace(-1, 1, W, device=input.device),
                              torch.linspace(-1, 1, H, device=input.device))
        x = x.T  # (H, W); values run -1..1 along the width axis
        y = y.T  # (H, W); values run -1..1 along the height axis
        xy = torch.stack((x, y), dim=0)  # (2, H, W)
        xy = xy.expand((batch, -1, -1, -1))  # (B, 2, H, W) view, no copy
        xy_fea = self.coord_conv(xy)
        xy_fea = self.sin_act(xy_fea)
        out = self.conv(input)
        # Sum of coordinate branch and feature branch.
        out = xy_fea + out
        return out
class ResidualCCBlockFirstDown_EqualLR(nn.Module):
    """Residual downsampling block of two CoordConvSinAct_EqualLR convolutions.

    When ``skip`` is True, a 1x1 equalized conv projects the input to the main
    branch's shape and the residual sum is scaled by 1/sqrt(2).
    """
    def __init__(self,
                 inplanes,
                 planes,
                 stride=2,
                 kernel_size=3,
                 skip=True):
        super().__init__()
        self.skip = skip
        pad = kernel_size // 2
        # Main branch: strided conv followed by a stride-1 conv, LeakyReLU after each.
        self.network = nn.Sequential(
            CoordConvSinAct_EqualLR(in_channels=inplanes,
                                    out_channels=planes,
                                    kernel_size=kernel_size,
                                    stride=stride,
                                    padding=pad),
            nn.LeakyReLU(0.2, inplace=True),
            CoordConvSinAct_EqualLR(in_channels=planes,
                                    out_channels=planes,
                                    kernel_size=kernel_size,
                                    stride=1,
                                    padding=pad),
            nn.LeakyReLU(0.2, inplace=True),
        )
        # No extra weight init: the equalized-LR convs handle their own scaling.
        if skip:
            self.proj = mod_conv_fc.EqualConv2d(inplanes, planes, 1, stride)

    def forward(self, input):
        out = self.network(input)
        if not self.skip:
            return out
        # Variance-preserving residual sum.
        return (out + self.proj(input)) / math.sqrt(2)
@MODEL_REGISTRY.register(name_prefix=__name__)
class DiscriminatorMultiScale_EqualLR(nn.Module):
    """
    Multi-scale discriminator built from equalized-LR CoordConv residual blocks.

    Inputs smaller than 128px are bilinearly upsampled to 128 before the
    backbone; features are global-average-pooled and fed to a linear head
    that returns ``(prediction, latent, position)``.
    """
    def __repr__(self): return f"{self.__class__.__name__}({self.repr})"

    def __init__(self,
                 dim_z=0,
                 **kwargs):
        """
        :param dim_z: number of latent channels predicted by the final head.
        :param kwargs: ignored.
        """
        super().__init__()
        self.repr = f"dim_z={dim_z}"
        self.dim_z = dim_z
        logger = logging.getLogger('tl')
        # Training-progress counters; presumably updated by the trainer — TODO confirm.
        self.epoch = 0
        self.step = 0
        # Backbone: strided blocks downsample to 16x16, then stride-1 blocks refine.
        self.layers = nn.ModuleList(
            [
                # 1024
                ResidualCCBlockFirstDown_EqualLR(3, 32),
                # 512
                ResidualCCBlockFirstDown_EqualLR(32, 64),
                # 256
                ResidualCCBlockFirstDown_EqualLR(64, 128),
                # 128
                ResidualCCBlockFirstDown_EqualLR(128, 256),
                # 64
                ResidualCCBlockFirstDown_EqualLR(256, 512),
                # 32
                ResidualCCBlockFirstDown_EqualLR(512, 512),
                # 16
                ResidualCCBlockFirstDown_EqualLR(512, 512, stride=1),
                # 16
                ResidualCCBlockFirstDown_EqualLR(512, 512, stride=1),
                # 16
                ResidualCCBlockFirstDown_EqualLR(512, 512, stride=1),
                # 16
                # ResidualCCBlock(400, 400),
                # 2
            ])
        # Head: 1 score + dim_z latent + 2 position outputs.
        final_layer = nn.Linear(512, 1 + dim_z + 2)
        self.final_layer = final_layer
        torch_utils.print_number_params(models_dict={'layers': self.layers,
                                                     'final_layer': self.final_layer,
                                                     'D': self})
        logger.info(self)

    def forward(self,
                x,
                alpha,
                **kwargs):
        """
        :param x: image batch (B, 3, H, W).
        :param alpha: unused here; kept for interface parity with the
            progressive-growing discriminators.
        :return: (prediction, latent, position).
        """
        img_size = x.shape[-1]
        if img_size < 128:
            # Fix: F.upsample_bilinear is deprecated; per the PyTorch docs it is
            # equivalent to interpolate(..., mode='bilinear', align_corners=True).
            x = F.interpolate(x, size=128, mode='bilinear', align_corners=True)
        # Optional instance noise for discriminator regularization.
        if kwargs.get('instance_noise', 0) > 0:
            x = x + torch.randn_like(x) * kwargs['instance_noise']
        for i, layer in enumerate(self.layers):
            if global_cfg.tl_debug:
                VerboseModel.forward_verbose(layer,
                                             inputs_args=(x.clone(),),
                                             submodels=['network', 'network.0', 'network.2'],
                                             name_prefix=f"layers[{i}].",
                                             input_padding=50)
            x = layer(x)
        # Global average pool to a 512-dim vector per sample.
        x = F.adaptive_avg_pool2d(x, output_size=1)
        x = x.flatten(1)
        if global_cfg.tl_debug:
            VerboseModel.forward_verbose(self.final_layer,
                                         inputs_args=(x,),
                                         name_prefix=f"final_layer.")
        x = self.final_layer(x).reshape(x.shape[0], -1)
        # Split the head output into its three predictions.
        prediction = x[..., 0:1]
        latent = x[..., 1:1+self.dim_z]
        position = x[..., 1+self.dim_z:]
        return prediction, latent, position
class ResidualCCBlock_FirstDown_CoordConvSinAct_EqualLR(nn.Module):
    """Stride-2 residual block of two equalized-LR CoordConvSinAct convolutions."""
    def __init__(self, inplanes, planes, kernel_size=3):
        super().__init__()
        p = kernel_size // 2
        self.network = nn.Sequential(
            CoordConvSinAct_EqualLR(inplanes, planes, kernel_size=kernel_size, stride=2, padding=p),
            nn.LeakyReLU(0.2, inplace=True),
            CoordConvSinAct_EqualLR(planes, planes, kernel_size=kernel_size, stride=1, padding=p),
            nn.LeakyReLU(0.2, inplace=True)
        )
        # NOTE(review): kaiming_leaky_init re-initializes the equalized-LR convs,
        # whereas ResidualCCBlockFirstDown_EqualLR leaves this call commented out.
        # Confirm whether re-initializing here is intentional.
        self.network.apply(kaiming_leaky_init)
        # 1x1 stride-2 projection for the shortcut.
        self.proj = mod_conv_fc.EqualConv2d(inplanes, planes, 1, 2)
        pass

    def forward(self, input):
        y = self.network(input)
        identity = self.proj(input)
        # Variance-preserving residual sum.
        y = (y + identity) / math.sqrt(2)
        return y
@MODEL_REGISTRY.register(name_prefix=__name__)
class CCSEncoderDiscriminator_FirstDown_CoordConvSinAct_EqualLR(nn.Module):
    """
    Equalized-LR variant of CCSEncoderDiscriminator_FirstDown_CoordConvSinAct.

    NOTE(review): the original docstring said "CoordConv + GroupNorm", but no
    GroupNorm appears in this class — confirm against the intended design.
    ``forward`` returns ``(prediction, latent, position)``.
    """
    def __repr__(self): return f"{self.__class__.__name__}({self.repr})"

    def __init__(self,
                 dim_z=0,
                 **kwargs):  # from 4 * 2^0 to 4 * 2^7 4 -> 512
        """
        :param dim_z: number of latent channels predicted by the final head.
        """
        super().__init__()
        self.repr = f"dim_z={dim_z}"
        # Training-progress counters; presumably updated by the trainer — TODO confirm.
        self.epoch = 0
        self.step = 0
        self.dim_z = dim_z
        max_channel = 400
        # One stride-2 residual block per resolution step (spatial sizes in comments).
        self.layers = nn.ModuleList(
            [
                ResidualCCBlock_FirstDown_CoordConvSinAct_EqualLR(32, 64),  # 6 256x256 -> 128x128
                ResidualCCBlock_FirstDown_CoordConvSinAct_EqualLR(64, 128),  # 5 128x128 -> 64x64
                ResidualCCBlock_FirstDown_CoordConvSinAct_EqualLR(128, 256),  # 4 64x64 -> 32x32
                ResidualCCBlock_FirstDown_CoordConvSinAct_EqualLR(256, max_channel),  # 3 32x32 -> 16x16
                ResidualCCBlock_FirstDown_CoordConvSinAct_EqualLR(max_channel, max_channel),  # 2 16x16 -> 8x8
                ResidualCCBlock_FirstDown_CoordConvSinAct_EqualLR(max_channel, max_channel),  # 1 8x8 -> 4x4
                ResidualCCBlock_FirstDown_CoordConvSinAct_EqualLR(max_channel, max_channel),  # 7 4x4 -> 2x2
            ])
        # RGB -> feature adapters, one per entry resolution.
        self.fromRGB = nn.ModuleList(
            [
                AdapterBlock(32),
                AdapterBlock(64),
                AdapterBlock(128),
                AdapterBlock(256),
                AdapterBlock(max_channel),
                AdapterBlock(max_channel),
                AdapterBlock(max_channel),
                AdapterBlock(max_channel)
            ])
        # Head: 1 score channel + dim_z latent channels + 2 position channels.
        self.final_layer = nn.Conv2d(max_channel, 1 + self.dim_z + 2, 2)
        # Maps input resolution to the index of the first layer to run.
        self.img_size_to_layer = {
            2: 7,
            4: 6,
            8: 5,
            16: 4,
            32: 3,
            64: 2,
            128: 1,
            256: 0
        }
        torch_utils.print_number_params(models_dict={
            'layers': self.layers,
            'fromRGB': self.fromRGB,
            'final_layer': self.final_layer,
            'D': self,
        })
        logging.getLogger('tl').info(self)
        pass

    def forward(self,
                input,
                alpha,
                options=None,
                **kwargs):
        """
        :param input: image batch; its spatial size selects the entry layer.
        :param alpha: progressive-growing fade-in weight in [0, 1].
        :return: (prediction, latent, position).
        """
        start = self.img_size_to_layer[input.shape[-1]]
        if global_cfg.tl_debug:
            VerboseModel.forward_verbose(self.fromRGB[start],
                                         inputs_args=(input, ),
                                         submodels=['model', ],
                                         name_prefix=f"fromRGB[{start}].")
        x = self.fromRGB[start](input)
        # Optional instance noise for discriminator regularization.
        if kwargs.get('instance_noise', 0) > 0:
            x = x + torch.randn_like(x) * kwargs['instance_noise']
        for i, layer in enumerate(self.layers[start:]):
            # Fade-in: blend with the next-coarser adapter on a 2x-downsampled input.
            if i == 1 and alpha < 1:
                x = alpha * x + (1 - alpha) * self.fromRGB[start + 1](F.interpolate(input, scale_factor=0.5, mode='nearest'))
            if global_cfg.tl_debug:
                VerboseModel.forward_verbose(layer,
                                             inputs_args=(x,),
                                             submodels=['network', 'network.0', 'network.2'],
                                             input_padding=50,
                                             name_prefix=f"layers[{start + i}].")
            x = layer(x)
        if global_cfg.tl_debug:
            VerboseModel.forward_verbose(self.final_layer,
                                         inputs_args=(x, ),
                                         name_prefix=f"final_layer.")
        x = self.final_layer(x).reshape(x.shape[0], -1)
        # Split the flattened head output into its three predictions.
        prediction = x[..., 0:1]
        latent = x[..., 1:1 + self.dim_z]
        position = x[..., 1 + self.dim_z:]
        return prediction, latent, position
| 29.889103
| 117
| 0.575706
| 3,756
| 30,995
| 4.513845
| 0.059904
| 0.013212
| 0.016043
| 0.010617
| 0.868055
| 0.852129
| 0.825351
| 0.781939
| 0.753804
| 0.739766
| 0
| 0.058033
| 0.296177
| 30,995
| 1,036
| 118
| 29.917954
| 0.719138
| 0.057138
| 0
| 0.793103
| 0
| 0
| 0.043011
| 0.00916
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058748
| false
| 0.014049
| 0.012771
| 0.011494
| 0.120051
| 0.00894
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b2467b6534cf286415cb97363eea89e43f8743ff
| 62,632
|
py
|
Python
|
v1.3/python/torch/dlr_pytorch_wrapper.py
|
github-fds/Deep_Learning_Routines
|
7f91fbb4db10e3e96e939e7d312a7ec0534febc7
|
[
"Unlicense"
] | 9
|
2020-12-27T04:47:09.000Z
|
2022-02-17T22:01:26.000Z
|
v1.3/python/torch/dlr_pytorch_wrapper.py
|
github-fds/Deep_Learning_Routines
|
7f91fbb4db10e3e96e939e7d312a7ec0534febc7
|
[
"Unlicense"
] | null | null | null |
v1.3/python/torch/dlr_pytorch_wrapper.py
|
github-fds/Deep_Learning_Routines
|
7f91fbb4db10e3e96e939e7d312a7ec0534febc7
|
[
"Unlicense"
] | 3
|
2020-12-27T03:16:36.000Z
|
2021-01-31T06:30:55.000Z
|
#!/usr/bin/env python
"""
This file contains Python interface of convolution_2d.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
#-------------------------------------------------------------------------------
__author__ = "Ando Ki"
__copyright__ = "Copyright 2020, Future Design Systems"
__credits__ = ["none", "some"]
__license__ = "FUTURE DESIGN SYSTEMS SOFTWARE END-USER LICENSE AGREEMENT"
__version__ = "0"
__revision__ = "1"
__maintainer__ = "Ando Ki"
__email__ = "contact@future-ds.com"
__status__ = "Development"
__date__ = "2020.09.30"
__description__= "PyTorch interface of Deep Learning Processing Routines"
#-------------------------------------------------------------------------------
import torch
import torch.nn as nn
import torch.nn.functional as F
import python as _dlr
#===============================================================================
def conv2d(input,  # in_minibatch x in_channel x in_size x in_size
           weight,  # out_channel x in_channel x kernel_size x kernel_size
           bias=None,  # out_channel
           stride=1,
           padding=0,
           dilation=1,
           groups=1,
           rigor=False,
           verbose=False):
    """
    Corresponding torch.nn.functional.conv2d(input, weight, bias=None,
                                             stride, padding, dilation, groups)
    Returns output tensor on success
    Applies a 2D convolution over an input data composed of several input channels,
    one minibatch sample at a time, by delegating to _dlr.Convolution2d.
    Note that all nd-array lists are PyTorch tensor (immutable).
    :param input: input data, input[in_minibatch][in_channel][in_size][in_size]
    :param weight: kernel (or filter), weight[out_channel][in_channel][kernel_size][kernel_size]
    :param bias: bias for each filter (kernel), bias[out_channel]
    :param stride: num of skips to apply next filter
    :param padding: num of pixes at the boundary
    :param dilation: must be 1 (anything else is rejected when rigor is set)
    :param groups: must be 1 (anything else is rejected when rigor is set)
    :param rigor: check values rigorously when 'True'
    :param verbose: output message more when 'True'
    :return: out_data on success, None on failure.
    """
    if rigor:
        # Accumulate violations and bail out with None if any check failed.
        error = 0
        if (dilation!=1): error += 1  # not support
        if (groups!=1): error += 1  # not support
        if (input.dim()!=4): error += 1
        if (input.shape[2]!=input.shape[3]): error += 1  # not square
        if (weight.dim()!=4): error += 1
        if (weight.shape[2]!=weight.shape[3]): error += 1  # not square
        if (bias is not None) and (bias.dim()!=1): error += 1
        if (input.shape[1]!=weight.shape[1]): error += 1  # in_channel
        if (bias is not None) and (bias.shape[0]!=weight.shape[0]): error += 1  # out_channel
        if (stride<=0) or (padding<0): error += 1
        if error!=0: return None
    dtype = input.dtype
    in_minibatch = input.shape[0]
    in_size = input.shape[3]
    kernel_size = weight.shape[3]
    out_channel = weight.shape[0]
    # Ask the backend for the output spatial size given the conv parameters.
    status, out_size = _dlr.GetOutputSizeOfConvolution2d(in_size,
                                                         kernel_size,
                                                         stride,
                                                         padding,
                                                         rigor=rigor,
                                                         verbose=verbose)
    if not status: return None
    out_data = torch.empty([in_minibatch,out_channel,out_size,out_size], dtype=dtype)
    # Convolve each minibatch sample independently through the backend.
    for mb in range(in_minibatch):
        xout_data = out_data[mb]
        xin_data = input[mb]
        status = _dlr.Convolution2d(xout_data.data.numpy(),  # out_channel x out_size x out_size
                                    xin_data.data.numpy(),  # in_channel x in_size x in_size
                                    weight.data.numpy(),  # in_channel x out_channel x kernel_size x kernel_size
                                    bias.data.numpy() if bias is not None else None,
                                    stride,
                                    padding,
                                    rigor=rigor,
                                    verbose=verbose)
        if not status: return None
        # xout_data is a view into out_data[mb]; this write-back is presumably
        # redundant but harmless — TODO confirm backend writes in place.
        out_data[mb] = xout_data
    return out_data
#===============================================================================
def max_pool2d(input,  # in_minibatch x in_channel x in_size x in_size
               kernel_size,
               stride=1,
               padding=0,
               ceil_mode=False,
               rigor=False,
               verbose=False):
    """
    Corresponding torch.nn.functional.max_pool2d(input, kernel_size,
                                                 stride, padding, ceil_mode,
                                                 count_include_pad=True,
                                                 divisor_override=None)
    Returns output tensor on success
    Applies a 2D max pooling over an input data composed of several input
    channels by delegating each minibatch sample to _dlr.Pooling2dMax.
    Note that all nd-array lists are PyTorch tensor (immutable).
    :param input: input data, input[in_minibatch][in_channel][in_size][in_size]
    :param kernel_size: size of kernel
    :param stride: num of skips to apply next filter
    :param padding: num of pixes at the boundary
    :param ceil_mode: must be False (rejected when rigor is set)
    :param rigor: check values rigorously when 'True'
    :param verbose: output message more when 'True'
    :return: out_data on success, None on failure.
    """
    if rigor:
        # Accumulate violations and bail out with None if any check failed.
        error = 0
        if ceil_mode: error += 1  # not support
        if (input.dim()!=4): error += 1
        if (input.shape[2]!=input.shape[3]): error += 1  # not square
        if (kernel_size<=0): error += 1
        if (stride<=0) or (padding<0): error += 1
        if error!=0: return None
    dtype = input.dtype
    in_minibatch = input.shape[0]
    in_size = input.shape[3]
    # Pooling preserves the channel count.
    out_channel = input.shape[1]
    status, out_size = _dlr.GetOutputSizeOfPooling2dMax(in_size,
                                                        kernel_size,
                                                        stride,
                                                        padding,
                                                        ceil_mode,
                                                        rigor=rigor,
                                                        verbose=verbose)
    if not status: return None
    out_data = torch.empty([in_minibatch,out_channel,out_size,out_size], dtype=dtype)
    for mb in range(in_minibatch):
        xout_data = out_data[mb]
        xin_data = input[mb]
        status = _dlr.Pooling2dMax(xout_data.data.numpy(),  # out_channel x out_size x out_size
                                   xin_data.data.numpy(),  # in_channel x in_size x in_size
                                   kernel_size,
                                   stride,
                                   padding,
                                   ceil_mode,
                                   rigor=rigor,
                                   verbose=verbose)
        if not status: return None
        out_data[mb] = xout_data
    return out_data
#===============================================================================
def avg_pool2d(input,  # in_minibatch x in_channel x in_size x in_size
               kernel_size,
               stride=1,
               padding=0,
               ceil_mode=False,
               rigor=False,
               verbose=False):
    """
    Corresponding torch.nn.functional.avg_pool2d(input, kernel_size,
                                                 stride, padding, ceil_mode,
                                                 count_include_pad=True,
                                                 divisor_override=None)
    Returns output tensor on success
    Applies a 2D average pooling over an input data composed of several input
    channels by delegating each minibatch sample to _dlr.Pooling2dAvg.
    Note that all nd-array lists are PyTorch tensor (immutable).
    :param input: input data, input[in_minibatch][in_channel][in_size][in_size]
    :param kernel_size: size of kernel
    :param stride: num of skips to apply next filter
    :param padding: num of pixes at the boundary
    :param ceil_mode: must be False (rejected when rigor is set)
    :param rigor: check values rigorously when 'True'
    :param verbose: output message more when 'True'
    :return: out_data on success, None on failure.
    """
    if rigor:
        # Accumulate violations and bail out with None if any check failed.
        error = 0
        if ceil_mode: error += 1  # not support
        if (input.dim()!=4): error += 1
        if (input.shape[2]!=input.shape[3]): error += 1  # not square
        if (kernel_size<=0): error += 1
        if (stride<=0) or (padding<0): error += 1
        if error!=0: return None
    dtype = input.dtype
    in_minibatch = input.shape[0]
    in_size = input.shape[3]
    # Pooling preserves the channel count.
    out_channel = input.shape[1]
    status, out_size = _dlr.GetOutputSizeOfPooling2dAvg(in_size,
                                                        kernel_size,
                                                        stride,
                                                        padding,
                                                        ceil_mode,
                                                        rigor=rigor,
                                                        verbose=verbose)
    if not status: return None
    out_data = torch.empty([in_minibatch,out_channel,out_size,out_size], dtype=dtype)
    for mb in range(in_minibatch):
        xout_data = out_data[mb]
        xin_data = input[mb]
        status = _dlr.Pooling2dAvg(xout_data.data.numpy(),  # out_channel x out_size x out_size
                                   xin_data.data.numpy(),  # in_channel x in_size x in_size
                                   kernel_size,
                                   stride,
                                   padding,
                                   ceil_mode,
                                   rigor=rigor,
                                   verbose=verbose)
        if not status: return None
        out_data[mb] = xout_data
    return out_data
#===============================================================================
# example: def __init__(self):
# super().__init__()
# self.a1 = nn.Linear(4,4)
# self.a2 = nn.Linear(4,4)
# self.a3 = nn.Linear(9,1)
# example: def forward(self,x):
# o1 = self.a1(x)
# o2 = self.a2(x).transpose(1,2)
# output = torch.bmm(o1,o2)
# output = output.view(len(x),9)
# output = self.a3(output)
# return output
#
# Linear layer accept only 1D input ==> so linearNd() may be removed.
def linear(input,  # in_minibatch x in_size
           weight,  # out_size x in_size
           bias=None,  # out_size
           rigor=False,
           verbose=False):
    """
    Correspond torch.nn.functional.linear(input, weight, bias)
    Returns output tensor on success
    Dispatches on input rank: plain (minibatch, in_size) tensors take the
    1-D path, higher-rank tensors take the N-D path.
    Note that all nd-array lists are PyTorch tensor (immutable).
    :param input: input data, input[in_minibatch][1][in_size]
    :param weight: weight[out_size][in_size]
    :param bias: bias[out_size]
    :param rigor: check values rigorously when 'True'
    :param verbose: output message more when 'True'
    :return: out_data on success, None on failure.
    """
    handler = linear1d if input.dim() == 2 else linearNd
    return handler(input, weight, bias, rigor, verbose)
#===============================================================================
# Z = X * W' + B, where W' is transposed
def linear1d(input,  # in_minibatch x in_size
             weight,  # out_size x in_size
             bias=None,  # out_size
             rigor=False,
             verbose=False):
    """
    Correspond torch.nn.functional.linear(input, weight, bias)
    Returns output tensor on success
    Computes Z = X * W' + B (W transposed) per minibatch sample via _dlr.Linear1d.
    Note that all nd-array lists are PyTorch tensor (immutable).
    :param input: input data, input[in_minibatch][in_size]
    :param weight: weight[out_size][in_size]
    :param bias: bias[out_size]
    :param rigor: check values rigorously when 'True'
    :param verbose: output message more when 'True'
    :return: out_data on success, None on failure.
    """
    if rigor:
        # Accumulate violations and bail out with None if any check failed.
        error = 0
        if (input.dim()!=2): error += 1
        if (weight.dim()!=2): error += 1  # not 2D
        if (weight.shape[1]!=input.shape[1]): error += 1
        if (bias is not None) and (bias.dim()!=1): error += 1
        if (bias is not None) and (bias.shape[0]!=weight.shape[0]): error += 1
        if error!=0: return None
    dtype = input.dtype
    in_minibatch = input.shape[0]
    in_size = input.shape[1]
    out_size = weight.shape[0]
    out_data = torch.empty([in_minibatch,out_size], dtype=dtype)
    for mb in range(in_minibatch):
        xout_data = out_data[mb]
        xin_data = input[mb]
        status = _dlr.Linear1d(xout_data.data.numpy(),  # out_size
                               xin_data.data.numpy(),  # in_size
                               weight.data.numpy(),  # out_size x in_size
                               None if bias is None else bias.data.numpy(),  # out_size
                               rigor=rigor,
                               verbose=verbose)
        if not status: return None
        out_data[mb] = xout_data
    return out_data
#===============================================================================
# Z = X * W' + B, where W' is transposed
def linearNd(input,  # in_minibatch x in_size x ...
             weight,  # out_size x in_size
             bias=None,  # out_size
             rigor=False,
             verbose=False):
    """
    Correspond torch.nn.functional.linear(input, weight, bias)
    Returns output tensor on success
    Applies a N-D vector matrix multiplication over an input data
    (Z = X * W' + B, W transposed) per minibatch sample via _dlr.LinearNd.
    Note that all nd-array lists are PyTorch tensor (immutable).
    :param input: input data, input[in_minibatch][ndim][in_size]
    :param weight: weight[out_size][in_size]
    :param bias: bias[out_size]
    :param rigor: check values rigorously when 'True'
    :param verbose: output message more when 'True'
    :return: out_data on success, None on failure.
    """
    # Rank-2 inputs are plain (minibatch, in_size) — delegate to the 1-D path.
    if (input.dim()==2):
        return linear1d(input,
                        weight,
                        bias,
                        rigor,
                        verbose)
    if rigor:
        error = 0
        if (weight.dim()!=2): error += 1  # not 2D
        if (weight.shape[1]!=input.shape[2]): error += 1
        if (bias is not None) and (bias.dim()!=1): error += 1
        if (bias is not None) and (bias.shape[0]!=weight.shape[0]): error += 1
        if error!=0: return None
    in_minibatch = input.shape[0]
    # Bug fix: per-sample output is (ndim, out_size) — see the _dlr.LinearNd
    # argument comments and linear1d (out_size = weight.shape[0]). The original
    # allocated [in_minibatch, input.shape[0], weight.shape[1]], i.e. used the
    # minibatch size as the row count and in_size as out_size.
    out_data = torch.empty([in_minibatch,input.shape[1],weight.shape[0]], dtype=input.dtype)
    for mb in range(in_minibatch):
        xout_data = out_data[mb]
        xin_data = input[mb]
        status = _dlr.LinearNd(xout_data.data.numpy(),  # ndim x out_size
                               xin_data.data.numpy(),  # ndim x in_size
                               weight.data.numpy(),  # out_size x in_size
                               None if bias is None else bias.data.numpy(),  # out_size
                               rigor=rigor,
                               verbose=verbose)
        if not status: return None
        out_data[mb] = xout_data
    return out_data
#===============================================================================
def cat(tensors,
        dim=0,
        rigor=False,
        verbose=False):
    """
    Correspond torch.cat(tensors,dim,out=None) for tensor.dim is 3, i.e, (minibatch,rows,cols)

    Only the two-tensor case is supported; anything else returns None.
    :param tensors: sequence (list/tuple) of exactly two 3-D tensors
    :param dim: concatenation dimension forwarded to concat2d
    :param rigor: check values rigorously when 'True'
    :param verbose: output message more when 'True'
    :return: out_data on success, None on failure.
    """
    # Bug fix: `tensors` is a Python sequence (as torch.cat accepts), so it has
    # no .numel(); the original `tensors.numel()!=2` raised AttributeError.
    if len(tensors) != 2: return None
    return concat2d(tensors[0],
                    tensors[1],
                    dim,
                    rigor,
                    verbose)
#===============================================================================
def concat2d(inputA,  # minibatch x rowsA x colsA
             inputB,  # minibatch x rowsB x colsB
             dim=0,
             rigor=False,
             verbose=False):
    """
    Correspond torch.cat(tensors,dim,out=None) for tensor.dim is 3, i.e, (minibatch,rows,cols)
    Returns output tensor on success
    Applies two 2-dimentional concatenation per minibatch sample via _dlr.Concat2d.
    Note that all nd-array lists are PyTorch tensor (immutable).
    :param inputA: input data, input[rowsA][colsA]
    :param inputB: input data, input[rowsB][colsB]
    :param dim: dimension (0 or 1)
    :param rigor: check values rigorously when 'True'
    :param verbose: output message more when 'True'
    :return: out_data on success, None on failure.
    """
    if rigor:
        error = 0
        if (inputA.dim()!=3): error += 1
        if (inputB.dim()!=3): error += 1
        if (inputA.shape[0]!=inputB.shape[0]): error += 1  # minibatch
        if (dim!=0) and (dim!=1): error += 1
        if error!=0: return None
    # Output shape follows the backend's convention for each dim value.
    if dim==0:
        out_rows = inputA.shape[1]
        out_cols = inputA.shape[2]+inputB.shape[2]
    else:
        out_rows = inputA.shape[1]+inputB.shape[1]
        out_cols = inputA.shape[2]
    # Bug fix: was `input.dtype` — `input` is the builtin function here, not a
    # parameter; the dtype must come from an actual argument.
    dtype = inputA.dtype
    minibatch = inputA.shape[0]
    # Bug fix: `our_cols` was a typo for `out_cols` (NameError).
    out_data = torch.empty([minibatch,out_rows,out_cols], dtype=dtype)
    # Bug fix: loop bound was the undefined name `in_minibatch`.
    for mb in range(minibatch):
        xout_data = out_data[mb]
        xin_dataA = inputA[mb]
        xin_dataB = inputB[mb]
        status = _dlr.Concat2d(xout_data.data.numpy(),
                               xin_dataA.data.numpy(),
                               xin_dataB.data.numpy(),
                               dim,
                               rigor=rigor,
                               verbose=verbose)
        if not status: return None
        out_data[mb] = xout_data
    return out_data
#===============================================================================
def activations(func,
                input,
                negative_slope=0.01,
                rigor=False,
                verbose=False):
    """
    Bridge to a specific non-linear activation function
    Returns output tensor on success
    Applies activation function per minibatch sample via the backend routine
    named 'Activation' + func (e.g. 'ReLu' -> _dlr.ActivationReLu).
    Note that all nd-array lists are PyTorch tensor (immutable).
    :param func: activation function name; ReLu, LeakyReLu, Tanh, Sigmoid
    :param input: input data, input[minibatch][....] in any dimension
    :param negative_slope: slope for the negative half (LeakyReLu only)
    :param rigor: check values rigorously when 'True'
    :param verbose: output message more when 'True'
    :return: out_data on success, None on failure.
    """
    # Resolve the backend routine by name; raises AttributeError for unknown names.
    func_name = 'Activation'+func
    function = getattr(_dlr, func_name)
    minibatch = input.shape[0]
    dtype = input.dtype
    out_data = torch.empty(input.shape, dtype=dtype)
    for mb in range(minibatch):
        xout_data = out_data[mb]
        xin_data = input[mb]
        if func == 'LeakyReLu':
            # LeakyReLu is the only activation taking an extra parameter.
            status = function(xout_data.data.numpy(),
                              xin_data.data.numpy(),
                              negative_slope=negative_slope,
                              rigor=rigor,
                              verbose=verbose)
        else:
            # status = _dlr.__getattribute__(func_name)( xout_data.data.numpy()
            status = function(xout_data.data.numpy(),
                              xin_data.data.numpy(),
                              rigor=rigor,
                              verbose=verbose)
        if not status: return None
        out_data[mb] = xout_data
    return out_data
def relu(input, rigor=False, verbose=False):
    """
    Correspond torch.nn.functional.relu(input, inplace=False)

    Bug fix: rigor/verbose were previously passed positionally, landing in
    activations()'s negative_slope/rigor slots; pass them by keyword instead.
    :return: out_data on success, None on failure.
    """
    return activations('ReLu',
                       input,
                       rigor=rigor,
                       verbose=verbose)
def leaky_relu(input, negative_slope=0.01, rigor=False, verbose=False):
    """Correspond torch.nn.functional.leaky_relu(input, negative_slope)."""
    # Forward by keyword so each value lands in its intended slot.
    return activations('LeakyReLu', input,
                       negative_slope=negative_slope,
                       rigor=rigor,
                       verbose=verbose)
def tanh(input, rigor=False, verbose=False):
    """
    Correspond torch.tanh(input)

    Bug fix: rigor/verbose were previously passed positionally, landing in
    activations()'s negative_slope/rigor slots; pass them by keyword instead.
    :return: out_data on success, None on failure.
    """
    return activations('Tanh',
                       input,
                       rigor=rigor,
                       verbose=verbose)
def sigmoid(input, rigor=False, verbose=False):
    """
    Correspond torch.sigmoid(input)

    Bug fix: rigor/verbose were previously passed positionally, landing in
    activations()'s negative_slope/rigor slots; pass them by keyword instead.
    :return: out_data on success, None on failure.
    """
    return activations('Sigmoid',
                       input,
                       rigor=rigor,
                       verbose=verbose)
#===============================================================================
def batch_norm(input,  # in_minibatch x in_channel x <...>
               running_mean,
               running_var,
               weight=None,
               bias=None,
               eps=1E-5,
               rigor=False,
               verbose=False):
    """
    Correspond torch.nn.functional.batch_norm(input, running_mean, running_var,
                                              weight, bias,
                                              training=False, momentum=0.1, eps)
    Dispatches to the 1d/2d/3d implementation based on input rank
    (3, 4 or 5 dims respectively); anything else fails with None.
    :return: out_data on success, None on failure.
    """
    handlers = {3: batch_norm1d, 4: batch_norm2d, 5: batch_norm3d}
    handler = handlers.get(input.dim())
    if handler is None:
        if verbose: _dlr.DlrError(f"batch_norm for more than 3D not supported")
        return None
    return handler(input, running_mean, running_var,
                   weight, bias, eps, rigor, verbose)
#===============================================================================
def batch_norm1d(input,  # in_minibatch x 1 x in_size
                 running_mean,  # 1 x in_size (not no minibatch)
                 running_var,  # 1 x in_size (not no minibatch)
                 weight=None,  # 1 x in_size
                 bias=None,  # 1 x in_size
                 eps=1E-5,
                 rigor=False,
                 verbose=False):
    """
    Correspond torch.nn.functional.batch_norm(input, running_mean, running_var,
                                              weight, bias,
                                              training=False, momentum=0.1, eps)
    Returns output tensor on success
    Applies a batch normalization over an input data per minibatch sample via
    _dlr.Norm1dBatch (inference-mode only: uses the supplied running stats).
    Note that all nd-array lists are PyTorch tensor (immutable).
    :param input: input data, input[in_minibatch][in_channel][in_size]
    :param running_mean: running_mean[in_channel]
    :param running_var: running_var[in_channel]
    :param weight: None or weight[in_channel]
    :param bias: None or bias[in_channel]
    :param rigor: check values rigorously when 'True'
    :param verbose: output message more when 'True'
    :return: out_data on success, None on failure.
    """
    if rigor:
        error = 0
        if (input.dim()!=3) and (input.dim()!=2): error += 1
        if (input.dim()==3):
            in_channel = input.shape[1]
            # NOTE(review): this check is inside the dim()==3 branch and can
            # never fire — presumably leftover; confirm before removing.
            if (input.dim()!=3): error += 1
            if (running_mean.dim()!=1): error += 1  # mind channel
            if (running_var.dim()!=1): error += 1  # mind channel
            if (running_mean.numel()!=in_channel): error += 1
            if (running_var.numel()!=in_channel): error += 1
            if (weight is not None) and (weight.dim()!=1): error += 1
            if (weight is not None) and (weight.numel()!=in_channel): error += 1
            if (bias is not None) and (bias.dim()!=1): error += 1
            if (bias is not None) and (bias.numel()!=in_channel): error += 1
        else: error += 1; _dlr.DlrError("only supported for data with channel")
        if error!=0: return None
    dtype = input.dtype
    in_minibatch = input.shape[0]
    out_data = torch.empty(input.shape, dtype=dtype)
    for mb in range(in_minibatch):
        xout_data = out_data[mb]
        xin_data = input[mb]
        status = _dlr.Norm1dBatch(xout_data.data.numpy(),  # ndim x out_size
                                  xin_data.data.numpy(),  # ndim x in_size
                                  running_mean.data.numpy(),  # out_size x in_size
                                  running_var.data.numpy(),  # out_size x in_size
                                  None if weight is None else weight.data.numpy(),  # out_size
                                  None if bias is None else bias.data.numpy(),  # out_size
                                  eps,
                                  rigor=rigor,
                                  verbose=verbose)
        if not status: return None
        out_data[mb] = xout_data
    return out_data
#===============================================================================
def batch_norm2d(input,  # in_minibatch x in_channel x in_size x in_size
                 running_mean,  # in_channel
                 running_var,  # in_channel
                 weight=None,  # in_channel x in_size x in_size
                 bias=None,  # in_channel x in_size x in_size
                 eps=1E-5,
                 rigor=False,
                 verbose=False):
    """
    Correspond torch.nn.functional.batch_norm(input, running_mean, running_var,
                                              weight, bias,
                                              training=False, momentum=0.1, eps)
    Returns output tensor on success
    Applies a batch normalization over an input data per minibatch sample via
    _dlr.Norm2dBatch (inference-mode only: uses the supplied running stats).
    Note that all nd-array lists are PyTorch tensor (immutable).
    :param input: input data, input[in_minibatch][in_channel][in_size][in_size]
    :param running_mean: running_mean[in_channel]
    :param running_var: running_var[in_channel]
    :param weight: None or weight[in_channel]
    :param bias: None or bias[in_channel]
    :param rigor: check values rigorously when 'True'
    :param verbose: output message more when 'True'
    :return: out_data on success, None on failure.
    """
    if rigor:
        error = 0
        if (input.dim()!=4): error += 1
        in_channel = input.shape[1]
        if (running_mean.dim()!=1): error += 1  # mind channel
        if (running_var.dim()!=1): error += 1  # mind channel
        if (running_mean.numel()!=in_channel): error += 1  # not 2D
        if (running_var.numel()!=in_channel): error += 1  # not 2D
        if (weight is not None) and (weight.dim()!=1): error += 1
        if (weight is not None) and (weight.numel()!=in_channel): error += 1
        if (bias is not None) and (bias.dim()!=1): error += 1
        if (bias is not None) and (bias.numel()!=in_channel): error += 1
        if error!=0: return None
    dtype = input.dtype
    in_minibatch = input.shape[0]
    out_data = torch.empty(input.shape, dtype=dtype)
    for mb in range(in_minibatch):
        xout_data = out_data[mb]
        xin_data = input[mb]
        status = _dlr.Norm2dBatch(xout_data.data.numpy(),  # ndim x out_size
                                  xin_data.data.numpy(),  # ndim x in_size
                                  running_mean.data.numpy(),  # out_size x in_size
                                  running_var.data.numpy(),  # out_size x in_size
                                  None if weight is None else weight.data.numpy(),  # out_size
                                  None if bias is None else bias.data.numpy(),  # out_size
                                  eps,
                                  rigor=rigor,
                                  verbose=verbose)
        if not status: return None
        out_data[mb] = xout_data
    return out_data
#===============================================================================
# not fully tested
def batch_norm3d(input, running_mean, running_var, weight=None, bias=None,
                 eps=1E-5, rigor=False, verbose=False):
    """
    Inference-mode 3D batch normalization, matching
    torch.nn.functional.batch_norm(input, running_mean, running_var, weight,
    bias, training=False, momentum=0.1, eps).
    Note that all nd-array lists are PyTorch tensors (immutable).
    :param input: 5D input, input[in_minibatch][in_channel][in_depth][in_height][in_width]
    :param running_mean: per-channel mean, running_mean[in_channel]
    :param running_var: per-channel variance, running_var[in_channel]
    :param weight: None or per-channel scale, weight[in_channel]
    :param bias: None or per-channel shift, bias[in_channel]
    :param eps: small constant added to the variance for numerical stability
    :param rigor: check values rigorously when 'True'
    :param verbose: output message more when 'True'
    :return: normalized tensor on success, None on failure.
    """
    if rigor:
        # Validate shapes up-front; any single violation aborts with None.
        faults = 0
        if input.dim() != 5:
            faults += 1
        num_channels = input.shape[1]
        _ = (input.shape[2], input.shape[3], input.shape[4])  # depth/height/width probe kept from original
        for stat in (running_mean, running_var):
            if stat.dim() != 1:
                faults += 1
            if stat.numel() != num_channels:
                faults += 1
        for optional in (weight, bias):
            if optional is not None:
                if optional.dim() != 1:
                    faults += 1
                if optional.numel() != num_channels:
                    faults += 1
        if faults:
            return None
    result = torch.empty(input.shape, dtype=input.dtype)
    # Normalize one minibatch entry at a time; the DLR kernel fills the
    # NumPy view of the output slice in place.
    for index in range(input.shape[0]):
        slot = result[index]
        okay = _dlr.Norm3dBatch( slot.data.numpy()
                               , input[index].data.numpy()
                               , running_mean.data.numpy()
                               , running_var.data.numpy()
                               , None if weight is None else weight.data.numpy()
                               , None if bias is None else bias.data.numpy()
                               , eps
                               , rigor=rigor
                               , verbose=verbose)
        if not okay:
            return None
    return result
#===============================================================================
def conv_transpose2d( input # in_minibatch x in_channel x in_size x in_size
                    , weight # in_channel x out_channel x kernel_size x kernel_size
                    , bias=None # out_channel
                    , stride=1
                    , padding=0
                    , output_padding=0
                    , groups=1
                    , dilation=1
                    , rigor=False
                    , verbose=False):
    """
    Corresponds to torch.nn.functional.conv_transpose2d(input, weight, bias,
    stride, padding, output_padding, groups, dilation).
    Applies a 2D transposed convolution (deconvolution) over an input tensor.
    Note that all nd-array lists are PyTorch tensors (immutable).
    :param input: input data, input[in_minibatch][in_channel][in_size][in_size]
    :param weight: kernel, weight[in_channel][out_channel][kernel_size][kernel_size]
    :param bias: None or bias[out_channel]
    :param stride: num of skips to apply next filter
    :param padding: num of pixels at the boundary
    :param output_padding: must be 0 (not supported by the DLR backend)
    :param groups: must be 1 (not supported by the DLR backend)
    :param dilation: must be 1 (not supported by the DLR backend)
    :param rigor: check values rigorously when 'True'
    :param verbose: output message more when 'True'
    :return: out_data on success, None on failure.
    """
    if rigor:
        error = 0
        if (dilation!=1): error += 1 # not supported
        if (groups!=1): error += 1 # not supported
        if (output_padding!=0): error += 1 # not supported
        if (input.dim()!=4): error += 1
        if (input.shape[2]!=input.shape[3]): error += 1 # not square
        if (weight.dim()!=4): error += 1
        if (weight.shape[2]!=weight.shape[3]): error += 1 # not square
        if (bias is not None) and (bias.dim()!=1): error += 1
        if (input.shape[1]!=weight.shape[0]): error += 1 # in_channel mismatch
        if (bias is not None) and (bias.shape[0]!=weight.shape[1]): error += 1 # out_channel mismatch
        if (stride<=0) or (padding<0): error += 1
        if error!=0: return None
    dtype = input.dtype
    in_minibatch = input.shape[0]
    in_size = input.shape[3]
    kernel_size = weight.shape[3]
    out_channel = weight.shape[1]
    status, out_size = _dlr.GetOutputSizeOfDeconvolution2d( in_size=in_size
                                                          , kernel_size=kernel_size
                                                          , stride=stride
                                                          , padding=padding
                                                          , output_padding=0
                                                          , dilation=1
                                                          , rigor=rigor
                                                          , verbose=verbose)
    if not status: return None
    out_data = torch.empty([in_minibatch,out_channel,out_size,out_size], dtype=dtype)
    for mb in range(in_minibatch):
        xout_data = out_data[mb]
        xin_data = input[mb]
        # Bug fix: guard bias=None before calling .data.numpy().  The
        # signature default and the rigor checks both allow a missing bias,
        # and the sibling batch_norm wrappers already pass None through.
        status = _dlr.Deconvolution2d( xout_data.data.numpy() # out_channel x out_size x out_size
                                     , xin_data.data.numpy() # in_channel x in_size x in_size
                                     , weight.data.numpy() # in_channel x out_channel x kernel_size x kernel_size
                                     , None if bias is None else bias.data.numpy() # out_channel
                                     , stride
                                     , padding
                                     , rigor=rigor
                                     , verbose=verbose)
        if not status: return None
        out_data[mb] = xout_data
    return out_data
#===============================================================================
if __name__=='__main__':
    # Placeholder self-test entries: these layers have no standalone test yet,
    # so each stub simply reports failure when selected from the CLI driver.
    def TestLinear1d (dtype,limit,random,rigor,verbose): return False
    def TestConcat2d (dtype,limit,random,rigor,verbose): return False
    def TestNorm1dBatch (dtype,limit,random,rigor,verbose): return False
#===============================================================================
if __name__=='__main__':
    # Placeholder: the LinearNd self-test is not implemented yet; always fails.
    def TestLinearNd (dtype,limit,random,rigor,verbose):
        return False
#===============================================================================
if __name__=='__main__':
def TestPooling2dMax(dtype,limit,random,rigor,verbose):
TestPooling2d(func='max'
,dtype=dtype
,limit=limit
,random=random
,rigor=rigor
,verbose=verbose)
def TestPooling2dAvg(dtype,limit,random,rigor,verbose):
TestPooling2d(func='avg'
,dtype=dtype
,limit=limit
,random=random
,rigor=rigor
,verbose=verbose)
def TestPooling2d(func='max'
,dtype=torch.float32
,limit=1.0E-3 # error limit
,random=False
,rigor=False
,verbose=False):
configs = [
[1, 2, 4,2,1,0,0] #minibatch[0],in_channel[1],in_size[2],kernel_size[3],stride[4],padding[5],ceil[6]
,[1,16,416,2,1,0,0] #minibatch[0],in_channel[1],in_size[2],kernel_size[3],stride[4],padding[5],ceil[6]
,[1,16,416,2,1,1,0] #minibatch[0],in_channel[1],in_size[2],kernel_size[3],stride[4],padding[5],ceil[6]
,[1,256,26,2,2,0,0]
,[1,256,26,2,2,1,0]
,[1,512,13,2,1,0,0]
,[1,512,12,6,1,0,0]
,[1,512,12,6,2,1,0]
,[1,512,12,6,3,2,0]
]
errors = torch.zeros(len(configs))
for idx in range(len(configs)):
minibatch = configs[idx][0]
in_channel = configs[idx][1]
in_size = configs[idx][2]
kernel_size = configs[idx][3] # make it even
stride = configs[idx][4]
padding = configs[idx][5]
ceil_mode = False if configs[idx][6] == 0 else True
data = torch.zeros(size=[minibatch,in_channel,in_size,in_size])
in_data = GenDataPooling2d(data, rigor=rigor, verbose=verbose)
sys.stdout.flush()
if func is 'max':
out_data = F.max_pool2d( input=in_data
, kernel_size=kernel_size
, stride=stride
, padding=padding
, ceil_mode=ceil_mode)
nout_data = max_pool2d ( input=in_data
, kernel_size=kernel_size
, stride=stride
, padding=padding
, ceil_mode=ceil_mode
, rigor=rigor
, verbose=verbose)
elif func is 'avg':
out_data = F.avg_pool2d( input=in_data
, kernel_size=kernel_size
, stride=stride
, padding=padding
, ceil_mode=ceil_mode)
nout_data = avg_pool2d ( input=in_data
, kernel_size=kernel_size
, stride=stride
, padding=padding
, ceil_mode=ceil_mode
, rigor=rigor
, verbose=verbose)
else:
return False
diff = []
status = False
if (out_data is not None) and (nout_data is not None):
diff = torch.lt(torch.abs(torch.add(out_data, -nout_data)), limit)
status = torch.all(diff)
if not status:
diff_max = torch.max(torch.abs(torch.add(out_data, -nout_data)))
_dlr.DlrWarn(f"diff max: {diff_max}")
ok = 0; error = 0
if status:
ok += 1
_dlr.DlrInfo(f"OK {out_data.shape}")
if verbose:
_dlr.DlrInfo(f"in_data\n{in_data}")
_dlr.DlrInfo(f"kernel_size\n{kernel_size}")
_dlr.DlrInfo(f"out_data\n{out_data}")
_dlr.DlrInfo(f"nout_data\n{nout_data}")
else:
error += 1
_dlr.DlrError(f"Mis-match {torch.sum(diff==False)} of tensor {out_data.shape}")
if verbose:
_dlr.DlrInfo(f"in_data\n{in_data}")
_dlr.DlrInfo(f"kernel_size\n{kernel_size}")
_dlr.DlrInfo(f"out_data\n{out_data}")
_dlr.DlrInfo(f"nout_data\n{nout_data}")
errors[idx] = error
return True if torch.sum(errors)==0.0 else False
def GenDataPooling2d(data, rigor=False, verbose=False):
if (data.dim()==4): # minibatch x channel x size x size
in_minibatch = data.shape[0]
in_channel = data.shape[1]
in_size = data.shape[2]
else: return None
error = 0
in_data = (100+100)*torch.rand(size=data.shape) - 100
return in_data
#===============================================================================
if __name__=='__main__':
    def TestConvolution2d(dtype=torch.float32
                         ,limit=1.0E-3 # error limit
                         ,random=False
                         ,rigor=False
                         ,verbose=False):
        """
        Compare the DLR conv2d implementation against PyTorch F.conv2d over a
        fixed set of layer configurations.
        :param dtype: accepted for a uniform Test* signature; data is float
        :param limit: per-element absolute error tolerance
        :param random: accepted for a uniform Test* signature; unused here
        :param rigor: check values rigorously when 'True'
        :param verbose: output message more when 'True'
        :return: True when every configuration matches within 'limit'.
        """
        # Each row: minibatch, in_channel, in_size, out_channel, kernel_size, stride, padding
        configs = [
            [1, 3,416,16,5,1,0]
           ,[1, 3,416,16,5,2,0]
           ,[1, 3,416,16,5,3,0]
           ,[1, 3,416,16,5,1,1]
           ,[1, 3,416,16,5,2,2]
           ,[1, 3,416,16,5,3,1]
           ,[1, 8,416,64,5,1,0]
           ,[1, 8,416,64,5,1,1]
           ,[1, 8,416,64,5,1,2]
        ]
        errors = torch.zeros(len(configs))
        for idx in range(len(configs)):
            minibatch = configs[idx][0]
            in_channel = configs[idx][1]
            in_size = configs[idx][2]
            out_channel = configs[idx][3]
            kernel_size = configs[idx][4]
            stride = configs[idx][5]
            padding = configs[idx][6]
            data = torch.zeros(size=[minibatch,in_channel,in_size,in_size])
            kernel = torch.zeros(size=[out_channel,in_channel,kernel_size,kernel_size])
            bias = torch.zeros(size=[out_channel])
            in_data,in_kernel,in_bias = GenDataConv2d(data, kernel, bias, rigor=rigor, verbose=verbose)
            sys.stdout.flush()
            # Reference result from PyTorch.
            out_data = F.conv2d( input=in_data
                               , weight=in_kernel
                               , bias=in_bias
                               , stride=stride
                               , padding=padding
                               , groups=1
                               , dilation=1)
            sys.stdout.flush()
            # Result from the DLR implementation under test.
            nout_data = conv2d ( input=in_data
                               , weight=in_kernel
                               , bias=in_bias
                               , stride=stride
                               , padding=padding
                               , rigor=rigor
                               , verbose=verbose)
            sys.stdout.flush()
            diff = []
            status = False
            if (out_data is not None) and (nout_data is not None):
                diff = torch.lt(torch.abs(torch.add(out_data, -nout_data)), limit)
                status = torch.all(diff)
                if not status:
                    diff_max = torch.max(torch.abs(torch.add(out_data, -nout_data)))
                    _dlr.DlrWarn(f"diff max: {diff_max}")
            ok = 0; error = 0
            if status:
                ok += 1
                _dlr.DlrInfo(f"OK {out_data.shape}")
                if verbose:
                    _dlr.DlrInfo(f"in_data\n{in_data}")
                    _dlr.DlrInfo(f"in_kernel\n{in_kernel}")
                    _dlr.DlrInfo(f"out_data\n{out_data}")
                    _dlr.DlrInfo(f"nout_data\n{nout_data}")
            else:
                error += 1
                _dlr.DlrError(f"Mis-match {torch.sum(diff==False)} of tensor {out_data.shape}")
                if verbose:
                    _dlr.DlrInfo(f"in_data\n{in_data}")
                    _dlr.DlrInfo(f"in_kernel\n{in_kernel}")
                    _dlr.DlrInfo(f"out_data\n{out_data}")
                    _dlr.DlrInfo(f"nout_data\n{nout_data}")
            errors[idx] = error
        return True if torch.sum(errors)==0.0 else False
def GenDataConv2d(data, kernel, bias, rigor=False, verbose=False):
if (data.dim()==4): # minibatch x channel x size x size
in_minibatch = data.shape[0]
in_channel = data.shape[1]
in_size = data.shape[2]
else: return None
out_channel = kernel.shape[1]
kerne_size = kernel.shape[2]
bias_size = bias.shape[0]
error = 0
if (in_channel!=kernel.shape[0]): error += 1
if (bias is not None) and (out_channel!=bias_size): error += 1
in_data = (100+100)*torch.rand(size=data.shape) - 100
in_kernel = ( 10+ 10)*torch.rand(size=kernel.shape) - 10
if (bias is not None): in_bias = 5+5*torch.rand(size=bias.shape) - 5
else: in_bias = None
return in_data, in_kernel, in_bias
#===============================================================================
if __name__=='__main__':
    def TestDeconvolution2d(dtype=torch.float32
                           ,limit=1.0E-3 # error limit
                           ,random=False
                           ,rigor=False
                           ,verbose=False):
        """
        Compare the DLR conv_transpose2d implementation against PyTorch
        F.conv_transpose2d for one fixed configuration.
        :param dtype: accepted for a uniform Test* signature; data is float
        :param limit: per-element absolute error tolerance
        :param random: accepted for a uniform Test* signature; unused here
        :param rigor: check values rigorously when 'True'
        :param verbose: output message more when 'True'
        :return: True on match within 'limit', False otherwise.
        """
        minibatch = 1
        in_channel = 16
        in_size = 416
        out_channel = 32
        kernel_size = 5
        data = torch.zeros(size=[minibatch,in_channel,in_size,in_size])
        # conv_transpose2d weight layout is [in_channel][out_channel][k][k].
        kernel = torch.zeros(size=[in_channel,out_channel,kernel_size,kernel_size])
        bias = torch.zeros(size=[out_channel])
        in_data,in_kernel,in_bias = GenDataDeconv2d(data, kernel, bias, rigor=rigor, verbose=verbose)
        stride=1
        padding=0
        sys.stdout.flush()
        # Reference result from PyTorch.
        out_data = F.conv_transpose2d( input=in_data
                                     , weight=in_kernel
                                     , bias=in_bias
                                     , stride=stride
                                     , padding=padding
                                     , output_padding=0
                                     , groups=1
                                     , dilation=1)
        sys.stdout.flush()
        # Result from the DLR implementation under test.
        nout_data = conv_transpose2d ( input=in_data
                                     , weight=in_kernel
                                     , bias=in_bias
                                     , stride=stride
                                     , padding=padding
                                     , rigor=rigor
                                     , verbose=verbose)
        sys.stdout.flush()
        status = False
        if (out_data is not None) and (nout_data is not None):
            status = torch.all(torch.lt(torch.abs(torch.add(out_data, -nout_data)), limit))
            if not status:
                diff_max = torch.max(torch.abs(torch.add(out_data, -nout_data)))
                _dlr.DlrWarn(f"diff max: {diff_max}")
        ok = 0; error = 0
        if status:
            ok += 1
            _dlr.DlrInfo(f"OK {out_data.shape}")
            if verbose:
                _dlr.DlrInfo(f"in_data\n{in_data}")
                _dlr.DlrInfo(f"in_kernel\n{in_kernel}")
                _dlr.DlrInfo(f"out_data\n{out_data}")
                _dlr.DlrInfo(f"nout_data\n{nout_data}")
        else:
            error += 1
            _dlr.DlrError(f"Mis-match {out_data.shape}")
            if verbose:
                _dlr.DlrInfo(f"in_data\n{in_data}")
                _dlr.DlrInfo(f"in_kernel\n{in_kernel}")
                _dlr.DlrInfo(f"out_data\n{out_data}")
                _dlr.DlrInfo(f"nout_data\n{nout_data}")
        return True if error==0 else False
        return False # NOTE(review): unreachable -- the return above always fires; retained as-is
def GenDataDeconv2d(data, kernel, bias, rigor=False, verbose=False):
if (data.dim()==4): # minibatch x channel x size x size
in_minibatch = data.shape[0]
in_channel = data.shape[1]
in_size = data.shape[2]
else: return None
out_channel = kernel.shape[1]
kerne_size = kernel.shape[2]
bias_size = bias.shape[0]
error = 0
if (in_channel!=kernel.shape[0]): error += 1
if (bias is not None) and (out_channel!=bias_size): error += 1
in_data = (100+100)*torch.rand(size=data.shape) - 100
in_kernel = ( 10+ 10)*torch.rand(size=kernel.shape) - 10
if (bias is not None): in_bias = 5+5*torch.rand(size=bias.shape) - 5
else: in_bias = None
return in_data, in_kernel, in_bias
#===============================================================================
if __name__=='__main__':
    def TestNormBatch(dtype=torch.float32
                     ,limit=1.E-3 # error limit
                     ,random=False
                     ,rigor=False
                     ,verbose=False):
        """
        Compare the DLR batch_norm implementation against PyTorch F.batch_norm
        for one hard-coded dimensionality (dim=1 below).
        :param dtype: accepted for a uniform Test* signature; data is float
        :param limit: per-element absolute error tolerance
        :param random: accepted for a uniform Test* signature; unused here
        :param rigor: check values rigorously when 'True'
        :param verbose: output message more when 'True'
        :return: True on match within 'limit', False otherwise.
        """
        # Select which dimensionality to exercise.  NOTE(review): if dim were
        # ever set outside 1..3, raw_data below would be undefined (NameError).
        dim = 1
        if dim==1: # to test 1D batch_norm
            minibatches = 3
            channels = 2
            sizes = 4
            ndims = [minibatches, channels, sizes]
            raw_data, raw_mean, raw_var, raw_std = GenDataNorm(ndims, plot=False, rigor=rigor, verbose=verbose)
        elif dim==2: # to test 2D batch_norm
            minibatches = 2
            channels = 2
            rows = 4
            cols = 3
            ndims = [minibatches, channels, rows, cols]
            raw_data, raw_mean, raw_var, raw_std = GenDataNorm(ndims, plot=False, rigor=rigor, verbose=verbose)
        elif dim==3: # to test 3D batch_norm
            # not fully tested
            minibatches = 1
            channels = 1
            depths = 1
            rows = 4
            cols = 3
            ndims = [minibatches, channels, depths, rows, cols]
            raw_data, raw_mean, raw_var, raw_std = GenDataNorm(ndims, plot=False, rigor=rigor, verbose=verbose)
        #std_data = (raw_data - raw_mean)/raw_std
        # Reference result from PyTorch (inference mode).
        out_data = F.batch_norm( input=raw_data
                               , running_mean=raw_mean
                               , running_var=raw_var
                               , weight=None
                               , bias=None
                               , training=False
                               , momentum=1.0
                               , eps=1E-5)
        # Result from the DLR implementation under test.
        nout_data = batch_norm ( input=raw_data
                               , running_mean=raw_mean
                               , running_var=raw_var
                               , weight=None
                               , bias=None
                               , eps=1E-5
                               , rigor=rigor
                               , verbose=verbose)
        status = False
        if (out_data is not None) and (nout_data is not None):
            status = torch.all(torch.lt(torch.abs(torch.add(out_data, -nout_data)), limit))
            if not status:
                diff_max = torch.max(torch.abs(torch.add(out_data, -nout_data)))
                _dlr.DlrWarn(f"diff max: {diff_max}")
        ok = 0; error = 0
        if status:
            ok += 1
            _dlr.DlrInfo(f"OK {out_data.shape}")
            if verbose:
                _dlr.DlrInfo(f"raw_data\n{raw_data}")
                _dlr.DlrInfo(f"out_data\n{out_data}")
                _dlr.DlrInfo(f"nout_data\n{nout_data}")
        else:
            error += 1
            _dlr.DlrError(f"Mis-match {out_data.shape}")
            if verbose:
                _dlr.DlrInfo(f"raw_data\n{raw_data}")
                _dlr.DlrInfo(f"out_data\n{out_data}")
                _dlr.DlrInfo(f"nout_data\n{nout_data}")
        return True if error==0 else False
#---------------------------------------------------------------------------
    def GenDataNorm(ndims, plot=False, rigor=False, verbose=False):
        """
        Generate random data plus per-channel mean/var/std for the batch-norm
        self-test.
        :param ndims: shape list; length 3 (mb x ch x size), 4 (mb x ch x h x w)
                      or 5 (mb x ch x d x h x w).  NOTE(review): any other
                      length leaves 'channels' undefined (NameError).
        :param plot: when True, draw histograms.  NOTE(review): this path
                     references std_data, whose defining line is commented out
                     below, so plot=True currently raises NameError -- confirm
                     before enabling.
        :param rigor: accepted for signature uniformity; unused
        :param verbose: accepted for signature uniformity; unused
        :return: (raw_data, raw_mean, raw_var, raw_std)
        """
        import numpy as np
        import matplotlib.pyplot as plt
        if (len(ndims)==3): # minibatch x channel x size
            minibatches = ndims[0]
            channels = ndims[1]
            sizes = ndims[2]
        elif (len(ndims)==4): # minibatch x channel x size x size
            minibatches = ndims[0]
            channels = ndims[1]
            sizes = ndims[2]
        elif (len(ndims)==5): # minibatch x channel x depth x size x size
            minibatches = ndims[0]
            channels = ndims[1]
            depth = ndims[2]
            sizes = ndims[3]
        raw_data = (100+100)*torch.rand(size=ndims)-100
        raw_mean= torch.zeros(size=[channels]) # mean value
        raw_var = torch.zeros(size=[channels]) # variance
        raw_std = torch.zeros(size=[channels]) # standard-deviation
        # mean/var/std should be one per channel regardless of minibatch.
        # NOTE(review): the statistics are taken over raw_data[0] only, and the
        # axis tuples below repeat the last axis for small ranks (e.g. (1,-1)
        # on a 2D slice) -- verify against the torch version in use.
        if len(ndims)==5: # should take care of depth
            # not fully tested
            raw_mean = torch.mean(input=raw_data[0], axis=(2,-1)) # mean value
            raw_var = torch.var (input=raw_data[0], axis=(2,-1)) # variance
            raw_std = torch.std (input=raw_data[0], axis=(2,-1)) # standard-deviation=sqrt(var)
        else: # minibatch x channel x size
            raw_mean = torch.mean(input=raw_data[0], axis=(1,-1)) # mean value
            raw_var = torch.var (input=raw_data[0], axis=(1,-1)) # variance
            raw_std = torch.std (input=raw_data[0], axis=(1,-1)) # standard-deviation=sqrt(var)
        #std_data = (raw_data - raw_mean)/raw_std
        #_dlr.DlrInfo(f"std_data ={std_data}")
        if plot:
            if True:
                plt.subplot(1, 2, 1)
                plt.hist(raw_data, bins=50)
                plt.subplot(1, 2, 2)
                plt.hist(std_data, bins=50)
                plt.show()
            else:
                bins = 50
                raw_hist, raw_bin = np.histogram(raw_data, bins=bins)
                std_hist, std_bin = np.histogram(std_data, bins=bins)
                num = int(raw_data.numel()/2)
                x = torch.linspace(start=-num, end=num, steps=num*2)
                y = torch.flatten(raw_data)
                plt.subplot(3, 1, 1)
                plt.plot(x, y)
                a = np.linspace(min(raw_bin), max(raw_bin), bins)
                b = raw_hist
                plt.subplot(3, 1, 2)
                plt.plot(a, b)
                n = np.linspace(min(std_bin), max(std_bin), bins)
                m = std_hist
                plt.subplot(3, 1, 3)
                plt.plot(n, m)
                plt.show()
        return raw_data, raw_mean, raw_var, raw_std
#===============================================================================
if __name__=='__main__':
def TestActivations(func='ReLu' # DLR function name
,tfunc='relu' # PyTorch Functional function name
,negative_slope=0.01
,dtype=torch.int32
,limit=1E-3 # error limit
,random=False
,rigor=False
,verbose=False):
"""
dtype: specify data type of data one of {torch.int32, torch.float32, torch.float64}
"""
func_name = tfunc
if func=='Sigmoid' or func=='Tanh': function = getattr(torch, tfunc)
else: function = getattr(F, tfunc)
if random:
minibatch = (torch.randint(low=1, high=3, size=[1], dtype=torch.int)).data.numpy()
d = (torch.randint(low=1, high=10, size=[1], dtype=torch.int)).data.numpy()
dims = (torch.randint(low=1, high=10, size=tuple(d), dtype=torch.int)).data.numpy()
else:
minibatch = [1, 2, 3]
dims = [1, 2, 3] # 1=1-dimension, 2=2-dimension
ok = 0; error = 0
for dim in dims:
ndim = (torch.randint(low=1, high=10, size=[dim], dtype=torch.int)).data.numpy() # [x] or [x, y] or [x, y, z]
in_data = (100+100)*torch.rand(size=tuple(ndim))-100
if dtype is torch.int32:
in_data = in_data.type(torch.int32)
if func == 'LeakyReLu': # dealing with "not implemented for 'Int'"
if dtype is torch.int32:
in_data = in_data.type(torch.float32)
out_data = function(input=in_data, negative_slope=negative_slope)
nout_data = globals()[func_name]( in_data
, rigor=rigor
, verbose=verbose)
if dtype is torch.int32:
in_data = in_data.type(torch.int32)
nout_data = nout_data.type(torch.int32)
else:
if (func!='ReLu') and (dtype==torch.int32):
in_data = in_data.type(torch.float32)
out_data = function(in_data)
nout_data = globals()[func_name]( in_data
, rigor=rigor
, verbose=verbose)
if (func!='ReLu') and (dtype==torch.int32):
in_data = in_data.type(torch.int32)
out_data = out_data.type(torch.int32)
nout_data = nout_data.type(torch.int32)
if dtype is torch.int32:
out_data = out_data.type(torch.int32)
nout_data = nout_data.type(torch.int32)
status = False
if (out_data is not None) and (nout_data is not None):
status = torch.all(torch.lt(torch.abs(torch.add(out_data, -nout_data)), limit))
if not status:
diff_max = torch.max(torch.abs(torch.add(out_data, -nout_data)))
_dlr.DlrWarn(f"diff max: {diff_max}")
if status:
ok += 1
_dlr.DlrError(f"OK {out_data.shape}")
if verbose:
_dlr.DlrInfo(f"in_data\n{in_data}")
_dlr.DlrInfo(f"nout_data\n{nout_data}")
_dlr.DlrInfo(f"out_data\n{out_data}")
else:
error += 1
_dlr.DlrError(f"Mis-match {out_data.shape}")
if verbose:
_dlr.DlrInfo(f"in_data\n{in_data}")
_dlr.DlrInfo(f"nout_data\n{nout_data}")
_dlr.DlrInfo(f"out_data\n{out_data}")
return True if error==0 else False
    def TestActivationReLu(dtype, random, limit, rigor, verbose):
        # Thin CLI adapter: DLR name 'ReLu' maps to torch functional 'relu'.
        return TestActivations('ReLu', 'relu', dtype=dtype, limit=limit, random=random, rigor=rigor, verbose=verbose)
    def TestActivationLeakyReLu(negative_slope, dtype, limit, random, rigor, verbose):
        # Thin CLI adapter; also forwards the configurable negative slope.
        return TestActivations('LeakyReLu', 'leaky_relu', negative_slope=negative_slope, dtype=dtype, limit=limit, random=random, rigor=rigor, verbose=verbose)
    def TestActivationTanh(dtype, random, limit, rigor, verbose):
        # Thin CLI adapter: DLR name 'Tanh' maps to torch 'tanh'.
        return TestActivations('Tanh', 'tanh', dtype=dtype, limit=limit, random=random, rigor=rigor, verbose=verbose)
    def TestActivationSigmoid(dtype, random, limit, rigor, verbose):
        # Thin CLI adapter: DLR name 'Sigmoid' maps to torch 'sigmoid'.
        return TestActivations('Sigmoid', 'sigmoid', dtype=dtype, limit=limit, random=random, rigor=rigor, verbose=verbose)
#===============================================================================
if __name__=='__main__':
    # Command-line driver: select one layer self-test and run it.
    import sys
    if 'torch' not in sys.modules:
        _dlr.DlrError("PyTorch is not loaded.")
    import argparse
    parser = argparse.ArgumentParser(description='DLR PyTorch Testing')
    # Bug fix: the --layer help text previously rendered
    # "...'NormBatch'+'Deconvlution2d'" as the single misspelled word
    # 'NormBatchDeconvlution2d' (the real dispatch key is 'Deconvolution2d'),
    # and advertised 'Linear2d', which has no dispatch entry ('LinearNd' does).
    parser.add_argument('--layer', dest='layer', type=str, default='ReLu',
                        help='Specify layer to test (default: ReLu)\n'
                            +'ReLu LeakyReLu Tanh Sigmoid\n'
                            +'Convolution2d Pooling2dMax Pooling2dAvg\n'
                            +'Linear1d LinearNd Concat2d\n'
                            +'NormBatch Deconvolution2d'
                        )
    parser.add_argument('--limit', dest='limit', type=float, default=1.0E-3,
                        help='Specify error limit (default: 1.0E-3)')
    parser.add_argument('--nslope', dest='negative_slope', type=float, default=0.01,
                        help='Specify negative slope of LeakyReLU (default: 0.01)')
    parser.add_argument('--dtype', dest='dtype', type=str, default='int32',
                        help='Specify data type (default: int32) float32, float64')
    parser.add_argument('--random', dest='random', action='store_true', default=False,
                        help='Use random pattern (default: False)')
    parser.add_argument('--rigor', dest='rigor', action='store_true', default=False,
                        help='Check values rigorously (default: False)')
    parser.add_argument('--verbose', dest='verbose', action='store_true', default=False,
                        help='Verbose (default: False)')
    args = parser.parse_args()
    random = args.random
    rigor = args.rigor
    verbose = args.verbose
    limit = args.limit
    negative_slope = args.negative_slope
    # Map the CLI names onto torch dtypes and test entry points; an unknown
    # name raises KeyError, as before.
    dtype = { 'int32' : torch.int32,
              'float32' : torch.float32,
              'float64' : torch.float64
            } [args.dtype]
    layer = args.layer
    func = { 'Convolution2d' : TestConvolution2d
           , 'Pooling2dMax' : TestPooling2dMax
           , 'Pooling2dAvg' : TestPooling2dAvg
           , 'Linear1d' : TestLinear1d
           , 'LinearNd' : TestLinearNd
           , 'Concat2d' : TestConcat2d
           , 'ReLu' : TestActivationReLu
           , 'LeakyReLu' : TestActivationLeakyReLu
           , 'Tanh' : TestActivationTanh
           , 'Sigmoid' : TestActivationSigmoid
           , 'NormBatch' : TestNormBatch
           , 'Deconvolution2d': TestDeconvolution2d
           } [layer]
    _dlr.DlrPrint("Testing " + layer, flush=True)
    # LeakyReLu is the only test that takes the extra negative_slope knob.
    if layer == 'LeakyReLu':
        status = func(negative_slope=negative_slope,dtype=dtype,limit=limit,random=random,rigor=rigor,verbose=verbose)
    else:
        status = func(dtype=dtype,limit=limit,random=random,rigor=rigor,verbose=verbose)
#===============================================================================
# Revision history:
#
# 2020.09.30: Started by Ando Ki (adki@future-ds.com)
#===============================================================================
| 45.287057
| 159
| 0.502523
| 7,156
| 62,632
| 4.239519
| 0.058971
| 0.025842
| 0.011767
| 0.030061
| 0.801305
| 0.773716
| 0.754895
| 0.721471
| 0.707759
| 0.687554
| 0
| 0.023874
| 0.367336
| 62,632
| 1,382
| 160
| 45.319826
| 0.741754
| 0.238217
| 0
| 0.70019
| 0
| 0
| 0.050671
| 0.009188
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035104
| false
| 0
| 0.010436
| 0.010436
| 0.082543
| 0.000949
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b2976d45017b2d660511b3ba06416322f5b72582
| 13,223
|
py
|
Python
|
flow_sdk/api/basic/basic_client.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | 5
|
2019-07-31T04:11:05.000Z
|
2021-01-07T03:23:20.000Z
|
flow_sdk/api/basic/basic_client.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | null | null | null |
flow_sdk/api/basic/basic_client.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import sys
import flow_sdk.api.basic.batch_update_permission_pb2
import google.protobuf.struct_pb2
import flow_sdk.model.flow.flow_pb2
import flow_sdk.api.basic.delete_pb2
import flow_sdk.api.basic.get_pb2
import google.protobuf.empty_pb2
import flow_sdk.api.basic.get_categories_pb2
import flow_sdk.api.basic.get_version_list_pb2
import flow_sdk.api.basic.list_pb2
import flow_sdk.utils.http_util
import google.protobuf.json_format
class BasicClient(object):
    def __init__(self, server_ip="", server_port=0, service_name="", host=""):
        """
        Initialize the client.

        :param server_ip: server ip to send SDK requests to; when empty,
                          requests are routed through the naming service
        :param server_port: server port, used together with server_ip; when 0,
                            routing goes through the naming service
        :param service_name: service name for SDK requests; when empty, routing
                             falls back to the contract name.  If both
                             server_ip and service_name are set, server_ip
                             takes precedence
        :param host: host header of the target service,
                     e.g. cmdb.easyops-only.com
        """
        # server_ip and server_port must be supplied together (the message
        # text is runtime behavior and is kept in its original language).
        if server_ip == "" and server_port != 0 or server_ip != "" and server_port == 0:
            raise Exception("server_ip和server_port必须同时指定")
        self._server_ip = server_ip
        self._server_port = server_port
        self._service_name = service_name
        self._host = host
    def update_batch_permission(self, request, org, user, timeout=10):
        # type: (flow_sdk.api.basic.batch_update_permission_pb2.UpdateBatchPermissionRequest, int, str, int) -> google.protobuf.struct_pb2.Struct
        """
        Batch-update flow permissions.

        :param request: the update_batch_permission request message
        :param org: customer org id (a number)
        :param user: username the API call is made as
        :param timeout: request timeout in seconds
        :return: google.protobuf.struct_pb2.Struct
        """
        headers = {"org": org, "user": user}
        route_name = ""
        server_ip = self._server_ip
        # An explicit service_name wins; a fixed server_ip falls back to the
        # contract name; otherwise the naming service resolves the route.
        if self._service_name != "":
            route_name = self._service_name
        elif self._server_ip != "":
            route_name = "easyops.api.flow.basic.UpdateBatchPermission"
        uri = "/flows/batch/permission"
        requestParam = request
        rsp_obj = flow_sdk.utils.http_util.do_api_request(
            method="PUT",
            src_name="logic.flow_sdk",
            dst_name=route_name,
            server_ip=server_ip,
            server_port=self._server_port,
            host=self._host,
            uri=uri,
            params=google.protobuf.json_format.MessageToDict(
                requestParam, preserving_proto_field_name=True),
            headers=headers,
            timeout=timeout,
        )
        rsp = google.protobuf.struct_pb2.Struct()
        google.protobuf.json_format.ParseDict(rsp_obj["data"], rsp, ignore_unknown_fields=True)
        return rsp
    def create_flow(self, request, org, user, timeout=10):
        # type: (flow_sdk.model.flow.flow_pb2.Flow, int, str, int) -> flow_sdk.model.flow.flow_pb2.Flow
        """
        Create a flow.

        :param request: the create_flow request message (a Flow)
        :param org: customer org id (a number)
        :param user: username the API call is made as
        :param timeout: request timeout in seconds
        :return: flow_sdk.model.flow.flow_pb2.Flow
        """
        headers = {"org": org, "user": user}
        route_name = ""
        server_ip = self._server_ip
        # An explicit service_name wins; a fixed server_ip falls back to the
        # contract name; otherwise the naming service resolves the route.
        if self._service_name != "":
            route_name = self._service_name
        elif self._server_ip != "":
            route_name = "easyops.api.flow.basic.CreateFlow"
        uri = "/flows"
        requestParam = request
        rsp_obj = flow_sdk.utils.http_util.do_api_request(
            method="POST",
            src_name="logic.flow_sdk",
            dst_name=route_name,
            server_ip=server_ip,
            server_port=self._server_port,
            host=self._host,
            uri=uri,
            params=google.protobuf.json_format.MessageToDict(
                requestParam, preserving_proto_field_name=True),
            headers=headers,
            timeout=timeout,
        )
        rsp = flow_sdk.model.flow.flow_pb2.Flow()
        google.protobuf.json_format.ParseDict(rsp_obj["data"], rsp, ignore_unknown_fields=True)
        return rsp
    def delete_flow(self, request, org, user, timeout=10):
        # type: (flow_sdk.api.basic.delete_pb2.DeleteFlowRequest, int, str, int) -> flow_sdk.api.basic.delete_pb2.DeleteFlowResponse
        """
        Delete a flow.

        :param request: the delete_flow request message
        :param org: customer org id (a number)
        :param user: username the API call is made as
        :param timeout: request timeout in seconds
        :return: flow_sdk.api.basic.delete_pb2.DeleteFlowResponse
        """
        headers = {"org": org, "user": user}
        route_name = ""
        server_ip = self._server_ip
        # An explicit service_name wins; a fixed server_ip falls back to the
        # contract name; otherwise the naming service resolves the route.
        if self._service_name != "":
            route_name = self._service_name
        elif self._server_ip != "":
            route_name = "easyops.api.flow.basic.DeleteFlow"
        uri = "/flows/{flowId}".format(
            flowId=request.flowId,
        )
        requestParam = request
        rsp_obj = flow_sdk.utils.http_util.do_api_request(
            method="DELETE",
            src_name="logic.flow_sdk",
            dst_name=route_name,
            server_ip=server_ip,
            server_port=self._server_port,
            host=self._host,
            uri=uri,
            params=google.protobuf.json_format.MessageToDict(
                requestParam, preserving_proto_field_name=True),
            headers=headers,
            timeout=timeout,
        )
        rsp = flow_sdk.api.basic.delete_pb2.DeleteFlowResponse()
        google.protobuf.json_format.ParseDict(rsp_obj["data"], rsp, ignore_unknown_fields=True)
        return rsp
    def get_flow_info(self, request, org, user, timeout=10):
        # type: (flow_sdk.api.basic.get_pb2.GetFlowInfoRequest, int, str, int) -> flow_sdk.model.flow.flow_pb2.Flow
        """
        Get information about a flow.

        :param request: the get_flow_info request message
        :param org: customer org id (a number)
        :param user: username the API call is made as
        :param timeout: request timeout in seconds
        :return: flow_sdk.model.flow.flow_pb2.Flow
        """
        headers = {"org": org, "user": user}
        route_name = ""
        server_ip = self._server_ip
        # An explicit service_name wins; a fixed server_ip falls back to the
        # contract name; otherwise the naming service resolves the route.
        if self._service_name != "":
            route_name = self._service_name
        elif self._server_ip != "":
            route_name = "easyops.api.flow.basic.GetFlowInfo"
        uri = "/flows/{flowId}".format(
            flowId=request.flowId,
        )
        requestParam = request
        rsp_obj = flow_sdk.utils.http_util.do_api_request(
            method="GET",
            src_name="logic.flow_sdk",
            dst_name=route_name,
            server_ip=server_ip,
            server_port=self._server_port,
            host=self._host,
            uri=uri,
            params=google.protobuf.json_format.MessageToDict(
                requestParam, preserving_proto_field_name=True),
            headers=headers,
            timeout=timeout,
        )
        rsp = flow_sdk.model.flow.flow_pb2.Flow()
        google.protobuf.json_format.ParseDict(rsp_obj["data"], rsp, ignore_unknown_fields=True)
        return rsp
    def get_flow_categories(self, request, org, user, timeout=10):
        # type: (google.protobuf.empty_pb2.Empty, int, str, int) -> flow_sdk.api.basic.get_categories_pb2.GetFlowCategoriesResponse
        """
        Query flow categories.

        :param request: the get_flow_categories request message (Empty)
        :param org: customer org id (a number)
        :param user: username the API call is made as
        :param timeout: request timeout in seconds
        :return: flow_sdk.api.basic.get_categories_pb2.GetFlowCategoriesResponse
        """
        headers = {"org": org, "user": user}
        route_name = ""
        server_ip = self._server_ip
        # An explicit service_name wins; a fixed server_ip falls back to the
        # contract name; otherwise the naming service resolves the route.
        if self._service_name != "":
            route_name = self._service_name
        elif self._server_ip != "":
            route_name = "easyops.api.flow.basic.GetFlowCategories"
        uri = "/flow_categories"
        requestParam = request
        rsp_obj = flow_sdk.utils.http_util.do_api_request(
            method="GET",
            src_name="logic.flow_sdk",
            dst_name=route_name,
            server_ip=server_ip,
            server_port=self._server_port,
            host=self._host,
            uri=uri,
            params=google.protobuf.json_format.MessageToDict(
                requestParam, preserving_proto_field_name=True),
            headers=headers,
            timeout=timeout,
        )
        rsp = flow_sdk.api.basic.get_categories_pb2.GetFlowCategoriesResponse()
        # NOTE(review): unlike the sibling methods, this parses the whole
        # rsp_obj rather than rsp_obj["data"].  The code is generated from the
        # API contract, so this may be intentional -- confirm against the
        # GetFlowCategories contract before changing it.
        google.protobuf.json_format.ParseDict(rsp_obj, rsp, ignore_unknown_fields=True)
        return rsp
def get_version_list(self, request, org, user, timeout=10):
    # type: (flow_sdk.api.basic.get_version_list_pb2.GetVersionListRequest, int, str, int) -> flow_sdk.api.basic.get_version_list_pb2.GetVersionListResponse
    """
    Fetch the list of versions for a flow.

    :param request: get_version_list request message
    :param org: customer org id (numeric)
    :param user: username performing the api call
    :param timeout: call timeout in seconds
    :return: flow_sdk.api.basic.get_version_list_pb2.GetVersionListResponse
    """
    headers = {"org": org, "user": user}
    # Route by explicit service name when configured; otherwise, if a
    # server ip is set, use the well-known API route name.
    if self._service_name != "":
        destination = self._service_name
    elif self._server_ip != "":
        destination = "easyops.api.flow.basic.GetVersionList"
    else:
        destination = ""
    query = google.protobuf.json_format.MessageToDict(
        request, preserving_proto_field_name=True)
    rsp_obj = flow_sdk.utils.http_util.do_api_request(
        method="GET",
        src_name="logic.flow_sdk",
        dst_name=destination,
        server_ip=self._server_ip,
        server_port=self._server_port,
        host=self._host,
        uri="/flows/{flowId}/versions".format(flowId=request.flowId),
        params=query,
        headers=headers,
        timeout=timeout,
    )
    rsp = flow_sdk.api.basic.get_version_list_pb2.GetVersionListResponse()
    google.protobuf.json_format.ParseDict(rsp_obj["data"], rsp, ignore_unknown_fields=True)
    return rsp
def get_flow_list(self, request, org, user, timeout=10):
    # type: (flow_sdk.api.basic.list_pb2.GetFlowListRequest, int, str, int) -> flow_sdk.api.basic.list_pb2.GetFlowListResponse
    """
    Fetch the list of flows.

    :param request: get_flow_list request message
    :param org: customer org id (numeric)
    :param user: username performing the api call
    :param timeout: call timeout in seconds
    :return: flow_sdk.api.basic.list_pb2.GetFlowListResponse
    """
    headers = {"org": org, "user": user}
    # Route by explicit service name when configured; otherwise, if a
    # server ip is set, use the well-known API route name.
    if self._service_name != "":
        destination = self._service_name
    elif self._server_ip != "":
        destination = "easyops.api.flow.basic.GetFlowList"
    else:
        destination = ""
    query = google.protobuf.json_format.MessageToDict(
        request, preserving_proto_field_name=True)
    rsp_obj = flow_sdk.utils.http_util.do_api_request(
        method="GET",
        src_name="logic.flow_sdk",
        dst_name=destination,
        server_ip=self._server_ip,
        server_port=self._server_port,
        host=self._host,
        uri="/flows",
        params=query,
        headers=headers,
        timeout=timeout,
    )
    rsp = flow_sdk.api.basic.list_pb2.GetFlowListResponse()
    google.protobuf.json_format.ParseDict(rsp_obj["data"], rsp, ignore_unknown_fields=True)
    return rsp
def update_flow(self, request, org, user, timeout=10):
    # type: (flow_sdk.model.flow.flow_pb2.Flow, int, str, int) -> flow_sdk.model.flow.flow_pb2.Flow
    """
    Update a flow.

    :param request: update_flow request message (the flow to persist)
    :param org: customer org id (numeric)
    :param user: username performing the api call
    :param timeout: call timeout in seconds
    :return: flow_sdk.model.flow.flow_pb2.Flow
    """
    headers = {"org": org, "user": user}
    # Route by explicit service name when configured; otherwise, if a
    # server ip is set, use the well-known API route name.
    if self._service_name != "":
        destination = self._service_name
    elif self._server_ip != "":
        destination = "easyops.api.flow.basic.UpdateFlow"
    else:
        destination = ""
    body = google.protobuf.json_format.MessageToDict(
        request, preserving_proto_field_name=True)
    rsp_obj = flow_sdk.utils.http_util.do_api_request(
        method="PUT",
        src_name="logic.flow_sdk",
        dst_name=destination,
        server_ip=self._server_ip,
        server_port=self._server_port,
        host=self._host,
        uri="/flows/{flowId}".format(flowId=request.flowId),
        params=body,
        headers=headers,
        timeout=timeout,
    )
    rsp = flow_sdk.model.flow.flow_pb2.Flow()
    google.protobuf.json_format.ParseDict(rsp_obj["data"], rsp, ignore_unknown_fields=True)
    return rsp
| 35.834688
| 160
| 0.601906
| 1,492
| 13,223
| 5.041555
| 0.093834
| 0.048391
| 0.030577
| 0.045865
| 0.847115
| 0.831295
| 0.812151
| 0.765621
| 0.731189
| 0.708987
| 0
| 0.006578
| 0.298722
| 13,223
| 368
| 161
| 35.932065
| 0.804594
| 0.197837
| 0
| 0.746835
| 0
| 0
| 0.065644
| 0.036059
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037975
| false
| 0
| 0.054852
| 0
| 0.130802
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.