hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
07f3ab5a55517046df006375a0333977d393d6cf
| 3,076
|
py
|
Python
|
FusionIIIT/applications/office_module/migrations/0002_auto_20200522_1851.py
|
sabhishekpratap5/sonarcubeTest2
|
9bd8105e457f6feb8c38fa94b335e54783fca99e
|
[
"bzip2-1.0.6"
] | 2
|
2020-06-17T11:59:08.000Z
|
2020-07-10T12:17:35.000Z
|
FusionIIIT/applications/office_module/migrations/0002_auto_20200522_1851.py
|
sabhishekpratap5/sonarcubeTest2
|
9bd8105e457f6feb8c38fa94b335e54783fca99e
|
[
"bzip2-1.0.6"
] | 19
|
2019-09-08T06:01:14.000Z
|
2020-05-21T09:08:20.000Z
|
FusionIIIT/applications/office_module/migrations/0002_auto_20200522_1851.py
|
sabhishekpratap5/sonarcubeTest2
|
9bd8105e457f6feb8c38fa94b335e54783fca99e
|
[
"bzip2-1.0.6"
] | 14
|
2019-08-31T12:25:42.000Z
|
2022-01-12T08:05:33.000Z
|
# Generated by Django 3.0.6 on 2020-05-22 18:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('office_module', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='hostel_guestroom_approval',
name='hall_no',
field=models.CharField(choices=[('HALL-1-BOYS', 'hall-1-boys'), ('HALL-1-GIRLS', 'hall-1-girls'), ('HALL-3', 'hall-3'), ('HALL-4', 'hall-4')], default='', max_length=16),
),
migrations.AlterField(
model_name='project_closure',
name='advances_dues',
field=models.CharField(choices=[('Computer Graphics', 'Computer Graphics'), ('Machine Learning', 'Machine Learning'), ('Image Processing', 'Image Processing'), ('Data Structure', 'Data Structure')], default='Pending', max_length=20),
),
migrations.AlterField(
model_name='project_closure',
name='expenses_dues',
field=models.CharField(choices=[('Computer Graphics', 'Computer Graphics'), ('Machine Learning', 'Machine Learning'), ('Image Processing', 'Image Processing'), ('Data Structure', 'Data Structure')], default='Pending', max_length=20),
),
migrations.AlterField(
model_name='project_closure',
name='others_dues',
field=models.CharField(choices=[('Computer Graphics', 'Computer Graphics'), ('Machine Learning', 'Machine Learning'), ('Image Processing', 'Image Processing'), ('Data Structure', 'Data Structure')], default='Pending', max_length=20),
),
migrations.AlterField(
model_name='project_closure',
name='overhead_deducted',
field=models.CharField(choices=[('Computer Graphics', 'Computer Graphics'), ('Machine Learning', 'Machine Learning'), ('Image Processing', 'Image Processing'), ('Data Structure', 'Data Structure')], default='Pending', max_length=20),
),
migrations.AlterField(
model_name='project_closure',
name='payment_dues',
field=models.CharField(choices=[('Computer Graphics', 'Computer Graphics'), ('Machine Learning', 'Machine Learning'), ('Image Processing', 'Image Processing'), ('Data Structure', 'Data Structure')], default='Pending', max_length=20),
),
migrations.AlterField(
model_name='project_closure',
name='salary_dues',
field=models.CharField(choices=[('Computer Graphics', 'Computer Graphics'), ('Machine Learning', 'Machine Learning'), ('Image Processing', 'Image Processing'), ('Data Structure', 'Data Structure')], default='Pending', max_length=20),
),
migrations.AlterField(
model_name='project_registration',
name='agreement',
field=models.CharField(choices=[('Computer Graphics', 'Computer Graphics'), ('Machine Learning', 'Machine Learning'), ('Image Processing', 'Image Processing'), ('Data Structure', 'Data Structure')], default='NO', max_length=20),
),
]
| 56.962963
| 245
| 0.63199
| 303
| 3,076
| 6.30363
| 0.217822
| 0.117277
| 0.104712
| 0.121466
| 0.802618
| 0.790576
| 0.790576
| 0.765969
| 0.765969
| 0.765969
| 0
| 0.017637
| 0.207412
| 3,076
| 53
| 246
| 58.037736
| 0.765792
| 0.014629
| 0
| 0.595745
| 1
| 0
| 0.412347
| 0.008254
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.021277
| 0
| 0.085106
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ed28afe4b70947e48dcc909ef5065d36a98f49cd
| 32,988
|
py
|
Python
|
OED/MP.py
|
adrianomundo/II2202-research-methodology-scientific-writing
|
39a7a07e1dabbd988f9b3e0c5c41a36a6d292df8
|
[
"Apache-2.0"
] | 1
|
2021-01-30T11:03:05.000Z
|
2021-01-30T11:03:05.000Z
|
OED/MP.py
|
adrianomundo/II2202-research-methodology-scientific-writing
|
39a7a07e1dabbd988f9b3e0c5c41a36a6d292df8
|
[
"Apache-2.0"
] | null | null | null |
OED/MP.py
|
adrianomundo/II2202-research-methodology-scientific-writing
|
39a7a07e1dabbd988f9b3e0c5c41a36a6d292df8
|
[
"Apache-2.0"
] | null | null | null |
import os
import sys
import numpy as np
import time
import matplotlib.pyplot as plt
import pandas as pd
from utils import *
def sliding_dot_product(q, t):
n = t.size
m = q.size
# Append t with n zeros
ta = np.append(t, np.zeros(n))
# Reverse Q
qr = np.flip(q, 0)
# Append qra
qra = np.append(qr, np.zeros(2 * n - m))
# Compute FFTs
qraf = np.fft.fft(qra)
taf = np.fft.fft(ta)
# Compute the inverse FFT to the element-wise multiplication of qraf and taf
qt = np.fft.ifft(np.multiply(qraf, taf))
return qt[m:n]
def sliding_dot_product_stomp(q, t):
n = t.size
m = q.size
# Append t with n zeros
ta = np.append(t, np.zeros(n))
# Reverse Q
qr = np.flip(q, 0)
# Append qra
qra = np.append(qr, np.zeros(2 * n - m))
# Compute FFTs
qraf = np.fft.fft(qra)
taf = np.fft.fft(ta)
# Compute the inverse FFT to the element-wise multiplication of qraf and taf
qt = np.fft.ifft(np.multiply(qraf, taf))
return qt[m - 1:n]
def calculate_distance_profile(q, t, qt, a, sum_q, sum_q2, mean_t, sigma_t):
n = t.size
m = q.size
b = np.zeros(n - m)
dist = np.zeros(n - m)
for i in range(0, n - m):
b[i] = -2 * (qt[i].real - sum_q * mean_t[i]) / sigma_t[i]
dist[i] = a[i] + b[i] + sum_q2
return np.sqrt(np.abs(dist))
# The code below takes O(m) for each subsequence
# you should replace it for MASS
def compute_mean_std_for_query(Q):
# Compute Q stats -- O(n)
sumQ = np.sum(Q)
sumQ2 = np.sum(np.power(Q, 2))
return sumQ, sumQ2
def pre_compute_mean_std_for_TS(ta, m):
na = len(ta)
sum_t = np.zeros(na - m)
sum_t2 = np.zeros(na - m)
# Compute the stats for t
cumulative_sum_t = np.cumsum(ta)
cumulative_sum_t2 = np.cumsum(np.power(ta, 2))
for i in range(na - m):
sum_t[i] = cumulative_sum_t[i + m] - cumulative_sum_t[i]
sum_t2[i] = cumulative_sum_t2[i + m] - cumulative_sum_t2[i]
mean_t = np.divide(sum_t, m)
mean_t2 = np.divide(sum_t2, m)
mean_t_p2 = np.power(mean_t, 2)
sigma_t2 = np.subtract(mean_t2, mean_t_p2)
sigma_t = np.sqrt(sigma_t2)
return sum_t, sum_t2, mean_t, mean_t2, mean_t_p2, sigma_t, sigma_t2
def pre_compute_mean_std_for_TS_stomp(ta, m):
na = len(ta)
# Compute the stats for t
cumulative_sum_t = np.cumsum(ta)
cumulative_sum_t2 = np.cumsum(np.power(ta, 2))
sum_t = (cumulative_sum_t[m - 1:na] - np.concatenate(([0], cumulative_sum_t[0:na - m])))
sum_t2 = (cumulative_sum_t2[m - 1:na] - np.concatenate(([0], cumulative_sum_t2[0:na - m])))
mean_t = np.divide(sum_t, m)
mean_t2 = np.divide(sum_t2, m)
mean_t_p2 = np.power(mean_t, 2)
sigma_t2 = np.subtract(mean_t2, mean_t_p2)
sigma_t = np.sqrt(sigma_t2)
return sum_t, sum_t2, mean_t, mean_t2, mean_t_p2, sigma_t, sigma_t2
# MUEEN’S ALGORITHM FOR SIMILARITY SEARCH (MASS)
def mass(Q, T, a, meanT, sigmaT):
# Z-Normalisation
if np.std(Q) != 0:
Q = (Q - np.mean(Q)) / np.std(Q)
QT = sliding_dot_product(Q, T)
sumQ, sumQ2 = compute_mean_std_for_query(Q)
return calculate_distance_profile(Q, T, QT, a, sumQ, sumQ2, meanT, sigmaT)
def element_wise_min(Pab, Iab, D, idx, ignore_trivial, m):
for i in range(0, len(D)):
if not ignore_trivial or (
np.abs(idx - i) > m / 2.0): # if it's a self-join, ignore trivial matches in [-m/2,m/2]
if D[i] < Pab[i]:
Pab[i] = D[i]
Iab[i] = idx
return Pab, Iab
def stamp(Ta, Tb, m):
"""
Compute the Matrix Profile between time-series Ta and Tb.
If Ta==Tb, the operation is a self-join and trivial matches are ignored.
:param Ta: time-series, np.array
:param Tb: time-series, np.array
:param m: subsequence length
:return: Matrix Profile, Nearest-Neighbor indexes
"""
nb = len(Tb)
na = len(Ta)
Pab = np.ones(na - m) * np.inf
Iab = np.zeros(na - m)
idxes = np.arange(nb - m + 1)
sumT, sumT2, meanT, meanT_2, meanTP2, sigmaT, sigmaT2 = pre_compute_mean_std_for_TS(Ta, m)
a = np.zeros(na - m)
for i in range(0, na - m):
a[i] = (sumT2[i] - 2 * sumT[i] * meanT[i] + m * meanTP2[i]) / sigmaT2[i]
ignore_trivial = np.atleast_1d(Ta == Tb).all()
for idx in idxes:
D = mass(Tb[idx: idx + m], Ta, a, meanT, sigmaT)
if (ignore_trivial):
# ignore trivial minimum and maximum
minIdx = int(np.maximum(idx - m / 2.0, 0))
maxIdx = int(np.minimum(idx + m / 2.0, len(D)))
D[minIdx:maxIdx:1] = np.inf
Iab[Pab > D] = i
Pab = np.minimum(Pab, D)
return Pab, Iab
def stomp(T, m):
"""
Compute the Matrix Profile with self join for T
:param T: time-series, np.array
:param Tb: time-series, np.array
:param m: subsequence length
:return: Matrix Profile, Nearest-Neighbor indexes
"""
epsilon = 1e-10
n = len(T)
seq_l = n - m
_, _, meanT, _, _, sigmaT, _ = pre_compute_mean_std_for_TS_stomp(T, m)
Pab = np.full(seq_l + 1, np.inf)
Iab = np.zeros(n - m + 1)
ignore_trivial = True
for idx in range(0, seq_l):
# There's somthing with normalization
Q_std = sigmaT[idx] if sigmaT[idx] > epsilon else epsilon
if idx == 0:
QT = sliding_dot_product_stomp(T[0:m], T).real
QT_first = np.copy(QT)
else:
QT[1:] = QT[0:-1] - (T[0:seq_l] * T[idx - 1]) + (T[m:n] * T[idx + m - 1])
QT[0] = QT_first[idx]
# Calculate distance profile
D = (2 * (m - (QT - m * meanT * meanT[idx]) / (Q_std * sigmaT)))
D[D < epsilon] = 0
if (ignore_trivial):
# ignore trivial minimum and maximum
minIdx = int(np.maximum(idx - m / 2.0, 0))
maxIdx = int(np.minimum(idx + m / 2.0, len(D)))
D[minIdx:maxIdx:1] = np.inf
Iab[Pab > D] = idx
np.minimum(Pab, D, Pab)
np.sqrt(Pab, Pab)
return Pab, Iab
# Quick Test
# def test_stomp(Ta, m):
# start_time = time.time()
#
# Pab, Iab = stomp(Ta, m)
# print("--- %s seconds ---" % (time.time() - start_time))
# plot_motif(Ta, Pab, Iab, m)
# return Pab, Iab
# Quick Test
# def test_stamp(Ta, Tb, m):
# start_time = time.time()
#
# Pab, Iab = stamp(Ta, Tb, m)
# print("--- %s seconds ---" % (time.time() - start_time))
#
# plot_discord(Ta, Pab, Iab, m, )
# return Pab, Iab
def plot_motif(Ta, values, indexes, m):
from matplotlib import gridspec
plt.figure(figsize=(8, 4))
plt.subplot(211)
plt.plot(Ta, linestyle='--', alpha=0.5)
plt.xlim((0, len(Ta)))
print(np.argmax(values))
plt.plot(range(np.argmin(values), np.argmin(values) + m), Ta[np.argmin(values):np.argmin(values) + m], c='g',
label='Top Motif')
plt.plot(range(np.argmax(values), np.argmax(values) + m), Ta[np.argmax(values):np.argmax(values) + m], c='r',
label='Top Discord')
plt.legend(loc='best')
plt.title('Time-Series')
plt.subplot(212)
plt.title('Matrix Profile')
plt.plot(range(0, len(values)), values, '#ff5722')
plt.plot(np.argmax(values), np.max(values), marker='x', c='r', ms=10)
plt.plot(np.argmin(values), np.min(values), marker='^', c='g', ms=10)
plt.xlim((0, len(Ta)))
plt.xlabel('Index')
plt.ylabel('Value')
plt.show()
def plot_discord(Ta, Tb, values, indexes, m):
from matplotlib import gridspec
plt.figure(figsize=(8, 4))
gs = gridspec.GridSpec(1, 2, width_ratios=[int(len(Ta) / len(Tb)), 1])
plt.subplot(gs[0])
plt.plot(Ta, linestyle='--')
plt.xlim((0, len(Ta)))
plt.plot(range(np.argmin(values), np.argmin(values) + m), Ta[np.argmin(values):np.argmin(values) + m], c='g',
label='Best Match')
plt.legend(loc='best')
plt.title('Time-Series')
plt.ylim((-3, 3))
plt.subplot(gs[1])
plt.plot(Tb)
plt.title('Query')
plt.xlim((0, len(Tb)))
plt.ylim((-3, 3))
plt.figure()
plt.title('Matrix Profile')
plt.plot(range(0, len(values)), values, '#ff5722')
plt.plot(np.argmax(values), np.max(values), marker='x', c='r', ms=10)
plt.plot(np.argmin(values), np.min(values), marker='^', c='g', ms=10)
plt.xlim((0, len(Ta)))
plt.xlabel('Index')
plt.ylabel('Value')
plt.show()
def plot_match(Ta, Tb, values, indexes, m):
from matplotlib import gridspec
plt.figure(figsize=(8, 4))
gs = gridspec.GridSpec(1, 2, width_ratios=[int(len(Ta) / len(Tb)), 1])
plt.subplot(gs[0])
plt.plot(Ta, linestyle='--')
plt.xlim((0, len(Ta)))
print(np.argmax(values))
plt.plot(range(np.argmin(values), np.argmin(values) + m), Ta[np.argmin(values):np.argmin(values) + m], c='g',
label='Best Match')
plt.legend(loc='best')
plt.title('Time-Series')
plt.ylim((-3, 3))
plt.subplot(gs[1])
plt.plot(Tb)
plt.title('Query')
plt.xlim((0, len(Tb)))
plt.ylim((-3, 3))
plt.figure()
plt.title('Matrix Profile')
plt.plot(range(0, len(values)), values, '#ff5722')
plt.plot(np.argmax(values), np.max(values), marker='x', c='r', ms=10)
plt.plot(np.argmin(values), np.min(values), marker='^', c='g', ms=10)
plt.xlim((0, len(Ta)))
plt.xlabel('Index')
plt.ylabel('Value')
plt.show()
def RunModel(_file_name, _choice, _element_num):
pattern_size = 5
if _choice == 1:
abnormal_data, abnormal_label = ReadGDDataset(_file_name)
if _choice == 2:
abnormal_data, abnormal_label = ReadHSSDataset(_file_name)
if _choice == 3:
abnormal_data, abnormal_label = ReadS5Dataset(_file_name)
if _choice == 4:
abnormal_data, abnormal_label = ReadNABDataset(_file_name)
if _choice == 5:
abnormal_data, abnormal_label = Read2DDataset(_file_name)
if _choice == 6:
abnormal_data, abnormal_label = ReadUAHDataset(_file_name)
if _choice == 7:
abnormal_data, abnormal_label = ReadECGDataset(_file_name)
ts = abnormal_data.flatten()
query = abnormal_data.flatten()
Pab, Iab = stamp(ts, query, pattern_size * _element_num)
# plot_discord(ts, query, Pab, Iab, pattern_size * elem_num)
final_zscore = Z_Score(np.sum(np.nan_to_num(Pab).reshape([-1, _element_num]), axis=1))
y_pred = CreateLabelBasedOnZscore(final_zscore, 3, True)
precision, recall, f1 = CalculatePrecisionRecallF1Metrics(abnormal_label[:-pattern_size], y_pred)
# PrintPrecisionRecallF1Metrics(precision, recall, f1)
fpr, tpr, roc_auc = CalculateROCAUCMetrics(abnormal_label[:-pattern_size], np.sum(np.nan_to_num(Pab).reshape([-1, _element_num]), axis=1))
# print('roc_auc=' + str(roc_auc))
precision_curve, recall_curve, average_precision = CalculatePrecisionRecallCurve(abnormal_label[:-pattern_size], np.sum(np.nan_to_num(Pab).reshape([-1, _element_num]), axis=1))
# print('pr_auc=' + str(average_precision))
cks = CalculateCohenKappaMetrics(abnormal_label[:-pattern_size], y_pred)
# print('cohen_kappa=' + str(cks))
return precision, recall, f1, roc_auc, average_precision, cks
if __name__ == '__main__':
try:
sys.argv[1]
except IndexError:
for n in range(1, 7):
dataset = n
if dataset == 1:
file_name = './GD/data/Genesis_AnomalyLabels.csv'
print(file_name)
precision, recall, f1, roc_auc, pr_auc, cks = RunModel(file_name, dataset)
print('avg_precision=' + str(precision))
print('avg_recall=' + str(recall))
print('avg_f1=' + str(f1))
print('avg_roc_auc=' + str(roc_auc))
print('avg_pr_auc=' + str(pr_auc))
print('avg_cks=' + str(cks))
if dataset == 2:
file_name = './HSS/data/HRSS_anomalous_standard.csv'
print(file_name)
precision, recall, f1, roc_auc, pr_auc, cks = RunModel(file_name, dataset)
print('avg_precision=' + str(precision))
print('avg_recall=' + str(recall))
print('avg_f1=' + str(f1))
print('avg_roc_auc=' + str(roc_auc))
print('avg_pr_auc=' + str(pr_auc))
print('avg_cks=' + str(cks))
if dataset == 3:
for root, dirs, _ in os.walk('./YAHOO/data'):
for dir in dirs:
k_partition = 10
s_precision = []
s_recall = []
s_f1 = []
s_roc_auc = []
s_pr_auc = []
s_cks = []
for _, _, files in os.walk(root + '/' + dir):
for file in files:
file_name = os.path.join(root, dir, file)
print(file_name)
precision, recall, f1, roc_auc, pr_auc, cks = RunModel(file_name, dataset, _element_num=1)
s_precision.append(precision)
s_recall.append(recall)
s_f1.append(f1)
s_roc_auc.append(roc_auc)
s_pr_auc.append(pr_auc)
s_cks.append(cks)
print('########################################')
avg_precision = CalculateAverageMetric(s_precision)
print('avg_precision=' + str(avg_precision))
avg_recall = CalculateAverageMetric(s_recall)
print('avg_recall=' + str(avg_recall))
avg_f1 = CalculateAverageMetric(s_f1)
print('avg_f1=' + str(avg_f1))
avg_roc_auc = CalculateAverageMetric(s_roc_auc)
print('avg_roc_auc=' + str(avg_roc_auc))
avg_pr_auc = CalculateAverageMetric(s_pr_auc)
print('avg_pr_auc=' + str(avg_pr_auc))
avg_cks = CalculateAverageMetric(s_cks)
print('avg_cks=' + str(avg_cks))
print('########################################')
if dataset == 4:
for root, dirs, _ in os.walk('./NAB/data'):
for dir in dirs:
k_partition = 10
s_precision = []
s_recall = []
s_f1 = []
s_roc_auc = []
s_pr_auc = []
s_cks = []
for _, _, files in os.walk(root + '/' + dir):
for file in files:
file_name = os.path.join(root, dir, file)
print(file_name)
precision, recall, f1, roc_auc, pr_auc, cks = RunModel(file_name, dataset, _element_num=1)
s_precision.append(precision)
s_recall.append(recall)
s_f1.append(f1)
s_roc_auc.append(roc_auc)
s_pr_auc.append(pr_auc)
s_cks.append(cks)
print('########################################')
avg_precision = CalculateAverageMetric(s_precision)
print('avg_precision=' + str(avg_precision))
avg_recall = CalculateAverageMetric(s_recall)
print('avg_recall=' + str(avg_recall))
avg_f1 = CalculateAverageMetric(s_f1)
print('avg_f1=' + str(avg_f1))
avg_roc_auc = CalculateAverageMetric(s_roc_auc)
print('avg_roc_auc=' + str(avg_roc_auc))
avg_pr_auc = CalculateAverageMetric(s_pr_auc)
print('avg_pr_auc=' + str(avg_pr_auc))
avg_cks = CalculateAverageMetric(s_cks)
print('avg_cks=' + str(avg_cks))
print('########################################')
if dataset == 5:
for root, dirs, _ in os.walk('./2D/test'):
for dir in dirs:
k_partition = 3
s_precision = []
s_recall = []
s_f1 = []
s_roc_auc = []
s_pr_auc = []
s_cks = []
for _, _, files in os.walk(root + '/' + dir):
for file in files:
file_name = os.path.join(root, dir, file)
print(file_name)
precision, recall, f1, roc_auc, pr_auc, cks = RunModel(file_name, dataset, _element_num=2)
s_precision.append(precision)
s_recall.append(recall)
s_f1.append(f1)
s_roc_auc.append(roc_auc)
s_pr_auc.append(pr_auc)
s_cks.append(cks)
print('########################################')
avg_precision = CalculateAverageMetric(s_precision)
print('avg_precision=' + str(avg_precision))
avg_recall = CalculateAverageMetric(s_recall)
print('avg_recall=' + str(avg_recall))
avg_f1 = CalculateAverageMetric(s_f1)
print('avg_f1=' + str(avg_f1))
avg_roc_auc = CalculateAverageMetric(s_roc_auc)
print('avg_roc_auc=' + str(avg_roc_auc))
avg_pr_auc = CalculateAverageMetric(s_pr_auc)
print('avg_pr_auc=' + str(avg_pr_auc))
avg_cks = CalculateAverageMetric(s_cks)
print('avg_cks=' + str(avg_cks))
print('########################################')
if dataset == 6:
s_precision = []
s_recall = []
s_f1 = []
s_roc_auc = []
s_pr_auc = []
s_cks = []
for root, dirs, files in os.walk('./UAH/'):
for dir in dirs:
folder_name = os.path.join(root, dir)
print(folder_name)
precision, recall, f1, roc_auc, pr_auc, cks = RunModel(folder_name, dataset, _element_num=4)
print('########################################')
print('precision=' + str(precision))
print('recall=' + str(recall))
print('f1=' + str(f1))
print('roc_auc=' + str(roc_auc))
print('pr_auc=' + str(pr_auc))
print('cks=' + str(cks))
print('########################################')
s_precision.append(precision)
s_recall.append(recall)
s_f1.append(f1)
s_roc_auc.append(roc_auc)
s_pr_auc.append(pr_auc)
s_cks.append(cks)
print('########################################')
avg_precision = CalculateAverageMetric(s_precision)
print('avg_precision=' + str(avg_precision))
avg_recall = CalculateAverageMetric(s_recall)
print('avg_recall=' + str(avg_recall))
avg_f1 = CalculateAverageMetric(s_f1)
print('avg_f1=' + str(avg_f1))
avg_roc_auc = CalculateAverageMetric(s_roc_auc)
print('avg_roc_auc=' + str(avg_roc_auc))
avg_pr_auc = CalculateAverageMetric(s_pr_auc)
print('avg_pr_auc=' + str(avg_pr_auc))
avg_cks = CalculateAverageMetric(s_cks)
print('avg_cks=' + str(avg_cks))
print('########################################')
if dataset == 7:
for root, dirs, _ in os.walk('./ECG/'):
for dir in dirs:
k_partition = 3
s_precision = []
s_recall = []
s_f1 = []
s_roc_auc = []
s_pr_auc = []
s_cks = []
for _, _, files in os.walk(root + '/' + dir):
for file in files:
file_name = os.path.join(root, dir, file)
print(file_name)
precision, recall, f1, roc_auc, pr_auc, cks = RunModel(file_name, dataset, _element_num=3)
s_precision.append(precision)
s_recall.append(recall)
s_f1.append(f1)
s_roc_auc.append(roc_auc)
s_pr_auc.append(pr_auc)
s_cks.append(cks)
print('########################################')
avg_precision = CalculateAverageMetric(s_precision)
print('avg_precision=' + str(avg_precision))
avg_recall = CalculateAverageMetric(s_recall)
print('avg_recall=' + str(avg_recall))
avg_f1 = CalculateAverageMetric(s_f1)
print('avg_f1=' + str(avg_f1))
avg_roc_auc = CalculateAverageMetric(s_roc_auc)
print('avg_roc_auc=' + str(avg_roc_auc))
avg_pr_auc = CalculateAverageMetric(s_pr_auc)
print('avg_pr_auc=' + str(avg_pr_auc))
avg_cks = CalculateAverageMetric(s_cks)
print('avg_cks=' + str(avg_cks))
print('########################################')
else:
dataset = int(sys.argv[1])
if dataset == 1:
file_name = './GD/data/Genesis_AnomalyLabels.csv'
print(file_name)
precision, recall, f1, roc_auc, pr_auc, cks = RunModel(file_name, dataset)
print('avg_precision=' + str(precision))
print('avg_recall=' + str(recall))
print('avg_f1=' + str(f1))
print('avg_roc_auc=' + str(roc_auc))
print('avg_pr_auc=' + str(pr_auc))
print('avg_cks=' + str(cks))
if dataset == 2:
file_name = './HSS/data/HRSS_anomalous_standard.csv'
print(file_name)
precision, recall, f1, roc_auc, pr_auc, cks = RunModel(file_name, dataset)
print('avg_precision=' + str(precision))
print('avg_recall=' + str(recall))
print('avg_f1=' + str(f1))
print('avg_roc_auc=' + str(roc_auc))
print('avg_pr_auc=' + str(pr_auc))
print('avg_cks=' + str(cks))
if dataset == 3:
for root, dirs, _ in os.walk('./YAHOO/data'):
for dir in dirs:
k_partition = 10
s_precision = []
s_recall = []
s_f1 = []
s_roc_auc = []
s_pr_auc = []
s_cks = []
for _, _, files in os.walk(root + '/' + dir):
for file in files:
file_name = os.path.join(root, dir, file)
print(file_name)
precision, recall, f1, roc_auc, pr_auc, cks = RunModel(file_name, dataset, _element_num=1)
s_precision.append(precision)
s_recall.append(recall)
s_f1.append(f1)
s_roc_auc.append(roc_auc)
s_pr_auc.append(pr_auc)
s_cks.append(cks)
print('########################################')
avg_precision = CalculateAverageMetric(s_precision)
print('avg_precision=' + str(avg_precision))
avg_recall = CalculateAverageMetric(s_recall)
print('avg_recall=' + str(avg_recall))
avg_f1 = CalculateAverageMetric(s_f1)
print('avg_f1=' + str(avg_f1))
avg_roc_auc = CalculateAverageMetric(s_roc_auc)
print('avg_roc_auc=' + str(avg_roc_auc))
avg_pr_auc = CalculateAverageMetric(s_pr_auc)
print('avg_pr_auc=' + str(avg_pr_auc))
avg_cks = CalculateAverageMetric(s_cks)
print('avg_cks=' + str(avg_cks))
print('########################################')
if dataset == 4:
for root, dirs, _ in os.walk('./NAB/data'):
for dir in dirs:
k_partition = 10
s_precision = []
s_recall = []
s_f1 = []
s_roc_auc = []
s_pr_auc = []
s_cks = []
for _, _, files in os.walk(root + '/' + dir):
for file in files:
file_name = os.path.join(root, dir, file)
print(file_name)
precision, recall, f1, roc_auc, pr_auc, cks = RunModel(file_name, dataset, _element_num=1)
s_precision.append(precision)
s_recall.append(recall)
s_f1.append(f1)
s_roc_auc.append(roc_auc)
s_pr_auc.append(pr_auc)
s_cks.append(cks)
print('########################################')
avg_precision = CalculateAverageMetric(s_precision)
print('avg_precision=' + str(avg_precision))
avg_recall = CalculateAverageMetric(s_recall)
print('avg_recall=' + str(avg_recall))
avg_f1 = CalculateAverageMetric(s_f1)
print('avg_f1=' + str(avg_f1))
avg_roc_auc = CalculateAverageMetric(s_roc_auc)
print('avg_roc_auc=' + str(avg_roc_auc))
avg_pr_auc = CalculateAverageMetric(s_pr_auc)
print('avg_pr_auc=' + str(avg_pr_auc))
avg_cks = CalculateAverageMetric(s_cks)
print('avg_cks=' + str(avg_cks))
print('########################################')
if dataset == 5:
for root, dirs, _ in os.walk('./2D/test'):
for dir in dirs:
k_partition = 3
s_precision = []
s_recall = []
s_f1 = []
s_roc_auc = []
s_pr_auc = []
s_cks = []
for _, _, files in os.walk(root + '/' + dir):
for file in files:
file_name = os.path.join(root, dir, file)
print(file_name)
precision, recall, f1, roc_auc, pr_auc, cks = RunModel(file_name, dataset, _element_num=2)
s_precision.append(precision)
s_recall.append(recall)
s_f1.append(f1)
s_roc_auc.append(roc_auc)
s_pr_auc.append(pr_auc)
s_cks.append(cks)
print('########################################')
avg_precision = CalculateAverageMetric(s_precision)
print('avg_precision=' + str(avg_precision))
avg_recall = CalculateAverageMetric(s_recall)
print('avg_recall=' + str(avg_recall))
avg_f1 = CalculateAverageMetric(s_f1)
print('avg_f1=' + str(avg_f1))
avg_roc_auc = CalculateAverageMetric(s_roc_auc)
print('avg_roc_auc=' + str(avg_roc_auc))
avg_pr_auc = CalculateAverageMetric(s_pr_auc)
print('avg_pr_auc=' + str(avg_pr_auc))
avg_cks = CalculateAverageMetric(s_cks)
print('avg_cks=' + str(avg_cks))
print('########################################')
if dataset == 6:
s_precision = []
s_recall = []
s_f1 = []
s_roc_auc = []
s_pr_auc = []
s_cks = []
for root, dirs, files in os.walk('./UAH/'):
for dir in dirs:
folder_name = os.path.join(root, dir)
print(folder_name)
precision, recall, f1, roc_auc, pr_auc, cks = RunModel(folder_name, dataset, _element_num=4)
print('########################################')
print('precision=' + str(precision))
print('recall=' + str(recall))
print('f1=' + str(f1))
print('roc_auc=' + str(roc_auc))
print('pr_auc=' + str(pr_auc))
print('cks=' + str(cks))
print('########################################')
s_precision.append(precision)
s_recall.append(recall)
s_f1.append(f1)
s_roc_auc.append(roc_auc)
s_pr_auc.append(pr_auc)
s_cks.append(cks)
print('########################################')
avg_precision = CalculateAverageMetric(s_precision)
print('avg_precision=' + str(avg_precision))
avg_recall = CalculateAverageMetric(s_recall)
print('avg_recall=' + str(avg_recall))
avg_f1 = CalculateAverageMetric(s_f1)
print('avg_f1=' + str(avg_f1))
avg_roc_auc = CalculateAverageMetric(s_roc_auc)
print('avg_roc_auc=' + str(avg_roc_auc))
avg_pr_auc = CalculateAverageMetric(s_pr_auc)
print('avg_pr_auc=' + str(avg_pr_auc))
avg_cks = CalculateAverageMetric(s_cks)
print('avg_cks=' + str(avg_cks))
print('########################################')
if dataset == 7:
for root, dirs, _ in os.walk('./ECG/'):
for dir in dirs:
k_partition = 3
s_precision = []
s_recall = []
s_f1 = []
s_roc_auc = []
s_pr_auc = []
s_cks = []
for _, _, files in os.walk(root + '/' + dir):
for file in files:
file_name = os.path.join(root, dir, file)
print(file_name)
precision, recall, f1, roc_auc, pr_auc, cks = RunModel(file_name, dataset, _element_num=3)
s_precision.append(precision)
s_recall.append(recall)
s_f1.append(f1)
s_roc_auc.append(roc_auc)
s_pr_auc.append(pr_auc)
s_cks.append(cks)
print('########################################')
avg_precision = CalculateAverageMetric(s_precision)
print('avg_precision=' + str(avg_precision))
avg_recall = CalculateAverageMetric(s_recall)
print('avg_recall=' + str(avg_recall))
avg_f1 = CalculateAverageMetric(s_f1)
print('avg_f1=' + str(avg_f1))
avg_roc_auc = CalculateAverageMetric(s_roc_auc)
print('avg_roc_auc=' + str(avg_roc_auc))
avg_pr_auc = CalculateAverageMetric(s_pr_auc)
print('avg_pr_auc=' + str(avg_pr_auc))
avg_cks = CalculateAverageMetric(s_cks)
print('avg_cks=' + str(avg_cks))
print('########################################')
| 42.076531
| 180
| 0.479811
| 3,800
| 32,988
| 3.920526
| 0.076053
| 0.040274
| 0.02054
| 0.012082
| 0.819573
| 0.809236
| 0.798631
| 0.77695
| 0.769432
| 0.76225
| 0
| 0.015989
| 0.378138
| 32,988
| 783
| 181
| 42.130268
| 0.710247
| 0.053868
| 0
| 0.809748
| 0
| 0
| 0.076461
| 0.035562
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022013
| false
| 0
| 0.015723
| 0
| 0.055031
| 0.213836
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ed44b2542ca6d6607d08560167a89868c413592c
| 571
|
py
|
Python
|
naoqipythonlib/qi/path.py
|
ka2le/pepperflask-1485946909371
|
e5278419a82f27f06c6a088df235eb57d6578f3e
|
[
"Apache-2.0"
] | 8
|
2020-08-01T04:31:50.000Z
|
2021-11-01T08:39:11.000Z
|
naoqipythonlib/qi/path.py
|
ka2le/pepperflask-1485946909371
|
e5278419a82f27f06c6a088df235eb57d6578f3e
|
[
"Apache-2.0"
] | 3
|
2020-10-26T13:30:06.000Z
|
2022-02-18T18:12:44.000Z
|
old_aldebaran_cplusplus_sdk/lib/qi/path.py
|
fernandozuher/naoqi_webots
|
36aa7b7ed7eb2b305293e860446ca55920d8f92a
|
[
"Apache-2.0"
] | 4
|
2016-04-01T10:02:39.000Z
|
2018-04-14T08:05:20.000Z
|
"""Public re-exports for the ``qi.path`` lookup helpers.

Every name here is implemented in the native ``_qi`` extension module;
``__all__`` pins this module's public API to exactly those re-exports.
"""
from _qi import (
    findBin,
    findLib,
    findConf,
    findData,
    listData,
    confPaths,
    dataPaths,
    binPaths,
    libPaths,
    setWritablePath,
    userWritableDataPath,
    userWritableConfPath,
    sdkPrefix,
)

__all__ = [
    "findBin",
    "findLib",
    "findConf",
    "findData",
    "listData",
    "confPaths",
    "dataPaths",
    "binPaths",
    "libPaths",
    "setWritablePath",
    "userWritableDataPath",
    "userWritableConfPath",
    "sdkPrefix",
]
| 30.052632
| 77
| 0.511384
| 30
| 571
| 9.566667
| 0.566667
| 0.097561
| 0.15331
| 0.209059
| 0.947735
| 0.947735
| 0.947735
| 0.947735
| 0.947735
| 0.947735
| 0
| 0
| 0.392294
| 571
| 18
| 78
| 31.722222
| 0.827089
| 0
| 0
| 0
| 0
| 0
| 0.238179
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.0625
| 0
| 0.0625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ed522e3eb92afd0606d4aefe7df2503dd12b3cec
| 131
|
py
|
Python
|
xconsole/__init__.py
|
quanghuyen1301/xconsole
|
fdc64f408ef021c8c9c921ae148fb43f886db005
|
[
"Unlicense"
] | 1
|
2020-03-20T16:44:27.000Z
|
2020-03-20T16:44:27.000Z
|
xconsole/__init__.py
|
quanghuyen1301/xconsole
|
fdc64f408ef021c8c9c921ae148fb43f886db005
|
[
"Unlicense"
] | null | null | null |
xconsole/__init__.py
|
quanghuyen1301/xconsole
|
fdc64f408ef021c8c9c921ae148fb43f886db005
|
[
"Unlicense"
] | null | null | null |
# __init__.py
# Package initializer: re-export the tty entry points implemented in
# xconsole.__main__ so callers can write e.g. ``from xconsole import ssh_tty``.
from .__main__ import tty
from .__main__ import ssh_tty
from .__main__ import local_tty
from .__main__ import sol_tty
| 26.2
| 31
| 0.824427
| 21
| 131
| 4.047619
| 0.428571
| 0.376471
| 0.658824
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129771
| 131
| 5
| 32
| 26.2
| 0.745614
| 0.083969
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
ed61b52d99a6228733f69b9d71a1ae8f2915c5f7
| 38,644
|
py
|
Python
|
sdk/python/pulumi_keycloak/openid/user_client_role_protocol_mapper.py
|
davide-talesco/pulumi-keycloak
|
08d66be6f2bf578d4292e29eb6181794375bc4e5
|
[
"ECL-2.0",
"Apache-2.0"
] | 13
|
2020-04-28T15:20:56.000Z
|
2022-03-24T18:00:17.000Z
|
sdk/python/pulumi_keycloak/openid/user_client_role_protocol_mapper.py
|
davide-talesco/pulumi-keycloak
|
08d66be6f2bf578d4292e29eb6181794375bc4e5
|
[
"ECL-2.0",
"Apache-2.0"
] | 49
|
2020-02-06T17:53:35.000Z
|
2022-03-25T19:36:08.000Z
|
sdk/python/pulumi_keycloak/openid/user_client_role_protocol_mapper.py
|
davide-talesco/pulumi-keycloak
|
08d66be6f2bf578d4292e29eb6181794375bc4e5
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2020-06-09T01:08:56.000Z
|
2021-12-07T15:30:37.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['UserClientRoleProtocolMapperArgs', 'UserClientRoleProtocolMapper']
@pulumi.input_type
class UserClientRoleProtocolMapperArgs:
    """Input-argument bag for creating a ``UserClientRoleProtocolMapper`` resource.

    Generated by the Pulumi Terraform Bridge (tfgen); each property below is a
    thin accessor that stores/reads its value via ``pulumi.set``/``pulumi.get``
    under the snake_case key of the same name. ``claim_name`` and ``realm_id``
    are required; everything else is optional and only stored when not None.
    """
    def __init__(__self__, *,
                 claim_name: pulumi.Input[str],
                 realm_id: pulumi.Input[str],
                 add_to_access_token: Optional[pulumi.Input[bool]] = None,
                 add_to_id_token: Optional[pulumi.Input[bool]] = None,
                 add_to_userinfo: Optional[pulumi.Input[bool]] = None,
                 claim_value_type: Optional[pulumi.Input[str]] = None,
                 client_id: Optional[pulumi.Input[str]] = None,
                 client_id_for_role_mappings: Optional[pulumi.Input[str]] = None,
                 client_role_prefix: Optional[pulumi.Input[str]] = None,
                 client_scope_id: Optional[pulumi.Input[str]] = None,
                 multivalued: Optional[pulumi.Input[bool]] = None,
                 name: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a UserClientRoleProtocolMapper resource.
        :param pulumi.Input[str] claim_name: The name of the claim to insert into a token.
        :param pulumi.Input[str] realm_id: The realm this protocol mapper exists within.
        :param pulumi.Input[bool] add_to_access_token: Indicates if the property should be added as a claim to the access token. Defaults to `true`.
        :param pulumi.Input[bool] add_to_id_token: Indicates if the property should be added as a claim to the id token. Defaults to `true`.
        :param pulumi.Input[bool] add_to_userinfo: Indicates if the property should be added as a claim to the UserInfo response body. Defaults to `true`.
        :param pulumi.Input[str] claim_value_type: The claim type used when serializing JSON tokens. Can be one of `String`, `JSON`, `long`, `int`, or `boolean`. Defaults to `String`.
        :param pulumi.Input[str] client_id: The client this protocol mapper should be attached to. Conflicts with `client_scope_id`. One of `client_id` or `client_scope_id` must be specified.
        :param pulumi.Input[str] client_id_for_role_mappings: The Client ID for role mappings. Just client roles of this client will be added to the token. If this is unset, client roles of all clients will be added to the token.
        :param pulumi.Input[str] client_role_prefix: A prefix for each Client Role.
        :param pulumi.Input[str] client_scope_id: The client scope this protocol mapper should be attached to. Conflicts with `client_id`. One of `client_id` or `client_scope_id` must be specified.
        :param pulumi.Input[bool] multivalued: Indicates if attribute supports multiple values. If true, then the list of all values of this attribute will be set as claim. If false, then just first value will be set as claim. Defaults to `false`.
        :param pulumi.Input[str] name: The display name of this protocol mapper in the GUI.
        """
        # Required properties are always stored; optional ones only when provided,
        # so absent keys stay unset rather than being recorded as None.
        pulumi.set(__self__, "claim_name", claim_name)
        pulumi.set(__self__, "realm_id", realm_id)
        if add_to_access_token is not None:
            pulumi.set(__self__, "add_to_access_token", add_to_access_token)
        if add_to_id_token is not None:
            pulumi.set(__self__, "add_to_id_token", add_to_id_token)
        if add_to_userinfo is not None:
            pulumi.set(__self__, "add_to_userinfo", add_to_userinfo)
        if claim_value_type is not None:
            pulumi.set(__self__, "claim_value_type", claim_value_type)
        if client_id is not None:
            pulumi.set(__self__, "client_id", client_id)
        if client_id_for_role_mappings is not None:
            pulumi.set(__self__, "client_id_for_role_mappings", client_id_for_role_mappings)
        if client_role_prefix is not None:
            pulumi.set(__self__, "client_role_prefix", client_role_prefix)
        if client_scope_id is not None:
            pulumi.set(__self__, "client_scope_id", client_scope_id)
        if multivalued is not None:
            pulumi.set(__self__, "multivalued", multivalued)
        if name is not None:
            pulumi.set(__self__, "name", name)
    @property
    @pulumi.getter(name="claimName")
    def claim_name(self) -> pulumi.Input[str]:
        """
        The name of the claim to insert into a token.
        """
        return pulumi.get(self, "claim_name")
    @claim_name.setter
    def claim_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "claim_name", value)
    @property
    @pulumi.getter(name="realmId")
    def realm_id(self) -> pulumi.Input[str]:
        """
        The realm this protocol mapper exists within.
        """
        return pulumi.get(self, "realm_id")
    @realm_id.setter
    def realm_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "realm_id", value)
    @property
    @pulumi.getter(name="addToAccessToken")
    def add_to_access_token(self) -> Optional[pulumi.Input[bool]]:
        """
        Indicates if the property should be added as a claim to the access token. Defaults to `true`.
        """
        return pulumi.get(self, "add_to_access_token")
    @add_to_access_token.setter
    def add_to_access_token(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "add_to_access_token", value)
    @property
    @pulumi.getter(name="addToIdToken")
    def add_to_id_token(self) -> Optional[pulumi.Input[bool]]:
        """
        Indicates if the property should be added as a claim to the id token. Defaults to `true`.
        """
        return pulumi.get(self, "add_to_id_token")
    @add_to_id_token.setter
    def add_to_id_token(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "add_to_id_token", value)
    @property
    @pulumi.getter(name="addToUserinfo")
    def add_to_userinfo(self) -> Optional[pulumi.Input[bool]]:
        """
        Indicates if the property should be added as a claim to the UserInfo response body. Defaults to `true`.
        """
        return pulumi.get(self, "add_to_userinfo")
    @add_to_userinfo.setter
    def add_to_userinfo(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "add_to_userinfo", value)
    @property
    @pulumi.getter(name="claimValueType")
    def claim_value_type(self) -> Optional[pulumi.Input[str]]:
        """
        The claim type used when serializing JSON tokens. Can be one of `String`, `JSON`, `long`, `int`, or `boolean`. Defaults to `String`.
        """
        return pulumi.get(self, "claim_value_type")
    @claim_value_type.setter
    def claim_value_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "claim_value_type", value)
    @property
    @pulumi.getter(name="clientId")
    def client_id(self) -> Optional[pulumi.Input[str]]:
        """
        The client this protocol mapper should be attached to. Conflicts with `client_scope_id`. One of `client_id` or `client_scope_id` must be specified.
        """
        return pulumi.get(self, "client_id")
    @client_id.setter
    def client_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "client_id", value)
    @property
    @pulumi.getter(name="clientIdForRoleMappings")
    def client_id_for_role_mappings(self) -> Optional[pulumi.Input[str]]:
        """
        The Client ID for role mappings. Just client roles of this client will be added to the token. If this is unset, client roles of all clients will be added to the token.
        """
        return pulumi.get(self, "client_id_for_role_mappings")
    @client_id_for_role_mappings.setter
    def client_id_for_role_mappings(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "client_id_for_role_mappings", value)
    @property
    @pulumi.getter(name="clientRolePrefix")
    def client_role_prefix(self) -> Optional[pulumi.Input[str]]:
        """
        A prefix for each Client Role.
        """
        return pulumi.get(self, "client_role_prefix")
    @client_role_prefix.setter
    def client_role_prefix(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "client_role_prefix", value)
    @property
    @pulumi.getter
    def multivalued(self) -> Optional[pulumi.Input[bool]]:
        """
        Indicates if attribute supports multiple values. If true, then the list of all values of this attribute will be set as claim. If false, then just first value will be set as claim. Defaults to `false`.
        """
        return pulumi.get(self, "multivalued")
    @multivalued.setter
    def multivalued(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "multivalued", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The display name of this protocol mapper in the GUI.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
@pulumi.input_type
class _UserClientRoleProtocolMapperState:
    """State bag for looking up / filtering existing ``UserClientRoleProtocolMapper`` resources.

    Generated by the Pulumi Terraform Bridge (tfgen). Unlike the Args class,
    every field here is optional, since a lookup may filter on any subset of
    properties; values are stored via ``pulumi.set`` only when not None.
    """
    def __init__(__self__, *,
                 add_to_access_token: Optional[pulumi.Input[bool]] = None,
                 add_to_id_token: Optional[pulumi.Input[bool]] = None,
                 add_to_userinfo: Optional[pulumi.Input[bool]] = None,
                 claim_name: Optional[pulumi.Input[str]] = None,
                 claim_value_type: Optional[pulumi.Input[str]] = None,
                 client_id: Optional[pulumi.Input[str]] = None,
                 client_id_for_role_mappings: Optional[pulumi.Input[str]] = None,
                 client_role_prefix: Optional[pulumi.Input[str]] = None,
                 client_scope_id: Optional[pulumi.Input[str]] = None,
                 multivalued: Optional[pulumi.Input[bool]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 realm_id: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering UserClientRoleProtocolMapper resources.
        :param pulumi.Input[bool] add_to_access_token: Indicates if the property should be added as a claim to the access token. Defaults to `true`.
        :param pulumi.Input[bool] add_to_id_token: Indicates if the property should be added as a claim to the id token. Defaults to `true`.
        :param pulumi.Input[bool] add_to_userinfo: Indicates if the property should be added as a claim to the UserInfo response body. Defaults to `true`.
        :param pulumi.Input[str] claim_name: The name of the claim to insert into a token.
        :param pulumi.Input[str] claim_value_type: The claim type used when serializing JSON tokens. Can be one of `String`, `JSON`, `long`, `int`, or `boolean`. Defaults to `String`.
        :param pulumi.Input[str] client_id: The client this protocol mapper should be attached to. Conflicts with `client_scope_id`. One of `client_id` or `client_scope_id` must be specified.
        :param pulumi.Input[str] client_id_for_role_mappings: The Client ID for role mappings. Just client roles of this client will be added to the token. If this is unset, client roles of all clients will be added to the token.
        :param pulumi.Input[str] client_role_prefix: A prefix for each Client Role.
        :param pulumi.Input[str] client_scope_id: The client scope this protocol mapper should be attached to. Conflicts with `client_id`. One of `client_id` or `client_scope_id` must be specified.
        :param pulumi.Input[bool] multivalued: Indicates if attribute supports multiple values. If true, then the list of all values of this attribute will be set as claim. If false, then just first value will be set as claim. Defaults to `false`.
        :param pulumi.Input[str] name: The display name of this protocol mapper in the GUI.
        :param pulumi.Input[str] realm_id: The realm this protocol mapper exists within.
        """
        # Only store the filters the caller actually supplied.
        if add_to_access_token is not None:
            pulumi.set(__self__, "add_to_access_token", add_to_access_token)
        if add_to_id_token is not None:
            pulumi.set(__self__, "add_to_id_token", add_to_id_token)
        if add_to_userinfo is not None:
            pulumi.set(__self__, "add_to_userinfo", add_to_userinfo)
        if claim_name is not None:
            pulumi.set(__self__, "claim_name", claim_name)
        if claim_value_type is not None:
            pulumi.set(__self__, "claim_value_type", claim_value_type)
        if client_id is not None:
            pulumi.set(__self__, "client_id", client_id)
        if client_id_for_role_mappings is not None:
            pulumi.set(__self__, "client_id_for_role_mappings", client_id_for_role_mappings)
        if client_role_prefix is not None:
            pulumi.set(__self__, "client_role_prefix", client_role_prefix)
        if client_scope_id is not None:
            pulumi.set(__self__, "client_scope_id", client_scope_id)
        if multivalued is not None:
            pulumi.set(__self__, "multivalued", multivalued)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if realm_id is not None:
            pulumi.set(__self__, "realm_id", realm_id)
    @property
    @pulumi.getter(name="addToAccessToken")
    def add_to_access_token(self) -> Optional[pulumi.Input[bool]]:
        """
        Indicates if the property should be added as a claim to the access token. Defaults to `true`.
        """
        return pulumi.get(self, "add_to_access_token")
    @add_to_access_token.setter
    def add_to_access_token(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "add_to_access_token", value)
    @property
    @pulumi.getter(name="addToIdToken")
    def add_to_id_token(self) -> Optional[pulumi.Input[bool]]:
        """
        Indicates if the property should be added as a claim to the id token. Defaults to `true`.
        """
        return pulumi.get(self, "add_to_id_token")
    @add_to_id_token.setter
    def add_to_id_token(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "add_to_id_token", value)
    @property
    @pulumi.getter(name="addToUserinfo")
    def add_to_userinfo(self) -> Optional[pulumi.Input[bool]]:
        """
        Indicates if the property should be added as a claim to the UserInfo response body. Defaults to `true`.
        """
        return pulumi.get(self, "add_to_userinfo")
    @add_to_userinfo.setter
    def add_to_userinfo(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "add_to_userinfo", value)
    @property
    @pulumi.getter(name="claimName")
    def claim_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the claim to insert into a token.
        """
        return pulumi.get(self, "claim_name")
    @claim_name.setter
    def claim_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "claim_name", value)
    @property
    @pulumi.getter(name="claimValueType")
    def claim_value_type(self) -> Optional[pulumi.Input[str]]:
        """
        The claim type used when serializing JSON tokens. Can be one of `String`, `JSON`, `long`, `int`, or `boolean`. Defaults to `String`.
        """
        return pulumi.get(self, "claim_value_type")
    @claim_value_type.setter
    def claim_value_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "claim_value_type", value)
    @property
    @pulumi.getter(name="clientId")
    def client_id(self) -> Optional[pulumi.Input[str]]:
        """
        The client this protocol mapper should be attached to. Conflicts with `client_scope_id`. One of `client_id` or `client_scope_id` must be specified.
        """
        return pulumi.get(self, "client_id")
    @client_id.setter
    def client_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "client_id", value)
    @property
    @pulumi.getter(name="clientIdForRoleMappings")
    def client_id_for_role_mappings(self) -> Optional[pulumi.Input[str]]:
        """
        The Client ID for role mappings. Just client roles of this client will be added to the token. If this is unset, client roles of all clients will be added to the token.
        """
        return pulumi.get(self, "client_id_for_role_mappings")
    @client_id_for_role_mappings.setter
    def client_id_for_role_mappings(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "client_id_for_role_mappings", value)
    @property
    @pulumi.getter(name="clientRolePrefix")
    def client_role_prefix(self) -> Optional[pulumi.Input[str]]:
        """
        A prefix for each Client Role.
        """
        return pulumi.get(self, "client_role_prefix")
    @client_role_prefix.setter
    def client_role_prefix(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "client_role_prefix", value)
    @property
    @pulumi.getter(name="clientScopeId")
    def client_scope_id(self) -> Optional[pulumi.Input[str]]:
        """
        The client scope this protocol mapper should be attached to. Conflicts with `client_id`. One of `client_id` or `client_scope_id` must be specified.
        """
        return pulumi.get(self, "client_scope_id")
    @client_scope_id.setter
    def client_scope_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "client_scope_id", value)
    @property
    @pulumi.getter
    def multivalued(self) -> Optional[pulumi.Input[bool]]:
        """
        Indicates if attribute supports multiple values. If true, then the list of all values of this attribute will be set as claim. If false, then just first value will be set as claim. Defaults to `false`.
        """
        return pulumi.get(self, "multivalued")
    @multivalued.setter
    def multivalued(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "multivalued", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The display name of this protocol mapper in the GUI.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="realmId")
    def realm_id(self) -> Optional[pulumi.Input[str]]:
        """
        The realm this protocol mapper exists within.
        """
        return pulumi.get(self, "realm_id")
    @realm_id.setter
    def realm_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "realm_id", value)
class UserClientRoleProtocolMapper(pulumi.CustomResource):
    # Overload signature: construct the resource from individual keyword
    # arguments. The shared implementation lives in _internal_init; the body
    # here is intentionally `...`.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 add_to_access_token: Optional[pulumi.Input[bool]] = None,
                 add_to_id_token: Optional[pulumi.Input[bool]] = None,
                 add_to_userinfo: Optional[pulumi.Input[bool]] = None,
                 claim_name: Optional[pulumi.Input[str]] = None,
                 claim_value_type: Optional[pulumi.Input[str]] = None,
                 client_id: Optional[pulumi.Input[str]] = None,
                 client_id_for_role_mappings: Optional[pulumi.Input[str]] = None,
                 client_role_prefix: Optional[pulumi.Input[str]] = None,
                 client_scope_id: Optional[pulumi.Input[str]] = None,
                 multivalued: Optional[pulumi.Input[bool]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 realm_id: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Allows for creating and managing user client role protocol mappers within Keycloak.
        User client role protocol mappers allow you to define a claim containing the list of a client roles.
        Protocol mappers can be defined for a single client, or they can be defined for a client scope which can be shared between
        multiple different clients.
        ## Example Usage
        ### Client)
        ```python
        import pulumi
        import pulumi_keycloak as keycloak
        realm = keycloak.Realm("realm",
            realm="my-realm",
            enabled=True)
        openid_client = keycloak.openid.Client("openidClient",
            realm_id=realm.id,
            client_id="client",
            enabled=True,
            access_type="CONFIDENTIAL",
            valid_redirect_uris=["http://localhost:8080/openid-callback"])
        user_client_role_mapper = keycloak.openid.UserClientRoleProtocolMapper("userClientRoleMapper",
            realm_id=realm.id,
            client_id=openid_client.id,
            claim_name="foo")
        ```
        ### Client Scope)
        ```python
        import pulumi
        import pulumi_keycloak as keycloak
        realm = keycloak.Realm("realm",
            realm="my-realm",
            enabled=True)
        client_scope = keycloak.openid.ClientScope("clientScope", realm_id=realm.id)
        user_client_role_mapper = keycloak.openid.UserClientRoleProtocolMapper("userClientRoleMapper",
            realm_id=realm.id,
            client_scope_id=client_scope.id,
            claim_name="foo")
        ```
        ## Import
        Protocol mappers can be imported using one of the following formats- Client`{{realm_id}}/client/{{client_keycloak_id}}/{{protocol_mapper_id}}` - Client Scope`{{realm_id}}/client-scope/{{client_scope_keycloak_id}}/{{protocol_mapper_id}}` Examplebash
        ```sh
        $ pulumi import keycloak:openid/userClientRoleProtocolMapper:UserClientRoleProtocolMapper user_client_role_mapper my-realm/client/a7202154-8793-4656-b655-1dd18c181e14/71602afa-f7d1-4788-8c49-ef8fd00af0f4
        ```
        ```sh
        $ pulumi import keycloak:openid/userClientRoleProtocolMapper:UserClientRoleProtocolMapper user_client_role_mapper my-realm/client-scope/b799ea7e-73ee-4a73-990a-1eafebe8e20a/71602afa-f7d1-4788-8c49-ef8fd00af0f4
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[bool] add_to_access_token: Indicates if the property should be added as a claim to the access token. Defaults to `true`.
        :param pulumi.Input[bool] add_to_id_token: Indicates if the property should be added as a claim to the id token. Defaults to `true`.
        :param pulumi.Input[bool] add_to_userinfo: Indicates if the property should be added as a claim to the UserInfo response body. Defaults to `true`.
        :param pulumi.Input[str] claim_name: The name of the claim to insert into a token.
        :param pulumi.Input[str] claim_value_type: The claim type used when serializing JSON tokens. Can be one of `String`, `JSON`, `long`, `int`, or `boolean`. Defaults to `String`.
        :param pulumi.Input[str] client_id: The client this protocol mapper should be attached to. Conflicts with `client_scope_id`. One of `client_id` or `client_scope_id` must be specified.
        :param pulumi.Input[str] client_id_for_role_mappings: The Client ID for role mappings. Just client roles of this client will be added to the token. If this is unset, client roles of all clients will be added to the token.
        :param pulumi.Input[str] client_role_prefix: A prefix for each Client Role.
        :param pulumi.Input[str] client_scope_id: The client scope this protocol mapper should be attached to. Conflicts with `client_id`. One of `client_id` or `client_scope_id` must be specified.
        :param pulumi.Input[bool] multivalued: Indicates if attribute supports multiple values. If true, then the list of all values of this attribute will be set as claim. If false, then just first value will be set as claim. Defaults to `false`.
        :param pulumi.Input[str] name: The display name of this protocol mapper in the GUI.
        :param pulumi.Input[str] realm_id: The realm this protocol mapper exists within.
        """
        ...
    # Overload signature: construct the resource from a pre-built
    # UserClientRoleProtocolMapperArgs object. Implementation is in
    # _internal_init; the body here is intentionally `...`.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: UserClientRoleProtocolMapperArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Allows for creating and managing user client role protocol mappers within Keycloak.
        User client role protocol mappers allow you to define a claim containing the list of a client roles.
        Protocol mappers can be defined for a single client, or they can be defined for a client scope which can be shared between
        multiple different clients.
        ## Example Usage
        ### Client)
        ```python
        import pulumi
        import pulumi_keycloak as keycloak
        realm = keycloak.Realm("realm",
            realm="my-realm",
            enabled=True)
        openid_client = keycloak.openid.Client("openidClient",
            realm_id=realm.id,
            client_id="client",
            enabled=True,
            access_type="CONFIDENTIAL",
            valid_redirect_uris=["http://localhost:8080/openid-callback"])
        user_client_role_mapper = keycloak.openid.UserClientRoleProtocolMapper("userClientRoleMapper",
            realm_id=realm.id,
            client_id=openid_client.id,
            claim_name="foo")
        ```
        ### Client Scope)
        ```python
        import pulumi
        import pulumi_keycloak as keycloak
        realm = keycloak.Realm("realm",
            realm="my-realm",
            enabled=True)
        client_scope = keycloak.openid.ClientScope("clientScope", realm_id=realm.id)
        user_client_role_mapper = keycloak.openid.UserClientRoleProtocolMapper("userClientRoleMapper",
            realm_id=realm.id,
            client_scope_id=client_scope.id,
            claim_name="foo")
        ```
        ## Import
        Protocol mappers can be imported using one of the following formats- Client`{{realm_id}}/client/{{client_keycloak_id}}/{{protocol_mapper_id}}` - Client Scope`{{realm_id}}/client-scope/{{client_scope_keycloak_id}}/{{protocol_mapper_id}}` Examplebash
        ```sh
        $ pulumi import keycloak:openid/userClientRoleProtocolMapper:UserClientRoleProtocolMapper user_client_role_mapper my-realm/client/a7202154-8793-4656-b655-1dd18c181e14/71602afa-f7d1-4788-8c49-ef8fd00af0f4
        ```
        ```sh
        $ pulumi import keycloak:openid/userClientRoleProtocolMapper:UserClientRoleProtocolMapper user_client_role_mapper my-realm/client-scope/b799ea7e-73ee-4a73-990a-1eafebe8e20a/71602afa-f7d1-4788-8c49-ef8fd00af0f4
        ```
        :param str resource_name: The name of the resource.
        :param UserClientRoleProtocolMapperArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        """Runtime entry point that dispatches to ``_internal_init`` for either overload."""
        # _utilities decides whether the caller passed an args object or loose kwargs.
        resource_args, opts = _utilities.get_resource_args_opts(UserClientRoleProtocolMapperArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            # Args-object form: expand its stored fields into keyword arguments.
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            # Keyword-argument form: forward everything unchanged.
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       add_to_access_token: Optional[pulumi.Input[bool]] = None,
                       add_to_id_token: Optional[pulumi.Input[bool]] = None,
                       add_to_userinfo: Optional[pulumi.Input[bool]] = None,
                       claim_name: Optional[pulumi.Input[str]] = None,
                       claim_value_type: Optional[pulumi.Input[str]] = None,
                       client_id: Optional[pulumi.Input[str]] = None,
                       client_id_for_role_mappings: Optional[pulumi.Input[str]] = None,
                       client_role_prefix: Optional[pulumi.Input[str]] = None,
                       client_scope_id: Optional[pulumi.Input[str]] = None,
                       multivalued: Optional[pulumi.Input[bool]] = None,
                       name: Optional[pulumi.Input[str]] = None,
                       realm_id: Optional[pulumi.Input[str]] = None,
                       __props__=None):
        # Shared implementation behind both __init__ overloads: validate the
        # resource options, build the property bag, and register the resource.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: __props__ is reserved for the opts.id
            # (get-existing) path, per the error message below.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            # __new__ bypasses UserClientRoleProtocolMapperArgs.__init__; the
            # property dict is populated directly.
            __props__ = UserClientRoleProtocolMapperArgs.__new__(UserClientRoleProtocolMapperArgs)
            __props__.__dict__["add_to_access_token"] = add_to_access_token
            __props__.__dict__["add_to_id_token"] = add_to_id_token
            __props__.__dict__["add_to_userinfo"] = add_to_userinfo
            # claim_name and realm_id are required unless resolving by URN.
            if claim_name is None and not opts.urn:
                raise TypeError("Missing required property 'claim_name'")
            __props__.__dict__["claim_name"] = claim_name
            __props__.__dict__["claim_value_type"] = claim_value_type
            __props__.__dict__["client_id"] = client_id
            __props__.__dict__["client_id_for_role_mappings"] = client_id_for_role_mappings
            __props__.__dict__["client_role_prefix"] = client_role_prefix
            __props__.__dict__["client_scope_id"] = client_scope_id
            __props__.__dict__["multivalued"] = multivalued
            __props__.__dict__["name"] = name
            if realm_id is None and not opts.urn:
                raise TypeError("Missing required property 'realm_id'")
            __props__.__dict__["realm_id"] = realm_id
        super(UserClientRoleProtocolMapper, __self__).__init__(
            'keycloak:openid/userClientRoleProtocolMapper:UserClientRoleProtocolMapper',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            add_to_access_token: Optional[pulumi.Input[bool]] = None,
            add_to_id_token: Optional[pulumi.Input[bool]] = None,
            add_to_userinfo: Optional[pulumi.Input[bool]] = None,
            claim_name: Optional[pulumi.Input[str]] = None,
            claim_value_type: Optional[pulumi.Input[str]] = None,
            client_id: Optional[pulumi.Input[str]] = None,
            client_id_for_role_mappings: Optional[pulumi.Input[str]] = None,
            client_role_prefix: Optional[pulumi.Input[str]] = None,
            client_scope_id: Optional[pulumi.Input[str]] = None,
            multivalued: Optional[pulumi.Input[bool]] = None,
            name: Optional[pulumi.Input[str]] = None,
            realm_id: Optional[pulumi.Input[str]] = None) -> 'UserClientRoleProtocolMapper':
        """
        Get an existing UserClientRoleProtocolMapper resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[bool] add_to_access_token: Indicates if the property should be added as a claim to the access token. Defaults to `true`.
        :param pulumi.Input[bool] add_to_id_token: Indicates if the property should be added as a claim to the id token. Defaults to `true`.
        :param pulumi.Input[bool] add_to_userinfo: Indicates if the property should be added as a claim to the UserInfo response body. Defaults to `true`.
        :param pulumi.Input[str] claim_name: The name of the claim to insert into a token.
        :param pulumi.Input[str] claim_value_type: The claim type used when serializing JSON tokens. Can be one of `String`, `JSON`, `long`, `int`, or `boolean`. Defaults to `String`.
        :param pulumi.Input[str] client_id: The client this protocol mapper should be attached to. Conflicts with `client_scope_id`. One of `client_id` or `client_scope_id` must be specified.
        :param pulumi.Input[str] client_id_for_role_mappings: The Client ID for role mappings. Just client roles of this client will be added to the token. If this is unset, client roles of all clients will be added to the token.
        :param pulumi.Input[str] client_role_prefix: A prefix for each Client Role.
        :param pulumi.Input[str] client_scope_id: The client scope this protocol mapper should be attached to. Conflicts with `client_id`. One of `client_id` or `client_scope_id` must be specified.
        :param pulumi.Input[bool] multivalued: Indicates if attribute supports multiple values. If true, then the list of all values of this attribute will be set as claim. If false, then just first value will be set as claim. Defaults to `false`.
        :param pulumi.Input[str] name: The display name of this protocol mapper in the GUI.
        :param pulumi.Input[str] realm_id: The realm this protocol mapper exists within.
        """
        # Merging the provider ID into opts makes the constructor perform a
        # lookup of the existing resource rather than a create.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # __new__ bypasses the state class's __init__; fields are injected directly.
        __props__ = _UserClientRoleProtocolMapperState.__new__(_UserClientRoleProtocolMapperState)
        __props__.__dict__["add_to_access_token"] = add_to_access_token
        __props__.__dict__["add_to_id_token"] = add_to_id_token
        __props__.__dict__["add_to_userinfo"] = add_to_userinfo
        __props__.__dict__["claim_name"] = claim_name
        __props__.__dict__["claim_value_type"] = claim_value_type
        __props__.__dict__["client_id"] = client_id
        __props__.__dict__["client_id_for_role_mappings"] = client_id_for_role_mappings
        __props__.__dict__["client_role_prefix"] = client_role_prefix
        __props__.__dict__["client_scope_id"] = client_scope_id
        __props__.__dict__["multivalued"] = multivalued
        __props__.__dict__["name"] = name
        __props__.__dict__["realm_id"] = realm_id
        return UserClientRoleProtocolMapper(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="addToAccessToken")
def add_to_access_token(self) -> pulumi.Output[Optional[bool]]:
    """Whether the mapped claim is written into the access token. Defaults to `true`."""
    return pulumi.get(self, "add_to_access_token")
@property
@pulumi.getter(name="addToIdToken")
def add_to_id_token(self) -> pulumi.Output[Optional[bool]]:
    """Whether the mapped claim is written into the id token. Defaults to `true`."""
    value = pulumi.get(self, "add_to_id_token")
    return value
@property
@pulumi.getter(name="addToUserinfo")
def add_to_userinfo(self) -> pulumi.Output[Optional[bool]]:
    """Whether the mapped claim is included in the UserInfo response body. Defaults to `true`."""
    return pulumi.get(self, "add_to_userinfo")
@property
@pulumi.getter(name="claimName")
def claim_name(self) -> pulumi.Output[str]:
    """Name under which the claim is inserted into the token."""
    value = pulumi.get(self, "claim_name")
    return value
@property
@pulumi.getter(name="claimValueType")
def claim_value_type(self) -> pulumi.Output[Optional[str]]:
    """
    Claim type used when serializing JSON tokens: one of `String`, `JSON`,
    `long`, `int`, or `boolean`. Defaults to `String`.
    """
    return pulumi.get(self, "claim_value_type")
@property
@pulumi.getter(name="clientId")
def client_id(self) -> pulumi.Output[Optional[str]]:
    """
    Client this protocol mapper is attached to. Conflicts with `client_scope_id`;
    exactly one of `client_id` or `client_scope_id` must be specified.
    """
    return pulumi.get(self, "client_id")
@property
@pulumi.getter(name="clientIdForRoleMappings")
def client_id_for_role_mappings(self) -> pulumi.Output[Optional[str]]:
    """
    Client ID used for role mappings: only this client's roles are added to the
    token. When unset, client roles from all clients are added instead.
    """
    value = pulumi.get(self, "client_id_for_role_mappings")
    return value
@property
@pulumi.getter(name="clientRolePrefix")
def client_role_prefix(self) -> pulumi.Output[Optional[str]]:
    """Prefix prepended to each Client Role."""
    return pulumi.get(self, "client_role_prefix")
@property
@pulumi.getter(name="clientScopeId")
def client_scope_id(self) -> pulumi.Output[Optional[str]]:
    """
    Client scope this protocol mapper is attached to. Conflicts with `client_id`;
    exactly one of `client_id` or `client_scope_id` must be specified.
    """
    value = pulumi.get(self, "client_scope_id")
    return value
@property
@pulumi.getter
def multivalued(self) -> pulumi.Output[Optional[bool]]:
    """
    Whether the attribute supports multiple values. When true the full list of
    values becomes the claim; when false only the first value is used.
    Defaults to `false`.
    """
    return pulumi.get(self, "multivalued")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
    """Display name shown for this protocol mapper in the GUI."""
    value = pulumi.get(self, "name")
    return value
@property
@pulumi.getter(name="realmId")
def realm_id(self) -> pulumi.Output[str]:
    """Realm in which this protocol mapper exists."""
    return pulumi.get(self, "realm_id")
| 49.863226
| 256
| 0.669858
| 5,039
| 38,644
| 4.892439
| 0.048224
| 0.071391
| 0.060196
| 0.058898
| 0.921713
| 0.913317
| 0.906624
| 0.89555
| 0.891859
| 0.88687
| 0
| 0.005705
| 0.23349
| 38,644
| 774
| 257
| 49.927649
| 0.826576
| 0.410491
| 0
| 0.82801
| 1
| 0
| 0.110585
| 0.022861
| 0
| 0
| 0
| 0
| 0
| 1
| 0.164619
| false
| 0.002457
| 0.012285
| 0
| 0.275184
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
ed73635fb2a0a85b7ba2ea9aff17cd56128b08d0
| 99,028
|
py
|
Python
|
python/getXSection.py
|
lucaswiens/Susy1LeptonSkimmer
|
ab491c9ae8affb6143a17f9a24a2cd82f999eebb
|
[
"BSD-3-Clause"
] | null | null | null |
python/getXSection.py
|
lucaswiens/Susy1LeptonSkimmer
|
ab491c9ae8affb6143a17f9a24a2cd82f999eebb
|
[
"BSD-3-Clause"
] | null | null | null |
python/getXSection.py
|
lucaswiens/Susy1LeptonSkimmer
|
ab491c9ae8affb6143a17f9a24a2cd82f999eebb
|
[
"BSD-3-Clause"
] | null | null | null |
def GetXSection(fileName): #[pb]
#Cross Section derived from sample name using https://cms-gen-dev.cern.ch/xsdb/
#TODO UL QCD files have PSWeights in their name, but xsdb does not include it in its name
fileName = fileName.replace("PSWeights_", "")
#TODO: UL Single Top xSection only defined for filename without inclusive decays specified
fileName = fileName.replace("5f_InclusiveDecays_", "5f_")
#TODO: UL tZq_ll_4f_ckm_NLO_TuneCP5_13TeV-amcatnlo-pythia8 only defined with PSWeights...
fileName = fileName.replace("tZq_ll_4f_ckm_NLO_TuneCP5_13TeV-amcatnlo-pythia8", "tZq_ll_4f_ckm_NLO_TuneCP5_PSweights_13TeV-amcatnlo-pythia8")
if fileName.find("ST_t-channel_antitop_4f_InclusiveDecays_mtop1695_TuneCP5_PSweights_13TeV-powheg-madspin-pythia8") !=-1 : return 70.89
elif fileName.find("ST_t-channel_antitop_4f_InclusiveDecays_mtop1735_TuneCP5_PSweights_13TeV-powheg-madspin-pythia8") !=-1 : return 68.45
elif fileName.find("ST_t-channel_antitop_4f_InclusiveDecays_mtop1715_TuneCP5_PSweights_13TeV-powheg-madspin-pythia8") !=-1 : return 69.66
elif fileName.find("ST_t-channel_top_4f_InclusiveDecays_mtop1715_TuneCP5_PSweights_13TeV-powheg-madspin-pythia8") !=-1 : return 116.1
elif fileName.find("ST_t-channel_top_4f_InclusiveDecays_mtop1755_TuneCP5_PSweights_13TeV-powheg-madspin-pythia8") !=-1 : return 112.5
elif fileName.find("ST_t-channel_top_4f_InclusiveDecays_mtop1695_TuneCP5_PSweights_13TeV-powheg-madspin-pythia8") !=-1 : return 118.0
elif fileName.find("ST_s-channel_4f_leptonDecays_TuneCP5_PSweights_13TeV-amcatnlo-pythia8_correctnPartonsInBorn") !=-1 : return 3.74
elif fileName.find("ST_t-channel_top_4f_InclusiveDecays_mtop1735_TuneCP5_PSweights_13TeV-powheg-madspin-pythia8") !=-1 : return 114.4
elif fileName.find("ST_t-channel_antitop_4f_hdampdown_InclusiveDecays_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 67.91
elif fileName.find("ST_tW_antitop_5f_hdampdown_NoFullyHadronicDecays_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 34.99
elif fileName.find("ST_t-channel_antitop_4f_hdampdown_InclusiveDecays_TuneCP5_13TeV-powheg-madspin-pythia8") !=-1 : return 69.09
elif fileName.find("ST_t-channel_antitop_4f_hdampup_InclusiveDecays_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 67.91
elif fileName.find("ST_tW_antitop_5f_hdampup_NoFullyHadronicDecays_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 34.96
elif fileName.find("DYJetsToLL_BGenFilter_Zpt-100to200_M-50_TuneCP5_13TeV-madgraphMLM-pythia8_newgridpack") !=-1 : return 2.678
elif fileName.find("DYJetsToLL_BGenFilter_Zpt-200toInf_M-50_TuneCP5_13TeV-madgraphMLM-pythia8_newgridpack") !=-1 : return 0.3909
elif fileName.find("ST_t-channel_antitop_4f_InclusiveDecays_TuneCP5_erdON_PSweights_13TeV-powheg-pythia8") !=-1 : return 67.91
elif fileName.find("ST_t-channel_antitop_5f_TuneCP5_PSweights_13TeV-powheg-madspin-pythia8_vtd_vts_decay") !=-1 : return 82.52
elif fileName.find("ST_t-channel_antitop_4f_hdampup_InclusiveDecays_TuneCP5_13TeV-powheg-madspin-pythia8") !=-1 : return 69.09
elif fileName.find("ST_t-channel_top_4f_hdampdown_InclusiveDecays_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 113.3
elif fileName.find("ST_tW_top_5f_hdampdown_NoFullyHadronicDecays_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 34.92
elif fileName.find("ST_tW_antitop_5f_NoFullyHadronicDecays_TuneCP5_erdON_PSweights_13TeV-powheg-pythia8") !=-1 : return 34.97
elif fileName.find("ST_t-channel_antitop_5f_TuneCP5_PSweights_13TeV-powheg-madspin-pythia8_vtd_vts_prod") !=-1 : return 225.5
elif fileName.find("ST_t-channel_antitop_4f_InclusiveDecays_TuneCP5down_PSweights_13TeV-powheg-pythia8") !=-1 : return 67.91
elif fileName.find("ST_t-channel_top_4f_hdampup_InclusiveDecays_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 113.3
elif fileName.find("ST_t-channel_top_4f_hdampdown_InclusiveDecays_TuneCP5_13TeV-powheg-madspin-pythia8") !=-1 : return 115.3
elif fileName.find("ST_t-channel_antitop_4f_InclusiveDecays_TuneCP5_erdON_13TeV-powheg-madspin-pythia8") !=-1 : return 69.09
elif fileName.find("ST_tW_top_5f_hdampup_NoFullyHadronicDecays_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 34.9
elif fileName.find("DYJetsToTauTau_ForcedMuEleDecay_M-50_TuneCP5_PSweights_13TeV-amcatnloFXFX-pythia8") !=-1 : return 6505.0
elif fileName.find("ST_t-channel_antitop_4f_InclusiveDecays_TuneCP5CR1_PSweights_13TeV-powheg-pythia8") !=-1 : return 67.91
elif fileName.find("ST_tW_antitop_5f_NoFullyHadronicDecays_TuneCP5down_PSweights_13TeV-powheg-pythia8") !=-1 : return 34.97
elif fileName.find("DYJetsToTauTau_ForcedMuEleDecay_M-50_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8_ext1") !=-1 : return 1991.0
elif fileName.find("ST_t-channel_antitop_4f_InclusiveDecays_TuneCP5CR2_PSweights_13TeV-powheg-pythia8") !=-1 : return 67.91
elif fileName.find("ST_tW_antitop_5f_inclusiveDecays_mtop1695_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 36.65
elif fileName.find("ST_tW_antitop_5f_inclusiveDecays_mtop1735_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 34.44
elif fileName.find("ST_t-channel_top_4f_hdampup_InclusiveDecays_TuneCP5_13TeV-powheg-madspin-pythia8") !=-1 : return 115.3
elif fileName.find("ST_tW_antitop_5f_NoFullyHadronicDecays_TuneCP5CR1_PSweights_13TeV-powheg-pythia8") !=-1 : return 34.97
elif fileName.find("ST_t-channel_antitop_4f_InclusiveDecays_TuneCP5up_PSweights_13TeV-powheg-pythia8") !=-1 : return 67.91
elif fileName.find("ST_t-channel_top_5f_TuneCP5_PSweights_13TeV-powheg-madspin-pythia8_vtd_vts_decay") !=-1 : return 138.1
elif fileName.find("ST_tW_antitop_5f_inclusiveDecays_mtop1755_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 33.4
elif fileName.find("ST_t-channel_top_4f_InclusiveDecays_TuneCP5_erdON_PSweights_13TeV-powheg-pythia8") !=-1 : return 113.3
elif fileName.find("ST_tW_antitop_5f_inclusiveDecays_mtop1715_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 35.52
elif fileName.find("ST_t-channel_antitop_4f_InclusiveDecays_TuneCP5down_13TeV-powheg-madspin-pythia8") !=-1 : return 69.09
elif fileName.find("ST_tW_antitop_5f_NoFullyHadronicDecays_TuneCP5CR2_PSweights_13TeV-powheg-pythia8") !=-1 : return 34.97
elif fileName.find("ST_tW_antitop_5f_DS_NoFullyHadronicDecays_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 35.25
elif fileName.find("ST_t-channel_antitop_4f_InclusiveDecays_TuneCP5CR2_13TeV-powheg-madspin-pythia8") !=-1 : return 69.09
elif fileName.find("ST_t-channel_top_5f_TuneCP5_PSweights_13TeV-powheg-madspin-pythia8_vtd_vts_prod") !=-1 : return 547.2
elif fileName.find("TTZToLLNuNu_M-10_TuneCP5_PSweights_correctnPartonsInBorn_13TeV-amcatnlo-pythia8") !=-1 : return 0.2432
elif fileName.find("ST_tW_top_5f_NoFullyHadronicDecays_TuneCP5_erdON_PSweights_13TeV-powheg-pythia8") !=-1 : return 34.91
elif fileName.find("ST_tW_antitop_5f_NoFullyHadronicDecays_TuneCP5up_PSweights_13TeV-powheg-pythia8") !=-1 : return 34.97
elif fileName.find("ST_t-channel_antitop_4f_InclusiveDecays_TuneCP5CR1_13TeV-powheg-madspin-pythia8") !=-1 : return 69.09
elif fileName.find("DYJetsToLL_BGenFilter_Zpt-200toInf_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8") !=-1 : return 0.5082
elif fileName.find("ST_t-channel_top_4f_InclusiveDecays_TuneCP5_erdON_13TeV-powheg-madspin-pythia8") !=-1 : return 115.3
elif fileName.find("TTWJetsToLNu_TuneCP5CR2_GluonMove_PSweights_13TeV-amcatnloFXFX-madspin-pythia8") !=-1 : return 0.2183
elif fileName.find("ST_s-channel_4f_leptonDecays_mtop1715_TuneCP5_PSweights_13TeV-amcatnlo-pythia8") !=-1 : return 3.824
elif fileName.find("ST_s-channel_4f_leptonDecays_mtop1755_TuneCP5_PSweights_13TeV-amcatnlo-pythia8") !=-1 : return 3.506
elif fileName.find("ST_s-channel_4f_leptonDecays_mtop1695_TuneCP5_PSweights_13TeV-amcatnlo-pythia8") !=-1 : return 3.991
elif fileName.find("DYJetsToLL_BGenFilter_Zpt-100to200_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8") !=-1 : return 3.41
elif fileName.find("ST_t-channel_antitop_4f_InclusiveDecays_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 67.91
elif fileName.find("ST_t-channel_antitop_4f_inclusiveDecays_TuneCP5_13TeV-powhegV2-madspin-pythia8") !=-1 : return 67.91
elif fileName.find("ST_t-channel_antitop_4f_InclusiveDecays_TuneCP5up_13TeV-powheg-madspin-pythia8") !=-1 : return 69.09
elif fileName.find("ST_t-channel_top_4f_InclusiveDecays_TuneCP5down_PSweights_13TeV-powheg-pythia8") !=-1 : return 113.3
elif fileName.find("ST_s-channel_4f_leptonDecays_mtop1735_TuneCP5_PSweights_13TeV-amcatnlo-pythia8") !=-1 : return 3.653
elif fileName.find("TTWJetsToLNu_TuneCP5CR1_QCDbased_PSweights_13TeV-amcatnloFXFX-madspin-pythia8") !=-1 : return 0.2169
elif fileName.find("ST_tW_top_5f_NoFullyHadronicDecays_TuneCP5down_PSweights_13TeV-powheg-pythia8") !=-1 : return 34.91
elif fileName.find("ST_t-channel_top_4f_InclusiveDecays_TuneCP5CR1_PSweights_13TeV-powheg-pythia8") !=-1 : return 113.3
elif fileName.find("ST_tW_antitop_5f_hdampdown_NoFullyHadronicDecays_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 34.98
elif fileName.find("ST_tW_antitop_5f_NoFullyHadronicDecays_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 34.97
elif fileName.find("ST_t-channel_top_4f_InclusiveDecays_TuneCP5CR2_PSweights_13TeV-powheg-pythia8") !=-1 : return 113.3
elif fileName.find("ST_t-channel_antitop_4f_InclusiveDecays_TuneCP5_13TeV-powheg-madspin-pythia8") !=-1 : return 69.09
elif fileName.find("ST_tW_antitop_5f_NoFullyHadronicDecays_mtop1715_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 36.65
elif fileName.find("ST_tW_top_5f_inclusiveDecays_mtop1695_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 36.58
elif fileName.find("ST_tW_top_5f_NoFullyHadronicDecays_TuneCP5CR2_PSweights_13TeV-powheg-pythia8") !=-1 : return 34.91
elif fileName.find("ST_tW_top_5f_NoFullyHadronicDecays_TuneCP5CR1_PSweights_13TeV-powheg-pythia8") !=-1 : return 34.91
elif fileName.find("ST_tW_antitop_5f_NoFullyHadronicDecays_mtop1735_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 36.65
elif fileName.find("ST_t-channel_top_4f_InclusiveDecays_TuneCP5up_PSweights_13TeV-powheg-pythia8") !=-1 : return 113.3
elif fileName.find("DYJetsToLL_Zpt-200toInf_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8_TuneDown") !=-1 : return 6.714
elif fileName.find("DYJetsToTauTau_ForcedMuEleDecay_M-50_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8") !=-1 : return 1981.0
elif fileName.find("ST_tW_top_5f_inclusiveDecays_mtop1735_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 34.38
elif fileName.find("ST_tW_antitop_5f_NoFullyHadronicDecays_mtop1695_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 36.65
elif fileName.find("ST_tW_top_5f_DS_NoFullyHadronicDecays_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 33.75
elif fileName.find("ST_tW_antitop_5f_NoFullyHadronicDecays_mtop1755_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 36.65
elif fileName.find("ST_tW_top_5f_inclusiveDecays_mtop1715_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 35.46
elif fileName.find("DYJetsToLL_M-105To160_VBFFilter_TuneCP5_PSweights_13TeV-amcatnloFXFX-pythia8") !=-1 : return 2.01
elif fileName.find("ST_t-channel_top_4f_InclusiveDecays_TuneCP5down_13TeV-powheg-madspin-pythia8") !=-1 : return 115.3
elif fileName.find("ST_tW_top_5f_inclusiveDecays_mtop1755_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 33.34
elif fileName.find("DYJetsToLL_M-105To160_VBFFilter_TuneCP5_PSweights_13TeV-madgraphMLM-pythia8") !=-1 : return 1.526
elif fileName.find("DYBJetsToLL_M-50_Zpt-200toInf_TuneCP5_13TeV-madgraphMLM-pythia8_newgridpack") !=-1 : return 0.3282
elif fileName.find("ST_tW_antitop_5f_inclusiveDecays_TuneCP5down_PSweights_13TeV-powheg-pythia8") !=-1 : return 34.97
elif fileName.find("ST_t-channel_top_4f_InclusiveDecays_TuneCP5CR2_13TeV-powheg-madspin-pythia8") !=-1 : return 115.3
elif fileName.find("ST_t-channel_eleDecays_anomwtbLVRV_LVRVunphys_13TeV-comphep-pythia8_TuneCP5") !=-1 : return 47.81
elif fileName.find("ST_tW_antitop_5f_hdampup_NoFullyHadronicDecays_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 34.97
elif fileName.find("ST_t-channel_tauDecays_anomwtbLVRV_LVRVunphys_13TeV-comphep-pythia8_TuneCP5") !=-1 : return 47.69
elif fileName.find("DYBJetsToLL_M-50_Zpt-100to200_TuneCP5_13TeV-madgraphMLM-pythia8_newgridpack") !=-1 : return 3.21
elif fileName.find("ST_t-channel_tauDecays_anomwtbLVLT_LVLTunphys_13TeV-comphep-pythia8_TuneCP5") !=-1 : return 84.72
elif fileName.find("ST_tW_top_5f_NoFullyHadronicDecays_TuneCP5up_PSweights_13TeV-powheg-pythia8") !=-1 : return 34.91
elif fileName.find("ST_t-channel_top_4f_InclusiveDecays_TuneCP5CR1_13TeV-powheg-madspin-pythia8") !=-1 : return 115.3
elif fileName.find("DYJetsToLL_M-4to50_HT-100to200_TuneCP5_PSweights_13TeV-madgraphMLM-pythia8") !=-1 : return 203.3
elif fileName.find("DYJetsToLL_Zpt-200toInf_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8_TuneUp") !=-1 : return 6.716
elif fileName.find("TTToSemiLepton_HT500Njet9_hdampDOWN_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 3.884
elif fileName.find("ST_tW_antitop_5f_DS_inclusiveDecays_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 35.25
elif fileName.find("DYJetsToLL_M-4to50_HT-600toInf_TuneCP5_PSWeights_13TeV-madgraphMLM-pythia8") !=-1 : return 1.837
elif fileName.find("ST_t-channel_muDecays_anomwtbLVLT_LVLTunphys_13TeV-comphep-pythia8_TuneCP5") !=-1 : return 84.86
elif fileName.find("DYJetsToLL_M-4to50_HT-200to400_TuneCP5_PSweights_13TeV-madgraphMLM-pythia8") !=-1 : return 54.31
elif fileName.find("ST_t-channel_top_4f_inclusiveDecays_TuneCP5_13TeV-powhegV2-madspin-pythia8") !=-1 : return 113.3
elif fileName.find("ST_t-channel_top_4f_InclusiveDecays_TuneCP5up_13TeV-powheg-madspin-pythia8") !=-1 : return 115.3
elif fileName.find("ST_t-channel_antitop_5f_TuneCP5_13TeV-powheg-madspin-pythia8_vtd_vts_decay") !=-1 : return 82.52
elif fileName.find("ST_t-channel_top_4f_InclusiveDecays_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 113.3
elif fileName.find("ST_t-channel_muDecays_anomwtbLVRV_LVRVunphys_13TeV-comphep-pythia8_TuneCP5") !=-1 : return 47.63
elif fileName.find("DYJetsToLL_CGenFilter_Zpt-200toInf_M-50_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 3.184
elif fileName.find("DYJetsToLL_M-4to50_HT-70to100_TuneCP5_PSweights_13TeV-madgraphMLM-pythia8") !=-1 : return 146.6
elif fileName.find("DYJetsToLL_M-50_HT-1200to2500_TuneCP5_PSweights_13TeV-madgraphMLM-pythia8") !=-1 : return 0.1937
elif fileName.find("ST_s-channel_4f_leptonDecays_TuneCP5down_PSweights_13TeV-amcatnlo-pythia8") !=-1 : return 3.74
elif fileName.find("ST_t-channel_eDecays_anomwtbLVLT_LVLTunphys_13TeV-comphep-pythia8_TuneCP5") !=-1 : return 84.84
elif fileName.find("DYJetsToLL_BGenFilter_Zpt-200toInf_M-50_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 0.5327
elif fileName.find("DYJetsToLL_BGenFilter_Zpt-100to200_M-50_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 3.384
elif fileName.find("ST_tW_top_5f_hdampdown_NoFullyHadronicDecays_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 34.91
elif fileName.find("DYJetsToLL_CGenFilter_Zpt-100to200_M-50_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 25.86
elif fileName.find("ST_tW_antitop_5f_NoFullyHadronicDecays_TuneCP5_erdON_13TeV-powheg-pythia8") !=-1 : return 34.97
elif fileName.find("ST_t-channel_antitop_5f_TuneCP5_13TeV-powheg-madspin-pythia8_vtd_vts_prod") !=-1 : return 225.5
elif fileName.find("ST_tW_antitop_5f_inclusiveDecays_TuneCP5up_PSweights_13TeV-powheg-pythia8") !=-1 : return 34.97
elif fileName.find("DYJetsToTauTau_ForcedMuDecay_M-50_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8") !=-1 : return 1990.0
elif fileName.find("ST_tW_top_5f_NoFullyHadronicDecays_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 34.91
elif fileName.find("ST_tW_top_5f_NoFullyHadronicDecays_mtop1695_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 36.65
elif fileName.find("ST_tW_top_5f_NoFullyHadronicDecays_mtop1715_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 36.65
elif fileName.find("TTToSemiLepton_HT500Njet9_hdampUP_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 4.613
elif fileName.find("DYJetsToLL_M-50_HT-800to1200_TuneCP5_PSweights_13TeV-madgraphMLM-pythia8") !=-1 : return 0.8021
elif fileName.find("DYJetsToLL_M-50_HT-2500toInf_TuneCP5_PSweights_13TeV-madgraphMLM-pythia8") !=-1 : return 0.003514
elif fileName.find("ST_s-channel_4f_leptonDecays_TuneCP5CR2_PSweights_13TeV-amcatnlo-pythia8") !=-1 : return 3.74
elif fileName.find("ST_tW_top_5f_NoFullyHadronicDecays_mtop1735_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 36.65
elif fileName.find("ST_s-channel_4f_leptonDecays_TuneCP5CR1_PSweights_13TeV-amcatnlo-pythia8") !=-1 : return 3.74
elif fileName.find("ST_t-channel_top_4f_InclusiveDecays_TuneCP5_13TeV-powheg-madspin-pythia8") !=-1 : return 115.3
elif fileName.find("ST_tW_top_5f_NoFullyHadronicDecays_mtop1755_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 36.65
elif fileName.find("TTToSemilepton_ttbbFilter_TuneCUETP8M2_ttHtranche3_13TeV-powheg-pythia8") !=-1 : return 31.06
elif fileName.find("ST_s-channel_4f_leptonDecays_TuneCP5up_PSweights_13TeV-amcatnlo-pythia8") !=-1 : return 3.74
elif fileName.find("ST_t-channel_tauDecays_anomwtbLVRT_LV2RT2_13TeV-comphep-pythia8_TuneCP5") !=-1 : return 102.2
elif fileName.find("DYJetsToEE_M-50_LTbinned_800To2000_5f_LO_TuneCP5_13TeV-madgraph-pythia8") !=-1 : return 0.01009
elif fileName.find("DYJetsToLL_M-50_HT-100to200_TuneCP5_PSweights_13TeV-madgraphMLM-pythia8") !=-1 : return 160.7
elif fileName.find("ST_tW_antitop_5f_NoFullyHadronicDecays_TuneCP5down_13TeV-powheg-pythia8") !=-1 : return 34.97
elif fileName.find("ST_tW_antitop_5f_inclusiveDecays_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 34.97
elif fileName.find("ST_tW_top_5f_inclusiveDecays_TuneCP5down_PSweights_13TeV-powheg-pythia8") !=-1 : return 34.91
elif fileName.find("ST_t-channel_tauDecays_anomwtbLVRT_LV1RT4_13TeV-comphep-pythia8_TuneCP5") !=-1 : return 108.3
elif fileName.find("ST_t-channel_tauDecays_anomwtbLVRT_LV3RT1_13TeV-comphep-pythia8_TuneCP5") !=-1 : return 54.52
elif fileName.find("DYJetsToLL_M-50_HT-200to400_TuneCP5_PSweights_13TeV-madgraphMLM-pythia8") !=-1 : return 48.63
elif fileName.find("DYJetsToLL_M-50_HT-600to800_TuneCP5_PSweights_13TeV-madgraphMLM-pythia8") !=-1 : return 1.761
elif fileName.find("DYJetsToTauTau_ForcedMuEleDecay_M-50_TuneCP5_13TeV-amcatnloFXFX-pythia8") !=-1 : return 6458.0
elif fileName.find("ST_tW_top_5f_hdampup_NoFullyHadronicDecays_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 34.9
elif fileName.find("DYJetsToLL_M-50_HT-400to600_TuneCP5_PSweights_13TeV-madgraphMLM-pythia8") !=-1 : return 6.993
elif fileName.find("DYJetsToEE_M-50_LTbinned_100To200_5f_LO_TuneCP5_13TeV-madgraph-pythia8") !=-1 : return 93.51
elif fileName.find("ST_t-channel_muDecays_anomwtbLVRT_LV1RT4_13TeV-comphep-pythia8_TuneCP5") !=-1 : return 108.6
elif fileName.find("tZq_ll_4f_ckm_NLO_TuneCP5CR1_QCDbased_PSweights_13TeV-amcatnlo-pythia8") !=-1 : return 0.07358
elif fileName.find("DYJetsToLL_M-50_VBFFilter_TuneCP5_PSweights_13TeV-amcatnloFXFX-pythia8") !=-1 : return 266.1
elif fileName.find("DYJetsToEE_M-50_LTbinned_200To400_5f_LO_TuneCP5_13TeV-madgraph-pythia8") !=-1 : return 4.121
elif fileName.find("ST_t-channel_muDecays_anomwtbLVRT_LV2RT2_13TeV-comphep-pythia8_TuneCP5") !=-1 : return 102.7
elif fileName.find("DYJetsToLL_M-50_HT-70to100_TuneCP5_PSweights_13TeV-madgraphMLM-pythia8") !=-1 : return 146.5
elif fileName.find("ST_tW_top_5f_DS_inclusiveDecays_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 33.75
elif fileName.find("ST_tW_antitop_5f_DS_NoFullyHadronicDecays_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 35.13
elif fileName.find("TTZToLLNuNu_M-10_TuneCP5CR2_GluonMove_PSweights_13TeV-amcatnlo-pythia8") !=-1 : return 0.2432
elif fileName.find("DYJetsToEE_M-50_LTbinned_400To800_5f_LO_TuneCP5_13TeV-madgraph-pythia8") !=-1 : return 0.2445
elif fileName.find("ST_tW_antitop_5f_NoFullyHadronicDecays_TuneCP5CR1_13TeV-powheg-pythia8") !=-1 : return 34.97
elif fileName.find("ST_tW_antitop_5f_NoFullyHadronicDecays_TuneCP5CR2_13TeV-powheg-pythia8") !=-1 : return 34.97
elif fileName.find("TTJets_SingleLeptFromTbar_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 6.167
elif fileName.find("ST_t-channel_muDecays_anomwtbLVRT_LV3RT1_13TeV-comphep-pythia8_TuneCP5") !=-1 : return 54.72
elif fileName.find("DYJetsToLL_M-5to50_HT-600toInf_TuneCUETP8M1_13TeV-madgraphMLM-pythia8") !=-1 : return 1.107
elif fileName.find("ST_t-channel_eDecays_anomwtbLVRT_LV2RT2_13TeV-comphep-pythia8_TuneCP5") !=-1 : return 102.9
elif fileName.find("ST_s-channel_4f_leptonDecays_TuneCP5_PSweights_13TeV-amcatnlo-pythia8") !=-1 : return 3.74
elif fileName.find("TTTo2L2Nu_HT500Njet7_hdampDOWN_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 10.56
elif fileName.find("ST_tW_top_5f_NoFullyHadronicDecays_TuneCP5_erdON_13TeV-powheg-pythia8") !=-1 : return 34.91
elif fileName.find("DYJetsToEE_M-50_LTbinned_95To100_5f_LO_TuneCP5_13TeV-madgraph-pythia8") !=-1 : return 47.14
elif fileName.find("ST_tW_antitop_5f_NoFullyHadronicDecays_TuneCP5up_13TeV-powheg-pythia8") !=-1 : return 34.97
elif fileName.find("TTJets_SingleLeptFromTbar_genMET-80_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 31.68
elif fileName.find("ST_tW_top_5f_inclusiveDecays_TuneCP5up_PSweights_13TeV-powheg-pythia8") !=-1 : return 34.91
elif fileName.find("ST_t-channel_4f_leptonDecays_TuneCP5_PSweights_13TeV-amcatnlo-pythia8") !=-1 : return 70.9
elif fileName.find("DYJetsToLL_M-5to50_HT-400to600_TuneCUETP8M1_13TeV-madgraphMLM-pythia8") !=-1 : return 3.628
elif fileName.find("ST_t-channel_eDecays_anomwtbLVRT_LV1RT4_13TeV-comphep-pythia8_TuneCP5") !=-1 : return 108.9
elif fileName.find("TTZToLLNuNu_M-10_TuneCP5CR1_QCDbased_PSweights_13TeV-amcatnlo-pythia8") !=-1 : return 0.2432
elif fileName.find("DYJetsToLL_M-50_VBFFilter_TuneCP5_PSweights_13TeV-madgraphMLM-pythia8") !=-1 : return 202.3
elif fileName.find("DYJetsToLL_M-5to50_HT-100to200_TuneCUETP8M1_13TeV-madgraphMLM-pythia8") !=-1 : return 224.4
elif fileName.find("ST_t-channel_top_5f_TuneCP5_13TeV-powheg-madspin-pythia8_vtd_vts_prod") !=-1 : return 547.2
elif fileName.find("DYJetsToLL_M-5to50_HT-200to400_TuneCUETP8M1_13TeV-madgraphMLM-pythia8") !=-1 : return 37.87
elif fileName.find("TTWJetsToLNu_TuneCP5CR2_GluonMove_13TeV-amcatnloFXFX-madspin-pythia8") !=-1 : return 0.2188
elif fileName.find("DYBJetsToLL_M-50_Zpt-100to200_TuneCUETP8M1_13TeV-madgraphMLM-pythia8") !=-1 : return 3.088
elif fileName.find("ST_t-channel_tauDecays_anomwtbLVRT_RT4_13TeV-comphep-pythia8_TuneCP5") !=-1 : return 53.84
elif fileName.find("DYJetsToLL_M-50_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8_CUETP8M1Down") !=-1 : return 5940.0
elif fileName.find("DYJetsToLL_M-1To5_HT-400to600_TuneCUETP8M1_13TeV-madgraphMLM-pythia8") !=-1 : return 65.9
elif fileName.find("TTToSemiLepton_HT500Njet9_TuneCP5down_PSweights_13TeV-powheg-pythia8") !=-1 : return 4.179
elif fileName.find("DYJetsToLL_M-1To5_HT-200to400_TuneCUETP8M1_13TeV-madgraphMLM-pythia8") !=-1 : return 789.8
elif fileName.find("DYJetsToEE_M-50_LTbinned_80To85_5f_LO_TuneCP5_13TeV-madgraph-pythia8") !=-1 : return 174.1
elif fileName.find("DYBJetsToLL_M-50_Zpt-200toInf_TuneCUETP8M1_13TeV-madgraphMLM-pythia8") !=-1 : return 0.3159
elif fileName.find("DYJetsToEE_M-50_LTbinned_90To95_5f_LO_TuneCP5_13TeV-madgraph-pythia8") !=-1 : return 179.6
elif fileName.find("DYJetsToTauTau_ForcedMuDecay_M-50_TuneCP5_13TeV-amcatnloFXFX-pythia8") !=-1 : return 6503.0
elif fileName.find("DYJetsToLL_M-1To5_HT-600toInf_TuneCUETP8M1_13TeV-madgraphMLM-pythia8") !=-1 : return 16.72
elif fileName.find("DYJetsToLL_M-5to50_HT-70to100_TuneCUETP8M1_13TeV-madgraphMLM-pythia8") !=-1 : return 301.0
elif fileName.find("DYJetsToLL_M-1To5_HT-150to200_TuneCUETP8M1_13TeV-madgraphMLM-pythia8") !=-1 : return 1124.0
elif fileName.find("DYJetsToEE_M-50_LTbinned_85To90_5f_LO_TuneCP5_13TeV-madgraph-pythia8") !=-1 : return 250.6
elif fileName.find("DYJetsToLL_M-50_HT-1200to2500_TuneCUETP8M1_13TeV-madgraphMLM-pythia8") !=-1 : return 0.1512
elif fileName.find("TTToSemiLeptonic_TuneCP5CR2_GluonMove_PSweights_13TeV-powheg-pythia8") !=-1 : return 687.1
elif fileName.find("TTToHplusToWZp_WToTauNu_WuToQQ_ZpToMuMu_MH-160_MZp-2_13TeV-madgraph") !=-1 : return 0.0001077
elif fileName.find("TTToHplusToWZp_WToTauNu_WuToQQ_ZpToMuMu_MH-100_MZp-8_13TeV-madgraph") !=-1 : return 1.563e-05
elif fileName.find("TTToHplusToWZp_WToTauNu_WuToQQ_ZpToMuMu_MH-160_MZp-5_13TeV-madgraph") !=-1 : return 4.298e-05
elif fileName.find("TTToSemiLeptonic_TuneCP5CR1_QCDbased_PSweights_13TeV-powheg-pythia8") !=-1 : return 687.1
elif fileName.find("TTToHplusToWZp_WToTauNu_WuToQQ_ZpToMuMu_MH-150_MZp-8_13TeV-madgraph") !=-1 : return 5.117e-05
elif fileName.find("TTToHplusToWZp_WToTauNu_WuToQQ_ZpToMuMu_MH-150_MZp-5_13TeV-madgraph") !=-1 : return 8.237e-05
elif fileName.find("TTToHplusToWZp_WToTauNu_WuToQQ_ZpToMuMu_MH-120_MZp-2_13TeV-madgraph") !=-1 : return 0.0002052
elif fileName.find("TTJets_SingleLeptFromT_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 6.212
elif fileName.find("TTTo2L2Nu_HT500Njet7_hdampUP_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 12.41
elif fileName.find("TTToHplusToWZp_WToTauNu_WuToQQ_ZpToMuMu_MH-140_MZp-8_13TeV-madgraph") !=-1 : return 6.504e-05
elif fileName.find("DYJetsToLL_M-50_HT-2500toInf_TuneCUETP8M1_13TeV-madgraphMLM-pythia8") !=-1 : return 0.003659
elif fileName.find("TTToHplusToWZp_WToTauNu_WuToQQ_ZpToMuMu_MH-130_MZp-2_13TeV-madgraph") !=-1 : return 0.0002577
elif fileName.find("TTToHplusToWZp_WToTauNu_WuToQQ_ZpToMuMu_MH-150_MZp-2_13TeV-madgraph") !=-1 : return 0.0002047
elif fileName.find("TTToHplusToWZp_WToTauNu_WuToQQ_ZpToMuMu_MH-140_MZp-2_13TeV-madgraph") !=-1 : return 0.0002613
elif fileName.find("TTToHplusToWZp_WToTauNu_WuToQQ_ZpToMuMu_MH-130_MZp-5_13TeV-madgraph") !=-1 : return 0.0001031
elif fileName.find("TTToHplusToWZp_WToTauNu_WuToQQ_ZpToMuMu_MH-100_MZp-2_13TeV-madgraph") !=-1 : return 5.684e-05
elif fileName.find("TTToHplusToWZp_WToTauNu_WuToQQ_ZpToMuMu_MH-110_MZp-8_13TeV-madgraph") !=-1 : return 3.331e-05
elif fileName.find("TTToHplusToWZp_WToTauNu_WuToQQ_ZpToMuMu_MH-130_MZp-8_13TeV-madgraph") !=-1 : return 6.456e-05
elif fileName.find("TTToHplusToWZp_WToTauNu_WuToQQ_ZpToMuMu_MH-100_MZp-5_13TeV-madgraph") !=-1 : return 2.343e-05
elif fileName.find("ChargedHiggs_TTToHplusBWB_HplusToTauNu_M-120_13TeV_amcatnlo_pythia8") !=-1 : return 7.402
elif fileName.find("ChargedHiggs_TTToHplusBWB_HplusToTauNu_M-150_13TeV_amcatnlo_pythia8") !=-1 : return 1.686
elif fileName.find("TTToHplusToWZp_WToTauNu_WuToQQ_ZpToMuMu_MH-110_MZp-5_13TeV-madgraph") !=-1 : return 5.221e-05
elif fileName.find("ChargedHiggs_TTToHplusBWB_HplusToTauNu_M-140_13TeV_amcatnlo_pythia8") !=-1 : return 3.367
elif fileName.find("ST_tW_top_5f_inclusiveDecays_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 34.91
elif fileName.find("ST_t-channel_tauDecays_anomwtbLVRV_RV_13TeV-comphep-pythia8_TuneCP5") !=-1 : return 24.06
elif fileName.find("TTToHplusToWZp_WToTauNu_WuToQQ_ZpToMuMu_MH-110_MZp-2_13TeV-madgraph") !=-1 : return 0.0001291
elif fileName.find("TTToHplusToWZp_WToTauNu_WuToQQ_ZpToMuMu_MH-120_MZp-8_13TeV-madgraph") !=-1 : return 5.201e-05
elif fileName.find("TTToHplusToWZp_WToTauNu_WuToQQ_ZpToMuMu_MH-140_MZp-5_13TeV-madgraph") !=-1 : return 0.0001043
elif fileName.find("ST_t-channel_tauDecays_anomwtbLVLT_LT_13TeV-comphep-pythia8_TuneCP5") !=-1 : return 71.8
elif fileName.find("ChargedHiggs_TTToHplusBWB_HplusToTauNu_M-160_13TeV_amcatnlo_pythia8") !=-1 : return 0.4841
elif fileName.find("TTWJetsToLNu_TuneCP5CR1_QCDbased_13TeV-amcatnloFXFX-madspin-pythia8") !=-1 : return 0.2183
elif fileName.find("ST_tW_top_5f_NoFullyHadronicDecays_TuneCP5down_13TeV-powheg-pythia8") !=-1 : return 34.91
elif fileName.find("ST_tW_antitop_5f_NoFullyHadronicDecays_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 34.97
elif fileName.find("ChargedHiggs_TTToHplusBWB_HplusToTauNu_M-100_13TeV_amcatnlo_pythia8") !=-1 : return 11.62
elif fileName.find("TTToHplusToWZp_WToTauNu_WuToQQ_ZpToMuMu_MH-160_MZp-8_13TeV-madgraph") !=-1 : return 2.687e-05
elif fileName.find("DYJetsToLL_Zpt-200toInf_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8") !=-1 : return 6.733
elif fileName.find("DYJetsToLL_M-50_HT-800to1200_TuneCUETP8M1_13TeV-madgraphMLM-pythia8") !=-1 : return 0.6229
elif fileName.find("TTToHplusToWZp_WToTauNu_WuToQQ_ZpToMuMu_MH-120_MZp-5_13TeV-madgraph") !=-1 : return 8.243e-05
elif fileName.find("DYJetsToLL_Zpt-100to200_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8") !=-1 : return 57.3
elif fileName.find("ST_t-channel_muDecays_anomwtbLVRT_RT4_13TeV-comphep-pythia8_TuneCP5") !=-1 : return 53.81
elif fileName.find("DYJetsToLL_M-50_Zpt-150toInf_TuneCUETP8M1_13TeV-madgraphMLM-pythia8") !=-1 : return 18.36
elif fileName.find("ChargedHiggs_TTToHplusBWB_HplusToTauNu_M-155_13TeV_amcatnlo_pythia8") !=-1 : return 1.008
elif fileName.find("DYJetsToLL_M-50_HT-200to400_TuneCUETP8M1_13TeV-madgraphMLM-pythia8") !=-1 : return 41.04
elif fileName.find("TTToHplusToWZp_WToTauNu_WuToQQ_ZpToMuMu_MH-90_MZp-8_13TeV-madgraph") !=-1 : return 3.684e-06
elif fileName.find("DYJetsToLL_M-105To160_TuneCP5_PSweights_13TeV-amcatnloFXFX-pythia8") !=-1 : return 47.05
elif fileName.find("tZq_Zhad_Wlept_4f_ckm_NLO_TuneCP5_PSweights_13TeV-amcatnlo-pythia8") !=-1 : return 0.1518
elif fileName.find("TTJets_SingleLeptFromT_genMET-80_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 32.27
elif fileName.find("DYJetsToLL_M-50_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8_CUETP8M1Up") !=-1 : return 5872.0
elif fileName.find("ST_tW_top_5f_NoFullyHadronicDecays_TuneCP5CR2_13TeV-powheg-pythia8") !=-1 : return 34.91
elif fileName.find("TTToHplusToWZp_WToTauNu_WuToQQ_ZpToMuMu_MH-90_MZp-5_13TeV-madgraph") !=-1 : return 5.859e-06
elif fileName.find("TTToSemiLepton_HT500Njet9_TuneCP5up_PSweights_13TeV-powheg-pythia8") !=-1 : return 4.295
elif fileName.find("DYJetsToLL_M-50_HT-100to200_TuneCUETP8M1_13TeV-madgraphMLM-pythia8") !=-1 : return 147.4
elif fileName.find("DYJetsToLL_M-105To160_VBFFilter_TuneCP5_13TeV-amcatnloFXFX-pythia8") !=-1 : return 2.026
elif fileName.find("DYJetsToLL_M-50_HT-600to800_TuneCUETP8M1_13TeV-madgraphMLM-pythia8") !=-1 : return 1.358
elif fileName.find("ST_t-channel_muDecays_anomwtbLVRV_RV_13TeV-comphep-pythia8_TuneCP5") !=-1 : return 24.21
elif fileName.find("ChargedHiggs_TTToHplusBWB_HplusToTauNu_M-90_13TeV_amcatnlo_pythia8") !=-1 : return 13.46
elif fileName.find("ST_t-channel_muDecays_anomwtbLVLT_LT_13TeV-comphep-pythia8_TuneCP5") !=-1 : return 71.67
elif fileName.find("ST_tW_top_5f_NoFullyHadronicDecays_TuneCP5CR1_13TeV-powheg-pythia8") !=-1 : return 34.91
elif fileName.find("ChargedHiggs_TTToHplusBWB_HplusToTauNu_M-80_13TeV_amcatnlo_pythia8") !=-1 : return 15.03
elif fileName.find("DYJetsToLL_M-50_HT-400to600_TuneCUETP8M1_13TeV-madgraphMLM-pythia8") !=-1 : return 5.674
elif fileName.find("ST_t-channel_eDecays_anomwtbLVRT_RT4_13TeV-comphep-pythia8_TuneCP5") !=-1 : return 53.7
elif fileName.find("ST_tW_top_5f_DS_NoFullyHadronicDecays_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 33.67
elif fileName.find("TTTo2L2Nu_ttbbFilter_TuneCUETP8M2_ttHtranche3_13TeV-powheg-pythia8") !=-1 : return 7.269
elif fileName.find("TTToHplusToWZp_WToTauNu_WuToQQ_ZpToMuMu_MH-90_MZp-2_13TeV-madgraph") !=-1 : return 1.318e-05
elif fileName.find("TTToHplusToWZp_WToLNu_WToLNu_ZpToMuMu_MH-140_MZp-2_13TeV-madgraph") !=-1 : return 0.0001947
elif fileName.find("DYJetsToNuNu_PtZ-400To650_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8") !=-1 : return 0.2783
elif fileName.find("DYJetsToLL_BGenFilter_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8") !=-1 : return 255.2
elif fileName.find("TTToHplusToWZp_WToLNu_WToLNu_ZpToMuMu_MH-130_MZp-8_13TeV-madgraph") !=-1 : return 4.803e-05
elif fileName.find("TTToHplusToWZp_WToLNu_WToLNu_ZpToMuMu_MH-110_MZp-5_13TeV-madgraph") !=-1 : return 3.898e-05
elif fileName.find("TTToHplusToWZp_WToLNu_WToLNu_ZpToMuMu_MH-110_MZp-8_13TeV-madgraph") !=-1 : return 2.495e-05
elif fileName.find("TTToHplusToWZp_WToLNu_WToLNu_ZpToMuMu_MH-100_MZp-5_13TeV-madgraph") !=-1 : return 1.776e-05
elif fileName.find("DYJetsToNuNu_PtZ-100To250_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8") !=-1 : return 54.86
elif fileName.find("TTToSemiLeptonic_hdampDOWN_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 687.1
elif fileName.find("ST_tW_antitop_5f_inclusiveDecays_TuneCP5down_13TeV-powheg-pythia8") !=-1 : return 34.97
elif fileName.find("TTToHplusToWZp_WToLNu_WToLNu_ZpToMuMu_MH-120_MZp-8_13TeV-madgraph") !=-1 : return 3.87e-05
elif fileName.find("TTToSemiLeptonic_mtop178p5_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 584.6
elif fileName.find("DYJetsToNuNu_PtZ-250To400_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8") !=-1 : return 2.073
elif fileName.find("TTToSemiLeptonic_mtop175p5_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 633.4
elif fileName.find("TTToHplusToWZp_WToLNu_WToLNu_ZpToMuMu_MH-130_MZp-5_13TeV-madgraph") !=-1 : return 7.685e-05
elif fileName.find("TTToSemiLeptonic_mtop169p5_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 746.2
elif fileName.find("TTToHplusToWZp_WToLNu_WToLNu_ZpToMuMu_MH-160_MZp-5_13TeV-madgraph") !=-1 : return 3.199e-05
elif fileName.find("TTToHplusToWZp_WToLNu_WToLNu_ZpToMuMu_MH-160_MZp-8_13TeV-madgraph") !=-1 : return 1.993e-05
elif fileName.find("TTToHplusToWZp_WToLNu_WToLNu_ZpToMuMu_MH-120_MZp-2_13TeV-madgraph") !=-1 : return 0.0001529
elif fileName.find("TTToHplusToWZp_WToLNu_WToLNu_ZpToMuMu_MH-160_MZp-2_13TeV-madgraph") !=-1 : return 7.999e-05
elif fileName.find("DYJetsToLL_M-105To160_TuneCP5_PSweights_13TeV-madgraphMLM-pythia8") !=-1 : return 38.81
elif fileName.find("TTToHplusToWZp_WToLNu_WToLNu_ZpToMuMu_MH-100_MZp-2_13TeV-madgraph") !=-1 : return 4.276e-05
elif fileName.find("TTToHplusToWZp_WToLNu_WToLNu_ZpToMuMu_MH-150_MZp-5_13TeV-madgraph") !=-1 : return 6.133e-05
elif fileName.find("ST_tW_top_5f_NoFullyHadronicDecays_TuneCP5up_13TeV-powheg-pythia8") !=-1 : return 34.91
elif fileName.find("TTToSemiLeptonic_mtop173p5_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 668.6
elif fileName.find("TTToHplusToWZp_WToLNu_WToLNu_ZpToMuMu_MH-140_MZp-8_13TeV-madgraph") !=-1 : return 4.869e-05
elif fileName.find("TTToSemiLeptonic_mtop166p5_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 811.4
elif fileName.find("TTToHplusToWZp_WToLNu_WToLNu_ZpToMuMu_MH-150_MZp-8_13TeV-madgraph") !=-1 : return 3.827e-05
elif fileName.find("ST_t-channel_eDecays_anomwtbLVRV_RV_13TeV-comphep-pythia8_TuneCP5") !=-1 : return 24.12
elif fileName.find("DYJetsToNuNu_PtZ-650ToInf_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8") !=-1 : return 0.02603
elif fileName.find("TTToHplusToWZp_WToLNu_WToLNu_ZpToMuMu_MH-140_MZp-5_13TeV-madgraph") !=-1 : return 7.75e-05
elif fileName.find("ST_t-channel_eDecays_anomwtbLVLT_LT_13TeV-comphep-pythia8_TuneCP5") !=-1 : return 71.81
elif fileName.find("TTToHplusToWZp_WToLNu_WToLNu_ZpToMuMu_MH-110_MZp-2_13TeV-madgraph") !=-1 : return 9.633e-05
elif fileName.find("TTWJetsToLNu_TuneCP5_PSweights_13TeV-amcatnloFXFX-madspin-pythia8") !=-1 : return 0.2198
elif fileName.find("TTToHplusToWZp_WToLNu_WToLNu_ZpToMuMu_MH-130_MZp-2_13TeV-madgraph") !=-1 : return 0.0001914
elif fileName.find("TTToSemiLeptonic_mtop171p5_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 706.1
elif fileName.find("TTToHplusToWZp_WToLNu_WToLNu_ZpToMuMu_MH-120_MZp-5_13TeV-madgraph") !=-1 : return 6.122e-05
elif fileName.find("TTToHplusToWZp_WToLNu_WToLNu_ZpToMuMu_MH-150_MZp-2_13TeV-madgraph") !=-1 : return 0.000153
elif fileName.find("TTToHplusToWZp_WToLNu_WToLNu_ZpToMuMu_MH-100_MZp-8_13TeV-madgraph") !=-1 : return 1.168e-05
elif fileName.find("DYJetsToLL_M-105To160_VBFFilter_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 1.518
elif fileName.find("DYJetsToLL_M-4to50_HT-600toInf_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 1.85
elif fileName.find("TTToHplusToWA_WToTauNu_WToQQ_AToMuMu_MH-160_MA-25_13TeV-madgraph") !=-1 : return 0.01724
elif fileName.find("DY2JetsToLL_M-50_LHEZpT_400-inf_TuneCP5_13TeV-amcnloFXFX-pythia8") !=-1 : return 0.4477
elif fileName.find("DY1JetsToLL_M-50_LHEZpT_400-inf_TuneCP5_13TeV-amcnloFXFX-pythia8") !=-1 : return 0.1193
elif fileName.find("DY2JetsToLL_M-50_LHEZpT_250-400_TuneCP5_13TeV-amcnloFXFX-pythia8") !=-1 : return 2.737
elif fileName.find("TTToHplusToWA_WToTauNu_WToQQ_AToMuMu_MH-120_MA-30_13TeV-madgraph") !=-1 : return 0.01416
elif fileName.find("TTToHplusToWA_WToTauNu_WToQQ_AToMuMu_MH-160_MA-35_13TeV-madgraph") !=-1 : return 0.02032
elif fileName.find("TTToHplusToWA_WToTauNu_WToQQ_AToMuMu_MH-140_MA-25_13TeV-madgraph") !=-1 : return 0.03673
elif fileName.find("DYJetsToNuNu_PtZ-50To100_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8") !=-1 : return 237.2
elif fileName.find("TTToHplusToWZp_WToLNu_WToLNu_ZpToMuMu_MH-90_MZp-8_13TeV-madgraph") !=-1 : return 2.736e-06
elif fileName.find("TTToHplusToWA_WToTauNu_WToQQ_AToMuMu_MH-100_MA-20_13TeV-madgraph") !=-1 : return 0.0008489
elif fileName.find("DY1JetsToLL_M-50_LHEZpT_250-400_TuneCP5_13TeV-amcnloFXFX-pythia8") !=-1 : return 1.098
elif fileName.find("TTToHplusToWZp_WToLNu_WToLNu_ZpToMuMu_MH-90_MZp-5_13TeV-madgraph") !=-1 : return 4.393e-06
elif fileName.find("TTToHplusToWA_WToTauNu_WToQQ_AToMuMu_MH-140_MA-20_13TeV-madgraph") !=-1 : return 0.03297
elif fileName.find("TTToHplusToWA_WToTauNu_WToQQ_AToMuMu_MH-130_MA-25_13TeV-madgraph") !=-1 : return 0.03162
elif fileName.find("TTToHplusToWA_WToTauNu_WToQQ_AToMuMu_MH-140_MA-10_13TeV-madgraph") !=-1 : return 0.01898
elif fileName.find("TTToHplusToWA_WToTauNu_WToQQ_AToMuMu_MH-110_MA-25_13TeV-madgraph") !=-1 : return 0.005135
elif fileName.find("DYJetsToLL_M-4to50_HT-200to400_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 54.39
elif fileName.find("TTToHplusToWA_WToTauNu_WToQQ_AToMuMu_MH-110_MA-20_13TeV-madgraph") !=-1 : return 0.008633
elif fileName.find("TTToHplusToWA_WToTauNu_WToQQ_AToMuMu_MH-100_MA-10_13TeV-madgraph") !=-1 : return 0.002968
elif fileName.find("TTToHplusToWA_WToTauNu_WToQQ_AToMuMu_MH-140_MA-35_13TeV-madgraph") !=-1 : return 0.03638
elif fileName.find("TTToHplusToWA_WToTauNu_WToQQ_AToMuMu_MH-160_MA-15_13TeV-madgraph") !=-1 : return 0.01158
elif fileName.find("DYJetsToLL_M-4to50_HT-400to600_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 5.697
elif fileName.find("TTToHplusToWA_WToTauNu_WToQQ_AToMuMu_MH-100_MA-15_13TeV-madgraph") !=-1 : return 0.002361
elif fileName.find("TTToSemiLepton_HT500Njet9_hdampDOWN_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 3.899
elif fileName.find("ST_tW_DS_antitop_5f_inclusiveDecays_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 35.13
elif fileName.find("TTToHplusToWA_WToTauNu_WToQQ_AToMuMu_MH-120_MA-20_13TeV-madgraph") !=-1 : return 0.02027
elif fileName.find("TTToHplusToWA_WToTauNu_WToQQ_AToMuMu_MH-120_MA-10_13TeV-madgraph") !=-1 : return 0.01418
elif fileName.find("TTToHplusToWA_WToTauNu_WToQQ_AToMuMu_MH-150_MA-25_13TeV-madgraph") !=-1 : return 0.03152
elif fileName.find("TTToHplusToWA_WToTauNu_WToQQ_AToMuMu_MH-150_MA-35_13TeV-madgraph") !=-1 : return 0.0346
elif fileName.find("TTToHplusToWA_WToTauNu_WToQQ_AToMuMu_MH-150_MA-30_13TeV-madgraph") !=-1 : return 0.03397
elif fileName.find("TTToHplusToWA_WToTauNu_WToQQ_AToMuMu_MH-120_MA-25_13TeV-madgraph") !=-1 : return 0.0188
elif fileName.find("DY1JetsToLL_M-50_LHEZpT_150-250_TuneCP5_13TeV-amcnloFXFX-pythia8") !=-1 : return 9.543
elif fileName.find("TTToHplusToWA_WToTauNu_WToQQ_AToMuMu_MH-110_MA-30_13TeV-madgraph") !=-1 : return 0.001474
elif fileName.find("TTToHplusToWA_WToTauNu_WToQQ_AToMuMu_MH-150_MA-10_13TeV-madgraph") !=-1 : return 0.01521
elif fileName.find("TTToHplusToWA_WToTauNu_WToQQ_AToMuMu_MH-160_MA-30_13TeV-madgraph") !=-1 : return 0.01928
elif fileName.find("TTToHplusToWA_WToTauNu_WToQQ_AToMuMu_MH-150_MA-15_13TeV-madgraph") !=-1 : return 0.02175
elif fileName.find("DYJetsToLL_M-4to50_HT-100to200_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 204.0
elif fileName.find("TTToHplusToWA_WToTauNu_WToQQ_AToMuMu_MH-140_MA-15_13TeV-madgraph") !=-1 : return 0.0269
elif fileName.find("TTToSemiLepton_HT500Njet9_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 4.253
elif fileName.find("DYBJetsToNuNu_Zpt-40toInf_TuneCUETP8M1_13TeV-madgraphMLM-pythia8") !=-1 : return 48.71
elif fileName.find("TTToHplusToWA_WToTauNu_WToQQ_AToMuMu_MH-130_MA-30_13TeV-madgraph") !=-1 : return 0.02977
elif fileName.find("TTToHplusToWA_WToTauNu_WToQQ_AToMuMu_MH-160_MA-20_13TeV-madgraph") !=-1 : return 0.01481
elif fileName.find("TTToHadronic_TuneCP5CR2_GluonMove_PSweights_13TeV-powheg-pythia8") !=-1 : return 687.1
elif fileName.find("TTToHplusToWA_WToTauNu_WToQQ_AToMuMu_MH-130_MA-20_13TeV-madgraph") !=-1 : return 0.03
elif fileName.find("TTToHplusToWA_WToTauNu_WToQQ_AToMuMu_MH-120_MA-15_13TeV-madgraph") !=-1 : return 0.01866
elif fileName.find("TTToHplusToWZp_WToLNu_WToLNu_ZpToMuMu_MH-90_MZp-2_13TeV-madgraph") !=-1 : return 9.812e-06
elif fileName.find("TTToHplusToWA_WToTauNu_WToQQ_AToMuMu_MH-130_MA-10_13TeV-madgraph") !=-1 : return 0.01839
elif fileName.find("TTToHplusToWA_WToTauNu_WToQQ_AToMuMu_MH-130_MA-15_13TeV-madgraph") !=-1 : return 0.02536
elif fileName.find("DY2JetsToLL_M-50_LHEZpT_150-250_TuneCP5_13TeV-amcnloFXFX-pythia8") !=-1 : return 15.65
elif fileName.find("TTToHplusToWA_WToTauNu_WToQQ_AToMuMu_MH-160_MA-10_13TeV-madgraph") !=-1 : return 0.008011
elif fileName.find("TTToHplusToWA_WToTauNu_WToQQ_AToMuMu_MH-110_MA-15_13TeV-madgraph") !=-1 : return 0.009756
elif fileName.find("TTToHplusToWA_WToTauNu_WToQQ_AToMuMu_MH-120_MA-35_13TeV-madgraph") !=-1 : return 0.007598
elif fileName.find("TTToHplusToWA_WToTauNu_WToQQ_AToMuMu_MH-110_MA-10_13TeV-madgraph") !=-1 : return 0.008264
elif fileName.find("TTToHplusToWA_WToTauNu_WToQQ_AToMuMu_MH-130_MA-35_13TeV-madgraph") !=-1 : return 0.02491
elif fileName.find("TTToHplusToWA_WToTauNu_WToQQ_AToMuMu_MH-140_MA-30_13TeV-madgraph") !=-1 : return 0.03793
elif fileName.find("TTToHplusToWA_WToTauNu_WToQQ_AToMuMu_MH-150_MA-20_13TeV-madgraph") !=-1 : return 0.02735
elif fileName.find("DYJetsToLL_M-50_HT-1200to2500_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 0.1933
elif fileName.find("DYJetsToLL_M-1500to2000_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8") !=-1 : return 0.00218
elif fileName.find("ST_s-channel_4f_leptonDecays_TuneCP5down_13TeV-amcatnlo-pythia8") !=-1 : return 3.74
elif fileName.find("TTToHplusToWA_WToLNu_WToLNu_AToMuMu_MH-150_MA-35_13TeV-madgraph") !=-1 : return 0.02608
elif fileName.find("TTToHplusToWA_WToLNu_WToLNu_AToMuMu_MH-130_MA-20_13TeV-madgraph") !=-1 : return 0.02245
elif fileName.find("TTToHplusToWA_WToLNu_WToLNu_AToMuMu_MH-140_MA-20_13TeV-madgraph") !=-1 : return 0.02486
elif fileName.find("TTToHadronic_TuneCP5CR1_QCDbased_PSweights_13TeV-powheg-pythia8") !=-1 : return 687.1
elif fileName.find("TTToHplusToWA_WToLNu_WToLNu_AToMuMu_MH-120_MA-25_13TeV-madgraph") !=-1 : return 0.01403
elif fileName.find("DYBJetsToLL_M-50_Zpt-100to200_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 4.042
elif fileName.find("ST_tW_antitop_5f_inclusiveDecays_TuneCP5up_13TeV-powheg-pythia8") !=-1 : return 34.97
elif fileName.find("DYJetsToLL_M-1to4_HT-600toInf_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 2.453
elif fileName.find("TTToHplusToWA_WToLNu_WToLNu_AToMuMu_MH-110_MA-30_13TeV-madgraph") !=-1 : return 0.0011
elif fileName.find("DYJetsToLL_M-1to4_HT-100to200_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 479.6
elif fileName.find("TTToHplusToWA_WToLNu_WToLNu_AToMuMu_MH-110_MA-25_13TeV-madgraph") !=-1 : return 0.003863
elif fileName.find("TTToHplusToWA_WToLNu_WToLNu_AToMuMu_MH-160_MA-25_13TeV-madgraph") !=-1 : return 0.01305
elif fileName.find("TTToHplusToWA_WToLNu_WToLNu_AToMuMu_MH-130_MA-35_13TeV-madgraph") !=-1 : return 0.01856
elif fileName.find("TTToHplusToWA_WToLNu_WToLNu_AToMuMu_MH-140_MA-30_13TeV-madgraph") !=-1 : return 0.02827
elif fileName.find("DY1JetsToLL_M-50_LHEZpT_50-150_TuneCP5_13TeV-amcnloFXFX-pythia8") !=-1 : return 316.6
elif fileName.find("TTToHplusToWA_WToLNu_WToLNu_AToMuMu_MH-120_MA-20_13TeV-madgraph") !=-1 : return 0.01516
elif fileName.find("TTToHplusToWA_WToLNu_WToLNu_AToMuMu_MH-140_MA-35_13TeV-madgraph") !=-1 : return 0.02718
elif fileName.find("DYJetsToLL_M-50_HT-1200to2500_TuneCP5_14TeV-madgraphMLM-pythia8") !=-1 : return 0.2466
elif fileName.find("TTToHplusToWA_WToLNu_WToLNu_AToMuMu_MH-160_MA-10_13TeV-madgraph") !=-1 : return 0.006028
elif fileName.find("TTToHplusToWA_WToLNu_WToLNu_AToMuMu_MH-120_MA-35_13TeV-madgraph") !=-1 : return 0.005625
elif fileName.find("TTToHplusToWA_WToLNu_WToLNu_AToMuMu_MH-160_MA-35_13TeV-madgraph") !=-1 : return 0.01527
elif fileName.find("DYJetsToLL_M-1to4_HT-400to600_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 8.207
elif fileName.find("TTToSemiLeptonic_hdampUP_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 687.0
elif fileName.find("TTToHplusToWA_WToLNu_WToLNu_AToMuMu_MH-150_MA-10_13TeV-madgraph") !=-1 : return 0.01143
elif fileName.find("TTToHplusToWA_WToLNu_WToLNu_AToMuMu_MH-110_MA-20_13TeV-madgraph") !=-1 : return 0.006446
elif fileName.find("TTToHplusToWA_WToLNu_WToLNu_AToMuMu_MH-160_MA-30_13TeV-madgraph") !=-1 : return 0.01449
elif fileName.find("TTToHplusToWA_WToLNu_WToLNu_AToMuMu_MH-110_MA-15_13TeV-madgraph") !=-1 : return 0.007288
elif fileName.find("DY2JetsToLL_M-50_LHEZpT_50-150_TuneCP5_13TeV-amcnloFXFX-pythia8") !=-1 : return 169.6
elif fileName.find("TTToHplusToWA_WToLNu_WToLNu_AToMuMu_MH-150_MA-20_13TeV-madgraph") !=-1 : return 0.02056
elif fileName.find("TTToHplusToWA_WToLNu_WToLNu_AToMuMu_MH-150_MA-30_13TeV-madgraph") !=-1 : return 0.02555
elif fileName.find("DYJetsToLL_M-2000to3000_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8") !=-1 : return 0.0005156
elif fileName.find("TTToHplusToWA_WToLNu_WToLNu_AToMuMu_MH-130_MA-25_13TeV-madgraph") !=-1 : return 0.02348
elif fileName.find("TTToHplusToWA_WToLNu_WToLNu_AToMuMu_MH-150_MA-25_13TeV-madgraph") !=-1 : return 0.02368
elif fileName.find("TTToHplusToWA_WToLNu_WToLNu_AToMuMu_MH-160_MA-20_13TeV-madgraph") !=-1 : return 0.01108
elif fileName.find("TTToHplusToWA_WToLNu_WToLNu_AToMuMu_MH-130_MA-10_13TeV-madgraph") !=-1 : return 0.01375
elif fileName.find("ST_tW_top_5f_NoFullyHadronicDecays_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 34.91
elif fileName.find("TTTo2L2Nu_HT500Njet7_TuneCP5down_PSweights_13TeV-powheg-pythia8") !=-1 : return 11.34
elif fileName.find("TTToHplusToWA_WToLNu_WToLNu_AToMuMu_MH-140_MA-25_13TeV-madgraph") !=-1 : return 0.02774
elif fileName.find("DYJetsToLL_M-4to50_HT-70to100_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 145.5
elif fileName.find("TTToHplusToWA_WToLNu_WToLNu_AToMuMu_MH-100_MA-20_13TeV-madgraph") !=-1 : return 0.0006391
elif fileName.find("TTToHplusToWA_WToLNu_WToLNu_AToMuMu_MH-140_MA-10_13TeV-madgraph") !=-1 : return 0.01428
elif fileName.find("DYJetsToLL_M-1000to1500_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8") !=-1 : return 0.01636
elif fileName.find("DYJetsToEE_M-50_LTbinned_800To2000_5f_LO_13TeV-madgraph_pythia8") !=-1 : return 0.008352
elif fileName.find("TTToHplusToWA_WToLNu_WToLNu_AToMuMu_MH-100_MA-15_13TeV-madgraph") !=-1 : return 0.001767
elif fileName.find("DYBJetsToLL_M-50_Zpt-200toInf_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 0.4286
elif fileName.find("TTToHplusToWA_WToTauNu_WToQQ_AToMuMu_MH-90_MA-10_13TeV-madgraph") !=-1 : return 0.0002799
elif fileName.find("TTToHplusToWA_WToLNu_WToLNu_AToMuMu_MH-130_MA-30_13TeV-madgraph") !=-1 : return 0.02227
elif fileName.find("TTToHplusToWA_WToLNu_WToLNu_AToMuMu_MH-130_MA-15_13TeV-madgraph") !=-1 : return 0.01906
elif fileName.find("TTToHplusToWA_WToLNu_WToLNu_AToMuMu_MH-160_MA-15_13TeV-madgraph") !=-1 : return 0.008748
elif fileName.find("TTToHplusToWA_WToLNu_WToLNu_AToMuMu_MH-150_MA-15_13TeV-madgraph") !=-1 : return 0.01638
elif fileName.find("TTToHplusToWA_WToLNu_WToLNu_AToMuMu_MH-140_MA-15_13TeV-madgraph") !=-1 : return 0.02024
elif fileName.find("DYJetsToLL_M-1to4_HT-200to400_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 85.85
elif fileName.find("TTToHplusToWA_WToLNu_WToLNu_AToMuMu_MH-100_MA-10_13TeV-madgraph") !=-1 : return 0.002219
elif fileName.find("TTToHplusToWA_WToLNu_WToLNu_AToMuMu_MH-120_MA-30_13TeV-madgraph") !=-1 : return 0.01063
elif fileName.find("TTToHplusToWA_WToLNu_WToLNu_AToMuMu_MH-110_MA-10_13TeV-madgraph") !=-1 : return 0.006195
elif fileName.find("TTToHplusToWA_WToLNu_WToLNu_AToMuMu_MH-120_MA-10_13TeV-madgraph") !=-1 : return 0.01064
elif fileName.find("TTToHplusToWA_WToLNu_WToLNu_AToMuMu_MH-120_MA-15_13TeV-madgraph") !=-1 : return 0.014
elif fileName.find("TTToSemiLepton_HT500Njet9_hdampUP_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 4.571
elif fileName.find("DYJetsToLL_M-50_HT-2500toInf_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 0.003468
elif fileName.find("ST_s-channel_4f_leptonDecays_TuneCP5CR2_13TeV-amcatnlo-pythia8") !=-1 : return 3.74
elif fileName.find("DYJetsToEE_M-50_LTbinned_200To400_5f_LO_13TeV-madgraph_pythia8") !=-1 : return 3.574
elif fileName.find("ST_t-channel_antitop_5f_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 71.74
elif fileName.find("TTZPrimeToMuMu_M-1700_TuneCP5_PSWeights_13TeV-madgraph-pythia8") !=-1 : return 9.353e-05
elif fileName.find("TTZPrimeToMuMu_M-2000_TuneCP5_PSWeights_13TeV-madgraph-pythia8") !=-1 : return 3.577e-05
elif fileName.find("TTZPrimeToMuMu_M-1600_TuneCP5_PSWeights_13TeV-madgraph-pythia8") !=-1 : return 0.0001325
elif fileName.find("DYJetsToLL_M-50_Zpt-150toInf_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 22.93
elif fileName.find("TTToHplusToWA_WToLNu_WToLNu_AToMuMu_MH-90_MA-10_13TeV-madgraph") !=-1 : return 0.0002095
elif fileName.find("DYJetsToLL_M-1to4_HT-70to100_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 626.8
elif fileName.find("TTZPrimeToMuMu_M-1300_TuneCP5_PSWeights_13TeV-madgraph-pythia8") !=-1 : return 0.0004131
elif fileName.find("DYJetsToEE_M-50_LTbinned_100To200_5f_LO_13TeV-madgraph_pythia8") !=-1 : return 94.34
elif fileName.find("DYJetsToEE_M-50_LTbinned_400To800_5f_LO_13TeV-madgraph_pythia8") !=-1 : return 0.2005
elif fileName.find("DYJetsToLL_Pt-100To250_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8") !=-1 : return 81.22
elif fileName.find("DYJetsToLL_M-50_HT-800to1200_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 0.8052
elif fileName.find("TTZPrimeToMuMu_M-1500_TuneCP5_PSWeights_13TeV-madgraph-pythia8") !=-1 : return 0.0001903
elif fileName.find("TTZPrimeToMuMu_M-1900_TuneCP5_PSWeights_13TeV-madgraph-pythia8") !=-1 : return 4.862e-05
elif fileName.find("TTZPrimeToMuMu_M-1000_TuneCP5_PSWeights_13TeV-madgraph-pythia8") !=-1 : return 0.00154
elif fileName.find("DYJetsToLL_Pt-400To650_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8") !=-1 : return 0.3882
elif fileName.find("TTZPrimeToMuMu_M-1800_TuneCP5_PSWeights_13TeV-madgraph-pythia8") !=-1 : return 6.693e-05
elif fileName.find("TTZPrimeToMuMu_M-1400_TuneCP5_PSWeights_13TeV-madgraph-pythia8") !=-1 : return 0.0002778
elif fileName.find("TTZPrimeToMuMu_M-1200_TuneCP5_PSWeights_13TeV-madgraph-pythia8") !=-1 : return 0.0006259
elif fileName.find("DYJetsToLL_Pt-250To400_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8") !=-1 : return 2.991
elif fileName.find("DYJetsToLL_M-800to1000_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8") !=-1 : return 0.03047
elif fileName.find("TTToSemiLeptonic_WspTgt150_TuneCUETP8M2T4_13TeV-powheg-pythia8") !=-1 : return 34.49
elif fileName.find("DYJetsToLL_M-50_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8_DownPS") !=-1 : return 5735.0
elif fileName.find("ST_s-channel_4f_leptonDecays_TuneCP5CR1_13TeV-amcatnlo-pythia8") !=-1 : return 3.74
elif fileName.find("DYJetsToLL_Pt-650ToInf_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8") !=-1 : return 0.03737
elif fileName.find("TTZPrimeToMuMu_M-900_TuneCP5_PSWeights_13TeV-madgraph-pythia8") !=-1 : return 0.002515
elif fileName.find("TTToHplusToWZprime_WToMuNu_ZToMuMu_MH-160_MZ-2_13TeV-madgraph") !=-1 : return 5.559e-05
elif fileName.find("DYJetsToLL_M-700to800_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8") !=-1 : return 0.03614
elif fileName.find("DYJetsToLL_M-200to400_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8") !=-1 : return 7.77
elif fileName.find("DYJetsToLL_M-400to500_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8") !=-1 : return 0.4065
elif fileName.find("TTToHplusToWZprime_WToMuNu_ZToMuMu_MH-140_MZ-8_13TeV-madgraph") !=-1 : return 4.225e-05
elif fileName.find("ST_tW_antitop_5f_inclusiveDecays_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 34.97
elif fileName.find("DYJetsToEE_M-50_LTbinned_95To100_5f_LO_13TeV-madgraph_pythia8") !=-1 : return 48.2
elif fileName.find("TTZPrimeToMuMu_M-800_TuneCP5_PSWeights_13TeV-madgraph-pythia8") !=-1 : return 0.004257
elif fileName.find("TTToHplusToWZprime_WToMuNu_ZToMuMu_MH-130_MZ-2_13TeV-madgraph") !=-1 : return 0.000168
elif fileName.find("Test_ZprimeToTT_M-4500_W-45_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 0.000701
elif fileName.find("TTTo2L2Nu_HT500Njet7_TuneCP5up_PSweights_13TeV-powheg-pythia8") !=-1 : return 11.59
elif fileName.find("TTToHplusToWZprime_WToMuNu_ZToMuMu_MH-160_MZ-8_13TeV-madgraph") !=-1 : return 1.365e-05
elif fileName.find("TTTo2L2Nu_TuneCP5CR2_GluonMove_PSweights_13TeV-powheg-pythia8") !=-1 : return 687.1
elif fileName.find("TTZPrimeToMuMu_M-400_TuneCP5_PSWeights_13TeV-madgraph-pythia8") !=-1 : return 0.06842
elif fileName.find("TTToHadronic_hdampDOWN_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 687.1
elif fileName.find("ST_s-channel_4f_leptonDecays_TuneCP5up_13TeV-amcatnlo-pythia8") !=-1 : return 3.74
elif fileName.find("TTToHadronic_mtop175p5_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 633.4
elif fileName.find("TTToHplusToWZprime_WToMuNu_ZToMuMu_MH-110_MZ-5_13TeV-madgraph") !=-1 : return 2.886e-05
elif fileName.find("TTToHplusToWZprime_WToMuNu_ZToMuMu_MH-120_MZ-5_13TeV-madgraph") !=-1 : return 5.067e-05
elif fileName.find("DYJetsToLL_M-500to700_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8") !=-1 : return 0.2334
elif fileName.find("TTToHplusToWZprime_WToMuNu_ZToMuMu_MH-140_MZ-2_13TeV-madgraph") !=-1 : return 0.0001727
elif fileName.find("TTZPrimeToMuMu_M-300_TuneCP5_PSWeights_13TeV-madgraph-pythia8") !=-1 : return 0.2194
elif fileName.find("DYJetsToLL_Pt-50To100_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8") !=-1 : return 354.8
elif fileName.find("TTToHadronic_mtop171p5_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 706.1
elif fileName.find("TTZPrimeToMuMu_M-600_TuneCP5_PSWeights_13TeV-madgraph-pythia8") !=-1 : return 0.01409
elif fileName.find("TTZPrimeToMuMu_M-500_TuneCP5_PSWeights_13TeV-madgraph-pythia8") !=-1 : return 0.0287
elif fileName.find("TTToHplusToWZprime_WToMuNu_ZToMuMu_MH-110_MZ-8_13TeV-madgraph") !=-1 : return 1.808e-05
elif fileName.find("TTToHplusToWZprime_WToMuNu_ZToMuMu_MH-100_MZ-2_13TeV-madgraph") !=-1 : return 2.652e-05
elif fileName.find("TTZToLLNuNu_M-10_TuneCP5down_PSweights_13TeV-amcatnlo-pythia8") !=-1 : return 0.2432
elif fileName.find("TTToHadronic_mtop169p5_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 746.2
elif fileName.find("TTToHplusToWZprime_WToMuNu_ZToMuMu_MH-130_MZ-5_13TeV-madgraph") !=-1 : return 6.649e-05
elif fileName.find("TTZPrimeToMuMu_M-700_TuneCP5_PSWeights_13TeV-madgraph-pythia8") !=-1 : return 0.007522
elif fileName.find("TTToHplusToWZprime_WToMuNu_ZToMuMu_MH-100_MZ-8_13TeV-madgraph") !=-1 : return 6.686e-06
elif fileName.find("DYJetsToLL_M-50_HT-100to200_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 161.1
elif fileName.find("TTToHadronic_mtop166p5_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 811.4
elif fileName.find("TTToHplusToWZprime_WToMuNu_ZToMuMu_MH-130_MZ-8_13TeV-madgraph") !=-1 : return 4.106e-05
elif fileName.find("TTToHadronic_mtop173p5_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 668.6
elif fileName.find("ST_s-channel_4f_hadronicDecays_TuneCP5_13TeV-amcatnlo-pythia8") !=-1 : return 11.24
elif fileName.find("TTToHplusToWZprime_WToMuNu_ZToMuMu_MH-150_MZ-5_13TeV-madgraph") !=-1 : return 5.214e-05
elif fileName.find("DYJetsToLL_M-50_HT-600to800_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 1.743
elif fileName.find("TTToHplusToWZprime_WToMuNu_ZToMuMu_MH-120_MZ-8_13TeV-madgraph") !=-1 : return 3.151e-05
elif fileName.find("TTToHplusToWZprime_WToMuNu_ZToMuMu_MH-150_MZ-2_13TeV-madgraph") !=-1 : return 0.0001319
elif fileName.find("TTToHplusToWZprime_WToMuNu_ZToMuMu_MH-100_MZ-5_13TeV-madgraph") !=-1 : return 1.058e-05
elif fileName.find("TTToHplusToWZprime_WToMuNu_ZToMuMu_MH-140_MZ-5_13TeV-madgraph") !=-1 : return 6.855e-05
elif fileName.find("DYJetsToLL_M-50_HT-200to400_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 48.66
elif fileName.find("BdToPsi2sKst_MuTrkFilter_DGamma0_TuneCP5_13TeV-pythia8-evtgen") !=-1 : return 5942000.0
elif fileName.find("DYJetsToLL_M-100to200_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8") !=-1 : return 226.6
elif fileName.find("TTToHplusToWZprime_WToMuNu_ZToMuMu_MH-120_MZ-2_13TeV-madgraph") !=-1 : return 0.0001282
elif fileName.find("TTToHplusToWZprime_WToMuNu_ZToMuMu_MH-110_MZ-2_13TeV-madgraph") !=-1 : return 7.276e-05
elif fileName.find("TTToHadronic_mtop178p5_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 584.6
elif fileName.find("DYJetsToLL_M-50_HT-400to600_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 6.968
elif fileName.find("TTToHplusToWZprime_WToMuNu_ZToMuMu_MH-150_MZ-8_13TeV-madgraph") !=-1 : return 3.237e-05
elif fileName.find("ST_s-channel_4f_hadronicDecays_TuneCP5_13TeV-madgraph-pythia8") !=-1 : return 11.24
elif fileName.find("TTToHplusToWZprime_WToMuNu_ZToMuMu_MH-160_MZ-5_13TeV-madgraph") !=-1 : return 2.198e-05
elif fileName.find("DYJetsToLL_Zpt-0To50_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8") !=-1 : return 5375.0
elif fileName.find("TTToHplusToWZprime_WToENu_ZToMuMu_MH-150_MZ-2_13TeV-madgraph") !=-1 : return 0.0001309
elif fileName.find("DYJetsToLL_M-50_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8_UpPS") !=-1 : return 6005.0
elif fileName.find("TTToHplusToWZprime_WToENu_ZToMuMu_MH-110_MZ-8_13TeV-madgraph") !=-1 : return 1.802e-05
elif fileName.find("TTToHplusToWZprime_WToENu_ZToMuMu_MH-140_MZ-2_13TeV-madgraph") !=-1 : return 0.0001729
elif fileName.find("TTToHplusToWZprime_WToENu_ZToMuMu_MH-120_MZ-8_13TeV-madgraph") !=-1 : return 3.146e-05
elif fileName.find("tZq_nunu_4f_ckm_NLO_TuneCP5_PSweights_13TeV-madgraph-pythia8") !=-1 : return 0.1337
elif fileName.find("DYJetsToEE_M-50_LTbinned_90To95_5f_LO_13TeV-madgraph_pythia8") !=-1 : return 167.3
elif fileName.find("DYJetsToEE_M-50_LTbinned_80To85_5f_LO_13TeV-madgraph_pythia8") !=-1 : return 159.9
elif fileName.find("DYJetsToEE_M-50_LTbinned_75To80_5f_LO_13TeV-madgraph_pythia8") !=-1 : return 134.6
elif fileName.find("TTZToLLNuNu_M-10_TuneCP5CR2_GluonMove_13TeV-amcatnlo-pythia8") !=-1 : return 0.2432
elif fileName.find("TTToHplusToWZprime_WToENu_ZToMuMu_MH-160_MZ-2_13TeV-madgraph") !=-1 : return 5.542e-05
elif fileName.find("TTToHplusToWZprime_WToMuNu_ZToMuMu_MH-90_MZ-8_13TeV-madgraph") !=-1 : return 9.926e-07
elif fileName.find("TTToHplusToWZprime_WToMuNu_ZToMuMu_MH-90_MZ-5_13TeV-madgraph") !=-1 : return 1.652e-06
elif fileName.find("TTToHplusToWZprime_WToENu_ZToMuMu_MH-140_MZ-5_13TeV-madgraph") !=-1 : return 6.851e-05
elif fileName.find("TTToHplusToWZprime_WToENu_ZToMuMu_MH-110_MZ-2_13TeV-madgraph") !=-1 : return 7.265e-05
elif fileName.find("TTToHplusToWZprime_WToENu_ZToMuMu_MH-140_MZ-8_13TeV-madgraph") !=-1 : return 4.231e-05
elif fileName.find("DYJetsToEE_M-50_LTbinned_85To90_5f_LO_13TeV-madgraph_pythia8") !=-1 : return 229.4
elif fileName.find("TTToSemilepton_TuneCUETP8M2_ttHtranche3_13TeV-powheg-pythia8") !=-1 : return 320.1
elif fileName.find("TTToHplusToWZprime_WToENu_ZToMuMu_MH-150_MZ-8_13TeV-madgraph") !=-1 : return 3.234e-05
elif fileName.find("TTToHplusToWZprime_WToENu_ZToMuMu_MH-160_MZ-5_13TeV-madgraph") !=-1 : return 2.212e-05
elif fileName.find("TTToHplusToWZprime_WToENu_ZToMuMu_MH-160_MZ-8_13TeV-madgraph") !=-1 : return 1.364e-05
elif fileName.find("TTToHplusToWZprime_WToENu_ZToMuMu_MH-130_MZ-8_13TeV-madgraph") !=-1 : return 4.116e-05
elif fileName.find("TTToHplusToWZprime_WToENu_ZToMuMu_MH-100_MZ-5_13TeV-madgraph") !=-1 : return 1.06e-05
elif fileName.find("TTToHplusToWZprime_WToENu_ZToMuMu_MH-100_MZ-2_13TeV-madgraph") !=-1 : return 2.654e-05
elif fileName.find("TTToHplusToWZprime_WToENu_ZToMuMu_MH-150_MZ-5_13TeV-madgraph") !=-1 : return 5.234e-05
elif fileName.find("TTToHplusToWZprime_WToENu_ZToMuMu_MH-110_MZ-5_13TeV-madgraph") !=-1 : return 2.894e-05
elif fileName.find("TTToHplusToWZprime_WToMuNu_ZToMuMu_MH-90_MZ-2_13TeV-madgraph") !=-1 : return 4.124e-06
elif fileName.find("ST_tW_DS_top_5f_inclusiveDecays_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 33.67
elif fileName.find("TTToHplusToWZprime_WToENu_ZToMuMu_MH-120_MZ-5_13TeV-madgraph") !=-1 : return 5.064e-05
elif fileName.find("TTToHplusToWZprime_WToENu_ZToMuMu_MH-120_MZ-2_13TeV-madgraph") !=-1 : return 0.0001276
elif fileName.find("TTTo2L2Nu_TuneCP5CR1_QCDbased_PSweights_13TeV-powheg-pythia8") !=-1 : return 687.1
elif fileName.find("TTToHplusToWZprime_WToENu_ZToMuMu_MH-130_MZ-2_13TeV-madgraph") !=-1 : return 0.0001671
elif fileName.find("TTToHplusToWZprime_WToENu_ZToMuMu_MH-100_MZ-8_13TeV-madgraph") !=-1 : return 6.706e-06
elif fileName.find("DYJetsToLL_M-50_HT-70to100_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 146.7
elif fileName.find("TTToHplusToWZprime_WToENu_ZToMuMu_MH-130_MZ-5_13TeV-madgraph") !=-1 : return 6.66e-05
elif fileName.find("TTToHplusToWZprime_WToENu_ZToMuMu_MH-90_MZ-2_13TeV-madgraph") !=-1 : return 4.126e-06
elif fileName.find("TTToSemiLeptonic_TuneCP5down_PSweights_13TeV-powheg-pythia8") !=-1 : return 687.1
elif fileName.find("DYToMuMu_M-4500To6000_NNPDF30NLO_TuneCUETP8M1_13TeV-pythia8") !=-1 : return 3.566e-07
elif fileName.find("ST_s-channel_4f_leptonDecays_TuneCP5_13TeV-amcatnlo-pythia8") !=-1 : return 3.74
elif fileName.find("DYJetsToLL_M-50_HT-40to70_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 311.4
elif fileName.find("DYToMuMu_M-3500To4500_NNPDF30NLO_TuneCUETP8M1_13TeV-pythia8") !=-1 : return 3.317e-06
elif fileName.find("DYToMuMu_M-2300To3500_NNPDF30NLO_TuneCUETP8M1_13TeV-pythia8") !=-1 : return 7.34e-05
elif fileName.find("ST_s-channel_4f_leptonDecays_TuneCP5_13TeV-madgraph-pythia8") !=-1 : return 3.74
elif fileName.find("TTToHplusToWZprime_WToENu_ZToMuMu_MH-90_MZ-8_13TeV-madgraph") !=-1 : return 9.872e-07
elif fileName.find("TTZToLLNuNu_M-10_TuneCP5up_PSweights_13TeV-amcatnlo-pythia8") !=-1 : return 0.2432
elif fileName.find("DY1JetsToLL_M-10to50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8") !=-1 : return 730.3
elif fileName.find("DY2JetsToLL_M-10to50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8") !=-1 : return 387.4
elif fileName.find("DYJetsToEE_M-50_LTbinned_5To75_5f_LO_13TeV-madgraph_pythia8") !=-1 : return 866.2
elif fileName.find("TTToHplusToWZprime_WToENu_ZToMuMu_MH-90_MZ-5_13TeV-madgraph") !=-1 : return 1.655e-06
elif fileName.find("DYJetsToLL_M-10to50_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8") !=-1 : return 18810.0
elif fileName.find("TTZToLLNuNu_M-10_TuneCP5CR1_QCDbased_13TeV-amcatnlo-pythia8") !=-1 : return 0.2432
elif fileName.find("DYJetsToEE_M-50_LTbinned_0To75_5f_LO_13TeV-madgraph_pythia8") !=-1 : return 948.2
elif fileName.find("TTToHadronic_hdampUP_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 687.0
elif fileName.find("TTJets_SingleLeptFromTbar_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 108.7
elif fileName.find("TTTo2L2Nu_HT500Njet7_hdampDOWN_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 10.58
elif fileName.find("DY3JetsToLL_M-10to50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8") !=-1 : return 95.02
elif fileName.find("TTTo2L2Nu_HT500Njet7_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 11.37
elif fileName.find("DY4JetsToLL_M-10to50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8") !=-1 : return 36.71
elif fileName.find("TTZJetsToQQ_Dilept_TuneCP5_PSweights_13TeV-amcatnlo-pythia8") !=-1 : return 0.0568
elif fileName.find("DYJetsToLL_M-50_TuneCUETP8M1_13TeV-madgraphMLM-herwigpp_30M") !=-1 : return 14240.0
elif fileName.find("DYToMuMu_M-1400To2300_NNPDF30NLO_TuneCUETP8M1_13TeV-pythia8") !=-1 : return 0.001178
elif fileName.find("DYJetsToLL_M-1500to2000_TuneCP5_13TeV-amcatnloFXFX-pythia8") !=-1 : return 0.002367
elif fileName.find("TTJets_DiLept_genMET-150_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 3.655
elif fileName.find("TTTo2L2Nu_mtop166p5_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 811.4
elif fileName.find("TTToSemiLepton_HT500Njet9_TuneCP5down_13TeV-powheg-pythia8") !=-1 : return 4.216
elif fileName.find("tZq_ll_4f_ckm_NLO_TuneCP5_PSweights_13TeV-amcatnlo-pythia8") !=-1 : return 0.07358
elif fileName.find("DYToMuMu_M-800To1400_NNPDF30NLO_TuneCUETP8M1_13TeV-pythia8") !=-1 : return 0.01437
elif fileName.find("DYJetsToLL_M-10to50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8") !=-1 : return 16270.0
elif fileName.find("TTTo2L2Nu_hdampDOWN_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 687.1
elif fileName.find("QCD_HT1500to2000_BGenFilter_TuneCP5_13TeV-madgraph-pythia8") !=-1 : return 13.61
elif fileName.find("DYToMuMu_M-6000ToInf_NNPDF30NLO_TuneCUETP8M1_13TeV-pythia8") !=-1 : return 1.576e-08
elif fileName.find("TTToSemiLeptonic_TuneCP5CR2_GluonMove_13TeV-powheg-pythia8") !=-1 : return 687.1
elif fileName.find("QCD_HT1000to1500_BGenFilter_TuneCP5_13TeV-madgraph-pythia8") !=-1 : return 138.2
elif fileName.find("DYJetsToLL_M-2000to3000_TuneCP5_13TeV-amcatnloFXFX-pythia8") !=-1 : return 0.0005409
elif fileName.find("TTTo2L2Nu_mtop173p5_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 668.6
elif fileName.find("DYJetsToLL_M-10to50_TuneCUETP8M1_14TeV-madgraphMLM-pythia8") !=-1 : return 17230.0
elif fileName.find("TTTo2L2Nu_mtop169p5_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 746.2
elif fileName.find("TTTo2L2Nu_mtop175p5_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 633.4
elif fileName.find("DYJetsToLL_M-1000to1500_TuneCP5_13TeV-amcatnloFXFX-pythia8") !=-1 : return 0.01828
elif fileName.find("TTTo2L2Nu_mtop178p5_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 584.6
elif fileName.find("TTTo2L2Nu_mtop171p5_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 706.1
elif fileName.find("TTToHplusToWA_WToMuNu_AToMuMu_MH-140_MA-15_13TeV-madgraph") !=-1 : return 0.01533
elif fileName.find("QCD_HT2000toInf_BGenFilter_TuneCP5_13TeV-madgraph-pythia8") !=-1 : return 2.92
elif fileName.find("TTToHplusToWA_WToMuNu_AToMuMu_MH-110_MA-20_13TeV-madgraph") !=-1 : return 0.004946
elif fileName.find("TTToHplusToWA_WToMuNu_AToMuMu_MH-110_MA-10_13TeV-madgraph") !=-1 : return 0.003822
elif fileName.find("TTToHplusToWA_WToMuNu_AToMuMu_MH-160_MA-25_13TeV-madgraph") !=-1 : return 0.00815
elif fileName.find("TTToHplusToWA_WToMuNu_AToMuMu_MH-130_MA-30_13TeV-madgraph") !=-1 : return 0.01965
elif fileName.find("TTToHplusToWA_WToMuNu_AToMuMu_MH-150_MA-25_13TeV-madgraph") !=-1 : return 0.0186
elif fileName.find("TTToHplusToWA_WToMuNu_AToMuMu_MH-140_MA-10_13TeV-madgraph") !=-1 : return 0.01061
elif fileName.find("TTToHplusToWA_WToMuNu_AToMuMu_MH-120_MA-20_13TeV-madgraph") !=-1 : return 0.01191
elif fileName.find("TTToHplusToWA_WToMuNu_AToMuMu_MH-110_MA-30_13TeV-madgraph") !=-1 : return 0.0009769
elif fileName.find("DYJetsToLL_Pt-650ToInf_TuneCP5_13TeV-amcatnloFXFX-pythia8") !=-1 : return 0.04796
elif fileName.find("DYToEE_M-3500To4500_NNPDF30NLO_TuneCUETP8M1_13TeV-pythia8") !=-1 : return 3.327e-06
elif fileName.find("TTToHplusToWA_WToMuNu_AToMuMu_MH-100_MA-15_13TeV-madgraph") !=-1 : return 0.001117
elif fileName.find("TTToHplusToWA_WToMuNu_AToMuMu_MH-110_MA-15_13TeV-madgraph") !=-1 : return 0.004958
elif fileName.find("TTToSemiLeptonic_TuneCP5up_PSweights_13TeV-powheg-pythia8") !=-1 : return 687.1
elif fileName.find("TTToHplusToWA_WToMuNu_AToMuMu_MH-140_MA-25_13TeV-madgraph") !=-1 : return 0.02274
elif fileName.find("DYJetsToLL_Pt-250To400_TuneCP5_13TeV-amcatnloFXFX-pythia8") !=-1 : return 3.774
elif fileName.find("TTToHplusToWA_WToMuNu_AToMuMu_MH-140_MA-20_13TeV-madgraph") !=-1 : return 0.01947
elif fileName.find("TTToHplusToWA_WToMuNu_AToMuMu_MH-120_MA-35_13TeV-madgraph") !=-1 : return 0.005199
elif fileName.find("TTToHplusToWA_WToMuNu_AToMuMu_MH-160_MA-10_13TeV-madgraph") !=-1 : return 0.003564
elif fileName.find("TTTo2L2Nu_HT500Njet7_hdampUP_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 12.39
elif fileName.find("DYJetsToLL_M-800to1000_TuneCP5_13TeV-amcatnloFXFX-pythia8") !=-1 : return 0.03406
elif fileName.find("TTToHplusToWA_WToMuNu_AToMuMu_MH-160_MA-30_13TeV-madgraph") !=-1 : return 0.009303
elif fileName.find("DYJetsToLL_M-3000toInf_TuneCP5_13TeV-amcatnloFXFX-pythia8") !=-1 : return 3.048e-05
elif fileName.find("ST_tW_top_5f_inclusiveDecays_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 34.91
elif fileName.find("TTToHplusToWA_WToMuNu_AToMuMu_MH-120_MA-30_13TeV-madgraph") !=-1 : return 0.009536
elif fileName.find("TTToHplusToWA_WToMuNu_AToMuMu_MH-130_MA-35_13TeV-madgraph") !=-1 : return 0.01695
elif fileName.find("DYToMuMu_M-200To400_NNPDF30NLO_TuneCUETP8M1_13TeV-pythia8") !=-1 : return 2.342
elif fileName.find("DYJetsToLL_Pt-400To650_TuneCP5_13TeV-amcatnloFXFX-pythia8") !=-1 : return 0.5164
elif fileName.find("BdToXKst_MuTrkFilter_DGamma0_TuneCP5_13TeV-pythia8-evtgen") !=-1 : return 7990000.0
elif fileName.find("TTToHplusToWA_WToMuNu_AToMuMu_MH-150_MA-35_13TeV-madgraph") !=-1 : return 0.02178
elif fileName.find("TTToHplusToWA_WToMuNu_AToMuMu_MH-150_MA-10_13TeV-madgraph") !=-1 : return 0.008312
elif fileName.find("DYToEE_M-4500To6000_NNPDF30NLO_TuneCUETP8M1_13TeV-pythia8") !=-1 : return 3.551e-07
elif fileName.find("DYToEE_M-2300To3500_NNPDF30NLO_TuneCUETP8M1_13TeV-pythia8") !=-1 : return 7.405e-05
elif fileName.find("TTToHplusToWA_WToMuNu_AToMuMu_MH-140_MA-30_13TeV-madgraph") !=-1 : return 0.02451
elif fileName.find("TTToSemiLeptonic_TuneCP5CR1_QCDbased_13TeV-powheg-pythia8") !=-1 : return 687.1
elif fileName.find("TTToHplusToWA_WToMuNu_AToMuMu_MH-150_MA-30_13TeV-madgraph") !=-1 : return 0.02076
elif fileName.find("TTToHplusToWA_WToMuNu_AToMuMu_MH-130_MA-15_13TeV-madgraph") !=-1 : return 0.01426
elif fileName.find("TTToHplusToWA_WToMuNu_AToMuMu_MH-110_MA-25_13TeV-madgraph") !=-1 : return 0.003278
elif fileName.find("TTToHplusToWA_WToMuNu_AToMuMu_MH-120_MA-15_13TeV-madgraph") !=-1 : return 0.01005
elif fileName.find("TTToHplusToWA_WToMuNu_AToMuMu_MH-160_MA-35_13TeV-madgraph") !=-1 : return 0.01012
elif fileName.find("TTToHplusToWA_WToMuNu_AToMuMu_MH-120_MA-10_13TeV-madgraph") !=-1 : return 0.007271
elif fileName.find("TTToHplusToWA_WToMuNu_AToMuMu_MH-140_MA-35_13TeV-madgraph") !=-1 : return 0.02426
elif fileName.find("TTToHplusToWA_WToMuNu_AToMuMu_MH-130_MA-25_13TeV-madgraph") !=-1 : return 0.01975
elif fileName.find("TTToHplusToWA_WToMuNu_AToMuMu_MH-130_MA-10_13TeV-madgraph") !=-1 : return 0.01005
elif fileName.find("QCD_HT700to1000_BGenFilter_TuneCP5_13TeV-madgraph-pythia8") !=-1 : return 721.8
elif fileName.find("DYToEE_M-1400To2300_NNPDF30NLO_TuneCUETP8M1_13TeV-pythia8") !=-1 : return 0.001177
elif fileName.find("TTToHplusToWA_WToMuNu_AToMuMu_MH-100_MA-20_13TeV-madgraph") !=-1 : return 0.0004878
elif fileName.find("TTToHplusToWA_WToMuNu_AToMuMu_MH-130_MA-20_13TeV-madgraph") !=-1 : return 0.01767
elif fileName.find("TTToHplusToWA_WToMuNu_AToMuMu_MH-150_MA-20_13TeV-madgraph") !=-1 : return 0.01555
elif fileName.find("DYJetsToLL_Pt-100To250_TuneCP5_13TeV-amcatnloFXFX-pythia8") !=-1 : return 96.8
elif fileName.find("TTToHplusToWA_WToMuNu_AToMuMu_MH-160_MA-15_13TeV-madgraph") !=-1 : return 0.005209
elif fileName.find("TTToHplusToWA_WToMuNu_AToMuMu_MH-160_MA-20_13TeV-madgraph") !=-1 : return 0.006725
elif fileName.find("TTToHplusToWA_WToMuNu_AToMuMu_MH-100_MA-10_13TeV-madgraph") !=-1 : return 0.001144
elif fileName.find("TTJets_DiLept_genMET-80_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 22.45
elif fileName.find("DYJetsToLL_M-1to10_TuneCUETP8M1_13TeV-madgraphMLM-pythia8") !=-1 : return 173100.0
elif fileName.find("TTZToLLNuNu_M-10_TuneCP5_PSweights_13TeV-amcatnlo-pythia8") !=-1 : return 0.2432
elif fileName.find("TTToHplusToWA_WToMuNu_AToMuMu_MH-150_MA-15_13TeV-madgraph") !=-1 : return 0.01206
elif fileName.find("TTToHplusToWA_WToMuNu_AToMuMu_MH-120_MA-25_13TeV-madgraph") !=-1 : return 0.01188
elif fileName.find("DYToMuMu_M-400To800_NNPDF30NLO_TuneCUETP8M1_13TeV-pythia8") !=-1 : return 0.2084
elif fileName.find("TTToHplusToWA_WToENu_AToMuMu_MH-100_MA-15_13TeV-madgraph") !=-1 : return 0.001121
elif fileName.find("TTToHplusToWA_WToENu_AToMuMu_MH-160_MA-15_13TeV-madgraph") !=-1 : return 0.005207
elif fileName.find("TTToHplusToWA_WToENu_AToMuMu_MH-150_MA-35_13TeV-madgraph") !=-1 : return 0.02177
elif fileName.find("TTToHplusToWA_WToENu_AToMuMu_MH-160_MA-10_13TeV-madgraph") !=-1 : return 0.003565
elif fileName.find("QCD_HT500to700_BGenFilter_TuneCP5_13TeV-madgraph-pythia8") !=-1 : return 3078.0
elif fileName.find("TTToHplusToWA_WToENu_AToMuMu_MH-100_MA-10_13TeV-madgraph") !=-1 : return 0.001146
elif fileName.find("TTToHplusToWA_WToENu_AToMuMu_MH-110_MA-25_13TeV-madgraph") !=-1 : return 0.003271
elif fileName.find("TTToHplusToWA_WToENu_AToMuMu_MH-140_MA-20_13TeV-madgraph") !=-1 : return 0.01949
elif fileName.find("DYJetsToLL_M-500to700_TuneCP5_13TeV-amcatnloFXFX-pythia8") !=-1 : return 0.2558
elif fileName.find("TTToHplusToWA_WToENu_AToMuMu_MH-110_MA-30_13TeV-madgraph") !=-1 : return 0.0009777
elif fileName.find("TTToHplusToWA_WToENu_AToMuMu_MH-130_MA-30_13TeV-madgraph") !=-1 : return 0.0196
elif fileName.find("TTToHplusToWA_WToENu_AToMuMu_MH-150_MA-25_13TeV-madgraph") !=-1 : return 0.01853
elif fileName.find("TTToSemiLepton_HT500Njet9_TuneCP5up_13TeV-powheg-pythia8") !=-1 : return 4.281
elif fileName.find("TTToHplusToWA_WToENu_AToMuMu_MH-140_MA-10_13TeV-madgraph") !=-1 : return 0.01066
elif fileName.find("TTToHplusToWA_WToMuNu_AToMuMu_MH-90_MA-10_13TeV-madgraph") !=-1 : return 0.000107
elif fileName.find("TTTo2L2Nu_hdampUP_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 687.0
elif fileName.find("TTToHplusToWA_WToENu_AToMuMu_MH-130_MA-25_13TeV-madgraph") !=-1 : return 0.01975
elif fileName.find("TTToSemiLeptonic_widthx1p15_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 687.1
elif fileName.find("TTToHplusToWA_WToENu_AToMuMu_MH-120_MA-30_13TeV-madgraph") !=-1 : return 0.009525
elif fileName.find("ST_tWnunu_5f_LO_TuneCP5_PSweights_13TeV-madgraph-pythia8") !=-1 : return 0.02099
elif fileName.find("TTToHplusToWA_WToENu_AToMuMu_MH-150_MA-10_13TeV-madgraph") !=-1 : return 0.008305
elif fileName.find("QCD_HT200to300_BGenFilter_TuneCP5_13TeV-madgraph-pythia8") !=-1 : return 111700.0
elif fileName.find("TTToHplusToWA_WToENu_AToMuMu_MH-150_MA-15_13TeV-madgraph") !=-1 : return 0.01205
elif fileName.find("QCD_HT100to200_BGenFilter_TuneCP5_13TeV-madgraph-pythia8") !=-1 : return 1275000.0
elif fileName.find("DYJetsToLL_Pt-50To100_TuneCP5_13TeV-amcatnloFXFX-pythia8") !=-1 : return 407.9
elif fileName.find("TTToHplusToWA_WToENu_AToMuMu_MH-130_MA-35_13TeV-madgraph") !=-1 : return 0.01689
elif fileName.find("DYJetsToLL_M-200to400_TuneCP5_13TeV-amcatnloFXFX-pythia8") !=-1 : return 8.502
elif fileName.find("TTToHplusToWA_WToENu_AToMuMu_MH-160_MA-20_13TeV-madgraph") !=-1 : return 0.006756
elif fileName.find("TTToHplusToWA_WToENu_AToMuMu_MH-160_MA-30_13TeV-madgraph") !=-1 : return 0.00934
elif fileName.find("DYJetsToLL_M-700to800_TuneCP5_13TeV-amcatnloFXFX-pythia8") !=-1 : return 0.04023
elif fileName.find("TTToHplusToWA_WToENu_AToMuMu_MH-160_MA-35_13TeV-madgraph") !=-1 : return 0.01011
elif fileName.find("DY4JetsToTauTau_M-50_TuneCUETP8M1_13TeV-madgraph-pythia8") !=-1 : return 18.17
elif fileName.find("TTToHplusToWA_WToENu_AToMuMu_MH-130_MA-15_13TeV-madgraph") !=-1 : return 0.01429
elif fileName.find("TTToHplusToWA_WToENu_AToMuMu_MH-120_MA-15_13TeV-madgraph") !=-1 : return 0.01007
elif fileName.find("TTToHplusToWA_WToENu_AToMuMu_MH-110_MA-20_13TeV-madgraph") !=-1 : return 0.004962
elif fileName.find("DY2JetsToTauTau_M-50_TuneCUETP8M1_13TeV-madgraph-pythia8") !=-1 : return 111.1
elif fileName.find("DYToEE_M-800To1400_NNPDF30NLO_TuneCUETP8M1_13TeV-pythia8") !=-1 : return 0.01445
elif fileName.find("DY3JetsToTauTau_M-50_TuneCUETP8M1_13TeV-madgraph-pythia8") !=-1 : return 34.04
elif fileName.find("DYJetsToLL_M-50_TuneCP5_13TeV-madgraphMLM-pythia8_Fall17") !=-1 : return 5350.0
elif fileName.find("TTToHplusToWA_WToENu_AToMuMu_MH-120_MA-10_13TeV-madgraph") !=-1 : return 0.007278
elif fileName.find("TTToHplusToWA_WToENu_AToMuMu_MH-150_MA-30_13TeV-madgraph") !=-1 : return 0.02073
elif fileName.find("DYJetsToLL_M-400to500_TuneCP5_13TeV-amcatnloFXFX-pythia8") !=-1 : return 0.4514
elif fileName.find("TTToHplusToWA_WToENu_AToMuMu_MH-150_MA-20_13TeV-madgraph") !=-1 : return 0.01554
elif fileName.find("TTToHplusToWA_WToENu_AToMuMu_MH-140_MA-15_13TeV-madgraph") !=-1 : return 0.01535
elif fileName.find("TTToHplusToWA_WToENu_AToMuMu_MH-160_MA-25_13TeV-madgraph") !=-1 : return 0.008158
elif fileName.find("TTToHplusToWA_WToENu_AToMuMu_MH-110_MA-10_13TeV-madgraph") !=-1 : return 0.003828
elif fileName.find("TTToHplusToWA_WToENu_AToMuMu_MH-130_MA-20_13TeV-madgraph") !=-1 : return 0.0177
elif fileName.find("DYToEE_M-6000ToInf_NNPDF30NLO_TuneCUETP8M1_13TeV-pythia8") !=-1 : return 1.585e-08
elif fileName.find("TTToSemiLeptonic_widthx0p85_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 687.1
elif fileName.find("TTToHplusToWA_WToENu_AToMuMu_MH-120_MA-25_13TeV-madgraph") !=-1 : return 0.0119
elif fileName.find("TTToHplusToWA_WToENu_AToMuMu_MH-120_MA-20_13TeV-madgraph") !=-1 : return 0.0119
elif fileName.find("TTToHplusToWA_WToENu_AToMuMu_MH-110_MA-15_13TeV-madgraph") !=-1 : return 0.004967
elif fileName.find("TTToHplusToWA_WToENu_AToMuMu_MH-140_MA-25_13TeV-madgraph") !=-1 : return 0.02275
elif fileName.find("TTToHplusToWA_WToENu_AToMuMu_MH-130_MA-10_13TeV-madgraph") !=-1 : return 0.009985
elif fileName.find("TTJets_SingleLeptFromT_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 109.6
elif fileName.find("TTToHplusToWA_WToENu_AToMuMu_MH-100_MA-20_13TeV-madgraph") !=-1 : return 0.0004875
elif fileName.find("QCD_HT300to500_BGenFilter_TuneCP5_13TeV-madgraph-pythia8") !=-1 : return 27960.0
elif fileName.find("TTToHplusToWA_WToENu_AToMuMu_MH-140_MA-35_13TeV-madgraph") !=-1 : return 0.02431
elif fileName.find("DYJetsToLL_M-100to200_TuneCP5_13TeV-amcatnloFXFX-pythia8") !=-1 : return 247.8
elif fileName.find("TTToHplusToWA_WToENu_AToMuMu_MH-120_MA-35_13TeV-madgraph") !=-1 : return 0.00514
elif fileName.find("TTToHplusToWA_WToENu_AToMuMu_MH-140_MA-30_13TeV-madgraph") !=-1 : return 0.02449
elif fileName.find("DY2JetsToLL_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8") !=-1 : return 334.7
elif fileName.find("TTToSemiLeptonic_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 687.1
elif fileName.find("DY3JetsToLL_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8") !=-1 : return 102.3
elif fileName.find("DY1JetsToLL_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8") !=-1 : return 1012.0
elif fileName.find("DY4JetsToLL_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8") !=-1 : return 54.52
elif fileName.find("TTToLL_MLL_1200To1800_TuneCUETP8M1_13TeV-powheg-pythia8") !=-1 : return 76.63
elif fileName.find("TTToSemiLeptonic_TuneCP2_PSweights_13TeV-powheg-pythia8") !=-1 : return 687.1
elif fileName.find("DYJetsToLL_M-50_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8") !=-1 : return 5941.0
elif fileName.find("TTToHplusToWA_WToENu_AToMuMu_MH-90_MA-10_13TeV-madgraph") !=-1 : return 0.000107
elif fileName.find("TTToSemiLeptonic_mtop166p5_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 811.4
elif fileName.find("TTToSemiLeptonic_widthx1p3_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 687.1
elif fileName.find("TTToSemiLeptonic_mtop173p5_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 668.6
elif fileName.find("TTToSemiLeptonic_mtop178p5_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 584.6
elif fileName.find("DYBJetsToLL_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8") !=-1 : return 70.08
elif fileName.find("TTTo2L2Nu_TuneCUETP8M2_ttHtranche3_13TeV-powheg-pythia8") !=-1 : return 76.7
elif fileName.find("DYToEE_M-200To400_NNPDF30NLO_TuneCUETP8M1_13TeV-pythia8") !=-1 : return 2.341
elif fileName.find("TTToSemiLeptonic_hdampDOWN_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 687.1
elif fileName.find("TTToSemiLeptonic_mtop169p5_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 746.2
elif fileName.find("TTToSemiLeptonic_mtop171p5_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 706.1
elif fileName.find("TTToSemiLeptonic_widthx0p7_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 687.1
elif fileName.find("DYToEE_M-400To800_NNPDF30NLO_TuneCUETP8M1_13TeV-pythia8") !=-1 : return 0.208
elif fileName.find("TTToHadronic_TuneCP5down_PSweights_13TeV-powheg-pythia8") !=-1 : return 687.1
elif fileName.find("TTWJetsToLNu_TuneCP5_13TeV-amcatnloFXFX-madspin-pythia8") !=-1 : return 0.2149
elif fileName.find("TTToSemiLeptonic_mtop175p5_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 633.4
elif fileName.find("DYJetsToLL_M-50_TuneCUETHS1_13TeV-madgraphMLM-herwigpp") !=-1 : return 358.6
elif fileName.find("TTToLL_MLL_1800ToInf_TuneCUETP8M1_13TeV-powheg-pythia8") !=-1 : return 76.63
elif fileName.find("TTToHadronic_TuneCP5CR2_GluonMove_13TeV-powheg-pythia8") !=-1 : return 687.1
elif fileName.find("DYJetsToLL_Pt-0To50_TuneCP5_13TeV-amcatnloFXFX-pythia8") !=-1 : return 106300.0
elif fileName.find("DYJetsToLL_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8") !=-1 : return 4963.0
elif fileName.find("TTWJetsToQQ_TuneCP5_13TeV-amcatnloFXFX-madspin-pythia8") !=-1 : return 0.4316
elif fileName.find("TTToSemiLepton_HT500Njet9_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 4.239
elif fileName.find("ST_tWll_5f_LO_TuneCP5_PSweights_13TeV-madgraph-pythia8") !=-1 : return 0.01103
elif fileName.find("TTToLL_MLL_800To1200_TuneCUETP8M1_13TeV-powheg-pythia8") !=-1 : return 76.63
elif fileName.find("Test_BulkGravToWW_narrow_M-4000_13TeV_TuneCP5-madgraph") !=-1 : return 4.543e-05
elif fileName.find("TTJets_HT-1200to2500_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 0.1316
elif fileName.find("tZq_W_lept_Z_hadron_4f_ckm_NLO_13TeV_amcatnlo_pythia8") !=-1 : return 0.1573
elif fileName.find("DYJetsToLL_M-10to50_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 15810
elif fileName.find("ttHTobb_ttToSemiLep_M125_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 0.5418
elif fileName.find("TTJets_HT-2500toInf_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 0.001407
elif fileName.find("TTTo2L2Nu_HT500Njet7_TuneCP5down_13TeV-powheg-pythia8") !=-1 : return 11.38
elif fileName.find("TTToHadronic_TuneCP5up_PSweights_13TeV-powheg-pythia8") !=-1 : return 687.1
elif fileName.find("TTJets_HT-800to1200_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 0.7532
elif fileName.find("TTToSemiLeptonic_hdampUP_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 687.0
elif fileName.find("TTToLL_MLL_500To800_TuneCUETP8M1_13TeV-powheg-pythia8") !=-1 : return 76.63
elif fileName.find("TTToHadronic_TuneCP5CR1_QCDbased_13TeV-powheg-pythia8") !=-1 : return 687.1
elif fileName.find("TTTo2L2Nu_TuneCP5down_PSweights_13TeV-powheg-pythia8") !=-1 : return 687.1
elif fileName.find("TTZPrimeToMuMu_M-1800_TuneCP5_13TeV-madgraph-pythia8") !=-1 : return 6.694e-05
elif fileName.find("TTZPrimeToMuMu_M-1600_TuneCP5_13TeV-madgraph-pythia8") !=-1 : return 0.0001324
elif fileName.find("TTZPrimeToMuMu_M-1200_TuneCP5_13TeV-madgraph-pythia8") !=-1 : return 0.0006278
elif fileName.find("ST_t-channel_antitop_5f_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 82.52
elif fileName.find("TTZPrimeToMuMu_M-1500_TuneCP5_13TeV-madgraph-pythia8") !=-1 : return 0.0001899
elif fileName.find("TTZPrimeToMuMu_M-1700_TuneCP5_13TeV-madgraph-pythia8") !=-1 : return 9.354e-05
elif fileName.find("TTJets_HT-600to800_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 1.821
elif fileName.find("TTZPrimeToMuMu_M-1000_TuneCP5_13TeV-madgraph-pythia8") !=-1 : return 0.001539
elif fileName.find("ST_t-channel_eleDecays_13TeV-comphep-pythia8_TuneCP5") !=-1 : return 24.85
elif fileName.find("ST_t-channel_tauDecays_13TeV-comphep-pythia8_TuneCP5") !=-1 : return 24.7
elif fileName.find("DYJetsToLL_M-5to50_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 81880.0
elif fileName.find("TTZPrimeToMuMu_M-2000_TuneCP5_13TeV-madgraph-pythia8") !=-1 : return 0.001539
elif fileName.find("TTZPrimeToMuMu_M-1400_TuneCP5_13TeV-madgraph-pythia8") !=-1 : return 0.0002776
elif fileName.find("TTZPrimeToMuMu_M-1300_TuneCP5_13TeV-madgraph-pythia8") !=-1 : return 0.0004127
elif fileName.find("TTZPrimeToMuMu_M-1900_TuneCP5_13TeV-madgraph-pythia8") !=-1 : return 4.857e-05
elif fileName.find("TTToHadronic_hdampDOWN_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 687.1
elif fileName.find("TTToHadronic_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 687.1
elif fileName.find("TTZPrimeToMuMu_M-600_TuneCP5_13TeV-madgraph-pythia8") !=-1 : return 0.01408
elif fileName.find("TTToHadronic_mtop169p5_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 746.2
elif fileName.find("TTZPrimeToMuMu_M-300_TuneCP5_13TeV-madgraph-pythia8") !=-1 : return 0.22
elif fileName.find("TTToSemiLeptonic_TuneCP5_erdON_13TeV-powheg-pythia8") !=-1 : return 687.1
elif fileName.find("TTZToLLNuNu_M-10_TuneCP5down_13TeV-amcatnlo-pythia8") !=-1 : return 0.2432
elif fileName.find("ST_t-channel_muDecays_13TeV-comphep-pythia8_TuneCP5") !=-1 : return 24.72
elif fileName.find("TTToHadronic_mtop171p5_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 706.1
elif fileName.find("TTZPrimeToMuMu_M-400_TuneCP5_13TeV-madgraph-pythia8") !=-1 : return 0.0684
elif fileName.find("DYBBJetsToLL_M-50_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 14.49
elif fileName.find("TTToHadronic_mtop166p5_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 811.4
elif fileName.find("ttHTobb_ttTo2L2Nu_M125_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 0.5418
elif fileName.find("TTTo2L2Nu_HT500Njet7_TuneCP5up_13TeV-powheg-pythia8") !=-1 : return 11.43
elif fileName.find("TTZPrimeToMuMu_M-900_TuneCP5_13TeV-madgraph-pythia8") !=-1 : return 0.002517
elif fileName.find("TTToHadronic_mtop178p5_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 584.6
elif fileName.find("TTToHadronic_mtop173p5_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 668.6
elif fileName.find("TTZPrimeToMuMu_M-700_TuneCP5_13TeV-madgraph-pythia8") !=-1 : return 0.007514
elif fileName.find("TTZPrimeToMuMu_M-500_TuneCP5_13TeV-madgraph-pythia8") !=-1 : return 0.02871
elif fileName.find("TTZPrimeToMuMu_M-800_TuneCP5_13TeV-madgraph-pythia8") !=-1 : return 0.004255
elif fileName.find("TTTo2L2Nu_TuneCP5CR2_GluonMove_13TeV-powheg-pythia8") !=-1 : return 687.1
elif fileName.find("TTToHadronic_mtop175p5_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 633.4
elif fileName.find("TTTo2L2Nu_noSC_TuneCUETP8M2T4_13TeV-powheg-pythia8") !=-1 : return 76.7
elif fileName.find("DY1JetsToLL_M-50_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 877.8
elif fileName.find("DYJetsToLL_M-50_TuneCP5_13TeV-amcatnloFXFX-pythia8") !=-1 : return 6529.0
elif fileName.find("QCD_HT1000to1500_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 1092.0
elif fileName.find("DY4JetsToLL_M-50_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 44.03
elif fileName.find("TTTo2L2Nu_TuneCP5up_PSweights_13TeV-powheg-pythia8") !=-1 : return 687.1
elif fileName.find("QCD_HT1500to2000_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 99.76
elif fileName.find("TTTo2L2Nu_TuneCP5CR1_QCDbased_13TeV-powheg-pythia8") !=-1 : return 687.1
elif fileName.find("DY3JetsToLL_M-50_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 111.5
elif fileName.find("DY2JetsToLL_M-50_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 304.4
elif fileName.find("TTToHadrons_mtop178p5_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 584.6
elif fileName.find("DYJetsToQQ_HT180_13TeV_TuneCP5-madgraphMLM-pythia8") !=-1 : return 1728.0
elif fileName.find("DYJetsToLL_M-50_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 5343.0
elif fileName.find("QCD_HT2000toInf_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 20.35
elif fileName.find("TTZToLLNuNu_M-10_TuneCP5up_13TeV-amcatnlo-pythia8") !=-1 : return 0.2432
elif fileName.find("TTTo2L2Nu_widthx1p15_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 687.1
elif fileName.find("DYTo2Mu_M1300_CUETP8M1_13TeV_Pythia8_Corrected-v3") !=-1 : return 0.001656
elif fileName.find("DYJetsToLL_M-50_TuneCH3_13TeV-madgraphMLM-herwig7") !=-1 : return 9.402
elif fileName.find("TTTo2L2Nu_HT500Njet7_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 11.28
elif fileName.find("DYJetsToEE_M-50_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 1795.0
elif fileName.find("QCD_HT700to1000_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 6344.0
elif fileName.find("TTToSemiLeptonic_TuneCP5down_13TeV-powheg-pythia8") !=-1 : return 687.1
elif fileName.find("DYJetsToLL_M-50_TuneCP2_13TeV-madgraphMLM-pythia8") !=-1 : return 4878.0
elif fileName.find("DYJetsToLL_M-50_TuneCP1_13TeV-madgraphMLM-pythia8") !=-1 : return 4661.0
elif fileName.find("TTToHadronic_hdampUP_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 687.0
elif fileName.find("TTTo2L2Nu_widthx0p85_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 687.1
elif fileName.find("DYTo2E_M1300_CUETP8M1_13TeV_Pythia8_Corrected-v3") !=-1 : return 0.001661
elif fileName.find("DYTo2Mu_M800_CUETP8M1_13TeV_Pythia8_Corrected-v3") !=-1 : return 0.01419
elif fileName.find("TTTo2L2Nu_TuneCP5_PSweights_13TeV-powheg-pythia8") !=-1 : return 687.1
elif fileName.find("DYTo2Mu_M300_CUETP8M1_13TeV_Pythia8_Corrected-v3") !=-1 : return 0.5658
elif fileName.find("TTTo2L2Nu_mtop175p5_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 633.4
elif fileName.find("TTTo2L2Nu_mtop169p5_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 746.2
elif fileName.find("DYJetsToLL_1J_TuneCP5_13TeV-amcatnloFXFX-pythia8") !=-1 : return 955.8
elif fileName.find("QCD_HT300to500_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 323400.0
elif fileName.find("DYToEE_M-50_NNPDF31_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 2137.0
elif fileName.find("TTTo2L2Nu_mtop173p5_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 668.6
elif fileName.find("TTTo2L2Nu_hdampDOWN_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 687.1
elif fileName.find("DYJetsToLL_0J_TuneCP5_13TeV-amcatnloFXFX-pythia8") !=-1 : return 5313.0
elif fileName.find("DYJetsToLL_2J_TuneCP5_13TeV-amcatnloFXFX-pythia8") !=-1 : return 360.4
elif fileName.find("TTTo2L2Nu_mtop178p5_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 584.6
elif fileName.find("TTTo2L2Nu_mtop171p5_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 706.1
elif fileName.find("QCD_HT200to300_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 1551000.0
elif fileName.find("TTTo2L2Nu_widthx0p7_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 687.1
elif fileName.find("QCD_HT500to700_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 30140.0
elif fileName.find("TTTo2L2Nu_mtop166p5_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 811.4
elif fileName.find("ST_t-channel_top_5f_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 119.7
elif fileName.find("TTTo2L2Nu_widthx1p3_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 687.1
elif fileName.find("QCD_HT100to200_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 23590000.0
elif fileName.find("QCD_HT50to100_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 185300000.0
elif fileName.find("TTZToLLNuNu_M-10_TuneCP5_13TeV-amcatnlo-pythia8") !=-1 : return 0.2432
elif fileName.find("TTToSemiLeptonic_TuneCP5up_13TeV-powheg-pythia8") !=-1 : return 687.1
elif fileName.find("TTJets_DiLept_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 54.23
elif fileName.find("QCD_HT1000to1500_TuneCP5_13TeV-madgraph-pythia8") !=-1 : return 1088.0
elif fileName.find("TTToHadronic_TuneCP5_erdON_13TeV-powheg-pythia8") !=-1 : return 687.1
elif fileName.find("TTToLL_MLL_500To800_41to65_NNPDF31_13TeV-powheg") !=-1 : return 687.1
elif fileName.find("QCD_HT1500to2000_TuneCP5_13TeV-madgraph-pythia8") !=-1 : return 99.11
elif fileName.find("TTZToLL_M-1to10_TuneCP5_13TeV-amcatnlo-pythia8") !=-1 : return 0.05324
elif fileName.find("TTTo2L2Nu_hdampUP_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 687.0
elif fileName.find("QCD_HT700to1000_TuneCP5_13TeV-madgraph-pythia8") !=-1 : return 6334.0
elif fileName.find("QCD_HT2000toInf_TuneCP5_13TeV-madgraph-pythia8") !=-1 : return 20.23
elif fileName.find("ST_tWlnuZ_5f_LO_TuneCP5_13TeV-madgraph-pythia8") !=-1 : return 0.001267
elif fileName.find("TTToLL_MLL_500To800_0to20_NNPDF31_13TeV-powheg") !=-1 : return 687.1
elif fileName.find("W4JetsToLNu_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 544.3
elif fileName.find("QCD_HT300to500_TuneCP5_13TeV-madgraph-pythia8") !=-1 : return 322600.0
elif fileName.find("W2JetsToLNu_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 2793.0
elif fileName.find("TTZZTo4b_5f_LO_TuneCP5_13TeV_madgraph_pythia8") !=-1 : return 0.001385
elif fileName.find("TTToSemiLeptonic_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 687.1
elif fileName.find("W3JetsToLNu_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 992.5
elif fileName.find("QCD_HT500to700_TuneCP5_13TeV-madgraph-pythia8") !=-1 : return 29980.0
elif fileName.find("QCD_HT200to300_TuneCP5_13TeV-madgraph-pythia8") !=-1 : return 1547000.0
elif fileName.find("W1JetsToLNu_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 8873.0
elif fileName.find("TTToHadronic_TuneCP5down_13TeV-powheg-pythia8") !=-1 : return 687.1
elif fileName.find("QCD_HT100to200_TuneCP5_13TeV-madgraph-pythia8") !=-1 : return 23700000.0
elif fileName.find("ST_tWqqZ_5f_LO_TuneCP5_13TeV-madgraph-pythia8") !=-1 : return 0.001122
elif fileName.find("DYToMuMu_pomflux_Pt-30_TuneCP5_13TeV-pythia8") !=-1 : return 4.219
elif fileName.find("WJetsToLNu_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 52940.0
elif fileName.find("TTTo2L2Nu_TuneCP5_erdON_13TeV-powheg-pythia8") !=-1 : return 687.1
elif fileName.find("TTToHadronic_TuneCP5up_13TeV-powheg-pythia8") !=-1 : return 687.1
elif fileName.find("TTTo2L2Nu_TuneCUETP8M1_14TeV-powheg-pythia8") !=-1 : return 90.75
elif fileName.find("TTToLL_MLL_1200To1800_NNPDF31_13TeV-powheg") !=-1 : return 687.1
elif fileName.find("TTTo2L2Nu_TuneCP5down_13TeV-powheg-pythia8") !=-1 : return 687.1
elif fileName.find("DYJetsToQQ_HT180_13TeV-madgraphMLM-pythia8") !=-1 : return 1208.0
elif fileName.find("tZq_ll_4f_scaledown_13TeV-amcatnlo-pythia8") !=-1 : return 0.0758
elif fileName.find("TTToLL_MLL_1800ToInf_NNPDF31_13TeV-powheg") !=-1 : return 687.1
elif fileName.find("TTJets_TuneCP5_13TeV-amcatnloFXFX-pythia8") !=-1 : return 722.8
elif fileName.find("ttZJets_TuneCP5_13TeV_madgraphMLM_pythia8") !=-1 : return 0.5407
elif fileName.find("TTToHadronic_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 687.1
elif fileName.find("tZq_ll_4f_ckm_NLO_13TeV-amcatnlo-herwigpp") !=-1 : return 0.07579
elif fileName.find("ttWJets_TuneCP5_13TeV_madgraphMLM_pythia8") !=-1 : return 0.4611
elif fileName.find("TTToLL_MLL_800To1200_NNPDF31_13TeV-powheg") !=-1 : return 687.1
elif fileName.find("tZq_ll_4f_scaleup_13TeV-amcatnlo-pythia8") !=-1 : return 0.0758
elif fileName.find("DYToEE_M-50_NNPDF31_13TeV-powheg-pythia8") !=-1 : return 2137.0
elif fileName.find("TTJets_TuneCP5_13TeV-madgraphMLM-pythia8") !=-1 : return 496.1
elif fileName.find("DYToLL-M-50_3J_14TeV-madgraphMLM-pythia8") !=-1 : return 191.7
elif fileName.find("DYToLL-M-50_0J_14TeV-madgraphMLM-pythia8") !=-1 : return 3676.0
elif fileName.find("TTTo2L2Nu_TuneCP5up_13TeV-powheg-pythia8") !=-1 : return 687.1
elif fileName.find("DYToLL-M-50_1J_14TeV-madgraphMLM-pythia8") !=-1 : return 1090.0
elif fileName.find("DYToLL-M-50_2J_14TeV-madgraphMLM-pythia8") !=-1 : return 358.7
elif fileName.find("TTZToQQ_TuneCP5_13TeV-amcatnlo-pythia8") !=-1 : return 0.5104
elif fileName.find("TTTo2L2Nu_TuneCP5_13TeV-powheg-pythia8") !=-1 : return 687.1
elif fileName.find("TTZToBB_TuneCP5_13TeV-amcatnlo-pythia8") !=-1 : return 0.1118
elif fileName.find("DYToLL_M_1_TuneCUETP8M1_13TeV_pythia8") !=-1 : return 19670.0
elif fileName.find("DYToLL_2J_13TeV-amcatnloFXFX-pythia8") !=-1 : return 340.5
elif fileName.find("DYToLL_0J_13TeV-amcatnloFXFX-pythia8") !=-1 : return 4757.0
elif fileName.find("DYTo2Mu_M1300_CUETP8M1_13TeV-pythia8") !=-1 : return 78420000000.0
elif fileName.find("TTWZ_TuneCP5_13TeV-madgraph-pythia8") !=-1 : return 0.002441
elif fileName.find("TTWW_TuneCP5_13TeV-madgraph-pythia8") !=-1 : return 0.006979
elif fileName.find("TTZZ_TuneCP5_13TeV-madgraph-pythia8") !=-1 : return 0.001386
elif fileName.find("DYTo2Mu_M300_CUETP8M1_13TeV-pythia8") !=-1 : return 78390000000.0
elif fileName.find("TTZH_TuneCP5_13TeV-madgraph-pythia8") !=-1 : return 0.00113
elif fileName.find("TTTW_TuneCP5_13TeV-madgraph-pythia8") !=-1 : return 0.0007314
elif fileName.find("DYTo2E_M1300_CUETP8M1_13TeV-pythia8") !=-1 : return 78390000000.0
elif fileName.find("DYTo2Mu_M800_CUETP8M1_13TeV-pythia8") !=-1 : return 78420000000.0
elif fileName.find("TTWH_TuneCP5_13TeV-madgraph-pythia8") !=-1 : return 0.001141
elif fileName.find("WZ_TuneCP5_PSweights_13TeV-pythia8") !=-1 : return 27.52
elif fileName.find("WWZ_TuneCP5_13TeV-amcatnlo-pythia8") !=-1 : return 0.1676
elif fileName.find("DYTo2E_M800_CUETP8M1_13TeV-pythia8") !=-1 : return 78420000000.0
elif fileName.find("WW_TuneCP5_PSweights_13TeV-pythia8") !=-1 : return 76.15
elif fileName.find("ZZZ_TuneCP5_13TeV-amcatnlo-pythia8") !=-1 : return 0.01398
elif fileName.find("DYTo2E_M300_CUETP8M1_13TeV-pythia8") !=-1 : return 78420000000.0
elif fileName.find("WZZ_TuneCP5_13TeV-amcatnlo-pythia8") !=-1 : return 0.05565
elif fileName.find("tZq_ll_4f_13TeV-amcatnlo-herwigpp") !=-1 : return 0.0758
elif fileName.find("tZq_ll_4f_13TeV-amcatnlo-pythia8") !=-1 : return 0.0758
elif fileName.find("DYToLL_M-50_14TeV_pythia8_pilot1") !=-1 : return 4927.0
elif fileName.find("DYToLL_M-50_14TeV_pythia8") !=-1 : return 4963.0
elif fileName.find("WZ_TuneCP5_13TeV-pythia8") !=-1 : return 27.6
elif fileName.find("ZZ_TuneCP5_13TeV-pythia8") !=-1 : return 12.14
elif fileName.find("WW_TuneCP5_13TeV-pythia8") !=-1 : return 75.8
elif fileName.find("DYJetsToLL_Pt-100To250") !=-1 : return 84.014804
elif fileName.find("DYJetsToLL_Pt-400To650") !=-1 : return 0.436041144
elif fileName.find("DYJetsToLL_Pt-650ToInf") !=-1 : return 0.040981055
elif fileName.find("DYJetsToLL_Pt-250To400") !=-1 : return 3.228256512
elif fileName.find("DYJetsToLL_Pt-50To100") !=-1 : return 363.81428
elif fileName.find("DYJetsToLL_Zpt-0To50") !=-1 : return 5352.57924
elif fileName.find("TTToSemiLeptonic") !=-1 : return 365.34
elif fileName.find("TTToHadronic") !=-1 : return 377.96
elif fileName.find("TTJetsFXFX") !=-1 : return 831.76
elif fileName.find("TTTo2L2Nu") !=-1 : return 88.29
elif fileName.find("DYToLL_0J") !=-1 : return 4620.519036
elif fileName.find("DYToLL_2J") !=-1 : return 338.258531
elif fileName.find("DYToLL_1J") !=-1 : return 859.589402
elif fileName.find("SingleMuon")!=-1 or fileName.find("SingleElectron") !=-1 or fileName.find("JetHT") !=-1 or fileName.find("MET") !=-1 or fileName.find("MTHT") !=-1: return 1.
else:
print("Cross section not defined! Returning 0 and skipping sample:\n{}\n".format(fileName))
return 0
| 105.236982
| 180
| 0.812285
| 14,530
| 99,028
| 5.233517
| 0.072815
| 0.147232
| 0.195258
| 0.066278
| 0.93046
| 0.898242
| 0.817156
| 0.727194
| 0.627684
| 0.488802
| 0
| 0.136066
| 0.066668
| 99,028
| 940
| 181
| 105.348936
| 0.686679
| 0.003504
| 0
| 0
| 0
| 0
| 0.570291
| 0.56799
| 0
| 0
| 0
| 0.001064
| 0
| 1
| 0.001068
| false
| 0
| 0
| 0
| 0.002137
| 0.001068
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ed7a53ae0448bee12a00c71c49df313210f6f3c5
| 119,088
|
py
|
Python
|
electrum_zcash/tests/test_wallet_vertical.py
|
zebra-lucky/electrum-zcash
|
2e66d47fa043d8c5ea536cd27a09dfb9e4ec314b
|
[
"MIT"
] | 11
|
2018-08-06T22:37:42.000Z
|
2021-09-19T12:44:16.000Z
|
electrum_zcash/tests/test_wallet_vertical.py
|
zebra-lucky/electrum-zcash
|
2e66d47fa043d8c5ea536cd27a09dfb9e4ec314b
|
[
"MIT"
] | 9
|
2019-04-28T18:33:24.000Z
|
2021-07-12T07:17:48.000Z
|
electrum_zcash/tests/test_wallet_vertical.py
|
zebra-lucky/electrum-zcash
|
2e66d47fa043d8c5ea536cd27a09dfb9e4ec314b
|
[
"MIT"
] | 14
|
2018-04-29T16:30:44.000Z
|
2022-03-25T13:42:40.000Z
|
import unittest
from unittest import mock
import shutil
import tempfile
from typing import Sequence
import asyncio
import copy
from electrum_zcash import storage, bitcoin, keystore, bip32, wallet
from electrum_zcash import Transaction
from electrum_zcash import SimpleConfig
from electrum_zcash.address_synchronizer import TX_HEIGHT_UNCONFIRMED, TX_HEIGHT_UNCONF_PARENT
from electrum_zcash.wallet import sweep, Multisig_Wallet, Standard_Wallet, Imported_Wallet, restore_wallet_from_text, Abstract_Wallet
from electrum_zcash.util import bfh, bh2u
from electrum_zcash.transaction import TxOutput, Transaction, PartialTransaction, PartialTxOutput, PartialTxInput, tx_from_any
from electrum_zcash.mnemonic import seed_type
from . import TestCaseForTestnet
from . import ElectrumTestCase
# Hex-encoded passphrase containing deliberately hostile Unicode (currency sign,
# emoji, kana, heavy combining marks) used below to exercise passphrase
# normalization in BIP39 seed derivation.
UNICODE_HORROR_HEX = 'e282bf20f09f988020f09f98882020202020e3818620e38191e3819fe381be20e3828fe3828b2077cda2cda2cd9d68cda16fcda2cda120ccb8cda26bccb5cd9f6eccb4cd98c7ab77ccb8cc9b73cd9820cc80cc8177cd98cda2e1b8a9ccb561d289cca1cda27420cca7cc9568cc816fccb572cd8fccb5726f7273cca120ccb6cda1cda06cc4afccb665cd9fcd9f20ccb6cd9d696ecda220cd8f74cc9568ccb7cca1cd9f6520cd9fcd9f64cc9b61cd9c72cc95cda16bcca2cca820cda168ccb465cd8f61ccb7cca2cca17274cc81cd8f20ccb4ccb7cda0c3b2ccb5ccb666ccb82075cca7cd986ec3adcc9bcd9c63cda2cd8f6fccb7cd8f64ccb8cda265cca1cd9d3fcd9e'
UNICODE_HORROR = bfh(UNICODE_HORROR_HEX).decode('utf-8')
# Sanity check: the hex blob must round-trip to the expected literal text.
assert UNICODE_HORROR == '₿ 😀 😈 う けたま わる w͢͢͝h͡o͢͡ ̸͢k̵͟n̴͘ǫw̸̛s͘ ̀́w͘͢ḩ̵a҉̡͢t ̧̕h́o̵r͏̵rors̡ ̶͡͠lį̶e͟͟ ̶͝in͢ ͏t̕h̷̡͟e ͟͟d̛a͜r̕͡k̢̨ ͡h̴e͏a̷̢̡rt́͏ ̴̷͠ò̵̶f̸ u̧͘ní̛͜c͢͏o̷͏d̸͢e̡͝?͞'
class WalletIntegrityHelper:
    """Factory helpers for building throwaway in-memory wallets in tests."""

    gap_limit = 1  # make tests run faster

    @classmethod
    def check_seeded_keystore_sanity(cls, test_obj, ks):
        """Assert that *ks* behaves like a seed-backed spending keystore."""
        test_obj.assertTrue(ks.is_deterministic())
        test_obj.assertFalse(ks.is_watching_only())
        test_obj.assertFalse(ks.can_import())
        test_obj.assertTrue(ks.has_seed())

    @classmethod
    def check_xpub_keystore_sanity(cls, test_obj, ks):
        """Assert that *ks* behaves like a watching-only xpub keystore."""
        test_obj.assertTrue(ks.is_deterministic())
        test_obj.assertTrue(ks.is_watching_only())
        test_obj.assertFalse(ks.can_import())
        test_obj.assertFalse(ks.has_seed())

    @classmethod
    def create_standard_wallet(cls, ks, *, config: SimpleConfig, gap_limit=None):
        """Build a Standard_Wallet around keystore *ks* on an in-memory DB."""
        wallet_db = storage.WalletDB('', manual_upgrades=False)
        wallet_db.put('keystore', ks.dump())
        wallet_db.put('gap_limit', gap_limit or cls.gap_limit)
        new_wallet = Standard_Wallet(wallet_db, None, config=config)
        new_wallet.synchronize()
        return new_wallet

    @classmethod
    def create_imported_wallet(cls, *, config: SimpleConfig, privkeys: bool):
        """Build an Imported_Wallet; with *privkeys* it carries an Imported_KeyStore."""
        wallet_db = storage.WalletDB('', manual_upgrades=False)
        if privkeys:
            imported_ks = keystore.Imported_KeyStore({})
            wallet_db.put('keystore', imported_ks.dump())
        return Imported_Wallet(wallet_db, None, config=config)

    @classmethod
    def create_multisig_wallet(cls, keystores: Sequence, multisig_type: str, *,
                               config: SimpleConfig, gap_limit=None):
        """Creates a multisig wallet."""
        wallet_db = storage.WalletDB('', manual_upgrades=True)
        # Cosigner slots are stored as 'x1/', 'x2/', ... in wallet order.
        for cosigner_index, cosigner_ks in enumerate(keystores, start=1):
            wallet_db.put('x%d/' % cosigner_index, cosigner_ks.dump())
        wallet_db.put('wallet_type', multisig_type)
        wallet_db.put('gap_limit', gap_limit or cls.gap_limit)
        new_wallet = Multisig_Wallet(wallet_db, None, config=config)
        new_wallet.synchronize()
        return new_wallet
class TestWalletKeystoreAddressIntegrityForMainnet(ElectrumTestCase):
    """Fixed-vector tests: known seeds must derive the expected keys and
    mainnet addresses.  Every literal below is a pinned expected value."""

    def setUp(self):
        super().setUp()
        # Fresh, isolated config per test.
        self.config = SimpleConfig({'electrum_path': self.electrum_path})

    @mock.patch.object(wallet.Abstract_Wallet, 'save_db')
    def test_electrum_seed_standard(self, mock_save_db):
        # 'standard'-type electrum seed -> BIP32 keystore -> p2pkh wallet.
        seed_words = 'cycle rocket west magnet parrot shuffle foot correct salt library feed song'
        self.assertEqual(seed_type(seed_words), 'standard')
        ks = keystore.from_seed(seed_words, '', False)
        WalletIntegrityHelper.check_seeded_keystore_sanity(self, ks)
        self.assertTrue(isinstance(ks, keystore.BIP32_KeyStore))
        self.assertEqual(ks.xprv, 'xprv9s21ZrQH143K32jECVM729vWgGq4mUDJCk1ozqAStTphzQtCTuoFmFafNoG1g55iCnBTXUzz3zWnDb5CVLGiFvmaZjuazHDL8a81cPQ8KL6')
        self.assertEqual(ks.xpub, 'xpub661MyMwAqRbcFWohJWt7PHsFEJfZAvw9ZxwQoDa4SoMgsDDM1T7WK3u9E4edkC4ugRnZ8E4xDZRpk8Rnts3Nbt97dPwT52CwBdDWroaZf8U')
        w = WalletIntegrityHelper.create_standard_wallet(ks, config=self.config)
        self.assertEqual(w.txin_type, 'p2pkh')
        self.assertEqual(w.get_receiving_addresses()[0], 't1fFMuEC9XFGsEUEPzpEE8jhxpcEMs369xJ')
        self.assertEqual(w.get_change_addresses()[0], 't1cKFzsmq8d97RtePBbqS1WebLofuXaXkzF')

    @mock.patch.object(wallet.Abstract_Wallet, 'save_db')
    def test_electrum_seed_old(self, mock_save_db):
        # Legacy 'old'-type electrum seed -> Old_KeyStore (mpk-based).
        seed_words = 'powerful random nobody notice nothing important anyway look away hidden message over'
        self.assertEqual(seed_type(seed_words), 'old')
        ks = keystore.from_seed(seed_words, '', False)
        WalletIntegrityHelper.check_seeded_keystore_sanity(self, ks)
        self.assertTrue(isinstance(ks, keystore.Old_KeyStore))
        self.assertEqual(ks.mpk, 'e9d4b7866dd1e91c862aebf62a49548c7dbf7bcc6e4b7b8c9da820c7737968df9c09d5a3e271dc814a29981f81b3faaf2737b551ef5dcc6189cf0f8252c442b3')
        w = WalletIntegrityHelper.create_standard_wallet(ks, config=self.config)
        self.assertEqual(w.txin_type, 'p2pkh')
        self.assertEqual(w.get_receiving_addresses()[0], 't1YAqEWYrfi9CbW5LgmayAvjDE5T5MgaYiD')
        self.assertEqual(w.get_change_addresses()[0], 't1cJ799hEFa5AHmB3ReeDo3Rr2X4quderf4')

    @mock.patch.object(wallet.Abstract_Wallet, 'save_db')
    def test_bip39_seed_bip44_standard(self, mock_save_db):
        # BIP39 seed, no passphrase, derived at the BIP44 account path.
        seed_words = 'treat dwarf wealth gasp brass outside high rent blood crowd make initial'
        self.assertEqual(keystore.bip39_is_checksum_valid(seed_words), (True, True))
        ks = keystore.from_bip39_seed(seed_words, '', "m/44'/0'/0'")
        self.assertTrue(isinstance(ks, keystore.BIP32_KeyStore))
        self.assertEqual(ks.xprv, 'xprv9zGLcNEb3cHUKizLVBz6RYeE9bEZAVPjH2pD1DEzCnPcsemWc3d3xTao8sfhfUmDLMq6e3RcEMEvJG1Et8dvfL8DV4h7mwm9J6AJsW9WXQD')
        self.assertEqual(ks.xpub, 'xpub6DFh1smUsyqmYD4obDX6ngaxhd53Zx7aeFjoobebm7vbkT6f9awJWFuGzBT9FQJEWFBL7UyhMXtYzRcwDuVbcxtv9Ce2W9eMm4KXLdvdbjv')
        w = WalletIntegrityHelper.create_standard_wallet(ks, config=self.config)
        self.assertEqual(w.txin_type, 'p2pkh')
        self.assertEqual(w.get_receiving_addresses()[0], 't1PbiEBABXU1E4GEnE31cUPULDoYYWVMpjs')
        self.assertEqual(w.get_change_addresses()[0], 't1Z8gbq4eeVbg89ACGddq1T6yfEP9bQx9Ki')

    @mock.patch.object(wallet.Abstract_Wallet, 'save_db')
    def test_bip39_seed_bip44_standard_passphrase(self, mock_save_db):
        # Same seed as above but with the hostile-Unicode passphrase, which
        # must change the derived keys (exercises NFKD normalization).
        seed_words = 'treat dwarf wealth gasp brass outside high rent blood crowd make initial'
        self.assertEqual(keystore.bip39_is_checksum_valid(seed_words), (True, True))
        ks = keystore.from_bip39_seed(seed_words, UNICODE_HORROR, "m/44'/0'/0'")
        self.assertTrue(isinstance(ks, keystore.BIP32_KeyStore))
        self.assertEqual(ks.xprv, 'xprv9z8izheguGnLopSqkY7GcGFrP2Gu6rzBvvHo6uB9B8DWJhsows6WDZAsbBTaP3ncP2AVbTQphyEQkahrB9s1L7ihZtfz5WGQPMbXwsUtSik')
        self.assertEqual(ks.xpub, 'xpub6D85QDBajeLe2JXJrZeGyQCaw47PWKi3J9DPuHakjTkVBWCxVQQkmMVMSSfnw39tj9FntbozpRtb1AJ8ubjeVSBhyK4M5mzdvsXZzKPwodT')
        w = WalletIntegrityHelper.create_standard_wallet(ks, config=self.config)
        self.assertEqual(w.txin_type, 'p2pkh')
        self.assertEqual(w.get_receiving_addresses()[0], 't1XzjgNCi9gUomksSCKhWe5Wc7dneqNzjvL')
        self.assertEqual(w.get_change_addresses()[0], 't1Zw1DMGp1KBtf7no6v7yDTbvutYDGJ8fS1')

    @mock.patch.object(wallet.Abstract_Wallet, 'save_db')
    def test_electrum_multisig_seed_standard(self, mock_save_db):
        # 2of2 multisig: one seed-backed keystore + one watching-only xpub.
        seed_words = 'blast uniform dragon fiscal ensure vast young utility dinosaur abandon rookie sure'
        self.assertEqual(seed_type(seed_words), 'standard')
        ks1 = keystore.from_seed(seed_words, '', True)
        WalletIntegrityHelper.check_seeded_keystore_sanity(self, ks1)
        self.assertTrue(isinstance(ks1, keystore.BIP32_KeyStore))
        self.assertEqual(ks1.xprv, 'xprv9s21ZrQH143K3t9vo23J3hajRbzvkRLJ6Y1zFrUFAfU3t8oooMPfb7f87cn5KntgqZs5nipZkCiBFo5ZtaSD2eDo7j7CMuFV8Zu6GYLTpY6')
        self.assertEqual(ks1.xpub, 'xpub661MyMwAqRbcGNEPu3aJQqXTydqR9t49Tkwb4Esrj112kw8xLthv8uybxvaki4Ygt9xiwZUQGeFTG7T2TUzR3eA4Zp3aq5RXsABHFBUrq4c')
        # electrum seed: ghost into match ivory badge robot record tackle radar elbow traffic loud
        ks2 = keystore.from_xpub('xpub661MyMwAqRbcGfCPEkkyo5WmcrhTq8mi3xuBS7VEZ3LYvsgY1cCFDbenT33bdD12axvrmXhuX3xkAbKci3yZY9ZEk8vhLic7KNhLjqdh5ec')
        WalletIntegrityHelper.check_xpub_keystore_sanity(self, ks2)
        self.assertTrue(isinstance(ks2, keystore.BIP32_KeyStore))
        w = WalletIntegrityHelper.create_multisig_wallet([ks1, ks2], '2of2', config=self.config)
        self.assertEqual(w.txin_type, 'p2sh')
        self.assertEqual(w.get_receiving_addresses()[0], 't3KcK3kAJerAahSJhN6Pt729u7ficQqK4XX')
        self.assertEqual(w.get_change_addresses()[0], 't3PQ7wZhzpoywPLnD1nfcd5sPYLtw2qh5ak')

    @mock.patch.object(wallet.Abstract_Wallet, 'save_db')
    def test_bip39_multisig_seed_bip45_standard(self, mock_save_db):
        # 2of2 multisig with BIP39 seeds derived at the BIP45 path m/45'/0.
        seed_words = 'treat dwarf wealth gasp brass outside high rent blood crowd make initial'
        self.assertEqual(keystore.bip39_is_checksum_valid(seed_words), (True, True))
        ks1 = keystore.from_bip39_seed(seed_words, '', "m/45'/0")
        self.assertTrue(isinstance(ks1, keystore.BIP32_KeyStore))
        self.assertEqual(ks1.xprv, 'xprv9vyEFyXf7pYVv4eDU3hhuCEAHPHNGuxX73nwtYdpbLcqwJCPwFKknAK8pHWuHHBirCzAPDZ7UJHrYdhLfn1NkGp9rk3rVz2aEqrT93qKRD9')
        self.assertEqual(ks1.xpub, 'xpub69xafV4YxC6o8Yiga5EiGLAtqR7rgNgNUGiYgw3S9g9pp6XYUne1KxdcfYtxwmA3eBrzMFuYcNQKfqsXCygCo4GxQFHfywxpUbKNfYvGJka')
        # bip39 seed: tray machine cook badge night page project uncover ritual toward person enact
        # der: m/45'/0
        ks2 = keystore.from_xpub('xpub6B26nSWddbWv7J3qQn9FbwPPQktSBdPQfLfHhRK4375QoZq8fvM8rQey1koGSTxC5xVoMzNMaBETMUmCqmXzjc8HyAbN7LqrvE4ovGRwNGg')
        WalletIntegrityHelper.check_xpub_keystore_sanity(self, ks2)
        self.assertTrue(isinstance(ks2, keystore.BIP32_KeyStore))
        w = WalletIntegrityHelper.create_multisig_wallet([ks1, ks2], '2of2', config=self.config)
        self.assertEqual(w.txin_type, 'p2sh')
        self.assertEqual(w.get_receiving_addresses()[0], 't3bG4QNCrrpk7mw4sdnTM56CkJfJZbEq42E')
        self.assertEqual(w.get_change_addresses()[0], 't3Y9aEFNpSYZdR5cY2NyRQq5QC6pniqAos6')

    @mock.patch.object(wallet.Abstract_Wallet, 'save_db')
    def test_bip32_extended_version_bytes(self, mock_save_db):
        # Build keystores directly from a raw BIP32 seed at m/ and check the
        # serialized extended keys and resulting p2pkh/p2sh addresses.
        seed_words = 'crouch dumb relax small truck age shine pink invite spatial object tenant'
        self.assertEqual(keystore.bip39_is_checksum_valid(seed_words), (True, True))
        bip32_seed = keystore.bip39_to_seed(seed_words, '')
        self.assertEqual('0df68c16e522eea9c1d8e090cfb2139c3b3a2abed78cbcb3e20be2c29185d3b8df4e8ce4e52a1206a688aeb88bfee249585b41a7444673d1f16c0d45755fa8b9',
                         bh2u(bip32_seed))

        def create_keystore_from_bip32seed(xtype):
            # Helper: wrap the raw seed in a fresh BIP32 keystore of *xtype*.
            ks = keystore.BIP32_KeyStore({})
            ks.add_xprv_from_seed(bip32_seed, xtype=xtype, derivation='m/')
            return ks

        ks = create_keystore_from_bip32seed(xtype='standard')
        self.assertEqual('033a05ec7ae9a9833b0696eb285a762f17379fa208b3dc28df1c501cf84fe415d0', ks.derive_pubkey(0, 0).hex())
        self.assertEqual('02bf27f41683d84183e4e930e66d64fc8af5508b4b5bf3c473c505e4dbddaeed80', ks.derive_pubkey(1, 0).hex())

        ks = create_keystore_from_bip32seed(xtype='standard')  # p2pkh
        w = WalletIntegrityHelper.create_standard_wallet(ks, config=self.config)
        self.assertEqual(ks.xprv, 'xprv9s21ZrQH143K3nyWMZVjzGL4KKAE1zahmhTHuV5pdw4eK3o3igC5QywgQG7UTRe6TGBniPDpPFWzXMeMUFbBj8uYsfXGjyMmF54wdNt8QBm')
        self.assertEqual(ks.xpub, 'xpub661MyMwAqRbcGH3yTb2kMQGnsLziRTJZ8vNthsVSCGbdBr8CGDWKxnGAFYgyKTzBtwvPPmfVAWJuFmxRXjSbUTg87wDkWQ5GmzpfUcN9t8Z')
        self.assertEqual(w.get_receiving_addresses()[0], 't1SY7Epzfp15qrRACUofDwj4tzEA8i4djyT')
        self.assertEqual(w.get_change_addresses()[0], 't1X787xzAzAaE9chDa2ADDu2wFJGmNsaLd3')

        ks = create_keystore_from_bip32seed(xtype='standard')  # p2sh
        w = WalletIntegrityHelper.create_multisig_wallet([ks], '1of1', config=self.config)
        self.assertEqual(ks.xprv, 'xprv9s21ZrQH143K3nyWMZVjzGL4KKAE1zahmhTHuV5pdw4eK3o3igC5QywgQG7UTRe6TGBniPDpPFWzXMeMUFbBj8uYsfXGjyMmF54wdNt8QBm')
        self.assertEqual(ks.xpub, 'xpub661MyMwAqRbcGH3yTb2kMQGnsLziRTJZ8vNthsVSCGbdBr8CGDWKxnGAFYgyKTzBtwvPPmfVAWJuFmxRXjSbUTg87wDkWQ5GmzpfUcN9t8Z')
        self.assertEqual(w.get_receiving_addresses()[0], 't3XwPmTv3kuuNZ8yjduC9AwVTwJDnZz3uLF')
        self.assertEqual(w.get_change_addresses()[0], 't3f1LveguwKGsq1pz7VoamNyxconrEhzwxa')
class TestWalletKeystoreAddressIntegrityForTestnet(TestCaseForTestnet):
    """Testnet variant of the fixed-vector derivation checks: the same raw
    BIP32 seed must serialize with testnet version bytes (tprv/tpub) and
    produce testnet addresses."""

    def setUp(self):
        super().setUp()
        # Fresh, isolated config per test.
        self.config = SimpleConfig({'electrum_path': self.electrum_path})

    @mock.patch.object(wallet.Abstract_Wallet, 'save_db')
    def test_bip32_extended_version_bytes(self, mock_save_db):
        seed_words = 'crouch dumb relax small truck age shine pink invite spatial object tenant'
        self.assertEqual(keystore.bip39_is_checksum_valid(seed_words), (True, True))
        bip32_seed = keystore.bip39_to_seed(seed_words, '')
        self.assertEqual('0df68c16e522eea9c1d8e090cfb2139c3b3a2abed78cbcb3e20be2c29185d3b8df4e8ce4e52a1206a688aeb88bfee249585b41a7444673d1f16c0d45755fa8b9',
                         bh2u(bip32_seed))

        def create_keystore_from_bip32seed(xtype):
            # Helper: wrap the raw seed in a fresh BIP32 keystore of *xtype*.
            ks = keystore.BIP32_KeyStore({})
            ks.add_xprv_from_seed(bip32_seed, xtype=xtype, derivation='m/')
            return ks

        ks = create_keystore_from_bip32seed(xtype='standard')
        self.assertEqual('033a05ec7ae9a9833b0696eb285a762f17379fa208b3dc28df1c501cf84fe415d0', ks.derive_pubkey(0, 0).hex())
        self.assertEqual('02bf27f41683d84183e4e930e66d64fc8af5508b4b5bf3c473c505e4dbddaeed80', ks.derive_pubkey(1, 0).hex())

        ks = create_keystore_from_bip32seed(xtype='standard')  # p2pkh
        w = WalletIntegrityHelper.create_standard_wallet(ks, config=self.config)
        self.assertEqual(ks.xprv, 'tprv8ZgxMBicQKsPecD328MF9ux3dSaSFWci7FNQmuWH7uZ86eY8i3XpvjK8KSH8To2QphiZiUqaYc6nzDC6bTw8YCB9QJjaQL5pAApN4z7vh2B')
        self.assertEqual(ks.xpub, 'tpubD6NzVbkrYhZ4Y5Epun1qZKcACU6NQqocgYyC4RYaYBMWw8nuLSMR7DvzVamkqxwRgrTJ1MBMhc8wwxT2vbHqMu8RBXy4BvjWMxR5EdZroxE')
        self.assertEqual(w.get_receiving_addresses()[0], 'tmJNrZfV5CfbLzfMe9XxxoPjebDExBN52Lu')
        self.assertEqual(w.get_change_addresses()[0], 'tmNwsSoUaNq5jHrtfEkTx5ZhgrHMauMbCqH')

        ks = create_keystore_from_bip32seed(xtype='standard')  # p2sh
        w = WalletIntegrityHelper.create_multisig_wallet([ks], '1of1', config=self.config)
        self.assertEqual(ks.xprv, 'tprv8ZgxMBicQKsPecD328MF9ux3dSaSFWci7FNQmuWH7uZ86eY8i3XpvjK8KSH8To2QphiZiUqaYc6nzDC6bTw8YCB9QJjaQL5pAApN4z7vh2B')
        self.assertEqual(ks.xpub, 'tpubD6NzVbkrYhZ4Y5Epun1qZKcACU6NQqocgYyC4RYaYBMWw8nuLSMR7DvzVamkqxwRgrTJ1MBMhc8wwxT2vbHqMu8RBXy4BvjWMxR5EdZroxE')
        self.assertEqual(w.get_receiving_addresses()[0], 't2Kvap92BdNWk6qZUZeCBiZg73nSxPKhj2y')
        self.assertEqual(w.get_change_addresses()[0], 't2SzXyKo3omtFNiQj3EodK1AbjJ225gzuk5')
class TestWalletSending(TestCaseForTestnet):
def setUp(self):
    """Create a per-test SimpleConfig rooted at this test's electrum_path."""
    super().setUp()
    cfg_options = {'electrum_path': self.electrum_path}
    self.config = SimpleConfig(cfg_options)
def create_standard_wallet_from_seed(self, seed_words, *, config=None, gap_limit=2):
    """Build a Standard_Wallet from an electrum seed (empty passphrase).

    Falls back to the test's own config when *config* is not given.
    """
    effective_config = config if config is not None else self.config
    seed_keystore = keystore.from_seed(seed_words, '', False)
    return WalletIntegrityHelper.create_standard_wallet(
        seed_keystore, gap_limit=gap_limit, config=effective_config)
@unittest.skip("skip until replace with zcash wallet")
@mock.patch.object(wallet.Abstract_Wallet, 'save_db')
def test_sending_between_p2sh_2of3_and_uncompressed_p2pkh(self, mock_save_db):
    """Round-trip spend between a 2of3 p2sh multisig and a p2pkh wallet.

    wallet1a and wallet1b are two cosigner views of the same 2of3 wallet;
    wallet1a creates a partial tx, wallet1b completes the signatures.  All
    txids and serializations below are pinned deterministic fixtures.
    NOTE(review): skipped until the fixtures are regenerated for zcash.
    """
    wallet1a = WalletIntegrityHelper.create_multisig_wallet(
        [
            keystore.from_seed('blast uniform dragon fiscal ensure vast young utility dinosaur abandon rookie sure', '', True),
            keystore.from_xpub('tpubD6NzVbkrYhZ4YTPEgwk4zzr8wyo7pXGmbbVUnfYNtx6SgAMF5q3LN3Kch58P9hxGNsTmP7Dn49nnrmpE6upoRb1Xojg12FGLuLHkVpVtS44'),
            keystore.from_xpub('tpubD6NzVbkrYhZ4XJzYkhsCbDCcZRmDAKSD7bXi9mdCni7acVt45fxbTVZyU6jRGh29ULKTjoapkfFsSJvQHitcVKbQgzgkkYsAmaovcro7Mhf')
        ],
        '2of3', gap_limit=2,
        config=self.config
    )
    wallet1b = WalletIntegrityHelper.create_multisig_wallet(
        [
            keystore.from_seed('cycle rocket west magnet parrot shuffle foot correct salt library feed song', '', True),
            keystore.from_xpub('tpubD6NzVbkrYhZ4YTPEgwk4zzr8wyo7pXGmbbVUnfYNtx6SgAMF5q3LN3Kch58P9hxGNsTmP7Dn49nnrmpE6upoRb1Xojg12FGLuLHkVpVtS44'),
            keystore.from_xpub('tpubD6NzVbkrYhZ4YARFMEZPckrqJkw59GZD1PXtQnw14ukvWDofR7Z1HMeSCxfYEZVvg4VdZ8zGok5VxHwdrLqew5cMdQntWc5mT7mh1CSgrnX')
        ],
        '2of3', gap_limit=2,
        config=self.config
    )
    # ^ third seed: ghost into match ivory badge robot record tackle radar elbow traffic loud
    wallet2 = self.create_standard_wallet_from_seed('powerful random nobody notice nothing important anyway look away hidden message over')

    # bootstrap wallet1: feed it a confirmed-looking funding tx.
    funding_tx = Transaction('010000000001014121f99dc02f0364d2dab3d08905ff4c36fc76c55437fd90b769c35cc18618280100000000fdffffff02d4c22d00000000001600143fd1bc5d32245850c8cb5be5b09c73ccbb9a0f75001bb7000000000017a91480c2353f6a7bc3c71e99e062655b19adb3dd2e4887024830450221008781c78df0c9d4b5ea057333195d5d76bc29494d773f14fa80e27d2f288b2c360220762531614799b6f0fb8d539b18cb5232ab4253dd4385435157b28a44ff63810d0121033de77d21926e09efd04047ae2d39dbd3fb9db446e8b7ed53e0f70f9c9478f735dac11300')
    funding_txid = funding_tx.txid()
    funding_output_value = 12000000
    self.assertEqual('b25cd55687c9e528c2cfd546054f35fb6741f7cf32d600f07dfecdf2e1d42071', funding_txid)
    wallet1a.receive_tx_callback(funding_txid, funding_tx, TX_HEIGHT_UNCONFIRMED)

    # wallet1 -> wallet2: wallet1a produces an incomplete (1-of-2-signed) psbt.
    outputs = [PartialTxOutput.from_address_and_value(wallet2.get_receiving_address(), 370000)]
    tx = wallet1a.mktx(outputs=outputs, password=None, fee=5000, tx_version=1)
    partial_tx = tx.serialize_as_bytes().hex()
    self.assertEqual("70736274ff01007501000000017120d4e1f2cdfe7df000d632cff74167fb354f0546d5cfc228e5c98756d55cb20100000000feffffff0250a50500000000001976a9149cd3dfb0d87a861770ae4e268e74b45335cf00ab88ac2862b1000000000017a9142e517854aa54668128c0e9a3fdd4dec13ad571368700000000000100e0010000000001014121f99dc02f0364d2dab3d08905ff4c36fc76c55437fd90b769c35cc18618280100000000fdffffff02d4c22d00000000001600143fd1bc5d32245850c8cb5be5b09c73ccbb9a0f75001bb7000000000017a91480c2353f6a7bc3c71e99e062655b19adb3dd2e4887024830450221008781c78df0c9d4b5ea057333195d5d76bc29494d773f14fa80e27d2f288b2c360220762531614799b6f0fb8d539b18cb5232ab4253dd4385435157b28a44ff63810d0121033de77d21926e09efd04047ae2d39dbd3fb9db446e8b7ed53e0f70f9c9478f735dac11300220202afb4af9a91264e1c6dce3ebe5312801723270ac0ba8134b7b49129328fcb0f284730440220751ee3599e59debb8b2aeef61bb5f574f26379cd961caf382d711a507bc632390220598d53e62557c4a5ab8cfb2f8948f37cca06a861714b55c781baf2c3d7a580b501010469522102afb4af9a91264e1c6dce3ebe5312801723270ac0ba8134b7b49129328fcb0f2821030b482838721a38d94847699fed8818b5c5f56500ef72f13489e365b65e5749cf2103e5db7969ae2f2576e6a061bf3bb2db16571e77ffb41e0b27170734359235cbce53ae220602afb4af9a91264e1c6dce3ebe5312801723270ac0ba8134b7b49129328fcb0f280c0036e9ac00000000000000002206030b482838721a38d94847699fed8818b5c5f56500ef72f13489e365b65e5749cf0c48adc7a00000000000000000220603e5db7969ae2f2576e6a061bf3bb2db16571e77ffb41e0b27170734359235cbce0cdb692427000000000000000000000100695221022ec6f62b0f3b7c2446f44346bff0a6f06b5fdbc27368be8a36478e0287fe47be21024238f21f90527dc87e945f389f3d1711943b06f0a738d5baab573fc0ab6c98582102b7139e93747d7c77f62af5a38b8a2b009f3456aa94dea9bf21f73a6298c867a253ae2202022ec6f62b0f3b7c2446f44346bff0a6f06b5fdbc27368be8a36478e0287fe47be0cdb69242701000000000000002202024238f21f90527dc87e945f389f3d1711943b06f0a738d5baab573fc0ab6c98580c0036e9ac0100000000000000220202b7139e93747d7c77f62af5a38b8a2b009f3456aa94dea9bf21f73a6298c867a20c48adc7a0010000000000000000",
                     partial_tx)
    tx = tx_from_any(partial_tx)  # simulates moving partial txn between cosigners
    self.assertFalse(tx.is_complete())
    # second cosigner adds the missing signature -> complete tx.
    wallet1b.sign_transaction(tx, password=None)
    self.assertTrue(tx.is_complete())
    self.assertEqual(1, len(tx.inputs()))
    self.assertEqual(wallet1a.txin_type, tx.inputs()[0].script_type)
    tx_copy = tx_from_any(tx.serialize())
    self.assertTrue(wallet1a.is_mine(wallet1a.get_txin_address(tx_copy.inputs()[0])))
    self.assertEqual('01000000017120d4e1f2cdfe7df000d632cff74167fb354f0546d5cfc228e5c98756d55cb201000000fc004730440220751ee3599e59debb8b2aeef61bb5f574f26379cd961caf382d711a507bc632390220598d53e62557c4a5ab8cfb2f8948f37cca06a861714b55c781baf2c3d7a580b501473044022023b55c679397bdf3a04d545adc6193eabc11b3a28850d3d46049a51a30c6732402205dbfdade5620e9072ae4aa7577c5f0fd294f59a6b0064cc7105093c0fe7a6d24014c69522102afb4af9a91264e1c6dce3ebe5312801723270ac0ba8134b7b49129328fcb0f2821030b482838721a38d94847699fed8818b5c5f56500ef72f13489e365b65e5749cf2103e5db7969ae2f2576e6a061bf3bb2db16571e77ffb41e0b27170734359235cbce53aefeffffff0250a50500000000001976a9149cd3dfb0d87a861770ae4e268e74b45335cf00ab88ac2862b1000000000017a9142e517854aa54668128c0e9a3fdd4dec13ad571368700000000',
                     str(tx_copy))
    self.assertEqual('b508ee1908181e55d2a18a5b2a3904dffbc7cb6b6320bbfba4433578d0f7831e', tx_copy.txid())
    self.assertEqual('b508ee1908181e55d2a18a5b2a3904dffbc7cb6b6320bbfba4433578d0f7831e', tx_copy.wtxid())
    self.assertEqual(tx.wtxid(), tx_copy.wtxid())
    wallet1a.receive_tx_callback(tx.txid(), tx, TX_HEIGHT_UNCONFIRMED)
    wallet2.receive_tx_callback(tx.txid(), tx, TX_HEIGHT_UNCONFIRMED)

    # wallet2 -> wallet1: single-sig wallet signs completely in one step.
    outputs = [PartialTxOutput.from_address_and_value(wallet1a.get_receiving_address(), 100000)]
    tx = wallet2.mktx(outputs=outputs, password=None, fee=5000, tx_version=1)
    self.assertTrue(tx.is_complete())
    self.assertEqual(1, len(tx.inputs()))
    self.assertEqual(wallet2.txin_type, tx.inputs()[0].script_type)
    tx_copy = tx_from_any(tx.serialize())
    self.assertTrue(wallet2.is_mine(wallet2.get_txin_address(tx_copy.inputs()[0])))
    self.assertEqual('01000000011e83f7d0783543a4fbbb20636bcbc7fbdf04392a5b8aa1d2551e180819ee08b5000000008a473044022007569f938b5d7a7f529ceccc413363d84325c11d589c1897660bebfd5fd1cc4302203ef71fa42f9b31bb1e816af13b0bf725c493a0405433390c783cd9374713c5880141045f7ba332df2a7b4f5d13f246e307c9174cfa9b8b05f3b83410a3c23ef8958d610be285963d67c7bc1feb082f168fa9877c25999963ff8b56b242a852b23e25edfeffffff02a08601000000000017a914efe136b8275f49bc0f9871eebb9a48d0516229fd87280b0400000000001976a914ca14915184a2662b5d1505ce7142c8ca066c70e288ac00000000',
                     str(tx_copy))
    self.assertEqual('30f6eec4db5e6b1dfe572dfbc7077661df9a15a2a1b7701612b906d3e1bee3d8', tx_copy.txid())
    self.assertEqual('30f6eec4db5e6b1dfe572dfbc7077661df9a15a2a1b7701612b906d3e1bee3d8', tx_copy.wtxid())
    self.assertEqual(tx.wtxid(), tx_copy.wtxid())
    wallet1a.receive_tx_callback(tx.txid(), tx, TX_HEIGHT_UNCONFIRMED)
    wallet2.receive_tx_callback(tx.txid(), tx, TX_HEIGHT_UNCONFIRMED)

    # wallet level checks: funding minus sent amounts/fees, plus received.
    self.assertEqual((0, funding_output_value - 370000 - 5000 + 100000, 0), wallet1a.get_balance())
    self.assertEqual((0, 370000 - 5000 - 100000, 0), wallet2.get_balance())
@unittest.skip("skip until replace with zcash wallet")
def test_sweep_p2pk(self):
    """Sweep a p2pk UTXO (discovered via a mocked network) to a destination.

    The NetworkMock answers only for the one scripthash/txid fixture; the
    resulting sweep transaction is pinned by exact serialization and txid.
    NOTE(review): skipped until the fixtures are regenerated for zcash.
    """

    class NetworkMock:
        # Minimal stand-in for the network interface used by sweep().
        relay_fee = 1000

        async def listunspent_for_scripthash(self, scripthash):
            # Only the swept p2pk script's hash yields a UTXO.
            if scripthash == '460e4fb540b657d775d84ff4955c9b13bd954c2adc26a6b998331343f85b6a45':
                return [{'tx_hash': 'ac24de8b58e826f60bd7b9ba31670bdfc3e8aedb2f28d0e91599d741569e3429', 'tx_pos': 1, 'height': 1325785, 'value': 1000000}]
            else:
                return []

        async def get_transaction(self, txid):
            if txid == "ac24de8b58e826f60bd7b9ba31670bdfc3e8aedb2f28d0e91599d741569e3429":
                return "010000000001021b41471d6af3aa80ebe536dbf4f505a6d46af456131a8e12e1950171959b690e0f00000000fdffffff2ef29833a69863b31e884fc5e6f7b99a23b5601e14f0eb65905faa42fec0776d0000000000fdffffff02f96a070000000000160014e61b989a740056254b5f8061281ac96ca15d35e140420f00000000004341049afa8fb50f52104b381a673c6e4fb7fb54987271d0e948dd9a568bb2af6f9310a7a809ce06e09d1510e5836f20414596232e2c0be63715459fa3cf8e7092af05ac0247304402201fe20012c1c732a6a8f942c4e0feed5ed0bddfb94db736ec3d0c0d38f0f7f46a022021d690e6d2688b90b76002f4c3134981502d666211e85e8a6ca91e78405dfa3801210346fb31136ab48e6c648865264d32004b43643d01f0ba485cffac4bb0b3f739470247304402204a2473ab4b3bfc8e6b1a6b8675dc2c3d115d8c04f5df37f29779dca6d300d9db02205e72ebbccd018c67b86ae4da6b0e6222902a8de85915ed6115330b9328764b370121027a93ffc9444a12d99307318e2e538949072cb35b2aca344b8163795a022414c7d73a1400"
            else:
                raise Exception("unexpected txid")

    privkeys = ['93NQ7CFbwTPyKDJLXe97jczw33fiLijam2SCZL3Uinz1NSbHrTu',]
    network = NetworkMock()
    dest_addr = 'tb1q3ws2p0qjk5vrravv065xqlnkckvzcpclk79eu2'
    sweep_coro = sweep(privkeys, network=network, config=self.config, to_address=dest_addr, fee=5000, locktime=1325785, tx_version=1)
    # NOTE(review): get_event_loop() is deprecated outside a running loop on
    # Python 3.10+; consider asyncio.run() when this test is re-enabled.
    loop = asyncio.get_event_loop()
    tx = loop.run_until_complete(sweep_coro)
    tx_copy = tx_from_any(tx.serialize())
    self.assertEqual('010000000129349e5641d79915e9d0282fdbaee8c3df0b6731bab9d70bf626e8588bde24ac010000004847304402206bf0d0a93abae0d5873a62ebf277a5dd2f33837821e8b93e74d04e19d71b578002201a6d729bc159941ef5c4c9e5fe13ece9fc544351ba531b00f68ba549c8b38a9a01fdffffff01b82e0f00000000001600148ba0a0bc12b51831f58c7ea8607e76c5982c071fd93a1400',
                     str(tx_copy))
    self.assertEqual('7f827fc5256c274fd1094eb7e020c8ded0baf820356f61aa4f14a9093b0ea0ee', tx_copy.txid())
    self.assertEqual('7f827fc5256c274fd1094eb7e020c8ded0baf820356f61aa4f14a9093b0ea0ee', tx_copy.wtxid())
    @unittest.skip("skip until replace with zcash wallet")
    @mock.patch.object(wallet.Abstract_Wallet, 'save_db')
    def test_standard_wallet_cannot_sign_multisig_input_even_if_cosigner(self, mock_save_db):
        """Just because our keystore recognizes the pubkeys in a txin, if the prevout does not belong to the wallet,
        then wallet.is_mine and wallet.can_sign should return False (e.g. multisig input for single-sig wallet).
        (see issue #5948)
        """
        # 2-of-2 multisig wallet; the first cosigner xpub is derived from the
        # same "frost ..." seed as the single-sig wallet created right below.
        wallet_2of2 = WalletIntegrityHelper.create_multisig_wallet(
            [
                # seed: frost repair depend effort salon ring foam oak cancel receive save usage
                # convert_xkey(wallet.get_master_public_key(), "p2wsh")
                keystore.from_xpub('Vpub5gqF73Wpbp9ThwEgZKHLjBDthsatXjajYvrN8CVnkdBYeTR1M1sfZFQqQ5wpKHGhnwKhzgMhaWrtgKG2LthCzxjd653KqKVUAw7UrwYnbKQ'),
                # seed: bitter grass shiver impose acquire brush forget axis eager alone wine silver
                # convert_xkey(wallet.get_master_public_key(), "p2wsh")
                keystore.from_xpub('Vpub5gSKXzxK7FeKNi2WPNW9iuA48SbJRZvKFBwtgucpegMWPdohQPeK2DoR6XFtC7BBLsHhfWDAPKaiecqJ7jTzYSfeg5YATowmPcgCWxARabT')
            ],
            '2of2', gap_limit=2,
            config=self.config
        )
        # Single-sig wallet whose keystore shares a seed with cosigner #1 above.
        wallet_frost = self.create_standard_wallet_from_seed('frost repair depend effort salon ring foam oak cancel receive save usage')
        # bootstrap wallet_2of2: feed it an unconfirmed funding tx so it has a spendable coin
        funding_tx = Transaction('020000000001018ed0132bb5f35d097572081524cd5e847c895e765b93d5af46b8a8bef621244a0100000000fdffffff0220a1070000000000220020302981db44eb5dad0dab3987134a985b360ae2227a7e7a10cfe8cffd23bacdc9b07912000000000016001442b423aab2aa803f957084832b10359beaa2469002473044022065c5e28900b4706487223357e8539e176552e3560e2081ac18de7c26e8e420ba02202755c7fc8177ff502634104c090e3fd4c4252bfa8566d4eb6605bb9e236e7839012103b63bbf85ec9e5e312e4d7a2b45e690f48b916a442e787a47a6092d6c052394c5966a1900')
        funding_txid = funding_tx.txid()
        self.assertEqual('0c2f5981981a6cb69d7b729feceb55be7962b16dc41e8aaf64e5203f7cb604d0', funding_txid)
        wallet_2of2.receive_tx_callback(funding_txid, funding_tx, TX_HEIGHT_UNCONFIRMED)
        # create tx: unsigned spend from the multisig wallet, pinned locktime,
        # then compare the PSBT serialization against a known-good vector
        outputs = [PartialTxOutput.from_address_and_value('tb1qfrlx5pza9vmez6vpx7swt8yp0nmgz3qa7jjkuf', 100_000)]
        coins = wallet_2of2.get_spendable_coins(domain=None)
        tx = wallet_2of2.make_unsigned_transaction(coins=coins, outputs=outputs, fee=5000)
        tx.locktime = 1665628
        partial_tx = tx.serialize_as_bytes().hex()
        self.assertEqual("70736274ff01007d0200000001d004b67c3f20e564af8a1ec46db16279be55ebec9f727b9db66c1a9881592f0c0000000000fdffffff02a08601000000000016001448fe6a045d2b3791698137a0e59c817cf681441df806060000000000220020eb428a0bdeca2c1b3731aedb81c0518456875a99755d177d204d6516d8f6b3075c6a1900000100ea020000000001018ed0132bb5f35d097572081524cd5e847c895e765b93d5af46b8a8bef621244a0100000000fdffffff0220a1070000000000220020302981db44eb5dad0dab3987134a985b360ae2227a7e7a10cfe8cffd23bacdc9b07912000000000016001442b423aab2aa803f957084832b10359beaa2469002473044022065c5e28900b4706487223357e8539e176552e3560e2081ac18de7c26e8e420ba02202755c7fc8177ff502634104c090e3fd4c4252bfa8566d4eb6605bb9e236e7839012103b63bbf85ec9e5e312e4d7a2b45e690f48b916a442e787a47a6092d6c052394c5966a19000105475221028d4c44ca36d2c4bff3813df8d5d3c0278357521ecb892cd694c473c03970e4c521030faee9b4a25b7db82023ca989192712cdd4cb53d3d9338591c7909e581ae1c0c52ae2206028d4c44ca36d2c4bff3813df8d5d3c0278357521ecb892cd694c473c03970e4c510e8a903980000008000000000000000002206030faee9b4a25b7db82023ca989192712cdd4cb53d3d9338591c7909e581ae1c0c10b2e35a7d0000008000000000000000000000010147522102105dd9133f33cbd4e50443ef9af428c0be61f097f8942aaa916f50b530125aea21028584e789e39f41391b2f27852ca18abec06a5411c21be350fed61eec7120de5352ae220202105dd9133f33cbd4e50443ef9af428c0be61f097f8942aaa916f50b530125aea10e8a903980000008001000000000000002202028584e789e39f41391b2f27852ca18abec06a5411c21be350fed61eec7120de5310b2e35a7d00000080010000000000000000",
                         partial_tx)
        tx = tx_from_any(partial_tx)  # simulates moving partial txn between cosigners
        self.assertFalse(tx.is_complete())
        self.assertEqual('652c1a903a659c9fabb9caf4a2281a9fbcc59cd598bf6edc88cd60f940c2352c', tx.txid())
        self.assertEqual('tb1qxq5crk6yadw66rdt8xr3xj5ctvmq4c3z0fl85yx0ar8l6ga6ehysk0rjrk', tx.inputs()[0].address)
        self.assertEqual('tb1qfrlx5pza9vmez6vpx7swt8yp0nmgz3qa7jjkuf', tx.outputs()[0].address)
        self.assertEqual('tb1qadpg5z77egkpkde34mdcrsz3s3tgwk5ew4w3wlfqf4j3dk8kkvrs3t3mn0', tx.outputs()[1].address)
        # check that wallet_frost does not mistakenly think tx is related to it in any way:
        # even though its keystore recognizes a cosigner pubkey, the prevout is not its own
        tx.add_info_from_wallet(wallet_frost)
        self.assertFalse(wallet_frost.can_sign(tx))
        self.assertFalse(any([wallet_frost.is_mine(txin.address) for txin in tx.inputs()]))
        self.assertFalse(any([wallet_frost.is_mine(txout.address) for txout in tx.outputs()]))
    @unittest.skip("skip until replace with zcash wallet")
    @mock.patch.object(wallet.Abstract_Wallet, 'save_db')
    def test_wallet_history_chain_of_unsigned_transactions(self, mock_save_db):
        """The wallet must track a chain of unsigned transactions:
        tx2 spends outputs of the still-unsigned tx1, and after adding both,
        the only spendable coin left is tx2's own output.
        """
        # NOTE(review): this local name shadows the module-level `wallet` import
        # used in the decorator above; harmless inside the body, but worth renaming.
        wallet = self.create_standard_wallet_from_seed('cross end slow expose giraffe fuel track awake turtle capital ranch pulp',
                                                       config=self.config, gap_limit=3)
        # bootstrap wallet with an unconfirmed funding tx
        funding_tx = Transaction('0200000000010132515e6aade1b79ec7dd3bac0896d8b32c56195d23d07d48e21659cef24301560100000000fdffffff0112841e000000000016001477fe6d2a27e8860c278d4d2cd90bad716bb9521a02473044022041ed68ef7ef122813ac6a5e996b8284f645c53fbe6823b8e430604a8915a867802203233f5f4d347a687eb19b2aa570829ab12aeeb29a24cc6d6d20b8b3d79e971ae012102bee0ee043817e50ac1bb31132770f7c41e35946ccdcb771750fb9696bdd1b307ad951d00')
        funding_txid = funding_tx.txid()
        self.assertEqual('db949963c3787c90a40fb689ffdc3146c27a9874a970d1fd20921afbe79a7aa9', funding_txid)
        wallet.receive_tx_callback(funding_txid, funding_tx, TX_HEIGHT_UNCONFIRMED)
        # create tx1: unsigned RBF spend; serialization pinned to a known-good PSBT
        outputs = [PartialTxOutput.from_address_and_value('tb1qsfcddwf7yytl62e3catwv8hpl2hs9e36g2cqxl', 100000)]
        coins = wallet.get_spendable_coins(domain=None)
        tx = wallet.make_unsigned_transaction(coins=coins, outputs=outputs, fee=190)
        tx.set_rbf(True)
        tx.locktime = 1938861
        tx.version = 2
        self.assertEqual("70736274ff0100710200000001a97a9ae7fb1a9220fdd170a974987ac24631dcff89b60fa4907c78c3639994db0000000000fdffffff02a0860100000000001600148270d6b93e2117fd2b31c756e61ee1faaf02e63ab4fc1c0000000000160014b8e4fdc91593b67de2bf214694ef47e38dc2ee8ead951d00000100bf0200000000010132515e6aade1b79ec7dd3bac0896d8b32c56195d23d07d48e21659cef24301560100000000fdffffff0112841e000000000016001477fe6d2a27e8860c278d4d2cd90bad716bb9521a02473044022041ed68ef7ef122813ac6a5e996b8284f645c53fbe6823b8e430604a8915a867802203233f5f4d347a687eb19b2aa570829ab12aeeb29a24cc6d6d20b8b3d79e971ae012102bee0ee043817e50ac1bb31132770f7c41e35946ccdcb771750fb9696bdd1b307ad951d002206026cc6a74c2b0e38661d341ffae48fe7dde5196ca4afe95d28b496673fa4cf6467105f83afb40000008000000000000000000022020312ea49b9b1eea28e3330316a5b7e6673b43e01da38f802c99a777d30b903fa5e105f83afb40000008000000000010000000022020349321bee98c012887997f26c6400018b0711dd254b702c038b96a30ebe2af1d2105f83afb400000080010000000000000000",
                         tx.serialize_as_bytes().hex())
        self.assertFalse(tx.is_complete())
        self.assertTrue(tx.is_segwit())
        wallet.add_transaction(tx)
        # create tx2, which spends from unsigned tx1 ('!' output value = spend max)
        outputs = [PartialTxOutput.from_address_and_value('tb1qq0lm9esmq6pfjc3jls7v6twy93lnqcs85wlth3', '!')]
        coins = wallet.get_spendable_coins(domain=None)
        tx = wallet.make_unsigned_transaction(coins=coins, outputs=outputs, fee=5000)
        tx.set_rbf(True)
        tx.locktime = 1938863
        tx.version = 2
        self.assertEqual("70736274ff01007b020000000288234495e0ff1d8ac06038f6cc5d5a92738d719f4c15afd581366da94754478f0000000000fdffffff88234495e0ff1d8ac06038f6cc5d5a92738d719f4c15afd581366da94754478f0100000000fdffffff01cc6f1e000000000016001403ffb2e61b0682996232fc3ccd2dc42c7f306207af951d000001011fa0860100000000001600148270d6b93e2117fd2b31c756e61ee1faaf02e63a22060312ea49b9b1eea28e3330316a5b7e6673b43e01da38f802c99a777d30b903fa5e105f83afb40000008000000000010000000001011fb4fc1c0000000000160014b8e4fdc91593b67de2bf214694ef47e38dc2ee8e22060349321bee98c012887997f26c6400018b0711dd254b702c038b96a30ebe2af1d2105f83afb4000000800100000000000000002202036f9a5913f1c22742dbc9e7f3ac3064be8b125a23563fcc8a519f387e16c7244c105f83afb400000080000000000200000000",
                         tx.serialize_as_bytes().hex())
        self.assertFalse(tx.is_complete())
        self.assertTrue(tx.is_segwit())
        wallet.add_transaction(tx)
        # the only spendable coin left must be tx2's output (prevout index 0)
        coins = wallet.get_spendable_coins(domain=None)
        self.assertEqual(1, len(coins))
        self.assertEqual("bf08206effded4126a95fbed375cedc0452b5e16a5d2025ac645dfae81addbe4:0",
                         coins[0].prevout.to_str())
class TestWalletOfflineSigning(TestCaseForTestnet):
    def setUp(self):
        """Give every test a fresh SimpleConfig pointed at this test's electrum dir."""
        super().setUp()
        self.config = SimpleConfig({'electrum_path': self.electrum_path})
    @unittest.skip("skip until replace with zcash wallet")
    @mock.patch.object(wallet.Abstract_Wallet, 'save_db')
    def test_sending_offline_old_electrum_seed_online_mpk(self, mock_save_db):
        """Offline wallet holds an old-style Electrum seed; online wallet only has
        the matching master public key. The online wallet builds the unsigned tx,
        the PSBT is moved across, and the offline wallet signs it.
        """
        wallet_offline = WalletIntegrityHelper.create_standard_wallet(
            keystore.from_seed('alone body father children lead goodbye phone twist exist grass kick join', '', False),
            gap_limit=4,
            config=self.config
        )
        wallet_online = WalletIntegrityHelper.create_standard_wallet(
            keystore.from_master_key('cd805ed20aec61c7a8b409c121c6ba60a9221f46d20edbc2be83ebd91460e97937cd7d782e77c1cb08364c6bc1c98bc040fdad53f22f29f7d3a85c8e51f9c875'),
            gap_limit=4,
            config=self.config
        )
        # bootstrap wallet_online with an unconfirmed funding tx
        funding_tx = Transaction('01000000000101161115f8d8110001aa0883989487f9c7a2faf4451038e4305c7594c5236cbb490100000000fdffffff0338117a0000000000160014c1d7b2ded7017cbde837aab36c1e7b2a3952a57800127a00000000001600143e2ab71fc9738ce16fbe6b3b1c210a68c12db84180969800000000001976a91424b64d981d621c227716b51479faf33019371f4688ac0247304402207a5efc6d970f6a5fdcd1933f68b353b4bf2904743f9f1dc3e9177d8754074baf02202eed707e661493bc450357f12cd7a8b8c610c7cb32ded10516c2933a2ba4346a01210287dce03f594fd889726b13a12970237992a0094a5c9f4eebcca6d50d454b39e9ff121600')
        funding_txid = funding_tx.txid()
        self.assertEqual('3b9e0581602f4656cb04633dac13662bc62d9f5191caa15cc901dcc76e430856', funding_txid)
        wallet_online.receive_tx_callback(funding_txid, funding_tx, TX_HEIGHT_UNCONFIRMED)
        # create unsigned tx on the watch-only (online) wallet; serialization is
        # pinned against a known-good PSBT vector
        outputs = [PartialTxOutput.from_address_and_value('tb1qyw3c0rvn6kk2c688y3dygvckn57525y8qnxt3a', 2500000)]
        tx = wallet_online.mktx(outputs=outputs, password=None, fee=5000)
        tx.locktime = 1446655
        tx.version = 1
        self.assertFalse(tx.is_complete())
        self.assertEqual(1, len(tx.inputs()))
        partial_tx = tx.serialize_as_bytes().hex()
        self.assertEqual("70736274ff01007401000000015608436ec7dc01c95ca1ca91519f2dc62b6613ac3d6304cb56462f6081059e3b0200000000fdffffff02a02526000000000016001423a3878d93d5acac68e7245a4433169d3d455087585d7200000000001976a914b6a6bbbc4cf9da58786a8acc58291e218d52130688acff121600000100fd000101000000000101161115f8d8110001aa0883989487f9c7a2faf4451038e4305c7594c5236cbb490100000000fdffffff0338117a0000000000160014c1d7b2ded7017cbde837aab36c1e7b2a3952a57800127a00000000001600143e2ab71fc9738ce16fbe6b3b1c210a68c12db84180969800000000001976a91424b64d981d621c227716b51479faf33019371f4688ac0247304402207a5efc6d970f6a5fdcd1933f68b353b4bf2904743f9f1dc3e9177d8754074baf02202eed707e661493bc450357f12cd7a8b8c610c7cb32ded10516c2933a2ba4346a01210287dce03f594fd889726b13a12970237992a0094a5c9f4eebcca6d50d454b39e9ff121600420604e79eb77f2f3f989f5e9d090bc0af50afeb0d5bd6ec916f2022c5629ed022e84a87584ef647d69f073ea314a0f0c110ebe24ad64bc1922a10819ea264fc3f35f50c343ddcab000000000100000000004202048e2004ca581afcc54a5d9b3b47affdf48b3f89e16d5bd96774fc0f167f2d7873bac6264e3d1f1bb96f64d1530a54e026e0bd7d674151d146fba582e79f4ef5e80c343ddcab010000000000000000",
                         partial_tx)
        tx_copy = tx_from_any(partial_tx)  # simulates moving partial txn between cosigners
        self.assertTrue(wallet_online.is_mine(wallet_online.get_txin_address(tx_copy.inputs()[0])))
        self.assertEqual(tx.txid(), tx_copy.txid())
        # sign tx with the seed-holding (offline) wallet; result must be complete
        tx = wallet_offline.sign_transaction(tx_copy, password=None)
        self.assertTrue(tx.is_complete())
        self.assertEqual('01000000015608436ec7dc01c95ca1ca91519f2dc62b6613ac3d6304cb56462f6081059e3b020000008a47304402206bed3e02af8a38f6ba2fa3bf5908cb8c643aa62e78e8de6d9af2e19dec55fafc0220039cc1d81d4e5e0292bbc54ea92b8ec4ec016d4828eedc8975a66952cedf13a1014104e79eb77f2f3f989f5e9d090bc0af50afeb0d5bd6ec916f2022c5629ed022e84a87584ef647d69f073ea314a0f0c110ebe24ad64bc1922a10819ea264fc3f35f5fdffffff02a02526000000000016001423a3878d93d5acac68e7245a4433169d3d455087585d7200000000001976a914b6a6bbbc4cf9da58786a8acc58291e218d52130688acff121600',
                         str(tx))
        # legacy (non-segwit) spend: txid == wtxid
        self.assertEqual('06032230d0bf6a277bc4f8c39e3311a712e0e614626d0dea7cc9f592abfae5d8', tx.txid())
        self.assertEqual('06032230d0bf6a277bc4f8c39e3311a712e0e614626d0dea7cc9f592abfae5d8', tx.wtxid())
    @unittest.skip("skip until replace with zcash wallet")
    @mock.patch.object(wallet.Abstract_Wallet, 'save_db')
    def test_sending_offline_xprv_online_xpub_p2pkh(self, mock_save_db):
        """Offline wallet holds an xprv, online wallet the matching xpub (p2pkh).
        The watch-only wallet builds the unsigned tx; the PSBT is transferred
        both as raw hex and via the QR-code encoding, then signed offline.
        """
        wallet_offline = WalletIntegrityHelper.create_standard_wallet(
            # bip39: "qwe", der: m/44'/1'/0'
            keystore.from_xprv('tprv8gfKwjuAaqtHgqxMh1tosAQ28XvBMkcY5NeFRA3pZMpz6MR4H4YZ3MJM4fvNPnRKeXR1Td2vQGgjorNXfo94WvT5CYDsPAqjHxSn436G1Eu'),
            gap_limit=4,
            config=self.config
        )
        wallet_online = WalletIntegrityHelper.create_standard_wallet(
            keystore.from_xpub('tpubDDMN69wQjDZxaJz9afZQGa48hZS7X5oSegF2hg67yddNvqfpuTN9DqvDEp7YyVf7AzXnqBqHdLhzTAStHvsoMDDb8WoJQzNrcHgDJHVYgQF'),
            gap_limit=4,
            config=self.config
        )
        # bootstrap wallet_online with a large many-input unconfirmed funding tx
        funding_tx = Transaction('01000000000116e9c9dac2651672316aab3b9553257b6942c5f762c5d795776d9cfa504f183c000000000000fdffffff8085019852fada9da84b58dcf753d292dde314a19f5a5527f6588fa2566142130000000000fdffffffa4154a48db20ce538b28722a89c6b578bd5b5d60d6d7b52323976339e39405230000000000fdffffff0b5ef43f843a96364aebd708e25ea1bdcf2c7df7d0d995560b8b1be5f357b64f0100000000fdffffffd41dfe1199c76fdb3f20e9947ea31136d032d9da48c5e45d85c8f440e2351a510100000000fdffffff5bd015d17e4a1837b01c24ebb4a6b394e3da96a85442bd7dc6abddfbf16f20510000000000fdffffff13a3e7f80b1bd46e38f2abc9e2f335c18a4b0af1778133c7f1c3caae9504345c0200000000fdffffffdf4fc1ab21bca69d18544ddb10a913cd952dbc730ab3d236dd9471445ff405680100000000fdffffffe0424d78a30d5e60ac6b26e2274d7d6e7c6b78fe0b49bdc3ac4dd2147c9535750100000000fdffffff7ab6dd6b3c0d44b0fef0fdc9ab0ad6eee23eef799eee29c005d52bc4461998760000000000fdffffff48a77e5053a21acdf4f235ce00c82c9bc1704700f54d217f6a30704711b9737d0000000000fdffffff86918b39c1d9bb6f34d9b082182f73cedd15504331164dc2b186e95c568ccb870000000000fdffffff15a847356cbb44be67f345965bb3f2589e2fec1c9a0ada21fd28225dcc602e8f0100000000fdffffff9a2875297f81dfd3b77426d63f621db350c270cc28c634ad86b9969ee33ac6960000000000fdffffffd6eeb1d1833e00967083d1ab86fa5a2e44355bd613d9277135240fe6f60148a20100000000fdffffffd8a6e5a9b68a65ff88220ca33e36faf6f826ae8c5c8a13fe818a5e63828b68a40100000000fdffffff73aab8471f82092e45ed1b1afeffdb49ea1ec74ce4853f971812f6a72a7e85aa0000000000fdffffffacd6459dec7c3c51048eb112630da756f5d4cb4752b8d39aa325407ae0885cba0000000000fdffffff1eddd5e13bef1aba1ff151762b5860837daa9b39db1eae8ea8227c81a5a1c8ba0000000000fdffffff67a096ff7c343d39e96929798097f6d7a61156bbdb905fbe534ba36f273271d40100000000fdffffff109a671eb7daf6dcd07c0ceff99f2de65864ab36d64fb3a890bab951569adeee0100000000fdffffff4f1bdc64da8056d08f79db7f5348d1de55946e57aa7c8279499c703889b6e0fd0200000000fdffffff042f280000000000001600149c756aa33f4f89418b33872a973274b5445c727b80969800000000001600146c540c1c9f546004539f45318b8d9f4d7b4857ef80969800000000001976a91422a6daa4a7b695c8a2dd104d47c5dc73d655c96f88ac809698000000000017a914a6885437e0762013facbda93894202a0fe86e35f8702473044022075ef5f04d7a63347064938e15a0c74277a79e5c9d32a26e39e8a517a44d565cc022015246790fb5b29c9bf3eded1b95699b1635bcfc6d521886fddf1135ba1b988ec012102801bc7170efb82c490e243204d86970f15966aa3bce6a06bef5c09a83a5bfffe02473044022061aa9b0d9649ffd7259bc54b35f678565dbbe11507d348dd8885522eaf1fa70c02202cc79de09e8e63e8d57fde6ef66c079ddac4d9828e1936a9db833d4c142615c3012103a8f58fc1f5625f18293403104874f2d38c9279f777e512570e4199c7d292b81b0247304402207744dc1ab0bf77c081b58540c4321d090c0a24a32742a361aa55ad86f0c7c24e02201a9b0dd78b63b495ab5a0b5b161c54cb085d70683c90e188bb4dc2e41e142f6601210361fb354f8259abfcbfbdda36b7cb4c3b05a3ca3d68dd391fd8376e920d93870d0247304402204803e423c321acc6c12cb0ebf196d2906842fdfed6de977cc78277052ee5f15002200634670c1dc25e6b1787a65d3e09c8e6bb0340238d90b9d98887e8fd53944e080121031104c60d027123bf8676bcaefaa66c001a0d3d379dc4a9492a567a9e1004452d02473044022050e4b5348d30011a22b6ae8b43921d29249d88ea71b1fbaa2d9c22dfdef58b7002201c5d5e143aa8835454f61b0742226ebf8cd466bcc2cdcb1f77b92e473d3b13190121030496b9d49aa8efece4f619876c60a77d2c0dc846390ecdc5d9acbfa1bb3128760247304402204d6a9b986e1a0e3473e8aef84b3eb7052442a76dfd7631e35377f141496a55490220131ab342853c01e31f111436f8461e28bc95883b871ca0e01b5f57146e79d7bb012103262ffbc88e25296056a3c65c880e3686297e07f360e6b80f1219d65b0900e84e02483045022100c8ffacf92efa1dddef7e858a241af7a80adcc2489bcc325195970733b1f35fac022076f40c26023a228041a9665c5290b9918d06f03b716e4d8f6d47e79121c7eb37012102d9ba7e02d7cd7dd24302f823b3114c99da21549c663f72440dc87e8ba412120902483045022100b55545d84e43d001bbc10a981f184e7d3b98a7ed6689863716cab053b3655a2f0220537eb76a695fbe86bf020b4b6f7ae93b506d778bbd0885f0a61067616a2c8bce0121034a57f2fa2c32c9246691f6a922fb1ebdf1468792bae7eff253a99fc9f2a5023902483045022100f1d4408463dbfe257f9f778d5e9c8cdb97c8b1d395dbd2e180bc08cad306492c022002a024e19e1a406eaa24467f033659de09ab58822987281e28bb6359288337bd012103e91daa18d924eea62011ce596e15b6d683975cf724ea5bf69a8e2022c26fc12f0247304402204f1e12b923872f396e5e1a3aa94b0b2e86b4ce448f4349a017631db26d7dff8a022069899a05de2ad2bbd8e0202c56ab1025a7db9a4998eea70744e3c367d2a7eb71012103b0eee86792dbef1d4a49bc4ea32d197c8c15d27e6e0c5c33e58e409e26d4a39a0247304402201787dacdb92e0df6ad90226649f0e8321287d0bd8fddc536a297dd19b5fc103e022001fe89300a76e5b46d0e3f7e39e0ee26cc83b71d59a2a5da1dd7b13350cd0c07012103afb1e43d7ec6b7999ef0f1093069e68fe1dfe5d73fc6cfb4f7a5022f7098758c02483045022100acc1212bba0fe4fcc6c3ae5cf8e25f221f140c8444d3c08dfc53a93630ac25da02203f12982847244bd9421ef340293f3a38d2ab5d028af60769e46fcc7d81312e7e012102801bc7170efb82c490e243204d86970f15966aa3bce6a06bef5c09a83a5bfffe024830450221009c04934102402949484b21899271c3991c007b783b8efc85a3c3d24641ac7c24022006fb1895ce969d08a2cb29413e1a85427c7e85426f7a185108ca44b5a0328cb301210360248db4c7d7f76fe231998d2967104fee04df8d8da34f10101cc5523e82648c02483045022100b11fe61b393fa5dbe18ab98f65c249345b429b13f69ee2d1b1335725b24a0e73022010960cdc5565cbc81885c8ed95142435d3c202dfa5a3dc5f50f3914c106335ce0121029c878610c34c21381cda12f6f36ab88bf60f5f496c1b82c357b8ac448713e7b50247304402200ca080db069c15bbf98e1d4dff68d0aea51227ff5d17a8cf67ceae464c22bbb0022051e7331c0918cbb71bb2cef29ca62411454508a16180b0fb5df94248890840df0121028f0be0cde43ff047edbda42c91c37152449d69789eb812bb2e148e4f22472c0f0247304402201fefe258938a2c481d5a745ef3aa8d9f8124bbe7f1f8c693e2ddce4ddc9a927c02204049e0060889ede8fda975edf896c03782d71ba53feb51b04f5ae5897d7431dc012103946730b480f52a43218a9edce240e8b234790e21df5e96482703d81c3c19d3f1024730440220126a6a56dbe69af78d156626fc9cf41d6aac0c07b8b5f0f8491f68db5e89cb5002207ee6ed6f2f41da256f3c1e79679a3de6cf34cc08b940b82be14aefe7da031a6b012102801bc7170efb82c490e243204d86970f15966aa3bce6a06bef5c09a83a5bfffe024730440220363204a1586d7f13c148295122cbf9ec7939685e3cadab81d6d9e921436d21b7022044626b8c2bd4aa7c167d74bc4e9eb9d0744e29ce0ad906d78e10d6d854f23d170121037fb9c51716739bb4c146857fab5a783372f72a65987d61f3b58c74360f4328dd0247304402207925a4c2a3a6b76e1055871717ee28fcb8c6fde161b9dc6382239af9f372ace99902204a58e31ce0b4a4804a42d2224331289311ded2748062c92c8aca769e81417a4c012102e18a8c235b48e41ef98265a8e07fa005d2602b96d585a61ad67168d74e7391cb02483045022100bbfe060479174a8d846b5a897526003eb2220ba307a5fee6e1e8de3e4e8b38fd02206723857301d447f67ac98a5a5c2b80ef6820e98fae213db1720f93d91161803b01210386728e2ac3ecee15f58d0505ee26f86a68f08c702941ffaf2fb7213e5026aea10247304402203a2613ae68f697eb02b5b7d18e3c4236966dac2b3a760e3021197d76e9ad4239022046f9067d3df650fcabbdfd250308c64f90757dec86f0b08813c979a42d06a6ec012102a1d7ee1cb4dc502f899aaafae0a2eb6cbf80d9a1073ae60ddcaabc3b1d1f15df02483045022100ab1bea2cc5388428fd126c7801550208701e21564bd4bd00cfd4407cfafc1acd0220508ee587f080f3c80a5c0b2175b58edd84b755e659e2135b3152044d75ebc4b501210236dd1b7f27a296447d0eb3750e1bdb2d53af50b31a72a45511dc1ec3fe7a684a19391400')
        funding_txid = funding_tx.txid()
        self.assertEqual('98574bc5f6e75769eb0c93d41453cc1dfbd15c14e63cc3c42f37cdbd08858762', funding_txid)
        wallet_online.receive_tx_callback(funding_txid, funding_tx, TX_HEIGHT_UNCONFIRMED)
        # create unsigned tx on the watch-only wallet
        outputs = [PartialTxOutput.from_address_and_value('tb1qp0mv2sxsyxxfj5gl0332f9uyez93su9cf26757', 2500000)]
        tx = wallet_online.mktx(outputs=outputs, password=None, fee=5000)
        tx.locktime = 1325340
        tx.version = 1
        self.assertFalse(tx.is_complete())
        self.assertEqual(1, len(tx.inputs()))
        orig_tx = tx
        # exercise both transfer encodings: raw hex PSBT and compressed QR payload
        for uses_qr_code in (False, True):
            with self.subTest(msg="uses_qr_code", uses_qr_code=uses_qr_code):
                tx = copy.deepcopy(orig_tx)
                if uses_qr_code:
                    partial_tx = tx.to_qr_data()
                    self.assertEqual("8VXO.MYW+UE2.+5LGGVQP.$087REZNQ8:6*U1CLU+NW7:.T7K04HTV.JW78BXOF$IM*4YYL6LWVSZ4QA0Q-1*8W38XJH833$K3EUK:87-TGQ86XAQ3/RD*PZKM1RLVRAVCFG/8.UHCF8IX*ED1HXNGI*WQ37K*HWJ:XXNKMU.M2A$IYUM-AR:*P34/.EGOQF-YUJ.F0UF$LMW-YXWQU$$CMXD4-L21B7X5/OL7MKXCAD5-9IL/TDP5J2$13KFIH2K5B0/2F*/-XCY:/G-+8K*+1U$56WUE3:J/8KOGSRAN66CNZLG7Y4IB$Y*.S64CC2A9Q/-P5TQFZCF7F+CYG+V363/ME.W0WTPXJM3BC.YPH+Y3K7VIF2+0D.O.JS4LYMZ",
                                     partial_tx)
                else:
                    partial_tx = tx.serialize_as_bytes().hex()
                    self.assertEqual("70736274ff010074010000000162878508bdcd372fc4c33ce6145cd1fb1dcc5314d4930ceb6957e7f6c54b57980200000000fdffffff02a0252600000000001600140bf6c540d0218c99511f7c62a49784c88b1870b8585d7200000000001976a9149b308d0b3efd4e3469441bc83c3521afde4072b988ac1c391400000100fd4c0d01000000000116e9c9dac2651672316aab3b9553257b6942c5f762c5d795776d9cfa504f183c000000000000fdffffff8085019852fada9da84b58dcf753d292dde314a19f5a5527f6588fa2566142130000000000fdffffffa4154a48db20ce538b28722a89c6b578bd5b5d60d6d7b52323976339e39405230000000000fdffffff0b5ef43f843a96364aebd708e25ea1bdcf2c7df7d0d995560b8b1be5f357b64f0100000000fdffffffd41dfe1199c76fdb3f20e9947ea31136d032d9da48c5e45d85c8f440e2351a510100000000fdffffff5bd015d17e4a1837b01c24ebb4a6b394e3da96a85442bd7dc6abddfbf16f20510000000000fdffffff13a3e7f80b1bd46e38f2abc9e2f335c18a4b0af1778133c7f1c3caae9504345c0200000000fdffffffdf4fc1ab21bca69d18544ddb10a913cd952dbc730ab3d236dd9471445ff405680100000000fdffffffe0424d78a30d5e60ac6b26e2274d7d6e7c6b78fe0b49bdc3ac4dd2147c9535750100000000fdffffff7ab6dd6b3c0d44b0fef0fdc9ab0ad6eee23eef799eee29c005d52bc4461998760000000000fdffffff48a77e5053a21acdf4f235ce00c82c9bc1704700f54d217f6a30704711b9737d0000000000fdffffff86918b39c1d9bb6f34d9b082182f73cedd15504331164dc2b186e95c568ccb870000000000fdffffff15a847356cbb44be67f345965bb3f2589e2fec1c9a0ada21fd28225dcc602e8f0100000000fdffffff9a2875297f81dfd3b77426d63f621db350c270cc28c634ad86b9969ee33ac6960000000000fdffffffd6eeb1d1833e00967083d1ab86fa5a2e44355bd613d9277135240fe6f60148a20100000000fdffffffd8a6e5a9b68a65ff88220ca33e36faf6f826ae8c5c8a13fe818a5e63828b68a40100000000fdffffff73aab8471f82092e45ed1b1afeffdb49ea1ec74ce4853f971812f6a72a7e85aa0000000000fdffffffacd6459dec7c3c51048eb112630da756f5d4cb4752b8d39aa325407ae0885cba0000000000fdffffff1eddd5e13bef1aba1ff151762b5860837daa9b39db1eae8ea8227c81a5a1c8ba0000000000fdffffff67a096ff7c343d39e96929798097f6d7a61156bbdb905fbe534ba36f273271d40100000000fdffffff109a671eb7daf6dcd07c0ceff99f2de65864ab36d64fb3a890bab951569adeee0100000000fdffffff4f1bdc64da8056d08f79db7f5348d1de55946e57aa7c8279499c703889b6e0fd0200000000fdffffff042f280000000000001600149c756aa33f4f89418b33872a973274b5445c727b80969800000000001600146c540c1c9f546004539f45318b8d9f4d7b4857ef80969800000000001976a91422a6daa4a7b695c8a2dd104d47c5dc73d655c96f88ac809698000000000017a914a6885437e0762013facbda93894202a0fe86e35f8702473044022075ef5f04d7a63347064938e15a0c74277a79e5c9d32a26e39e8a517a44d565cc022015246790fb5b29c9bf3eded1b95699b1635bcfc6d521886fddf1135ba1b988ec012102801bc7170efb82c490e243204d86970f15966aa3bce6a06bef5c09a83a5bfffe02473044022061aa9b0d9649ffd7259bc54b35f678565dbbe11507d348dd8885522eaf1fa70c02202cc79de09e8e63e8d57fde6ef66c079ddac4d9828e1936a9db833d4c142615c3012103a8f58fc1f5625f18293403104874f2d38c9279f777e512570e4199c7d292b81b0247304402207744dc1ab0bf77c081b58540c4321d090c0a24a32742a361aa55ad86f0c7c24e02201a9b0dd78b63b495ab5a0b5b161c54cb085d70683c90e188bb4dc2e41e142f6601210361fb354f8259abfcbfbdda36b7cb4c3b05a3ca3d68dd391fd8376e920d93870d0247304402204803e423c321acc6c12cb0ebf196d2906842fdfed6de977cc78277052ee5f15002200634670c1dc25e6b1787a65d3e09c8e6bb0340238d90b9d98887e8fd53944e080121031104c60d027123bf8676bcaefaa66c001a0d3d379dc4a9492a567a9e1004452d02473044022050e4b5348d30011a22b6ae8b43921d29249d88ea71b1fbaa2d9c22dfdef58b7002201c5d5e143aa8835454f61b0742226ebf8cd466bcc2cdcb1f77b92e473d3b13190121030496b9d49aa8efece4f619876c60a77d2c0dc846390ecdc5d9acbfa1bb3128760247304402204d6a9b986e1a0e3473e8aef84b3eb7052442a76dfd7631e35377f141496a55490220131ab342853c01e31f111436f8461e28bc95883b871ca0e01b5f57146e79d7bb012103262ffbc88e25296056a3c65c880e3686297e07f360e6b80f1219d65b0900e84e02483045022100c8ffacf92efa1dddef7e858a241af7a80adcc2489bcc325195970733b1f35fac022076f40c26023a228041a9665c5290b9918d06f03b716e4d8f6d47e79121c7eb37012102d9ba7e02d7cd7dd24302f823b3114c99da21549c663f72440dc87e8ba412120902483045022100b55545d84e43d001bbc10a981f184e7d3b98a7ed6689863716cab053b3655a2f0220537eb76a695fbe86bf020b4b6f7ae93b506d778bbd0885f0a61067616a2c8bce0121034a57f2fa2c32c9246691f6a922fb1ebdf1468792bae7eff253a99fc9f2a5023902483045022100f1d4408463dbfe257f9f778d5e9c8cdb97c8b1d395dbd2e180bc08cad306492c022002a024e19e1a406eaa24467f033659de09ab58822987281e28bb6359288337bd012103e91daa18d924eea62011ce596e15b6d683975cf724ea5bf69a8e2022c26fc12f0247304402204f1e12b923872f396e5e1a3aa94b0b2e86b4ce448f4349a017631db26d7dff8a022069899a05de2ad2bbd8e0202c56ab1025a7db9a4998eea70744e3c367d2a7eb71012103b0eee86792dbef1d4a49bc4ea32d197c8c15d27e6e0c5c33e58e409e26d4a39a0247304402201787dacdb92e0df6ad90226649f0e8321287d0bd8fddc536a297dd19b5fc103e022001fe89300a76e5b46d0e3f7e39e0ee26cc83b71d59a2a5da1dd7b13350cd0c07012103afb1e43d7ec6b7999ef0f1093069e68fe1dfe5d73fc6cfb4f7a5022f7098758c02483045022100acc1212bba0fe4fcc6c3ae5cf8e25f221f140c8444d3c08dfc53a93630ac25da02203f12982847244bd9421ef340293f3a38d2ab5d028af60769e46fcc7d81312e7e012102801bc7170efb82c490e243204d86970f15966aa3bce6a06bef5c09a83a5bfffe024830450221009c04934102402949484b21899271c3991c007b783b8efc85a3c3d24641ac7c24022006fb1895ce969d08a2cb29413e1a85427c7e85426f7a185108ca44b5a0328cb301210360248db4c7d7f76fe231998d2967104fee04df8d8da34f10101cc5523e82648c02483045022100b11fe61b393fa5dbe18ab98f65c249345b429b13f69ee2d1b1335725b24a0e73022010960cdc5565cbc81885c8ed95142435d3c202dfa5a3dc5f50f3914c106335ce0121029c878610c34c21381cda12f6f36ab88bf60f5f496c1b82c357b8ac448713e7b50247304402200ca080db069c15bbf98e1d4dff68d0aea51227ff5d17a8cf67ceae464c22bbb0022051e7331c0918cbb71bb2cef29ca62411454508a16180b0fb5df94248890840df0121028f0be0cde43ff047edbda42c91c37152449d69789eb812bb2e148e4f22472c0f0247304402201fefe258938a2c481d5a745ef3aa8d9f8124bbe7f1f8c693e2ddce4ddc9a927c02204049e0060889ede8fda975edf896c03782d71ba53feb51b04f5ae5897d7431dc012103946730b480f52a43218a9edce240e8b234790e21df5e96482703d81c3c19d3f1024730440220126a6a56dbe69af78d156626fc9cf41d6aac0c07b8b5f0f8491f68db5e89cb5002207ee6ed6f2f41da256f3c1e79679a3de6cf34cc08b940b82be14aefe7da031a6b012102801bc7170efb82c490e243204d86970f15966aa3bce6a06bef5c09a83a5bfffe024730440220363204a1586d7f13c148295122cbf9ec7939685e3cadab81d6d9e921436d21b7022044626b8c2bd4aa7c167d74bc4e9eb9d0744e29ce0ad906d78e10d6d854f23d170121037fb9c51716739bb4c146857fab5a783372f72a65987d61f3b58c74360f4328dd0247304402207925a4c2a3a6b76e10558717ee28fcb8c6fde161b9dc6382239af9f372ace99902204a58e31ce0b4a4804a42d2224331289311ded2748062c92c8aca769e81417a4c012102e18a8c235b48e41ef98265a8e07fa005d2602b96d585a61ad67168d74e7391cb02483045022100bbfe060479174a8d846b5a897526003eb2220ba307a5fee6e1e8de3e4e8b38fd02206723857301d447f67ac98a5a5c2b80ef6820e98fae213db1720f93d91161803b01210386728e2ac3ecee15f58d0505ee26f86a68f08c702941ffaf2fb7213e5026aea10247304402203a2613ae68f697eb02b5b7d18e3c4236966dac2b3a760e3021197d76e9ad4239022046f9067d3df650fcabbdfd250308c64f90757dec86f0b08813c979a42d06a6ec012102a1d7ee1cb4dc502f899aaafae0a2eb6cbf80d9a1073ae60ddcaabc3b1d1f15df02483045022100ab1bea2cc5388428fd126c7801550208701e21564bd4bd00cfd4407cfafc1acd0220508ee587f080f3c80a5c0b2175b58edd84b755e659e2135b3152044d75ebc4b501210236dd1b7f27a296447d0eb3750e1bdb2d53af50b31a72a45511dc1ec3fe7a684a19391400220602ab053d10eda769fab03ab52ee4f1692730288751369643290a8506e31d1e80f00c233d2ae40000000002000000000022020327295144ffff9943356c2d6625f5e2d6411bab77fd56dce571fda6234324e3d90c233d2ae4010000000000000000",
                                     partial_tx)
                tx_copy = tx_from_any(partial_tx)  # simulates moving partial txn between cosigners
                self.assertTrue(wallet_online.is_mine(wallet_online.get_txin_address(tx_copy.inputs()[0])))
                self.assertEqual(tx.txid(), tx_copy.txid())
                # sign tx with the xprv-holding (offline) wallet; legacy p2pkh spend,
                # so the result is non-segwit and txid == wtxid
                tx = wallet_offline.sign_transaction(tx_copy, password=None)
                self.assertTrue(tx.is_complete())
                self.assertFalse(tx.is_segwit())
                self.assertEqual('d9c21696eca80321933e7444ca928aaf25eeda81aaa2f4e5c085d4d0a9cf7aa7', tx.txid())
                self.assertEqual('d9c21696eca80321933e7444ca928aaf25eeda81aaa2f4e5c085d4d0a9cf7aa7', tx.wtxid())
@unittest.skip("skip until replace with zcash wallet")
@mock.patch.object(wallet.Abstract_Wallet, 'save_db')
def test_offline_signing_beyond_gap_limit(self, mock_save_db):
    # Offline signer is created with gap_limit=1 (deliberately too low) while the
    # online watching wallet uses gap_limit=4.  The test checks that the offline
    # wallet can still sign a PSBT spending a key beyond its own gap limit,
    # because the PSBT carries the derivation info.
    # NOTE(review): currently skipped — fixtures are testnet-BTC, pending Zcash replacements.
wallet_offline = WalletIntegrityHelper.create_standard_wallet(
# bip39: "qwe", der: m/84'/1'/0'
keystore.from_xprv('vprv9K9hbuA23Bidgj1KRSHUZMa59jJLeZBpXPVn4RP7sBLArNhZxJjw4AX7aQmVTErDt4YFC11ptMLjbwxgrsH8GLQ1cx77KggWeVPeDBjr9xM'),
gap_limit=1,  # gap limit of offline wallet intentionally set too low
config=self.config
)
wallet_online = WalletIntegrityHelper.create_standard_wallet(
keystore.from_xpub('vpub5Y941QgusZGvuD5nXTpUvVWohm8q41uftcRNronjRWs9jB2iVr4BbxqbRfAoQjWHgJtDCQEXChgfsPbEuBnidtkFztZSD3zDKTrtwXa2LCa'),
gap_limit=4,
config=self.config
)
# bootstrap wallet_online
funding_tx = Transaction('01000000000116e9c9dac2651672316aab3b9553257b6942c5f762c5d795776d9cfa504f183c000000000000fdffffff8085019852fada9da84b58dcf753d292dde314a19f5a5527f6588fa2566142130000000000fdffffffa4154a48db20ce538b28722a89c6b578bd5b5d60d6d7b52323976339e39405230000000000fdffffff0b5ef43f843a96364aebd708e25ea1bdcf2c7df7d0d995560b8b1be5f357b64f0100000000fdffffffd41dfe1199c76fdb3f20e9947ea31136d032d9da48c5e45d85c8f440e2351a510100000000fdffffff5bd015d17e4a1837b01c24ebb4a6b394e3da96a85442bd7dc6abddfbf16f20510000000000fdffffff13a3e7f80b1bd46e38f2abc9e2f335c18a4b0af1778133c7f1c3caae9504345c0200000000fdffffffdf4fc1ab21bca69d18544ddb10a913cd952dbc730ab3d236dd9471445ff405680100000000fdffffffe0424d78a30d5e60ac6b26e2274d7d6e7c6b78fe0b49bdc3ac4dd2147c9535750100000000fdffffff7ab6dd6b3c0d44b0fef0fdc9ab0ad6eee23eef799eee29c005d52bc4461998760000000000fdffffff48a77e5053a21acdf4f235ce00c82c9bc1704700f54d217f6a30704711b9737d0000000000fdffffff86918b39c1d9bb6f34d9b082182f73cedd15504331164dc2b186e95c568ccb870000000000fdffffff15a847356cbb44be67f345965bb3f2589e2fec1c9a0ada21fd28225dcc602e8f0100000000fdffffff9a2875297f81dfd3b77426d63f621db350c270cc28c634ad86b9969ee33ac6960000000000fdffffffd6eeb1d1833e00967083d1ab86fa5a2e44355bd613d9277135240fe6f60148a20100000000fdffffffd8a6e5a9b68a65ff88220ca33e36faf6f826ae8c5c8a13fe818a5e63828b68a40100000000fdffffff73aab8471f82092e45ed1b1afeffdb49ea1ec74ce4853f971812f6a72a7e85aa0000000000fdffffffacd6459dec7c3c51048eb112630da756f5d4cb4752b8d39aa325407ae0885cba0000000000fdffffff1eddd5e13bef1aba1ff151762b5860837daa9b39db1eae8ea8227c81a5a1c8ba0000000000fdffffff67a096ff7c343d39e96929798097f6d7a61156bbdb905fbe534ba36f273271d40100000000fdffffff109a671eb7daf6dcd07c0ceff99f2de65864ab36d64fb3a890bab951569adeee0100000000fdffffff4f1bdc64da8056d08f79db7f5348d1de55946e57aa7c8279499c703889b6e0fd0200000000fdffffff042f280000000000001600149c756aa33f4f89418b33872a973274b5445c727b80969800000000001600146c540c1c9f546004539f45318b8d9f4d7b4857ef80969800000000001976a91422a6daa4a7b695c8a2dd104d47c5dc73d655c96f88ac809698000000000017a914a6885437e0762013facbda93894202a0fe86e35f8702473044022075ef5f04d7a63347064938e15a0c74277a79e5c9d32a26e39e8a517a44d565cc022015246790fb5b29c9bf3eded1b95699b1635bcfc6d521886fddf1135ba1b988ec012102801bc7170efb82c490e243204d86970f15966aa3bce6a06bef5c09a83a5bfffe02473044022061aa9b0d9649ffd7259bc54b35f678565dbbe11507d348dd8885522eaf1fa70c02202cc79de09e8e63e8d57fde6ef66c079ddac4d9828e1936a9db833d4c142615c3012103a8f58fc1f5625f18293403104874f2d38c9279f777e512570e4199c7d292b81b0247304402207744dc1ab0bf77c081b58540c4321d090c0a24a32742a361aa55ad86f0c7c24e02201a9b0dd78b63b495ab5a0b5b161c54cb085d70683c90e188bb4dc2e41e142f6601210361fb354f8259abfcbfbdda36b7cb4c3b05a3ca3d68dd391fd8376e920d93870d0247304402204803e423c321acc6c12cb0ebf196d2906842fdfed6de977cc78277052ee5f15002200634670c1dc25e6b1787a65d3e09c8e6bb0340238d90b9d98887e8fd53944e080121031104c60d027123bf8676bcaefaa66c001a0d3d379dc4a9492a567a9e1004452d02473044022050e4b5348d30011a22b6ae8b43921d29249d88ea71b1fbaa2d9c22dfdef58b7002201c5d5e143aa8835454f61b0742226ebf8cd466bcc2cdcb1f77b92e473d3b13190121030496b9d49aa8efece4f619876c60a77d2c0dc846390ecdc5d9acbfa1bb3128760247304402204d6a9b986e1a0e3473e8aef84b3eb7052442a76dfd7631e35377f141496a55490220131ab342853c01e31f111436f8461e28bc95883b871ca0e01b5f57146e79d7bb012103262ffbc88e25296056a3c65c880e3686297e07f360e6b80f1219d65b0900e84e02483045022100c8ffacf92efa1dddef7e858a241af7a80adcc2489bcc325195970733b1f35fac022076f40c26023a228041a9665c5290b9918d06f03b716e4d8f6d47e79121c7eb37012102d9ba7e02d7cd7dd24302f823b3114c99da21549c663f72440dc87e8ba412120902483045022100b55545d84e43d001bbc10a981f184e7d3b98a7ed6689863716cab053b3655a2f0220537eb76a695fbe86bf020b4b6f7ae93b506d778bbd0885f0a61067616a2c8bce0121034a57f2fa2c32c9246691f6a922fb1ebdf1468792bae7eff253a99fc9f2a5023902483045022100f1d4408463dbfe257f9f778d5e9c8cdb97c8b1d395dbd2e180bc08cad306492c022002a024e19e1a406eaa24467f033659de09ab58822987281e28bb6359288337bd012103e91daa18d924eea62011ce596e15b6d683975cf724ea5bf69a8e2022c26fc12f0247304402204f1e12b923872f396e5e1a3aa94b0b2e86b4ce448f4349a017631db26d7dff8a022069899a05de2ad2bbd8e0202c56ab1025a7db9a4998eea70744e3c367d2a7eb71012103b0eee86792dbef1d4a49bc4ea32d197c8c15d27e6e0c5c33e58e409e26d4a39a0247304402201787dacdb92e0df6ad90226649f0e8321287d0bd8fddc536a297dd19b5fc103e022001fe89300a76e5b46d0e3f7e39e0ee26cc83b71d59a2a5da1dd7b13350cd0c07012103afb1e43d7ec6b7999ef0f1093069e68fe1dfe5d73fc6cfb4f7a5022f7098758c02483045022100acc1212bba0fe4fcc6c3ae5cf8e25f221f140c8444d3c08dfc53a93630ac25da02203f12982847244bd9421ef340293f3a38d2ab5d028af60769e46fcc7d81312e7e012102801bc7170efb82c490e243204d86970f15966aa3bce6a06bef5c09a83a5bfffe024830450221009c04934102402949484b21899271c3991c007b783b8efc85a3c3d24641ac7c24022006fb1895ce969d08a2cb29413e1a85427c7e85426f7a185108ca44b5a0328cb301210360248db4c7d7f76fe231998d2967104fee04df8d8da34f10101cc5523e82648c02483045022100b11fe61b393fa5dbe18ab98f65c249345b429b13f69ee2d1b1335725b24a0e73022010960cdc5565cbc81885c8ed95142435d3c202dfa5a3dc5f50f3914c106335ce0121029c878610c34c21381cda12f6f36ab88bf60f5f496c1b82c357b8ac448713e7b50247304402200ca080db069c15bbf98e1d4dff68d0aea51227ff5d17a8cf67ceae464c22bbb0022051e7331c0918cbb71bb2cef29ca62411454508a16180b0fb5df94248890840df0121028f0be0cde43ff047edbda42c91c37152449d69789eb812bb2e148e4f22472c0f0247304402201fefe258938a2c481d5a745ef3aa8d9f8124bbe7f1f8c693e2ddce4ddc9a927c02204049e0060889ede8fda975edf896c03782d71ba53feb51b04f5ae5897d7431dc012103946730b480f52a43218a9edce240e8b234790e21df5e96482703d81c3c19d3f1024730440220126a6a56dbe69af78d156626fc9cf41d6aac0c07b8b5f0f8491f68db5e89cb5002207ee6ed6f2f41da256f3c1e79679a3de6cf34cc08b940b82be14aefe7da031a6b012102801bc7170efb82c490e243204d86970f15966aa3bce6a06bef5c09a83a5bfffe024730440220363204a1586d7f13c148295122cbf9ec7939685e3cadab81d6d9e921436d21b7022044626b8c2bd4aa7c167d74bc4e9eb9d0744e29ce0ad906d78e10d6d854f23d170121037fb9c51716739bb4c146857fab5a783372f72a65987d61f3b58c74360f4328dd0247304402207925a4c2a3a6b76e1055871717ee28fcb8c6fde161b9dc6382239af9f372ace99902204a58e31ce0b4a4804a42d2224331289311ded2748062c92c8aca769e81417a4c012102e18a8c235b48e41ef98265a8e07fa005d2602b96d585a61ad67168d74e7391cb02483045022100bbfe060479174a8d846b5a897526003eb2220ba307a5fee6e1e8de3e4e8b38fd02206723857301d447f67ac98a5a5c2b80ef6820e98fae213db1720f93d91161803b01210386728e2ac3ecee15f58d0505ee26f86a68f08c702941ffaf2fb7213e5026aea10247304402203a2613ae68f697eb02b5b7d18e3c4236966dac2b3a760e3021197d76e9ad4239022046f9067d3df650fcabbdfd250308c64f90757dec86f0b08813c979a42d06a6ec012102a1d7ee1cb4dc502f899aaafae0a2eb6cbf80d9a1073ae60ddcaabc3b1d1f15df02483045022100ab1bea2cc5388428fd126c7801550208701e21564bd4bd00cfd4407cfafc1acd0220508ee587f080f3c80a5c0b2175b58edd84b755e659e2135b3152044d75ebc4b501210236dd1b7f27a296447d0eb3750e1bdb2d53af50b31a72a45511dc1ec3fe7a684a19391400')
funding_txid = funding_tx.txid()
self.assertEqual('98574bc5f6e75769eb0c93d41453cc1dfbd15c14e63cc3c42f37cdbd08858762', funding_txid)
wallet_online.receive_tx_callback(funding_txid, funding_tx, TX_HEIGHT_UNCONFIRMED)
# create unsigned tx
outputs = [PartialTxOutput.from_address_and_value('tb1qp0mv2sxsyxxfj5gl0332f9uyez93su9cf26757', 2500000)]
tx = wallet_online.mktx(outputs=outputs, password=None, fee=5000)
tx.locktime = 1325341
tx.version = 1
self.assertFalse(tx.is_complete())
self.assertTrue(tx.is_segwit())
self.assertEqual(1, len(tx.inputs()))
# expected PSBT (BIP-174) serialization of the unsigned tx, byte-exact
partial_tx = tx.serialize_as_bytes().hex()
self.assertEqual("70736274ff010071010000000162878508bdcd372fc4c33ce6145cd1fb1dcc5314d4930ceb6957e7f6c54b57980100000000fdffffff02a0252600000000001600140bf6c540d0218c99511f7c62a49784c88b1870b8585d7200000000001600145543fe1a1364b806b27a5c9dc92ac9bbf0d42aa31d391400000100fd4c0d01000000000116e9c9dac2651672316aab3b9553257b6942c5f762c5d795776d9cfa504f183c000000000000fdffffff8085019852fada9da84b58dcf753d292dde314a19f5a5527f6588fa2566142130000000000fdffffffa4154a48db20ce538b28722a89c6b578bd5b5d60d6d7b52323976339e39405230000000000fdffffff0b5ef43f843a96364aebd708e25ea1bdcf2c7df7d0d995560b8b1be5f357b64f0100000000fdffffffd41dfe1199c76fdb3f20e9947ea31136d032d9da48c5e45d85c8f440e2351a510100000000fdffffff5bd015d17e4a1837b01c24ebb4a6b394e3da96a85442bd7dc6abddfbf16f20510000000000fdffffff13a3e7f80b1bd46e38f2abc9e2f335c18a4b0af1778133c7f1c3caae9504345c0200000000fdffffffdf4fc1ab21bca69d18544ddb10a913cd952dbc730ab3d236dd9471445ff405680100000000fdffffffe0424d78a30d5e60ac6b26e2274d7d6e7c6b78fe0b49bdc3ac4dd2147c9535750100000000fdffffff7ab6dd6b3c0d44b0fef0fdc9ab0ad6eee23eef799eee29c005d52bc4461998760000000000fdffffff48a77e5053a21acdf4f235ce00c82c9bc1704700f54d217f6a30704711b9737d0000000000fdffffff86918b39c1d9bb6f34d9b082182f73cedd15504331164dc2b186e95c568ccb870000000000fdffffff15a847356cbb44be67f345965bb3f2589e2fec1c9a0ada21fd28225dcc602e8f0100000000fdffffff9a2875297f81dfd3b77426d63f621db350c270cc28c634ad86b9969ee33ac6960000000000fdffffffd6eeb1d1833e00967083d1ab86fa5a2e44355bd613d9277135240fe6f60148a20100000000fdffffffd8a6e5a9b68a65ff88220ca33e36faf6f826ae8c5c8a13fe818a5e63828b68a40100000000fdffffff73aab8471f82092e45ed1b1afeffdb49ea1ec74ce4853f971812f6a72a7e85aa0000000000fdffffffacd6459dec7c3c51048eb112630da756f5d4cb4752b8d39aa325407ae0885cba0000000000fdffffff1eddd5e13bef1aba1ff151762b5860837daa9b39db1eae8ea8227c81a5a1c8ba0000000000fdffffff67a096ff7c343d39e96929798097f6d7a61156bbdb905fbe534ba36f273271d40100000000fdffffff109a671eb7daf6dcd07c0ceff99f2de65864ab36d64fb3a890bab951569adeee0100000000fdffffff4f1bdc64da8056d08f79db7f5348d1de55946e57aa7c8279499c703889b6e0fd0200000000fdffffff042f280000000000001600149c756aa33f4f89418b33872a973274b5445c727b80969800000000001600146c540c1c9f546004539f45318b8d9f4d7b4857ef80969800000000001976a91422a6daa4a7b695c8a2dd104d47c5dc73d655c96f88ac809698000000000017a914a6885437e0762013facbda93894202a0fe86e35f8702473044022075ef5f04d7a63347064938e15a0c74277a79e5c9d32a26e39e8a517a44d565cc022015246790fb5b29c9bf3eded1b95699b1635bcfc6d521886fddf1135ba1b988ec012102801bc7170efb82c490e243204d86970f15966aa3bce6a06bef5c09a83a5bfffe02473044022061aa9b0d9649ffd7259bc54b35f678565dbbe11507d348dd8885522eaf1fa70c02202cc79de09e8e63e8d57fde6ef66c079ddac4d9828e1936a9db833d4c142615c3012103a8f58fc1f5625f18293403104874f2d38c9279f777e512570e4199c7d292b81b0247304402207744dc1ab0bf77c081b58540c4321d090c0a24a32742a361aa55ad86f0c7c24e02201a9b0dd78b63b495ab5a0b5b161c54cb085d70683c90e188bb4dc2e41e142f6601210361fb354f8259abfcbfbdda36b7cb4c3b05a3ca3d68dd391fd8376e920d93870d0247304402204803e423c321acc6c12cb0ebf196d2906842fdfed6de977cc78277052ee5f15002200634670c1dc25e6b1787a65d3e09c8e6bb0340238d90b9d98887e8fd53944e080121031104c60d027123bf8676bcaefaa66c001a0d3d379dc4a9492a567a9e1004452d02473044022050e4b5348d30011a22b6ae8b43921d29249d88ea71b1fbaa2d9c22dfdef58b7002201c5d5e143aa8835454f61b0742226ebf8cd466bcc2cdcb1f77b92e473d3b13190121030496b9d49aa8efece4f619876c60a77d2c0dc846390ecdc5d9acbfa1bb3128760247304402204d6a9b986e1a0e3473e8aef84b3eb7052442a76dfd7631e35377f141496a55490220131ab342853c01e31f111436f8461e28bc95883b871ca0e01b5f57146e79d7bb012103262ffbc88e25296056a3c65c880e3686297e07f360e6b80f1219d65b0900e84e02483045022100c8ffacf92efa1dddef7e858a241af7a80adcc2489bcc325195970733b1f35fac022076f40c26023a228041a9665c5290b9918d06f03b716e4d8f6d47e79121c7eb37012102d9ba7e02d7cd7dd24302f823b3114c99da21549c663f72440dc87e8ba412120902483045022100b55545d84e43d001bbc10a981f184e7d3b98a7ed6689863716cab053b3655a2f0220537eb76a695fbe86bf020b4b6f7ae93b506d778bbd0885f0a61067616a2c8bce0121034a57f2fa2c32c9246691f6a922fb1ebdf1468792bae7eff253a99fc9f2a5023902483045022100f1d4408463dbfe257f9f778d5e9c8cdb97c8b1d395dbd2e180bc08cad306492c022002a024e19e1a406eaa24467f033659de09ab58822987281e28bb6359288337bd012103e91daa18d924eea62011ce596e15b6d683975cf724ea5bf69a8e2022c26fc12f0247304402204f1e12b923872f396e5e1a3aa94b0b2e86b4ce448f4349a017631db26d7dff8a022069899a05de2ad2bbd8e0202c56ab1025a7db9a4998eea70744e3c367d2a7eb71012103b0eee86792dbef1d4a49bc4ea32d197c8c15d27e6e0c5c33e58e409e26d4a39a0247304402201787dacdb92e0df6ad90226649f0e8321287d0bd8fddc536a297dd19b5fc103e022001fe89300a76e5b46d0e3f7e39e0ee26cc83b71d59a2a5da1dd7b13350cd0c07012103afb1e43d7ec6b7999ef0f1093069e68fe1dfe5d73fc6cfb4f7a5022f7098758c02483045022100acc1212bba0fe4fcc6c3ae5cf8e25f221f140c8444d3c08dfc53a93630ac25da02203f12982847244bd9421ef340293f3a38d2ab5d028af60769e46fcc7d81312e7e012102801bc7170efb82c490e243204d86970f15966aa3bce6a06bef5c09a83a5bfffe024830450221009c04934102402949484b21899271c3991c007b783b8efc85a3c3d24641ac7c24022006fb1895ce969d08a2cb29413e1a85427c7e85426f7a185108ca44b5a0328cb301210360248db4c7d7f76fe231998d2967104fee04df8d8da34f10101cc5523e82648c02483045022100b11fe61b393fa5dbe18ab98f65c249345b429b13f69ee2d1b1335725b24a0e73022010960cdc5565cbc81885c8ed95142435d3c202dfa5a3dc5f50f3914c106335ce0121029c878610c34c21381cda12f6f36ab88bf60f5f496c1b82c357b8ac448713e7b50247304402200ca080db069c15bbf98e1d4dff68d0aea51227ff5d17a8cf67ceae464c22bbb0022051e7331c0918cbb71bb2cef29ca62411454508a16180b0fb5df94248890840df0121028f0be0cde43ff047edbda42c91c37152449d69789eb812bb2e148e4f22472c0f0247304402201fefe258938a2c481d5a745ef3aa8d9f8124bbe7f1f8c693e2ddce4ddc9a927c02204049e0060889ede8fda975edf896c03782d71ba53feb51b04f5ae5897d7431dc012103946730b480f52a43218a9edce240e8b234790e21df5e96482703d81c3c19d3f1024730440220126a6a56dbe69af78d156626fc9cf41d6aac0c07b8b5f0f8491f68db5e89cb5002207ee6ed6f2f41da256f3c1e79679a3de6cf34cc08b940b82be14aefe7da031a6b012102801bc7170efb82c490e243204d86970f15966aa3bce6a06bef5c09a83a5bfffe024730440220363204a1586d7f13c148295122cbf9ec7939685e3cadab81d6d9e921436d21b7022044626b8c2bd4aa7c167d74bc4e9eb9d0744e29ce0ad906d78e10d6d854f23d170121037fb9c51716739bb4c146857fab5a783372f72a65987d61f3b58c74360f4328dd0247304402207925a4c2a3a6b76e10558717ee28fcb8c6fde161b9dc6382239af9f372ace99902204a58e31ce0b4a4804a42d2224331289311ded2748062c92c8aca769e81417a4c012102e18a8c235b48e41ef98265a8e07fa005d2602b96d585a61ad67168d74e7391cb02483045022100bbfe060479174a8d846b5a897526003eb2220ba307a5fee6e1e8de3e4e8b38fd02206723857301d447f67ac98a5a5c2b80ef6820e98fae213db1720f93d91161803b01210386728e2ac3ecee15f58d0505ee26f86a68f08c702941ffaf2fb7213e5026aea10247304402203a2613ae68f697eb02b5b7d18e3c4236966dac2b3a760e3021197d76e9ad4239022046f9067d3df650fcabbdfd250308c64f90757dec86f0b08813c979a42d06a6ec012102a1d7ee1cb4dc502f899aaafae0a2eb6cbf80d9a1073ae60ddcaabc3b1d1f15df02483045022100ab1bea2cc5388428fd126c7801550208701e21564bd4bd00cfd4407cfafc1acd0220508ee587f080f3c80a5c0b2175b58edd84b755e659e2135b3152044d75ebc4b501210236dd1b7f27a296447d0eb3750e1bdb2d53af50b31a72a45511dc1ec3fe7a684a19391400220603fd88f32a81e812af0187677fc0e7ac9b7fb63ca68c2d98c2afbcf99aa311ac060cdf758ae500000000020000000000220202ac05f54ef082ac98302d57d532e728653565bd55f46fcf03cacbddb168fd6c760cdf758ae5010000000000000000",
partial_tx)
tx_copy = tx_from_any(partial_tx)  # simulates moving partial txn between cosigners
self.assertTrue(wallet_online.is_mine(wallet_online.get_txin_address(tx_copy.inputs()[0])))
self.assertEqual('ee76c0c6da87f0eb5ab4d1ae05d3942512dcd3c4c42518f9d3619e74400cfc1f', tx_copy.txid())
self.assertEqual(tx.txid(), tx_copy.txid())
# sign tx — the offline wallet signs despite its gap limit being too low
tx = wallet_offline.sign_transaction(tx_copy, password=None)
self.assertTrue(tx.is_complete())
self.assertTrue(tx.is_segwit())
self.assertEqual('ee76c0c6da87f0eb5ab4d1ae05d3942512dcd3c4c42518f9d3619e74400cfc1f', tx.txid())
self.assertEqual('484e350beaa722a744bb3e2aa38de005baa8526d86536d6143e5814355acf775', tx.wtxid())
@unittest.skip("skip until replace with zcash wallet")
@mock.patch.object(wallet.Abstract_Wallet, 'save_db')
def test_signing_where_offline_ks_does_not_have_keyorigin_but_psbt_contains_it(self, mock_save_db):
    # Signing must succeed when the offline keystore only has an intermediate
    # xprv (no root fingerprint), relying on the PSBT's embedded key-origin data.
    # keystore has intermediate xprv without root fp; tx contains root fp and full path.
    # tx has input with key beyond gap limit
    # NOTE(review): currently skipped — fixtures are testnet-BTC, pending Zcash replacements.
wallet_offline = WalletIntegrityHelper.create_standard_wallet(
# bip39 seed: "brave scare company drastic consider confirm grow differ alter wide olympic utility"
# der: m/84'/1'/0'
keystore.from_xprv('vprv9KXDgRXYp3WCozCS3bMehASe2cJhY28DihCZ3KuyiTTjngopkfRC9QkH1SUREyCvnV7TSD6EgEHTTYa5yod7ZveBhVReEU1uDgfVASFqLNw'),
gap_limit=4,
config=self.config
)
# PSBT fixture carries root fingerprint + full derivation path for the input key
tx = tx_from_any('70736274ff01005202000000017b748828553b1127b86674e71ad0cd4a2e5e8baeab8792a3c3263f7ea0ba86500000000000fdffffff01ad16010000000000160014d74b54300bc0d4b6e8f506fe540b47ce0da38b4a08f21c00000100bf0200000000010163a419b779be17167c54ff3acb1205e5347fbd72963f89fb1d66b5cf09f329c90000000000fdffffff011b17010000000000160014ed420532f0c33477b9b3fbb57431b4a1adce99c90247304402204e4ad4992fa8798e3b595d17c59961b905ca71c32dc3ba910ae14f139259ffbe02206ee2281f21499e46aa77f4bec2edce3674fea529d9dd340439365c2232bad35701210334080358ffdac08f83d6800a8e477e3512ad5c39ede553089db8c4bbe16f59aad7f11c00220602d137f257a96cbc58c7e60f2085cd65a311e242459e23d1efbed77dd8f372513818cc2bdaaa540000800100008000000080000000001e000000002202030671d324eeba0f85499a8749f783a4883103d23f5dedbe048391ff18c3da067818cc2bdaaa540000800100008000000080000000000100000000')
self.assertEqual('065b6e0a5731107641828337f5e000c9ddd94a12d074708643b0bca517374c6a', tx.txid())
# sign tx
tx = wallet_offline.sign_transaction(tx, password=None)
self.assertTrue(tx.is_complete())
# fully-signed network serialization must match byte-for-byte
self.assertEqual('020000000001017b748828553b1127b86674e71ad0cd4a2e5e8baeab8792a3c3263f7ea0ba86500000000000fdffffff01ad16010000000000160014d74b54300bc0d4b6e8f506fe540b47ce0da38b4a0247304402203098741bf4d4f956e96f2706a517a1c0a63f67a242a50d155fbc56ad0bbac8b102207e535391c03bdab641f3205762311c1e6648b3459681e53d68fa44e63604a7f6012102d137f257a96cbc58c7e60f2085cd65a311e242459e23d1efbed77dd8f372513808f21c00',
str(tx))
@unittest.skip("skip until replace with zcash wallet")
@mock.patch.object(wallet.Abstract_Wallet, 'save_db')
def test_sending_offline_wif_online_addr_p2pkh(self, mock_save_db):  # compressed pubkey
    # Online watch-only (address-import) wallet builds the unsigned tx; offline
    # wallet holding the raw WIF private key (p2pkh, compressed pubkey) signs it.
    # NOTE(review): currently skipped — fixtures are testnet-BTC, pending Zcash replacements.
wallet_offline = WalletIntegrityHelper.create_imported_wallet(privkeys=True, config=self.config)
wallet_offline.import_private_key('p2pkh:cQDxbmQfwRV3vP1mdnVHq37nJekHLsuD3wdSQseBRA2ct4MFk5Pq', password=None)
wallet_online = WalletIntegrityHelper.create_imported_wallet(privkeys=False, config=self.config)
wallet_online.import_address('mg2jk6S5WGDhUPA8mLSxDLWpUoQnX1zzoG')
# bootstrap wallet_online
funding_tx = Transaction('01000000000101197a89cff51096b9dd4214cdee0eb90cb27a25477e739521d728a679724042730100000000fdffffff048096980000000000160014dab37af8fefbbb31887a0a5f9b2698f4a7b45f6a80969800000000001976a91405a20074ef7eb42c7c6fcd4f499faa699742783288ac809698000000000017a914b808938a8007bc54509cd946944c479c0fa6554f87131b2c0400000000160014a04dfdb9a9aeac3b3fada6f43c2a66886186e2440247304402204f5dbb9dda65eab26179f1ca7c37c8baf028153815085dd1bbb2b826296e3b870220379fcd825742d6e2bdff772f347b629047824f289a5499a501033f6c3495594901210363c9c98740fe0455c646215cea9b13807b758791c8af7b74e62968bef57ff8ae1e391400')
funding_txid = funding_tx.txid()
self.assertEqual('0a08ea26a49e2b80f253796d605b69e2d0403fac64bdf6f7db82ada4b7bb6b62', funding_txid)
wallet_online.receive_tx_callback(funding_txid, funding_tx, TX_HEIGHT_UNCONFIRMED)
# create unsigned tx
outputs = [PartialTxOutput.from_address_and_value('tb1quk7ahlhr3qmjndy0uvu9y9hxfesrtahtta9ghm', 2500000)]
tx = wallet_online.mktx(outputs=outputs, password=None, fee=5000)
tx.locktime = 1325340
tx.version = 1
self.assertFalse(tx.is_complete())
self.assertEqual(1, len(tx.inputs()))
# expected PSBT serialization, byte-exact
partial_tx = tx.serialize_as_bytes().hex()
self.assertEqual("70736274ff0100740100000001626bbbb7a4ad82dbf7f6bd64ac3f40d0e2695b606d7953f2802b9ea426ea080a0100000000fdffffff02a025260000000000160014e5bddbfee3883729b48fe3385216e64e6035f6eb585d7200000000001976a91405a20074ef7eb42c7c6fcd4f499faa699742783288ac1c391400000100fd200101000000000101197a89cff51096b9dd4214cdee0eb90cb27a25477e739521d728a679724042730100000000fdffffff048096980000000000160014dab37af8fefbbb31887a0a5f9b2698f4a7b45f6a80969800000000001976a91405a20074ef7eb42c7c6fcd4f499faa699742783288ac809698000000000017a914b808938a8007bc54509cd946944c479c0fa6554f87131b2c0400000000160014a04dfdb9a9aeac3b3fada6f43c2a66886186e2440247304402204f5dbb9dda65eab26179f1ca7c37c8baf028153815085dd1bbb2b826296e3b870220379fcd825742d6e2bdff772f347b629047824f289a5499a501033f6c3495594901210363c9c98740fe0455c646215cea9b13807b758791c8af7b74e62968bef57ff8ae1e391400000000",
partial_tx)
tx_copy = tx_from_any(partial_tx)  # simulates moving partial txn between cosigners
self.assertTrue(wallet_online.is_mine(wallet_online.get_txin_address(tx_copy.inputs()[0])))
self.assertEqual(None, tx_copy.txid())  # not segwit: unsigned legacy tx has no stable txid
self.assertEqual(tx.txid(), tx_copy.txid())
# sign tx
tx = wallet_offline.sign_transaction(tx_copy, password=None)
self.assertTrue(tx.is_complete())
self.assertFalse(tx.is_segwit())
# non-segwit: txid and wtxid are identical
self.assertEqual('e56da664631b8c666c6df38ec80c954c4ac3c4f56f040faf0070e4681e937fc4', tx.txid())
self.assertEqual('e56da664631b8c666c6df38ec80c954c4ac3c4f56f040faf0070e4681e937fc4', tx.wtxid())
@unittest.skip("skip until replace with zcash wallet")
@mock.patch.object(wallet.Abstract_Wallet, 'save_db')
def test_sending_offline_xprv_online_addr_p2pkh(self, mock_save_db):  # compressed pubkey
    # Same scenario as the WIF variant above, but the offline signer is a
    # BIP32 (xprv) standard wallet; expected results are identical.
    # NOTE(review): currently skipped — fixtures are testnet-BTC, pending Zcash replacements.
wallet_offline = WalletIntegrityHelper.create_standard_wallet(
# bip39: "qwe", der: m/44'/1'/0'
keystore.from_xprv('tprv8gfKwjuAaqtHgqxMh1tosAQ28XvBMkcY5NeFRA3pZMpz6MR4H4YZ3MJM4fvNPnRKeXR1Td2vQGgjorNXfo94WvT5CYDsPAqjHxSn436G1Eu'),
gap_limit=4,
config=self.config
)
wallet_online = WalletIntegrityHelper.create_imported_wallet(privkeys=False, config=self.config)
wallet_online.import_address('mg2jk6S5WGDhUPA8mLSxDLWpUoQnX1zzoG')
# bootstrap wallet_online
funding_tx = Transaction('01000000000101197a89cff51096b9dd4214cdee0eb90cb27a25477e739521d728a679724042730100000000fdffffff048096980000000000160014dab37af8fefbbb31887a0a5f9b2698f4a7b45f6a80969800000000001976a91405a20074ef7eb42c7c6fcd4f499faa699742783288ac809698000000000017a914b808938a8007bc54509cd946944c479c0fa6554f87131b2c0400000000160014a04dfdb9a9aeac3b3fada6f43c2a66886186e2440247304402204f5dbb9dda65eab26179f1ca7c37c8baf028153815085dd1bbb2b826296e3b870220379fcd825742d6e2bdff772f347b629047824f289a5499a501033f6c3495594901210363c9c98740fe0455c646215cea9b13807b758791c8af7b74e62968bef57ff8ae1e391400')
funding_txid = funding_tx.txid()
self.assertEqual('0a08ea26a49e2b80f253796d605b69e2d0403fac64bdf6f7db82ada4b7bb6b62', funding_txid)
wallet_online.receive_tx_callback(funding_txid, funding_tx, TX_HEIGHT_UNCONFIRMED)
# create unsigned tx
outputs = [PartialTxOutput.from_address_and_value('tb1quk7ahlhr3qmjndy0uvu9y9hxfesrtahtta9ghm', 2500000)]
tx = wallet_online.mktx(outputs=outputs, password=None, fee=5000)
tx.locktime = 1325340
tx.version = 1
self.assertFalse(tx.is_complete())
self.assertEqual(1, len(tx.inputs()))
# expected PSBT serialization, byte-exact
partial_tx = tx.serialize_as_bytes().hex()
self.assertEqual("70736274ff0100740100000001626bbbb7a4ad82dbf7f6bd64ac3f40d0e2695b606d7953f2802b9ea426ea080a0100000000fdffffff02a025260000000000160014e5bddbfee3883729b48fe3385216e64e6035f6eb585d7200000000001976a91405a20074ef7eb42c7c6fcd4f499faa699742783288ac1c391400000100fd200101000000000101197a89cff51096b9dd4214cdee0eb90cb27a25477e739521d728a679724042730100000000fdffffff048096980000000000160014dab37af8fefbbb31887a0a5f9b2698f4a7b45f6a80969800000000001976a91405a20074ef7eb42c7c6fcd4f499faa699742783288ac809698000000000017a914b808938a8007bc54509cd946944c479c0fa6554f87131b2c0400000000160014a04dfdb9a9aeac3b3fada6f43c2a66886186e2440247304402204f5dbb9dda65eab26179f1ca7c37c8baf028153815085dd1bbb2b826296e3b870220379fcd825742d6e2bdff772f347b629047824f289a5499a501033f6c3495594901210363c9c98740fe0455c646215cea9b13807b758791c8af7b74e62968bef57ff8ae1e391400000000",
partial_tx)
tx_copy = tx_from_any(partial_tx)  # simulates moving partial txn between cosigners
self.assertTrue(wallet_online.is_mine(wallet_online.get_txin_address(tx_copy.inputs()[0])))
self.assertEqual(None, tx_copy.txid())  # not segwit: unsigned legacy tx has no stable txid
self.assertEqual(tx.txid(), tx_copy.txid())
# sign tx
tx = wallet_offline.sign_transaction(tx_copy, password=None)
self.assertTrue(tx.is_complete())
self.assertFalse(tx.is_segwit())
# non-segwit: txid and wtxid are identical
self.assertEqual('e56da664631b8c666c6df38ec80c954c4ac3c4f56f040faf0070e4681e937fc4', tx.txid())
self.assertEqual('e56da664631b8c666c6df38ec80c954c4ac3c4f56f040faf0070e4681e937fc4', tx.wtxid())
@unittest.skip("skip until replace with zcash wallet")
@mock.patch.object(wallet.Abstract_Wallet, 'save_db')
def test_sending_offline_hd_multisig_online_addr_p2sh(self, mock_save_db):
    # 2-of-3 legacy p2sh multisig: an online address-only wallet builds the
    # unsigned tx; two distinct offline cosigner wallets sign it in sequence,
    # with the PSBT round-tripped through serialization between signers.
    # NOTE(review): currently skipped — fixtures are testnet-BTC, pending Zcash replacements.
# 2-of-3 legacy p2sh multisig
wallet_offline1 = WalletIntegrityHelper.create_multisig_wallet(
[
keystore.from_seed('blast uniform dragon fiscal ensure vast young utility dinosaur abandon rookie sure', '', True),
keystore.from_xpub('tpubD6NzVbkrYhZ4YTPEgwk4zzr8wyo7pXGmbbVUnfYNtx6SgAMF5q3LN3Kch58P9hxGNsTmP7Dn49nnrmpE6upoRb1Xojg12FGLuLHkVpVtS44'),
keystore.from_xpub('tpubD6NzVbkrYhZ4XJzYkhsCbDCcZRmDAKSD7bXi9mdCni7acVt45fxbTVZyU6jRGh29ULKTjoapkfFsSJvQHitcVKbQgzgkkYsAmaovcro7Mhf')
],
'2of3', gap_limit=2,
config=self.config
)
wallet_offline2 = WalletIntegrityHelper.create_multisig_wallet(
[
keystore.from_seed('cycle rocket west magnet parrot shuffle foot correct salt library feed song', '', True),
keystore.from_xpub('tpubD6NzVbkrYhZ4YTPEgwk4zzr8wyo7pXGmbbVUnfYNtx6SgAMF5q3LN3Kch58P9hxGNsTmP7Dn49nnrmpE6upoRb1Xojg12FGLuLHkVpVtS44'),
keystore.from_xpub('tpubD6NzVbkrYhZ4YARFMEZPckrqJkw59GZD1PXtQnw14ukvWDofR7Z1HMeSCxfYEZVvg4VdZ8zGok5VxHwdrLqew5cMdQntWc5mT7mh1CSgrnX')
],
'2of3', gap_limit=2,
config=self.config
)
wallet_online = WalletIntegrityHelper.create_imported_wallet(privkeys=False, config=self.config)
wallet_online.import_address('2N4z38eTKcWTZnfugCCfRyXtXWMLnn8HDfw')
# bootstrap wallet_online
funding_tx = Transaction('010000000001016207d958dc46508d706e4cd7d3bc46c5c2b02160e2578e5fad2efafc3927050301000000171600147a4fc8cdc1c2cf7abbcd88ef6d880e59269797acfdffffff02809698000000000017a91480c2353f6a7bc3c71e99e062655b19adb3dd2e48870d0916020000000017a914703f83ef20f3a52d908475dcad00c5144164d5a2870247304402203b1a5cb48cadeee14fa6c7bbf2bc581ca63104762ec5c37c703df778884cc5b702203233fa53a2a0bfbd85617c636e415da72214e359282cce409019319d031766c50121021112c01a48cc7ea13cba70493c6bffebb3e805df10ff4611d2bf559d26e25c04bf391400')
funding_txid = funding_tx.txid()
self.assertEqual('c59913a1fa9b1ef1f6928f0db490be67eeb9d7cb05aa565ee647e859642f3532', funding_txid)
wallet_online.receive_tx_callback(funding_txid, funding_tx, TX_HEIGHT_UNCONFIRMED)
# create unsigned tx
outputs = [PartialTxOutput.from_address_and_value('2MuCQQHJNnrXzQzuqfUCfAwAjPqpyEHbgue', 2500000)]
tx = wallet_online.mktx(outputs=outputs, password=None, fee=5000)
tx.locktime = 1325503
tx.version = 1
self.assertFalse(tx.is_complete())
self.assertEqual(1, len(tx.inputs()))
# expected PSBT serialization of the unsigned tx, byte-exact
partial_tx = tx.serialize_as_bytes().hex()
self.assertEqual("70736274ff010073010000000132352f6459e847e65e56aa05cbd7b9ee67be90b40d8f92f6f11e9bfaa11399c50000000000fdffffff02a02526000000000017a9141567b2578f300fa618ef0033611fd67087aff6d187585d72000000000017a91480c2353f6a7bc3c71e99e062655b19adb3dd2e4887bf391400000100f7010000000001016207d958dc46508d706e4cd7d3bc46c5c2b02160e2578e5fad2efafc3927050301000000171600147a4fc8cdc1c2cf7abbcd88ef6d880e59269797acfdffffff02809698000000000017a91480c2353f6a7bc3c71e99e062655b19adb3dd2e48870d0916020000000017a914703f83ef20f3a52d908475dcad00c5144164d5a2870247304402203b1a5cb48cadeee14fa6c7bbf2bc581ca63104762ec5c37c703df778884cc5b702203233fa53a2a0bfbd85617c636e415da72214e359282cce409019319d031766c50121021112c01a48cc7ea13cba70493c6bffebb3e805df10ff4611d2bf559d26e25c04bf391400000000",
partial_tx)
tx_copy = tx_from_any(partial_tx)  # simulates moving partial txn between cosigners
self.assertTrue(wallet_online.is_mine(wallet_online.get_txin_address(tx_copy.inputs()[0])))
self.assertEqual(None, tx_copy.txid())  # not segwit: unsigned legacy tx has no stable txid
self.assertEqual(tx.txid(), tx_copy.txid())
# sign tx - first
tx = wallet_offline1.sign_transaction(tx_copy, password=None)
self.assertFalse(tx.is_complete())
# after one of two required signatures, PSBT must contain exactly this partial sig set
partial_tx = tx.serialize_as_bytes().hex()
self.assertEqual("70736274ff010073010000000132352f6459e847e65e56aa05cbd7b9ee67be90b40d8f92f6f11e9bfaa11399c50000000000fdffffff02a02526000000000017a9141567b2578f300fa618ef0033611fd67087aff6d187585d72000000000017a91480c2353f6a7bc3c71e99e062655b19adb3dd2e4887bf391400000100f7010000000001016207d958dc46508d706e4cd7d3bc46c5c2b02160e2578e5fad2efafc3927050301000000171600147a4fc8cdc1c2cf7abbcd88ef6d880e59269797acfdffffff02809698000000000017a91480c2353f6a7bc3c71e99e062655b19adb3dd2e48870d0916020000000017a914703f83ef20f3a52d908475dcad00c5144164d5a2870247304402203b1a5cb48cadeee14fa6c7bbf2bc581ca63104762ec5c37c703df778884cc5b702203233fa53a2a0bfbd85617c636e415da72214e359282cce409019319d031766c50121021112c01a48cc7ea13cba70493c6bffebb3e805df10ff4611d2bf559d26e25c04bf391400220202afb4af9a91264e1c6dce3ebe5312801723270ac0ba8134b7b49129328fcb0f284730440220451f77cb18224adcb4981492d9be2c3fa7537f94f4b29eb405992dbdd5df04aa022071e6759d40dde810caa01ca7f16bad3cb742d64428c419c8fb4bad6f1c3f718101010469522102afb4af9a91264e1c6dce3ebe5312801723270ac0ba8134b7b49129328fcb0f2821030b482838721a38d94847699fed8818b5c5f56500ef72f13489e365b65e5749cf2103e5db7969ae2f2576e6a061bf3bb2db16571e77ffb41e0b27170734359235cbce53ae220602afb4af9a91264e1c6dce3ebe5312801723270ac0ba8134b7b49129328fcb0f280c0036e9ac00000000000000002206030b482838721a38d94847699fed8818b5c5f56500ef72f13489e365b65e5749cf0c48adc7a00000000000000000220603e5db7969ae2f2576e6a061bf3bb2db16571e77ffb41e0b27170734359235cbce0cdb69242700000000000000000000010069522102afb4af9a91264e1c6dce3ebe5312801723270ac0ba8134b7b49129328fcb0f2821030b482838721a38d94847699fed8818b5c5f56500ef72f13489e365b65e5749cf2103e5db7969ae2f2576e6a061bf3bb2db16571e77ffb41e0b27170734359235cbce53ae220202afb4af9a91264e1c6dce3ebe5312801723270ac0ba8134b7b49129328fcb0f280c0036e9ac00000000000000002202030b482838721a38d94847699fed8818b5c5f56500ef72f13489e365b65e5749cf0c48adc7a00000000000000000220203e5db7969ae2f2576e6a061bf3bb2db16571e77ffb41e0b27170734359235cbce0cdb692427000000000000000000",
partial_tx)
tx = tx_from_any(partial_tx)  # simulates moving partial txn between cosigners
# sign tx - second
tx = wallet_offline2.sign_transaction(tx, password=None)
self.assertTrue(tx.is_complete())
tx = tx_from_any(tx.serialize())
# final network serialization with both signatures, byte-exact
self.assertEqual('010000000132352f6459e847e65e56aa05cbd7b9ee67be90b40d8f92f6f11e9bfaa11399c500000000fc004730440220451f77cb18224adcb4981492d9be2c3fa7537f94f4b29eb405992dbdd5df04aa022071e6759d40dde810caa01ca7f16bad3cb742d64428c419c8fb4bad6f1c3f718101473044022052980154bdf2e43d6bd8775316cc220ef5ae13b4b9574a7a904a691ee3c5efd3022069b3eddf904cc645bd8fc8b2aaa7aaf7eb5bbfb7bbbd3b6e6cd89b37dfb2856c014c69522102afb4af9a91264e1c6dce3ebe5312801723270ac0ba8134b7b49129328fcb0f2821030b482838721a38d94847699fed8818b5c5f56500ef72f13489e365b65e5749cf2103e5db7969ae2f2576e6a061bf3bb2db16571e77ffb41e0b27170734359235cbce53aefdffffff02a02526000000000017a9141567b2578f300fa618ef0033611fd67087aff6d187585d72000000000017a91480c2353f6a7bc3c71e99e062655b19adb3dd2e4887bf391400',
str(tx))
# p2sh (non-segwit): txid and wtxid are identical
self.assertEqual('0e8fdc8257a85ebe7eeab14a53c2c258c61a511f64176b7f8fc016bc2263d307', tx.txid())
self.assertEqual('0e8fdc8257a85ebe7eeab14a53c2c258c61a511f64176b7f8fc016bc2263d307', tx.wtxid())
class TestWalletHistory_SimpleRandomOrder(TestCaseForTestnet):
transactions = {
"0f4972c84974b908a58dda2614b68cf037e6c03e8291898c719766f213217b67": "01000000029d1bdbe67f0bd0d7bd700463f5c29302057c7b52d47de9e2ca5069761e139da2000000008b483045022100a146a2078a318c1266e42265a369a8eef8993750cb3faa8dd80754d8d541d5d202207a6ab8864986919fd1a7fd5854f1e18a8a0431df924d7a878ec3dc283e3d75340141045f7ba332df2a7b4f5d13f246e307c9174cfa9b8b05f3b83410a3c23ef8958d610be285963d67c7bc1feb082f168fa9877c25999963ff8b56b242a852b23e25edfeffffff9d1bdbe67f0bd0d7bd700463f5c29302057c7b52d47de9e2ca5069761e139da2010000008a47304402201c7fa37b74a915668b0244c01f14a9756bbbec1031fb69390bcba236148ab37e02206151581f9aa0e6758b503064c1e661a726d75c6be3364a5a121a8c12cf618f64014104dc28da82e141416aaf771eb78128d00a55fdcbd13622afcbb7a3b911e58baa6a99841bfb7b99bcb7e1d47904fda5d13fdf9675cdbbe73e44efcc08165f49bac6feffffff02b0183101000000001976a914ca14915184a2662b5d1505ce7142c8ca066c70e288ac005a6202000000001976a9145eb4eeaefcf9a709f8671444933243fbd05366a388ac54c51200",
"2791cdc98570cc2b6d9d5b197dc2d002221b074101e3becb19fab4b79150446d": "010000000132201ff125888a326635a2fc6e971cd774c4d0c1a757d742d0f6b5b020f7203a050000006a47304402201d20bb5629a35b84ff9dd54788b98e265623022894f12152ac0e6158042550fe02204e98969e1f7043261912dd0660d3da64e15acf5435577fc02a00eccfe76b323f012103a336ad86546ab66b6184238fe63bb2955314be118b32fa45dd6bd9c4c5875167fdffffff0254959800000000001976a9148d2db0eb25b691829a47503006370070bc67400588ac80969800000000001976a914f96669095e6df76cfdf5c7e49a1909f002e123d088ace8ca1200",
"2d216451b20b6501e927d85244bcc1c7c70598332717df91bb571359c358affd": "010000000001036cdf8d2226c57d7cc8485636d8e823c14790d5f24e6cf38ba9323babc7f6db2901000000171600143fc0dbdc2f939c322aed5a9c3544468ec17f5c3efdffffff507dce91b2a8731636e058ccf252f02b5599489b624e003435a29b9862ccc38c0200000017160014c50ff91aa2a790b99aa98af039ae1b156e053375fdffffff6254162cf8ace3ddfb3ec242b8eade155fa91412c5bde7f55decfac5793743c1010000008b483045022100de9599dcd7764ca8d4fcbe39230602e130db296c310d4abb7f7ae4d139c4d46402200fbfd8e6dc94d90afa05b0c0eab3b84feb465754db3f984fbf059447282771c30141045eecefd39fabba7b0098c3d9e85794e652bdbf094f3f85a3de97a249b98b9948857ea1e8209ee4f196a6bbcfbad103a38698ee58766321ba1cdee0cbfb60e7b2fdffffff01e85af70100000000160014e8d29f07cd5f813317bec4defbef337942d85d74024730440220218049aee7bbd34a7fa17f972a8d24a0469b0131d943ef3e30860401eaa2247402203495973f006e6ee6ae74a83228623029f238f37390ee4b587d95cdb1d1aaee9901210392ba263f3a2b260826943ff0df25e9ca4ef603b98b0a916242c947ae0626575f02473044022002603e5ceabb4406d11aedc0cccbf654dd391ce68b6b2228a40e51cf8129310d0220533743120d93be8b6c1453973935b911b0a2322e74708d23e8b5f90e74b0f192012103221b4ee0f508ba595fc1b9c2252ed9d03e99c73b97344dae93263c68834f034800ed161300",
"31494e7e9f42f4bd736769b07cc602e2a1019617b2c72a03ec945b667aada78f": "0100000000010454022b1b4d3b45e7fcac468de2d6df890a9f41050c05d80e68d4b083f728e76a000000008b483045022100ea8fe74db2aba23ad36ac66aaa481bad2b4d1b3c331869c1d60a28ce8cfad43c02206fa817281b33fbf74a6dd7352bdc5aa1d6d7966118a4ad5b7e153f37205f1ae80141045f7ba332df2a7b4f5d13f246e307c9174cfa9b8b05f3b83410a3c23ef8958d610be285963d67c7bc1feb082f168fa9877c25999963ff8b56b242a852b23e25edfdffffff54022b1b4d3b45e7fcac468de2d6df890a9f41050c05d80e68d4b083f728e76a01000000171600146dfe07e12af3db7c715bf1c455f8517e19c361e7fdffffff54022b1b4d3b45e7fcac468de2d6df890a9f41050c05d80e68d4b083f728e76a020000006a47304402200b1fb89e9a772a8519294acd61a53a29473ce76077165447f49a686f1718db5902207466e2e8290f84114dc9d6c56419cb79a138f03d7af8756de02c810f19e4e03301210222bfebe09c2638cfa5aa8223fb422fe636ba9675c5e2f53c27a5d10514f49051fdffffff54022b1b4d3b45e7fcac468de2d6df890a9f41050c05d80e68d4b083f728e76a0300000000fdffffff018793140d000000001600144b3e27ddf4fc5f367421ee193da5332ef351b700000247304402207ba52959938a3853bcfd942d8a7e6a181349069cde3ea73dbde43fa9669b8d5302207a686b92073863203305cb5d5550d88bdab0d21b9e9761ba4a106ea3970e08d901210265c1e014112ed19c9f754143fb6a2ff89f8630d62b33eb5ae708c9ea576e61b50002473044022029e868a905aa3ecae6eafcbd5959aefff0e5f39c1fc7a131a174828806e74e5202202f0aaa7c3cb3d9a9d526e5428ce37c0f0af0d774aa30b09ded8bc2230e7ffaf2012102fe0104455dc52b1689bba130664e452642180eb865217acfc6997260b7d946ae22c71200",
"336eee749da7d1c537fd5679157fae63005bfd4bb8cf47ae73600999cbc9beaa": "0100000000010232201ff125888a326635a2fc6e971cd774c4d0c1a757d742d0f6b5b020f7203a020000006a4730440220198c0ba2b2aefa78d8cca01401d408ecdebea5ac05affce36f079f6e5c8405ca02200eabb1b9a01ff62180cf061dfacedba6b2e07355841b9308de2d37d83489c7b80121031c663e5534fe2a6de816aded6bb9afca09b9e540695c23301f772acb29c64a05fdfffffffb28ff16811d3027a2405be68154be8fdaff77284dbce7a2314c4107c2c941600000000000fdffffff015e104f01000000001976a9146dfd56a0b5d0c9450d590ad21598ecfeaa438bd788ac000247304402207d6dc521e3a4577685535f098e5bac4601aa03658b924f30bf7afef1850e437e022045b76771d8b6ca1939352d6b759fca31029e5b2edffa44dc747fe49770e746cd012102c7f36d4ceed353b90594ebaf3907972b6d73289bdf4707e120de31ec4e1eb11679f31200",
"3a6ed17d34c49dfdf413398e113cf5f71710d59e9f4050bbc601d513a77eb308": "010000000168091e76227e99b098ef8d6d5f7c1bb2a154dd49103b93d7b8d7408d49f07be0000000008a47304402202f683a63af571f405825066bd971945a35e7142a75c9a5255d364b25b7115d5602206c59a7214ae729a519757e45fdc87061d357813217848cf94df74125221267ac014104aecb9d427e10f0c370c32210fe75b6e72ccc4f415076cf1a6318fbed5537388862c914b29269751ab3a04962df06d96f5f4f54e393a0afcbfa44b590385ae61afdffffff0240420f00000000001976a9145f917fd451ca6448978ebb2734d2798274daf00b88aca8063d00000000001976a914e1232622a96a04f5e5a24ca0792bb9c28b089d6e88ace9ca1200",
"475c149be20c8a73596fad6cb8861a5af46d4fcf8e26a9dbf6cedff7ff80b70d": "01000000013a7e6f19a963adc7437d2f3eb0936f1fc9ef4ba7e083e19802eb1111525a59c2000000008b483045022100958d3931051306489d48fe69b32561e0a16e82a2447c07be9d1069317084b5e502202f70c2d9be8248276d334d07f08f934ffeea83977ad241f9c2de954a2d577f94014104d950039cec15ad10ad4fb658873bc746148bc861323959e0c84bf10f8633104aa90b64ce9f80916ab0a4238e025dcddf885b9a2dd6e901fe043a433731db8ab4fdffffff02a086010000000000160014bbfab2cc3267cea2df1b68c392cb3f0294978ca922940d00000000001976a914760f657c67273a06cad5b1d757a95f4ed79f5a4b88ac4c8d1300",
"56a65810186f82132cea35357819499468e4e376fca685c023700c75dc3bd216": "01000000000101614b142aeeb827d35d2b77a5b11f16655b6776110ddd9f34424ff49d85706cf90200000000fdffffff02784a4c00000000001600148464f47f35cbcda2e4e5968c5a3a862c43df65a1404b4c00000000001976a914c9efecf0ecba8b42dce0ae2b28e3ea0573d351c988ac0247304402207d8e559ed1f56cb2d02c4cb6c95b95c470f4b3cb3ce97696c3a58e39e55cd9b2022005c9c6f66a7154032a0bb2edc1af1f6c8f488bec52b6581a3a780312fb55681b0121024f83b87ac3440e9b30cec707b7e1461ecc411c2f45520b45a644655528b0a68ae9ca1200",
"6ae728f783b0d4680ed8050c05419f0a89dfd6e28d46acfce7453b4d1b2b0254": "0100000000010496941b9f18710b39bacde890e39a7fa401e6bf49985857cb7adfb8a45147ef1e000000001716001441aec99157d762708339d7faf7a63a8c479ed84cfdffffff96941b9f18710b39bacde890e39a7fa401e6bf49985857cb7adfb8a45147ef1e0100000000fdffffff1a5d1e4ca513983635b0df49fd4f515c66dd26d7bff045cfbd4773aa5d93197f000000006a4730440220652145460092ef42452437b942cb3f563bf15ad90d572d0b31d9f28449b7a8dd022052aae24f58b8f76bd2c9cf165cc98623f22870ccdbef1661b6dbe01c0ef9010f01210375b63dd8e93634bbf162d88b25d6110b5f5a9638f6fe080c85f8b21c2199a1fdfdffffff1a5d1e4ca513983635b0df49fd4f515c66dd26d7bff045cfbd4773aa5d93197f010000008a47304402207517c52b241e6638a84b05385e0b3df806478c2e444f671ca34921f6232ee2e70220624af63d357b83e3abe7cdf03d680705df0049ec02f02918ee371170e3b4a73d014104de408e142c00615294813233cdfe9e7774615ae25d18ba4a1e3b70420bb6666d711464518457f8b947034076038c6f0cfc8940d85d3de0386e0ad88614885c7cfdffffff0480969800000000001976a9149cd3dfb0d87a861770ae4e268e74b45335cf00ab88ac809698000000000017a914f2a76207d7b54bd34282281205923841341d9e1f87002d3101000000001976a914b8d4651937cd7db5bcf5fc98e6d2d8cfa131e85088ac743db20a00000000160014c7d0df09e03173170aed0247243874c6872748ed02483045022100b932cda0aeb029922e126568a48c05d79317747dcd77e61dce44e190e140822002202d13f84338bb272c531c4086277ac11e166c59612f4aefa6e20f78455bdc09970121028e6808a8ac1e9ede621aaabfcad6f86662dbe0ace0236f078eb23c24bc88bd5e02483045022100d74a253262e3898626c12361ba9bb5866f9303b42eec0a55ced0578829e2e61e022059c08e61d90cd63c84de61c796c9d1bc1e2f8217892a7c07b383af357ddd7a730121028641e89822127336fc12ff99b1089eb1a124847639a0e98d17ff03a135ad578b000020c71200",
"72419d187c61cfc67a011095566b374dc2c01f5397e36eafe68e40fc44474112": "0100000002677b2113f26697718c8991823ec0e637f08cb61426da8da508b97449c872490f000000008b4830450221009c50c0f56f34781dfa7b3d540ac724436c67ffdc2e5b2d5a395c9ebf72116ef802205a94a490ea14e4824f36f1658a384aeaecadd54839600141eb20375a49d476d1014104c291245c2ee3babb2a35c39389df56540867f93794215f743b9aa97f5ba114c4cdee8d49d877966728b76bc649bb349efd73adef1d77452a9aac26f8c51ae1ddfdffffff677b2113f26697718c8991823ec0e637f08cb61426da8da508b97449c872490f010000008b483045022100ae0b286493491732e7d3f91ab4ac4cebf8fe8a3397e979cb689e62d350fdcf2802206cf7adf8b29159dd797905351da23a5f6dab9b9dbf5028611e86ccef9ff9012e014104c62c4c4201d5c6597e5999f297427139003fdb82e97c2112e84452d1cfdef31f92dd95e00e4d31a6f5f9af0dadede7f6f4284b84144e912ff15531f36358bda7fdffffff019f7093030000000022002027ce908c4ee5f5b76b4722775f23e20c5474f459619b94040258290395b88afb6ec51200",
"76bcf540b27e75488d95913d0950624511900ae291a37247c22d996bb7cde0b4": "0100000001f4ba9948cdc4face8315c7f0819c76643e813093ffe9fbcf83d798523c7965db000000006a473044022061df431a168483d144d4cffe1c5e860c0a431c19fc56f313a899feb5296a677c02200208474cc1d11ad89b9bebec5ec00b1e0af0adaba0e8b7f28eed4aaf8d409afb0121039742bf6ab70f12f6353e9455da6ed88f028257950450139209b6030e89927997fdffffff01d4f84b00000000001976a9140b93db89b6bf67b5c2db3370b73d806f458b3d0488ac0a171300",
"7f19935daa7347bdcf45f0bfd726dd665c514ffd49dfb035369813a54c1e5d1a": "01000000000102681b6a8dd3a406ee10e4e4aece3c2e69f6680c02f53157be6374c5c98322823a00000000232200209adfa712053a06cc944237148bcefbc48b16eb1dbdc43d1377809bcef1bea9affdffffff681b6a8dd3a406ee10e4e4aece3c2e69f6680c02f53157be6374c5c98322823a0100000023220020f40ed2e3fbffd150e5b74f162c3ce5dae0dfeba008a7f0f8271cf1cf58bfb442fdffffff02801d2c04000000001976a9140cc01e19090785d629cdcc98316f328df554de4f88ac6d455d05000000001976a914b9e828990a8731af4527bcb6d0cddf8d5ffe90ce88ac040047304402206eb65bd302eefae24eea05781e8317503e68584067d35af028a377f0751bb55b0220226453d00db341a4373f1bcac2391f886d3a6e4c30dd15133d1438018d2aad24014730440220343e578591fab0236d28fb361582002180d82cb1ba79eec9139a7a9519fca4260220723784bd708b4a8ed17bb4b83a5fd2e667895078e80eec55119015beb3592fd2016952210222eca5665ed166d090a5241d9a1eb27a92f85f125aaf8df510b2b5f701f3f534210227bca514c22353a7ae15c61506522872afecf10df75e599aabe4d562d0834fce2103601d7d49bada5a57a4832eafe4d1f1096d7b0b051de4a29cd5fc8ad62865e0a553ae0400483045022100b15ea9daacd809eb4d783a1449b7eb33e2965d4229e1a698db10869299dddc670220128871ffd27037a3e9dac6748ce30c14b145dd7f9d56cc9dcde482461fb6882601483045022100cb659e1de65f8b87f64d1b9e62929a5d565bbd13f73a1e6e9dd5f4efa024b6560220667b13ce2e1a3af2afdcedbe83e2120a6e8341198a79efb855b8bc5f93b4729f0169522102d038600af253cf5019f9d5637ca86763eca6827ed7b2b7f8cc6326dffab5eb68210315cdb32b7267e9b366fb93efe29d29705da3db966e8c8feae0c8eb51a7cf48e82103f0335f730b9414acddad5b3ee405da53961796efd8c003e76e5cd306fcc8600c53ae1fc71200",
"9de08bcafc602a3d2270c46cbad1be0ef2e96930bec3944739089f960652e7cb": "010000000001013409c10fd732d9e4b3a9a1c4beb511fa5eb32bc51fd169102a21aa8519618f800000000000fdffffff0640420f00000000001976a9149cd3dfb0d87a861770ae4e268e74b45335cf00ab88ac40420f00000000001976a9149cd3dfb0d87a861770ae4e268e74b45335cf00ab88ac40420f00000000001976a9149cd3dfb0d87a861770ae4e268e74b45335cf00ab88ac80841e00000000001976a9149cd3dfb0d87a861770ae4e268e74b45335cf00ab88ac64064a000000000016001469825d422ca80f2a5438add92d741c7df45211f280969800000000001976a9149cd3dfb0d87a861770ae4e268e74b45335cf00ab88ac02483045022100b4369b18bccb74d72b6a38bd6db59122a9e8af3356890a5ecd84bdb8c7ffe317022076a5aa2b817be7b3637d179106fccebb91acbc34011343c8e8177acc2da4882e0121033c8112bbf60855f4c3ae489954500c4b8f3408665d8e1f63cf3216a76125c69865281300",
"a29d131e766950cae2e97dd4527b7c050293c2f5630470bdd7d00b7fe6db1b9d": "010000000400899af3606e93106a5d0f470e4e2e480dfc2fd56a7257a1f0f4d16fd5961a0f000000006a47304402205b32a834956da303f6d124e1626c7c48a30b8624e33f87a2ae04503c87946691022068aa7f936591fb4b3272046634cf526e4f8a018771c38aff2432a021eea243b70121034bb61618c932b948b9593d1b506092286d9eb70ea7814becef06c3dfcc277d67fdffffff4bc2dcc375abfc7f97d8e8c482f4c7b8bc275384f5271678a32c35d955170753000000006b483045022100de775a580c6cb47061d5a00c6739033f468420c5719f9851f32c6992610abd3902204e6b296e812bb84a60c18c966f6166718922780e6344f243917d7840398eb3db0121025d7317c6910ad2ad3d29a748c7796ddf01e4a8bc5e3bf2a98032f0a20223e4aafdffffff4bc2dcc375abfc7f97d8e8c482f4c7b8bc275384f5271678a32c35d955170753010000006a4730440220615a26f38bf6eb7043794c08fb81f273896b25783346332bec4de8dfaf7ed4d202201c2bc4515fc9b07ded5479d5be452c61ce785099f5e33715e9abd4dbec410e11012103caa46fcb1a6f2505bf66c17901320cc2378057c99e35f0630c41693e97ebb7cffdffffff4bc2dcc375abfc7f97d8e8c482f4c7b8bc275384f5271678a32c35d955170753030000006b483045022100c8fba762dc50041ee3d5c7259c01763ed913063019eefec66678fb8603624faa02200727783ccbdbda8537a6201c63e30c0b2eb9afd0e26cb568d885e6151ef2a8540121027254a862a288cfd98853161f575c49ec0b38f79c3ef0bf1fb89986a3c36a8906fdffffff0240787d01000000001976a9149cd3dfb0d87a861770ae4e268e74b45335cf00ab88ac3bfc1502000000001976a914c30f2af6a79296b6531bf34dba14c8419be8fb7d88ac52c51200",
"c1433779c5faec5df5e7bdc51214a95f15deeab842c23efbdde3acf82c165462": "0100000003aabec9cb99096073ae47cfb84bfd5b0063ae7f157956fd37c5d1a79d74ee6e33000000008b4830450221008136fc880d5e24fdd9d2a43f5085f374fef013b814f625d44a8075104981d92a0220744526ec8fc7887c586968f22403f0180d54c9b7ff8db9b553a3c4497982e8250141047b8b4c91c5a93a1f2f171c619ca41770427aa07d6de5130c3ba23204b05510b3bd58b7a1b35b9c4409104cfe05e1677fc8b51c03eac98b206e5d6851b31d2368fdffffff16d23bdc750c7023c085a6fc76e3e468944919783535ea2c13826f181058a656010000008a47304402204148410f2d796b1bb976b83904167d28b65dcd7c21b3876022b4fa70abc86280022039ea474245c3dc8cd7e5a572a155df7a6a54496e50c73d9fed28e76a1cf998c00141044702781daed201e35aa07e74d7bda7069e487757a71e3334dc238144ad78819de4120d262e8488068e16c13eea6092e3ab2f729c13ef9a8c42136d6365820f7dfdffffff68091e76227e99b098ef8d6d5f7c1bb2a154dd49103b93d7b8d7408d49f07be0010000008b4830450221008228af51b61a4ee09f58b4a97f204a639c9c9d9787f79b2fc64ea54402c8547902201ed81fca828391d83df5fbd01a3fa5dd87168c455ed7451ba8ccb5bf06942c3b0141046fcdfab26ac08c827e68328dbbf417bbe7577a2baaa5acc29d3e33b3cc0c6366df34455a9f1754cb0952c48461f71ca296b379a574e33bcdbb5ed26bad31220bfdffffff0210791c00000000001976a914a4b991e7c72996c424fe0215f70be6aa7fcae22c88ac80c3c901000000001976a914b0f6e64ea993466f84050becc101062bb502b4e488ac7af31200",
"c2595a521111eb0298e183e0a74befc91f6f93b03e2f7d43c7ad63a9196f7e3a": "01000000018557003cb450f53922f63740f0f77db892ef27e15b2614b56309bfcee96a0ad3010000006a473044022041923c905ae4b5ed9a21aa94c60b7dbcb8176d58d1eb1506d9fb1e293b65ce01022015d6e9d2e696925c6ad46ce97cc23dec455defa6309b839abf979effc83b8b160121029332bf6bed07dcca4be8a5a9d60648526e205d60c75a21291bffcdefccafdac3fdffffff01c01c0f00000000001976a914a2185918aa1006f96ed47897b8fb620f28a1b09988ac01171300",
"e07bf0498d40d7b8d7933b1049dd54a1b21b7c5f6d8def98b0997e22761e0968": "01000000016d445091b7b4fa19cbbee30141071b2202d0c27d195b9d6d2bcc7085c9cd9127010000008b483045022100daf671b52393af79487667eddc92ebcc657e8ae743c387b25d1c1a2e19c7a4e7022015ef2a52ea7e94695de8898821f9da539815775516f18329896e5fc52a3563b30141041704a3daafaace77c8e6e54cf35ed27d0bf9bb8bcd54d1b955735ff63ec54fe82a80862d455c12e739108b345d585014bf6aa0cbd403817c89efa18b3c06d6b5fdffffff02144a4c00000000001976a9148942ac692ace81019176c4fb0ac408b18b49237f88ac404b4c00000000001976a914dd36d773acb68ac1041bc31b8a40ee504b164b2e88ace9ca1200",
"e453e7346693b507561691b5ea73f8eba60bfc8998056226df55b2fac88ba306": "010000000125af87b0c2ebb9539d644e97e6159ccb8e1aa80fe986d01f60d2f3f37f207ae8010000008b483045022100baed0747099f7b28a5624005d50adf1069120356ac68c471a56c511a5bf6972b022046fbf8ec6950a307c3c18ca32ad2955c559b0d9bbd9ec25b64f4806f78cadf770141041ea9afa5231dc4d65a2667789ebf6806829b6cf88bfe443228f95263730b7b70fb8b00b2b33777e168bcc7ad8e0afa5c7828842794ce3814c901e24193700f6cfdffffff02a0860100000000001976a914ade907333744c953140355ff60d341cedf7609fd88ac68830a00000000001976a9145d48feae4c97677e4ca7dcd73b0d9fd1399c962b88acc9cc1300",
"e87a207ff3f3d2601fd086e90fa81a8ecb9c15e6974e649d53b9ebc2b087af25": "01000000010db780fff7dfcef6dba9268ecf4f6df45a1a86b86cad6f59738a0ce29b145c47010000008a47304402202887ec6ec200e4e2b4178112633011cbdbc999e66d398b1ff3998e23f7c5541802204964bd07c0f18c48b7b9c00fbe34c7bc035efc479e21a4fa196027743f06095f0141044f1714ed25332bb2f74be169784577d0838aa66f2374f5d8cbbf216063626822d536411d13cbfcef1ff3cc1d58499578bc4a3c4a0be2e5184b2dd7963ef67713fdffffff02a0860100000000001600145bbdf3ba178f517d4812d286a40c436a9088076e6a0b0c00000000001976a9143fc16bef782f6856ff6638b1b99e4d3f863581d388acfbcb1300"
}
# Deterministic (sorted) ordering of the txids above; the txorder tests index
# into this list to replay the same transactions in different shuffled orders.
# sorted() accepts any iterable, so the intermediate list() copy was redundant.
txid_list = sorted(transactions)
def setUp(self):
    """Give each test a fresh SimpleConfig rooted at its own electrum_path."""
    super().setUp()
    options = {'electrum_path': self.electrum_path}
    self.config = SimpleConfig(options)
def create_old_wallet(self):
    """Build the legacy (old-MPK) standard wallet shared by the history tests."""
    # seed words: powerful random nobody notice nothing important anyway look away hidden message over
    old_mpk = 'e9d4b7866dd1e91c862aebf62a49548c7dbf7bcc6e4b7b8c9da820c7737968df9c09d5a3e271dc814a29981f81b3faaf2737b551ef5dcc6189cf0f8252c442b3'
    ks = keystore.from_old_mpk(old_mpk)
    w = WalletIntegrityHelper.create_standard_wallet(ks, gap_limit=20, config=self.config)
    # some txns are beyond gap limit:
    w.create_new_address(for_change=True)
    return w
@unittest.skip("skip until replace with zcash wallet")
@mock.patch.object(wallet.Abstract_Wallet, 'save_db')
def test_restoring_old_wallet_txorder1(self, mock_save_db):
    """Replay the txns in one shuffled order; the balance must not depend on order."""
    w = self.create_old_wallet()
    shuffled_order = [2, 12, 7, 9, 11, 10, 16, 6, 17, 1, 13, 15, 5, 8, 4, 0, 14, 18, 3]
    for idx in shuffled_order:
        raw_tx = self.transactions[self.txid_list[idx]]
        tx = Transaction(raw_tx)
        w.receive_tx_callback(tx.txid(), tx, TX_HEIGHT_UNCONFIRMED)
    self.assertEqual(27633300, sum(w.get_balance()))
@unittest.skip("skip until replace with zcash wallet")
@mock.patch.object(wallet.Abstract_Wallet, 'save_db')
def test_restoring_old_wallet_txorder2(self, mock_save_db):
    """Replay the txns in a second shuffled order; balance must be order-independent."""
    w = self.create_old_wallet()
    shuffled_order = [9, 18, 2, 0, 13, 3, 1, 11, 4, 17, 7, 14, 12, 15, 10, 8, 5, 6, 16]
    for idx in shuffled_order:
        raw_tx = self.transactions[self.txid_list[idx]]
        tx = Transaction(raw_tx)
        w.receive_tx_callback(tx.txid(), tx, TX_HEIGHT_UNCONFIRMED)
    self.assertEqual(27633300, sum(w.get_balance()))
@unittest.skip("skip until replace with zcash wallet")
@mock.patch.object(wallet.Abstract_Wallet, 'save_db')
def test_restoring_old_wallet_txorder3(self, mock_save_db):
    """Replay the txns in a third shuffled order; balance must be order-independent."""
    w = self.create_old_wallet()
    shuffled_order = [5, 8, 17, 0, 9, 10, 12, 3, 15, 18, 2, 11, 14, 7, 16, 1, 4, 6, 13]
    for idx in shuffled_order:
        raw_tx = self.transactions[self.txid_list[idx]]
        tx = Transaction(raw_tx)
        w.receive_tx_callback(tx.txid(), tx, TX_HEIGHT_UNCONFIRMED)
    self.assertEqual(27633300, sum(w.get_balance()))
class TestWalletHistory_EvilGapLimit(TestCaseForTestnet):
transactions = {
# txn A:
"511a35e240f4c8855de4c548dad932d03611a37e94e9203fdb6fc79911fe1dd4": "010000000001018aacc3c8f98964232ebb74e379d8ff4e800991eecfcf64bd1793954f5e50a8790100000000fdffffff0340420f0000000000160014dbf321e905d544b54b86a2f3ed95b0ac66a3ddb0ff0514000000000016001474f1c130d3db22894efb3b7612b2c924628d0d7e80841e000000000016001488492707677190c073b6555fb08d37e91bbb75d802483045022100cf2904e09ea9d2670367eccc184d92fcb8a9b9c79a12e4efe81df161077945db02203530276a3401d944cf7a292e0660f36ee1df4a1c92c131d2c0d31d267d52524901210215f523a412a5262612e1a5ef9842dc864b0d73dc61fb4c6bfd480a867bebb1632e181400",
# txn B:
"fde0b68938709c4979827caa576e9455ded148537fdb798fd05680da64dc1b4f": "01000000000101a317998ac6cc717de17213804e1459900fe257b9f4a3b9b9edd29806728277530100000000fdffffff03c0c62d00000000001600149543301687b1ca2c67718d55fbe10413c73ddec200093d00000000001600141bc12094a4475dcfbf24f9920dafddf9104ca95b3e4a4c0000000000160014b226a59f2609aa7da4026fe2c231b5ae7be12ac302483045022100f1082386d2ce81612a3957e2801803938f6c0066d76cfbd853918d4119f396df022077d05a2b482b89707a8a600013cb08448cf211218a462f2a23c2c0d80a8a0ca7012103f4aac7e189de53d95e0cb2e45d3c0b2be18e93420734934c61a6a5ad88dd541033181400",
# txn C:
"268fce617aaaa4847835c2212b984d7b7741fdab65de22813288341819bc5656": "010000000001014f1bdc64da8056d08f79db7f5348d1de55946e57aa7c8279499c703889b6e0fd0100000000fdffffff0260e316000000000016001445e9879cf7cd5b4a15df7ddcaf5c6dca0e1508bacc242600000000001600141bc12094a4475dcfbf24f9920dafddf9104ca95b02483045022100ae3618912f341fefee11b67e0047c47c88c4fa031561c3fafe993259dd14d846022056fa0a5b5d8a65942fa68bcc2f848fd71fa455ba42bc2d421b67eb49ba62aa4e01210394d8f4f06c2ea9c569eb050c897737a7315e7f2104d9b536b49968cc89a1f11033181400",
}
def setUp(self):
    """Config with merkle checking skipped, so addresses can be derived without SPV."""
    super().setUp()
    options = {
        'electrum_path': self.electrum_path,
        'skipmerklecheck': True,  # needed for Synchronizer to generate new addresses without SPV
    }
    self.config = SimpleConfig(options)
def create_wallet(self):
    """Restore the watching-only standard wallet (gap limit 20) used by this class."""
    # seed words: nephew work weather maze pyramid employ check permit garment scene kiwi smooth
    xpub = 'vpub5Vhmk4dEJKanDTTw6immKXa3thw45u3gbd1rPYjREB6viP13sVTWcH6kvbR2YeLtGjradr6SFLVt9PxWDBSrvw1Dc1nmd3oko3m24CQbfaJ'
    ks = keystore.from_xpub(xpub)
    return WalletIntegrityHelper.create_standard_wallet(ks, gap_limit=20, config=self.config)
@unittest.skip("skip until replace with zcash wallet")
@mock.patch.object(wallet.Abstract_Wallet, 'save_db')
def test_restoring_wallet_txorder1(self, mock_save_db):
    """Feed per-address histories (some far beyond the gap limit) and check the balance."""
    txA = '511a35e240f4c8855de4c548dad932d03611a37e94e9203fdb6fc79911fe1dd4'
    txB = 'fde0b68938709c4979827caa576e9455ded148537fdb798fd05680da64dc1b4f'
    txC = '268fce617aaaa4847835c2212b984d7b7741fdab65de22813288341819bc5656'
    w = self.create_wallet()
    w.db.put('stored_height', 1316917 + 100)
    for raw_tx in self.transactions.values():
        w.add_transaction(Transaction(raw_tx))
    # txn A is an external incoming txn paying to addr (3) and (15)
    # txn B is an external incoming txn paying to addr (4) and (25)
    # txn C is an internal transfer txn from addr (25) -- to -- (1) and (25)
    per_address_history = [
        ('tb1qgh5c088he4d559wl0hw27hrdeg8p2z96pefn4q', [(txC, 1316917)]),                  # HD index 1
        ('tb1qm0ejr6g964zt2jux5te7m9ds43n28hdsdz9ull', [(txA, 1316912)]),                  # HD index 3
        ('tb1qj4pnq958k89zcem3342lhcgyz0rnmhkzl6x0cl', [(txB, 1316917)]),                  # HD index 4
        ('tb1q3pyjwpm8wxgvquak240mprfhaydmkawcsl25je', [(txA, 1316912)]),                  # HD index 15
        ('tb1qr0qjp99ygawul0eylxfqmt7alygye22mj33vej', [(txB, 1316917), (txC, 1316917)]),  # HD index 25
    ]
    for addr, history in per_address_history:
        w.receive_history_callback(addr, history, {})
        w.synchronize()
    self.assertEqual(9999788, sum(w.get_balance()))
class TestWalletHistory_DoubleSpend(TestCaseForTestnet):
transactions = {
# txn A:
"0cce62d61ec87ad3e391e8cd752df62e0c952ce45f52885d6d10988e02794060": "0200000001191601a44a81e061502b7bfbc6eaa1cef6d1e6af5308ef96c9342f71dbf4b9b5000000006b483045022100a6d44d0a651790a477e75334adfb8aae94d6612d01187b2c02526e340a7fd6c8022028bdf7a64a54906b13b145cd5dab21a26bd4b85d6044e9b97bceab5be44c2a9201210253e8e0254b0c95776786e40984c1aa32a7d03efa6bdacdea5f421b774917d346feffffff026b20fa04000000001976a914dc3a05eb562fb6f3ef8076946514d4730cff299988aca0860100000000001976a91421919b94ae5cefcdf0271191459157cdb41c4cbf88aca6240700",
# txn B:
"e7f4e47f41421e37a8600b6350befd586f30db60a88d0992d54df280498f0968": "0200000001604079028e98106d5d88525fe42c950c2ef62d75cde891e3d37ac81ed662ce0c000000006b483045022100a6d44d0a651790a477e75334adfb8aae94d6612d01187b2c02526e340a7fd6c8022028bdf7a64a54906b13b145cd5dab21a26bd4b85d6044e9b97bceab5be44c2a9201210253e8e0254b0c95776786e40984c1aa32a7d03efa6bdacdea5f421b774917d346feffffff01831cfa04000000001976a914024db2e87dd7cfd0e5f266c5f212e21a31d805a588aca6240700",
# txn C:
"a04328fbc9f28268378a8b9cf103db21ca7d673bf1cc7fa4d61b6a7265f07a6b": "0200000001604079028e98106d5d88525fe42c950c2ef62d75cde891e3d37ac81ed662ce0c000000006b483045022100a6d44d0a651790a477e75334adfb8aae94d6612d01187b2c02526e340a7fd6c8022028bdf7a64a54906b13b145cd5dab21a26bd4b85d6044e9b97bceab5be44c2a9201210253e8e0254b0c95776786e40984c1aa32a7d03efa6bdacdea5f421b774917d346feffffff01831cfa04000000001976a914899cdc441b5ec43f1e157d1f93e2ffbea99e051f88aca6240700",
}
def setUp(self):
    """Give each test a fresh SimpleConfig rooted at its own electrum_path."""
    super().setUp()
    options = {'electrum_path': self.electrum_path}
    self.config = SimpleConfig(options)
@mock.patch.object(wallet.Abstract_Wallet, 'save_db')
def test_restoring_wallet_without_manual_delete(self, mock_save_db):
    """Add all three txns as-is; the double-spend C replaces B's outgoing payment."""
    seed = "hint shock chair puzzle shock traffic drastic note dinosaur mention suggest sweet"
    restored = restore_wallet_from_text(seed,
                                        path='if_this_exists_mocking_failed_648151893',
                                        gap_limit=5,
                                        config=self.config)
    w = restored['wallet']  # type: Abstract_Wallet
    # txn A is an external incoming txn funding the wallet
    # txn B is an outgoing payment to an external address
    # txn C is double-spending txn B, to a wallet address
    for raw_tx in self.transactions.values():
        w.add_transaction(Transaction(raw_tx))
    self.assertEqual(83500163, sum(w.get_balance()))
@mock.patch.object(wallet.Abstract_Wallet, 'save_db')
def test_restoring_wallet_with_manual_delete(self, mock_save_db):
    """Manually deleting txn B must let its double-spend C be accepted afterwards."""
    seed = "hint shock chair puzzle shock traffic drastic note dinosaur mention suggest sweet"
    restored = restore_wallet_from_text(seed,
                                        path='if_this_exists_mocking_failed_648151893',
                                        gap_limit=5,
                                        config=self.config)
    w = restored['wallet']  # type: Abstract_Wallet
    # txn A is an external incoming txn funding the wallet
    txA = Transaction(self.transactions["0cce62d61ec87ad3e391e8cd752df62e0c952ce45f52885d6d10988e02794060"])
    w.add_transaction(txA)
    # txn B is an outgoing payment to an external address
    txB = Transaction(self.transactions["e7f4e47f41421e37a8600b6350befd586f30db60a88d0992d54df280498f0968"])
    w.add_transaction(txB)
    # now the user manually deletes txn B to attempt the double spend
    # rationale1: user might do this with opt-in RBF transactions
    # rationale2: this might be a local transaction, in which case the GUI even allows it
    w.remove_transaction(txB.txid())
    # txn C is double-spending txn B, to a wallet address
    txC = Transaction(self.transactions["a04328fbc9f28268378a8b9cf103db21ca7d673bf1cc7fa4d61b6a7265f07a6b"])
    w.add_transaction(txC)
    self.assertEqual(83500163, sum(w.get_balance()))
| 122.141538
| 7,306
| 0.87175
| 5,258
| 119,088
| 19.537847
| 0.161088
| 0.020588
| 0.005295
| 0.004906
| 0.389662
| 0.379704
| 0.37435
| 0.361501
| 0.357559
| 0.353305
| 0
| 0.455354
| 0.088355
| 119,088
| 974
| 7,307
| 122.26694
| 0.490135
| 0.029348
| 0
| 0.558233
| 0
| 0.002677
| 0.691546
| 0.668838
| 0
| 1
| 0
| 0
| 0.262383
| 1
| 0.056225
| false
| 0.025435
| 0.042838
| 0
| 0.13253
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
9c03b3a7e450bffbdf8c3187725f652e4abddb8f
| 24,301
|
py
|
Python
|
ezResearch/glass_lang/generate_glass_blockstate.py
|
Zoko061602/planar-artifice
|
1c64a38b7bffdc46a6b838aa809b4daf8fae6830
|
[
"MIT"
] | 3
|
2020-09-23T08:00:56.000Z
|
2020-09-30T17:15:51.000Z
|
ezResearch/glass_lang/generate_glass_blockstate.py
|
Zoko061602/planar-artifice
|
1c64a38b7bffdc46a6b838aa809b4daf8fae6830
|
[
"MIT"
] | 5
|
2020-09-26T00:46:20.000Z
|
2020-10-03T04:40:18.000Z
|
ezResearch/glass_lang/generate_glass_blockstate.py
|
Zoko061602/planar-artifice
|
1c64a38b7bffdc46a6b838aa809b4daf8fae6830
|
[
"MIT"
] | 1
|
2020-09-26T15:35:21.000Z
|
2020-09-26T15:35:21.000Z
|
# File-name / texture-name suffixes: one blockstate set is generated per variant below.
variant = ["_clear", "_scratched", "_crystal", "_dim", "_dark", "_bright", "_ghostly", "_ethereal", "_foreboding", "_strong"]
# Colour ids used to key the "color=..." stained-glass blockstate variants.
colors = ["white", "orange", "magenta", "light_blue", "yellow", "lime", "pink", "gray", "silver", "cyan", "purple", "blue", "brown", "green", "red", "black"]
for prefix in variant:
with open("./glass" + prefix + ".json", "w") as f:
f.write('''
{
"forge_marker": 1,
"defaults": {
"model": "planarartifice:cube_ctm_cutout",
"textures": {
"all": "planarartifice:blocks/glass/glass''' + prefix + '''",
"particle": "planarartifice:blocks/glass/glass''' + prefix + '''",
"connected_tex": "planarartifice:blocks/glass/glass''' + prefix + '''_ctm"
}
},
"variants": {
"normal": [{}],
"inventory": [{}]
}
}
''')
with open("./glass" + prefix + "_rainbow.json", "w") as f:
f.write('''
{
"forge_marker": 1,
"defaults": {
"model": "planarartifice:cube_ctm_translucent",
"textures": {
"all": "planarartifice:blocks/glass/glass''' + prefix + '''_rainbow",
"particle": "planarartifice:blocks/glass/glass''' + prefix + '''_rainbow",
"connected_tex": "planarartifice:blocks/glass/glass''' + prefix + '''_rainbow_ctm"
}
},
"variants": {
"normal": [{}],
"inventory": [{}]
}
}
''')
with open("./glass_panel" + prefix + ".json", "w") as f:
f.write('''
{
"forge_marker": 1,
"defaults": {
"model": "planarartifice:panel_ctm_cutout",
"textures": {
"all": "planarartifice:blocks/glass/glass''' + prefix + '''",
"particle": "planarartifice:blocks/glass/glass''' + prefix + '''",
"connected_tex": "planarartifice:blocks/glass/glass''' + prefix + '''_ctm"
}
},
"variants": {
"normal": [{}],
"inventory": [{}]
}
}
''')
with open("./glass_panel" + prefix + "_rainbow.json", "w") as f:
f.write('''
{
"forge_marker": 1,
"defaults": {
"model": "planarartifice:panel_ctm_translucent",
"textures": {
"all": "planarartifice:blocks/glass/glass''' + prefix + '''_rainbow",
"particle": "planarartifice:blocks/glass/glass''' + prefix + '''_rainbow",
"connected_tex": "planarartifice:blocks/glass/glass''' + prefix + '''_rainbow_ctm"
}
},
"variants": {
"normal": [{}],
"inventory": [{}]
}
}
''')
##################### STAINED GLASS #########
with open("./stained_glass" + prefix + ".json", "w") as f:
q = ''
for color in colors:
q += '''"color=''' + color + '''": [{
"textures": {
"all": "planarartifice:blocks/glass/glass''' + prefix + '''_''' + color + '''",
"particle": "planarartifice:blocks/glass/glass''' + prefix + '''_''' + color + '''",
"connected_tex": "planarartifice:blocks/glass/glass''' + prefix + '''_''' + color + '''_ctm"
}
}],'''
f.write('''
{
"forge_marker": 1,
"defaults": {
"model": "planarartifice:cube_ctm_translucent"
},
"variants": {
''' + q[:-1] + '''
}
}
''')
with open("./stained_glass_panel" + prefix + ".json", "w") as f:
q = ''
for color in colors:
q += '''"color=''' + color + '''": [{
"textures": {
"all": "planarartifice:blocks/glass/glass''' + prefix + '''_''' + color + '''",
"particle": "planarartifice:blocks/glass/glass''' + prefix + '''_''' + color + '''",
"connected_tex": "planarartifice:blocks/glass/glass''' + prefix + '''_''' + color + '''_ctm"
}
}],'''
f.write('''
{
"forge_marker": 1,
"defaults": {
"model": "planarartifice:panel_ctm_translucent"
},
"variants": {
''' + q[:-1] + '''
}
}
''')
################# GLASS PANE ###########
with open("./glass_pane" + prefix + ".json", "w") as f:
f.write('''
{
"forge_marker": 1,
"defaults": {
"transform": "forge:default-item",
"textures": {
"edge" : "blocks/glass_pane_top",
"pane" : "planarartifice:blocks/glass/glass''' + prefix + '''",
"particle": "planarartifice:blocks/glass/glass''' + prefix + '''",
"pane_ct": "planarartifice:blocks/glass/glass''' + prefix + '''_ctm"
}
},
"variants": {
"inventory": [{ "model": "planarartifice:pane/ctm_ew" }],
"east=false,north=false,south=false,west=false": [{ "model": "planarartifice:pane/post" }],
"east=false,north=true,south=false,west=false" : [{ "model": "planarartifice:pane/ctm_n" }],
"east=true,north=false,south=false,west=false" : [{ "model": "planarartifice:pane/ctm_e" }],
"east=false,north=false,south=true,west=false" : [{ "model": "planarartifice:pane/ctm_s" }],
"east=false,north=false,south=false,west=true" : [{ "model": "planarartifice:pane/ctm_w" }],
"east=true,north=true,south=false,west=false" : [{ "model": "planarartifice:pane/ctm_ne" }],
"east=true,north=false,south=true,west=false" : [{ "model": "planarartifice:pane/ctm_se" }],
"east=false,north=false,south=true,west=true" : [{ "model": "planarartifice:pane/ctm_sw" }],
"east=false,north=true,south=false,west=true" : [{ "model": "planarartifice:pane/ctm_nw" }],
"east=false,north=true,south=true,west=false" : [{ "model": "planarartifice:pane/ctm_ns" }],
"east=true,north=false,south=false,west=true" : [{ "model": "planarartifice:pane/ctm_ew" }],
"east=true,north=true,south=true,west=false" : [{ "model": "planarartifice:pane/ctm_nse" }],
"east=true,north=false,south=true,west=true" : [{ "model": "planarartifice:pane/ctm_sew" }],
"east=false,north=true,south=true,west=true" : [{ "model": "planarartifice:pane/ctm_nsw" }],
"east=true,north=true,south=false,west=true" : [{ "model": "planarartifice:pane/ctm_new" }],
"east=true,north=true,south=true,west=true" : [{ "model": "planarartifice:pane/ctm_nsew" }]
}
}
''')
with open("./glass_pane" + prefix + "_rainbow.json", "w") as f:
f.write('''
{
"forge_marker": 1,
"defaults": {
"transform": "forge:default-item",
"textures": {
"edge" : "planarartifice:blocks/glass/glass_pane_top_rainbow",
"pane" : "planarartifice:blocks/glass/glass''' + prefix + '''_rainbow",
"particle": "planarartifice:blocks/glass/glass''' + prefix + '''_rainbow",
"pane_ct": "planarartifice:blocks/glass/glass''' + prefix + '''_rainbow_ctm"
}
},
"variants": {
"inventory": [{ "model": "planarartifice:pane/ctm_ew_translucent" }],
"east=false,north=false,south=false,west=false": [{ "model": "planarartifice:pane/post_translucent" }],
"east=false,north=true,south=false,west=false" : [{ "model": "planarartifice:pane/ctm_n_translucent" }],
"east=true,north=false,south=false,west=false" : [{ "model": "planarartifice:pane/ctm_e_translucent" }],
"east=false,north=false,south=true,west=false" : [{ "model": "planarartifice:pane/ctm_s_translucent" }],
"east=false,north=false,south=false,west=true" : [{ "model": "planarartifice:pane/ctm_w_translucent" }],
"east=true,north=true,south=false,west=false" : [{ "model": "planarartifice:pane/ctm_ne_translucent" }],
"east=true,north=false,south=true,west=false" : [{ "model": "planarartifice:pane/ctm_se_translucent" }],
"east=false,north=false,south=true,west=true" : [{ "model": "planarartifice:pane/ctm_sw_translucent" }],
"east=false,north=true,south=false,west=true" : [{ "model": "planarartifice:pane/ctm_nw_translucent" }],
"east=false,north=true,south=true,west=false" : [{ "model": "planarartifice:pane/ctm_ns_translucent" }],
"east=true,north=false,south=false,west=true" : [{ "model": "planarartifice:pane/ctm_ew_translucent" }],
"east=true,north=true,south=true,west=false" : [{ "model": "planarartifice:pane/ctm_nse_translucent" }],
"east=true,north=false,south=true,west=true" : [{ "model": "planarartifice:pane/ctm_sew_translucent" }],
"east=false,north=true,south=true,west=true" : [{ "model": "planarartifice:pane/ctm_nsw_translucent" }],
"east=true,north=true,south=false,west=true" : [{ "model": "planarartifice:pane/ctm_new_translucent" }],
"east=true,north=true,south=true,west=true" : [{ "model": "planarartifice:pane/ctm_nsew_translucent" }]
}
}
''')
############ STAINED GLASS PANE #####
with open("./stained_glass_pane" + prefix + ".json", "w") as f:
q = ''
for color in colors:
q += '''"color=''' + color + ''',east=false,north=false,south=false,west=false": [{ "textures": {
"edge" : "blocks/glass_pane_top_''' + color + '''",
"pane" : "planarartifice:blocks/glass/glass''' + prefix + '''_''' + color + '''",
"particle": "planarartifice:blocks/glass/glass''' + prefix + '''_''' + color + '''",
"pane_ct": "planarartifice:blocks/glass/glass''' + prefix + '''_''' + color + '''_ctm"
}, "model": "planarartifice:pane/post_translucent" }],
"color=''' + color + ''',east=false,north=true,south=false,west=false" : [{ "textures": {
"edge" : "blocks/glass_pane_top_''' + color + '''",
"pane" : "planarartifice:blocks/glass/glass''' + prefix + '''_''' + color + '''",
"particle": "planarartifice:blocks/glass/glass''' + prefix + '''_''' + color + '''",
"pane_ct": "planarartifice:blocks/glass/glass''' + prefix + '''_''' + color + '''_ctm"
}, "model": "planarartifice:pane/ctm_n_translucent" }],
"color=''' + color + ''',east=true,north=false,south=false,west=false" : [{ "textures": {
"edge" : "blocks/glass_pane_top_''' + color + '''",
"pane" : "planarartifice:blocks/glass/glass''' + prefix + '''_''' + color + '''",
"particle": "planarartifice:blocks/glass/glass''' + prefix + '''_''' + color + '''",
"pane_ct": "planarartifice:blocks/glass/glass''' + prefix + '''_''' + color + '''_ctm"
}, "model": "planarartifice:pane/ctm_e_translucent" }],
"color=''' + color + ''',east=false,north=false,south=true,west=false" : [{ "textures": {
"edge" : "blocks/glass_pane_top_''' + color + '''",
"pane" : "planarartifice:blocks/glass/glass''' + prefix + '''_''' + color + '''",
"particle": "planarartifice:blocks/glass/glass''' + prefix + '''_''' + color + '''",
"pane_ct": "planarartifice:blocks/glass/glass''' + prefix + '''_''' + color + '''_ctm"
}, "model": "planarartifice:pane/ctm_s_translucent" }],
"color=''' + color + ''',east=false,north=false,south=false,west=true" : [{ "textures": {
"edge" : "blocks/glass_pane_top_''' + color + '''",
"pane" : "planarartifice:blocks/glass/glass''' + prefix + '''_''' + color + '''",
"particle": "planarartifice:blocks/glass/glass''' + prefix + '''_''' + color + '''",
"pane_ct": "planarartifice:blocks/glass/glass''' + prefix + '''_''' + color + '''_ctm"
}, "model": "planarartifice:pane/ctm_w_translucent" }],
"color=''' + color + ''',east=true,north=true,south=false,west=false" : [{ "textures": {
"edge" : "blocks/glass_pane_top_''' + color + '''",
"pane" : "planarartifice:blocks/glass/glass''' + prefix + '''_''' + color + '''",
"particle": "planarartifice:blocks/glass/glass''' + prefix + '''_''' + color + '''",
"pane_ct": "planarartifice:blocks/glass/glass''' + prefix + '''_''' + color + '''_ctm"
}, "model": "planarartifice:pane/ctm_ne_translucent" }],
"color=''' + color + ''',east=true,north=false,south=true,west=false" : [{ "textures": {
"edge" : "blocks/glass_pane_top_''' + color + '''",
"pane" : "planarartifice:blocks/glass/glass''' + prefix + '''_''' + color + '''",
"particle": "planarartifice:blocks/glass/glass''' + prefix + '''_''' + color + '''",
"pane_ct": "planarartifice:blocks/glass/glass''' + prefix + '''_''' + color + '''_ctm"
}, "model": "planarartifice:pane/ctm_se_translucent" }],
"color=''' + color + ''',east=false,north=false,south=true,west=true" : [{ "textures": {
"edge" : "blocks/glass_pane_top_''' + color + '''",
"pane" : "planarartifice:blocks/glass/glass''' + prefix + '''_''' + color + '''",
"particle": "planarartifice:blocks/glass/glass''' + prefix + '''_''' + color + '''",
"pane_ct": "planarartifice:blocks/glass/glass''' + prefix + '''_''' + color + '''_ctm"
}, "model": "planarartifice:pane/ctm_sw_translucent" }],
"color=''' + color + ''',east=false,north=true,south=false,west=true" : [{ "textures": {
"edge" : "blocks/glass_pane_top_''' + color + '''",
"pane" : "planarartifice:blocks/glass/glass''' + prefix + '''_''' + color + '''",
"particle": "planarartifice:blocks/glass/glass''' + prefix + '''_''' + color + '''",
"pane_ct": "planarartifice:blocks/glass/glass''' + prefix + '''_''' + color + '''_ctm"
}, "model": "planarartifice:pane/ctm_nw_translucent" }],'''
q += '''"color=''' + color + ''',east=false,north=true,south=true,west=false" : [{ "textures": {
"edge" : "blocks/glass_pane_top_''' + color + '''",
"pane" : "planarartifice:blocks/glass/glass''' + prefix + '''_''' + color + '''",
"particle": "planarartifice:blocks/glass/glass''' + prefix + '''_''' + color + '''",
"pane_ct": "planarartifice:blocks/glass/glass''' + prefix + '''_''' + color + '''_ctm"
}, "model": "planarartifice:pane/ctm_ns_translucent" }],
"color=''' + color + ''',east=true,north=false,south=false,west=true" : [{ "textures": {
"edge" : "blocks/glass_pane_top_''' + color + '''",
"pane" : "planarartifice:blocks/glass/glass''' + prefix + '''_''' + color + '''",
"particle": "planarartifice:blocks/glass/glass''' + prefix + '''_''' + color + '''",
"pane_ct": "planarartifice:blocks/glass/glass''' + prefix + '''_''' + color + '''_ctm"
}, "model": "planarartifice:pane/ctm_ew_translucent" }],
"color=''' + color + ''',east=true,north=true,south=true,west=false" : [{ "textures": {
"edge" : "blocks/glass_pane_top_''' + color + '''",
"pane" : "planarartifice:blocks/glass/glass''' + prefix + '''_''' + color + '''",
"particle": "planarartifice:blocks/glass/glass''' + prefix + '''_''' + color + '''",
"pane_ct": "planarartifice:blocks/glass/glass''' + prefix + '''_''' + color + '''_ctm"
}, "model": "planarartifice:pane/ctm_nse_translucent" }],
"color=''' + color + ''',east=true,north=false,south=true,west=true" : [{ "textures": {
"edge" : "blocks/glass_pane_top_''' + color + '''",
"pane" : "planarartifice:blocks/glass/glass''' + prefix + '''_''' + color + '''",
"particle": "planarartifice:blocks/glass/glass''' + prefix + '''_''' + color + '''",
"pane_ct": "planarartifice:blocks/glass/glass''' + prefix + '''_''' + color + '''_ctm"
}, "model": "planarartifice:pane/ctm_sew_translucent" }],
"color=''' + color + ''',east=false,north=true,south=true,west=true" : [{ "textures": {
"edge" : "blocks/glass_pane_top_''' + color + '''",
"pane" : "planarartifice:blocks/glass/glass''' + prefix + '''_''' + color + '''",
"particle": "planarartifice:blocks/glass/glass''' + prefix + '''_''' + color + '''",
"pane_ct": "planarartifice:blocks/glass/glass''' + prefix + '''_''' + color + '''_ctm"
}, "model": "planarartifice:pane/ctm_nsw_translucent" }],
"color=''' + color + ''',east=true,north=true,south=false,west=true" : [{ "textures": {
"edge" : "blocks/glass_pane_top_''' + color + '''",
"pane" : "planarartifice:blocks/glass/glass''' + prefix + '''_''' + color + '''",
"particle": "planarartifice:blocks/glass/glass''' + prefix + '''_''' + color + '''",
"pane_ct": "planarartifice:blocks/glass/glass''' + prefix + '''_''' + color + '''_ctm"
}, "model": "planarartifice:pane/ctm_new_translucent" }],
"color=''' + color + ''',east=true,north=true,south=true,west=true" : [{ "textures": {
"edge" : "blocks/glass_pane_top_''' + color + '''",
"pane" : "planarartifice:blocks/glass/glass''' + prefix + '''_''' + color + '''",
"particle": "planarartifice:blocks/glass/glass''' + prefix + '''_''' + color + '''",
"pane_ct": "planarartifice:blocks/glass/glass''' + prefix + '''_''' + color + '''_ctm"
}, "model": "planarartifice:pane/ctm_nsew_translucent" }],'''
f.write('''
{
"forge_marker": 1,
"defaults": {
"transform": "forge:default-item"
},
"variants": {
''' + q[:-1] + '''
}
}
''')
with open("./glass_rainbow.json", "w") as f:
f.write('''
{
"forge_marker": 1,
"defaults": {
"model": "planarartifice:cube_ctm_translucent",
"textures": {
"all": "planarartifice:blocks/glass/glass_rainbow",
"particle": "planarartifice:blocks/glass/glass_rainbow",
"connected_tex": "planarartifice:blocks/glass/glass_rainbow_ctm"
}
},
"variants": {
"normal": [{}],
"inventory": [{}]
}
}
''')
with open("./glass_pane_rainbow.json", "w") as f:
f.write('''
{
"forge_marker": 1,
"defaults": {
"transform": "forge:default-item",
"textures": {
"edge" : "planarartifice:blocks/glass/glass_pane_top_rainbow",
"pane" : "planarartifice:blocks/glass/glass_rainbow",
"particle": "planarartifice:blocks/glass/glass_rainbow",
"pane_ct": "planarartifice:blocks/glass/glass_rainbow_ctm"
}
},
"variants": {
"inventory": [{ "model": "planarartifice:pane/ctm_ew_translucent" }],
"east=false,north=false,south=false,west=false": [{ "model": "planarartifice:pane/post_translucent" }],
"east=false,north=true,south=false,west=false" : [{ "model": "planarartifice:pane/ctm_n_translucent" }],
"east=true,north=false,south=false,west=false" : [{ "model": "planarartifice:pane/ctm_e_translucent" }],
"east=false,north=false,south=true,west=false" : [{ "model": "planarartifice:pane/ctm_s_translucent" }],
"east=false,north=false,south=false,west=true" : [{ "model": "planarartifice:pane/ctm_w_translucent" }],
"east=true,north=true,south=false,west=false" : [{ "model": "planarartifice:pane/ctm_ne_translucent" }],
"east=true,north=false,south=true,west=false" : [{ "model": "planarartifice:pane/ctm_se_translucent" }],
"east=false,north=false,south=true,west=true" : [{ "model": "planarartifice:pane/ctm_sw_translucent" }],
"east=false,north=true,south=false,west=true" : [{ "model": "planarartifice:pane/ctm_nw_translucent" }],
"east=false,north=true,south=true,west=false" : [{ "model": "planarartifice:pane/ctm_ns_translucent" }],
"east=true,north=false,south=false,west=true" : [{ "model": "planarartifice:pane/ctm_ew_translucent" }],
"east=true,north=true,south=true,west=false" : [{ "model": "planarartifice:pane/ctm_nse_translucent" }],
"east=true,north=false,south=true,west=true" : [{ "model": "planarartifice:pane/ctm_sew_translucent" }],
"east=false,north=true,south=true,west=true" : [{ "model": "planarartifice:pane/ctm_nsw_translucent" }],
"east=true,north=true,south=false,west=true" : [{ "model": "planarartifice:pane/ctm_new_translucent" }],
"east=true,north=true,south=true,west=true" : [{ "model": "planarartifice:pane/ctm_nsew_translucent" }]
}
}
''')
with open("./glass_panel.json", "w") as f:
f.write('''
{
"forge_marker": 1,
"defaults": {
"model": "planarartifice:panel_ctm_cutout",
"textures": {
"all": "blocks/glass",
"particle": "blocks/glass",
"connected_tex": "blocks/glass_ctm"
}
},
"variants": {
"normal": [{}],
"inventory": [{}]
}
}
''')
with open("./glass_panel_rainbow.json", "w") as f:
f.write('''
{
"forge_marker": 1,
"defaults": {
"model": "planarartifice:panel_ctm_translucent",
"textures": {
"all": "planarartifice:blocks/glass/glass_rainbow",
"particle": "planarartifice:blocks/glass/glass_rainbow",
"connected_tex": "planarartifice:blocks/glass/glass_rainbow_ctm"
}
},
"variants": {
"normal": [{}],
"inventory": [{}]
}
}
''')
with open("./stained_glass_panel.json", "w") as f:
q = ''
for color in colors:
q += '''"color=''' + color + '''": [{
"textures": {
"all": "blocks/glass_''' + color + '''",
"particle": "blocks/glass_''' + color + '''",
"connected_tex": "blocks/glass_''' + color + '''_ctm"
}
}],'''
f.write('''
{
"forge_marker": 1,
"defaults": {
"model": "planarartifice:panel_ctm_translucent"
},
"variants": {
''' + q[:-1] + '''
}
}
''')
| 57.178824
| 157
| 0.489733
| 2,086
| 24,301
| 5.529243
| 0.043624
| 0.101092
| 0.179903
| 0.215883
| 0.966447
| 0.959598
| 0.950321
| 0.94061
| 0.927172
| 0.919456
| 0
| 0.001073
| 0.309452
| 24,301
| 425
| 158
| 57.178824
| 0.686252
| 0.001852
| 0
| 0.626238
| 0
| 0.158416
| 0.826344
| 0.382134
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
9c61623073bbb5cb280ce5b258a6754a2a079d27
| 107
|
py
|
Python
|
lib/ctlr/__init__.py
|
g0v/news-diff
|
62735843716159a242aef697571f364d61be788f
|
[
"MIT"
] | 10
|
2015-09-27T14:28:49.000Z
|
2022-03-31T05:42:25.000Z
|
lib/ctlr/__init__.py
|
g0v/news-diff
|
62735843716159a242aef697571f364d61be788f
|
[
"MIT"
] | null | null | null |
lib/ctlr/__init__.py
|
g0v/news-diff
|
62735843716159a242aef697571f364d61be788f
|
[
"MIT"
] | 4
|
2015-02-19T08:22:31.000Z
|
2019-11-13T04:18:41.000Z
|
# -*- coding: utf-8 -*-
#
# 將子類別提升至此層引用
#
from base import Ctlr_Base
from rss_2_0 import Ctlr_Base_RSS_2_0
| 15.285714
| 37
| 0.728972
| 19
| 107
| 3.736842
| 0.578947
| 0.28169
| 0.394366
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.055556
| 0.158879
| 107
| 6
| 38
| 17.833333
| 0.733333
| 0.308411
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
9c83a21d2eacee07cbbe7e274f0e29646cce1460
| 20,358
|
py
|
Python
|
broker.all_tenders/load_alltenders.py
|
lesiavl/Locust-Load-Tests
|
d76158a0ba55812010b22c53dfb028bb82619c70
|
[
"Apache-2.0"
] | null | null | null |
broker.all_tenders/load_alltenders.py
|
lesiavl/Locust-Load-Tests
|
d76158a0ba55812010b22c53dfb028bb82619c70
|
[
"Apache-2.0"
] | null | null | null |
broker.all_tenders/load_alltenders.py
|
lesiavl/Locust-Load-Tests
|
d76158a0ba55812010b22c53dfb028bb82619c70
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from locust import HttpLocust, TaskSet, task
from locust.clients import HttpSession
import requests
from time import sleep
import arrow
from datetime import timedelta
import logging
# http://www.accept-online.com.ua
requests_log = logging.getLogger("requests")
requests_log.setLevel(logging.DEBUG)
headers = {'Content-Type': 'application/json'}
login_owner_json = {'login': "butov@mail.ru", 'password': "111"}
login_provider1_json = {"login": "murashkot@bigmir.net", "password": "24091980"}
login_provider2_json = {"login": "s.titarchuk@a-express.com.ua", "password": "gfhjkm"}
create_tender_json = {"id": None, "$is_plan": False,
"$date": "{}".format(arrow.now()),
"items": [{"description": "BelowThreshold_LOAD",
"classification": {"scheme": "CPV","id": "03111000-2", "description": u"Насіння"},
"additionalClassifications": [{"scheme": u"ДКПП", "id": "01.11.1", "description": u"Пшениця"}],
"unit": {"code": "NMP", "name": u"пачок"},
"quantity": 130,
"deliveryAddress": {"streetAddress": u"01034, м.Київ, Шевченківський район, ВУЛИЦЯ ЯРОСЛАВІВ ВАЛ, будинок 38", "locality": u"киев", "region": u"киевская", "postalCode": "01034", "countryName": u"Україна"},
"deliveryDate": {"startDate": "{}".format(arrow.now() + timedelta(days=2)), "endDate": "{}".format(arrow.now() + timedelta(days=3))},
"id": "5f0d1afead1e44a8b6a45c2be35541a2"}],
"title": "BelowThreshold_LOAD", "description": "BelowThreshold_LOAD",
"procurementMethodType": "belowThreshold",
"value": {"amount": 10000, "currency": "UAH", "valueAddedTaxIncluded": False},
"minimalStep": {"amount": 50, "currency": "UAH", "valueAddedTaxIncluded": False},
"$status": "draft",
"tenderID": None,
"enquiryPeriod": {"startDate": "{}".format(arrow.now() + timedelta(minutes=2)),
"endDate": "{}".format(arrow.now() + timedelta(minutes=4))},
"tenderPeriod": {"startDate": "{}".format(arrow.now() + timedelta(minutes=6)),
"endDate": "{}".format(arrow.now() + timedelta(hours=1))},
"procuringEntity": {"kind": "general", "name": u"ТОВАРИСТВО З ОБМЕЖЕНОЮ ВІДПОВІДАЛЬНІСТЮ \"ПАУЕР ГРУП\"",
"address":
{"region": u"киевская", "locality": u"киев", "postalCode": "01034", "countryName": u"Україна", "streetAddress": u"01034, м.Київ, Шевченківський район, ВУЛИЦЯ ЯРОСЛАВІВ ВАЛ, будинок 38"},
"name_en": u"LTD \"Pauer group\"",
"identifier": {"id": "35592115", "scheme": "UA-EDR", "legalName": u"ТОВАРИСТВО З ОБМЕЖЕНОЮ ВІДПОВІДАЛЬНІСТЮ \"ПАУЕР ГРУП\"", "legalName_en": "LTD \"Pauer group\""},
"contactPoint": {"url": "http://conan.com", "name": u"Конан Варвар", "email": "barbarian@conan.com", "faxNumber": "333222333", "telephone": "222333222"}},
"mode": "test"}
bid_json = {"value": [{"value": 10000}],
"contact": {"name": u"гнглнглгн", "telephone": "25645652",
"email": "murashkot@bigmir.net", "url": "https://prozorro.shadowy.eu"},
"features": []}
bid_json_2 = {"value": [{"value": 10000}],
"contact": {"name": u"Сергей", "telephone": "0503580699",
"email": "s.titarchuk@a-express.com.ua", "url": "https://prozorro.shadowy.eu"},
"features": []}
login_uri = '/api/v1/user/login'
draft_tender_uri = '/api/1.0/fe/tenders-draft'
class WebsiteTasks(TaskSet):
@task
def create_tender(self):
with self.client.get('/myTender',
data=login_owner_json,
catch_response=True) as get_cookie:
if get_cookie.status_code == 200:
get_cookie.success()
else:
get_cookie.failure("Fail")
with self.client.post(login_uri,
data=login_owner_json,
headers=headers,
catch_response=True) as login_response:
if login_response.status_code == 200:
login_response.success()
else:
login_response.failure("Not authorized")
with self.client.get('/api/v1/tender/new/belowThreshold',
headers=headers,
catch_response=True) as new_tender_response:
if new_tender_response.status_code == 200:
new_tender_response.success()
else:
new_tender_response.failure("Fail")
with open('locust_results.txt', 'a') as f:
f.write('{} -------- STARTED\n'.format(arrow.now().format('DD.MM.YYYY HH:mm:ss')))
f.close()
with self.client.post('/api/v1/tender',
json=create_tender_json,
headers=headers,
catch_response=True) as tender_response:
if tender_response.status_code == 200:
tender_response.success()
else:
tender_response.failure("Fail")
api_tender_id = tender_response.json()['data']['$id']
with self.client.get('/api/v1/tender/-{}'.format(api_tender_id),
headers=headers,
catch_response=True) as get_tender_response:
if get_tender_response.status_code == 200:
get_tender_response.success()
else:
get_tender_response.failure("Fail")
submit_tender_json = {"id": None,
"$id": api_tender_id,
"mode": "test", "$date": "{}".format(arrow.now()),
"items": [{"id": "5f0d1afead1e44a8b6a45c2be35541a2",
"unit": {"code": "NMP", "name": u"пачок"},
"quantity": 130, "description": "BelowThreshold_LOAD",
"deliveryDate": {"endDate": "{}".format(arrow.now() + timedelta(days=3)),
"startDate": "{}".format(arrow.now() + timedelta(days=2))},
"classification": {"scheme": "CPV", "id": "03111000-2",
"description": u"Насіння"},
"additionalClassifications": [
{"scheme": u"ДКПП", "id": "01.11.1", "description": u"Пшениця"}],
"deliveryAddress": {
"streetAddress": u"01034, м.Київ, Шевченківський район, ВУЛИЦЯ ЯРОСЛАВІВ ВАЛ, будинок 38",
"locality": u"киев", "region": u"киевская", "postalCode": "01034",
"countryName": u"Україна"}}],
"title": "BelowThreshold_LOAD",
"value": {"amount": 10000, "currency": "UAH", "valueAddedTaxIncluded": False},
"$doc_id": None, "$status": "draft", "$is_plan": False, "tenderID": None,
"description": "BelowThreshold_LOAD",
"minimalStep": {"amount": 50, "currency": "UAH", "valueAddedTaxIncluded": False},
"enquiryPeriod": {"endDate": "{}".format(arrow.now() + timedelta(minutes=4)),
"startDate": "{}".format(arrow.now() + timedelta(minutes=2))},
"tenderPeriod": {"endDate": "{}".format(arrow.now() + timedelta(hours=1)),
"startDate": "{}".format(arrow.now() + timedelta(minutes=6))},
"procuringEntity": {"kind": "general",
"name": u"ТОВАРИСТВО З ОБМЕЖЕНОЮ ВІДПОВІДАЛЬНІСТЮ \"ПАУЕР ГРУП\"",
"address": {"region": u"киевская", "locality": u"киев",
"postalCode": "01034", "countryName": u"Україна",
"streetAddress": u"01034, м.Київ, Шевченківський район, ВУЛИЦЯ ЯРОСЛАВІВ ВАЛ, будинок 38"},
"name_en": u"LTD \"Pauer group\"",
"identifier": {"id": "35592115", "scheme": "UA-EDR",
"legalName": u"ТОВАРИСТВО З ОБМЕЖЕНОЮ ВІДПОВІДАЛЬНІСТЮ \"ПАУЕР ГРУП\"",
"legalName_en": u"LTD \"Pauer group\""},
"contactPoint": {"url": "http://conan.com", "name": u"Конан Варвар",
"email": "barbarian@conan.com",
"faxNumber": "333222333", "telephone": "222333222"}},
"procurementMethodType": "belowThreshold"}
with self.client.patch('/api/v1/tender/orig/{}/active.enquiries'.format(api_tender_id),
json=submit_tender_json,
headers=headers,
catch_response=True) as active_enquiries_response:
if active_enquiries_response.status_code == 200:
active_enquiries_response.success()
else:
active_enquiries_response.failure("Fail")
tender_id = active_enquiries_response.json()['data']['id']
doc_id = active_enquiries_response.json()['data']['$doc_id']
print tender_id, doc_id
with open('locust_results.txt', 'a') as f:
f.write(
'{} at {} —--------- FINISHED\n'.format(tender_id, arrow.now().format('DD.MM.YYYY HH:mm:ss')))
f.close()
@task
def create_tender_make_bid(self):
with self.client.get('/myTender',
data=login_owner_json,
catch_response=True) as get_cookie:
if get_cookie.status_code == 200:
get_cookie.success()
else:
get_cookie.failure("Fail")
with self.client.post(login_uri,
data=login_owner_json,
headers=headers,
catch_response=True) as login_response:
if login_response.status_code == 200:
login_response.success()
else:
login_response.failure("Not authorized")
with self.client.get('/api/v1/tender/new/belowThreshold',
headers=headers,
catch_response=True) as new_tender_response:
if new_tender_response.status_code == 200:
new_tender_response.success()
else:
new_tender_response.failure("Fail")
with open('locust_results.txt', 'a') as f:
f.write('{} -------- STARTED\n'.format(arrow.now().format('DD.MM.YYYY HH:mm:ss')))
f.close()
with self.client.post('/api/v1/tender',
json=create_tender_json,
headers=headers,
catch_response=True) as tender_response:
if tender_response.status_code == 200:
tender_response.success()
else:
tender_response.failure("Fail")
api_tender_id = tender_response.json()['data']['$id']
with self.client.get('/api/v1/tender/-{}'.format(api_tender_id),
headers=headers,
catch_response=True) as get_tender_response:
if get_tender_response.status_code == 200:
get_tender_response.success()
else:
get_tender_response.failure("Fail")
submit_tender_json = {"id": None,
"$id": api_tender_id,
"mode": "test", "$date": "{}".format(arrow.now()),
"items": [{"id": "5f0d1afead1e44a8b6a45c2be35541a2",
"unit": {"code": "NMP", "name": u"пачок"},
"quantity": 130, "description":"BelowThreshold_LOAD",
"deliveryDate":{"endDate": "{}".format(arrow.now() + timedelta(days=3)), "startDate": "{}".format(arrow.now() + timedelta(days=2))},
"classification": {"scheme": "CPV","id": "03111000-2", "description": u"Насіння"},
"additionalClassifications": [{"scheme": u"ДКПП", "id": "01.11.1", "description": u"Пшениця"}],
"deliveryAddress": {"streetAddress": u"01034, м.Київ, Шевченківський район, ВУЛИЦЯ ЯРОСЛАВІВ ВАЛ, будинок 38", "locality": u"киев", "region": u"киевская", "postalCode": "01034", "countryName": u"Україна"}}],
"title": "BelowThreshold_LOAD", "value": {"amount": 10000, "currency": "UAH", "valueAddedTaxIncluded": False},
"$doc_id": None, "$status": "draft", "$is_plan": False, "tenderID": None, "description": "BelowThreshold_LOAD",
"minimalStep": {"amount": 50, "currency": "UAH","valueAddedTaxIncluded": False},
"enquiryPeriod": {"endDate": "{}".format(arrow.now() + timedelta(minutes=4)),
"startDate": "{}".format(arrow.now() + timedelta(minutes=2))},
"tenderPeriod": {"endDate": "{}".format(arrow.now() + timedelta(hours=1)),
"startDate": "{}".format(arrow.now() + timedelta(minutes=6))},
"procuringEntity": {"kind": "general", "name": u"ТОВАРИСТВО З ОБМЕЖЕНОЮ ВІДПОВІДАЛЬНІСТЮ \"ПАУЕР ГРУП\"",
"address": {"region": u"киевская", "locality": u"киев", "postalCode": "01034", "countryName": u"Україна",
"streetAddress": u"01034, м.Київ, Шевченківський район, ВУЛИЦЯ ЯРОСЛАВІВ ВАЛ, будинок 38"},
"name_en": u"LTD \"Pauer group\"",
"identifier": {"id": "35592115", "scheme": "UA-EDR", "legalName": u"ТОВАРИСТВО З ОБМЕЖЕНОЮ ВІДПОВІДАЛЬНІСТЮ \"ПАУЕР ГРУП\"", "legalName_en": u"LTD \"Pauer group\""},
"contactPoint": {"url": "http://conan.com", "name": u"Конан Варвар","email": "barbarian@conan.com", "faxNumber": "333222333", "telephone": "222333222"}},
"procurementMethodType": "belowThreshold"}
with self.client.patch('/api/v1/tender/orig/{}/active.enquiries'.format(api_tender_id),
json=submit_tender_json,
headers=headers,
catch_response=True) as active_enquiries_response:
if active_enquiries_response.status_code == 200:
active_enquiries_response.success()
else:
active_enquiries_response.failure("Fail")
tender_id = active_enquiries_response.json()['data']['id']
doc_id = active_enquiries_response.json()['data']['$doc_id']
print tender_id, doc_id
with open('locust_results.txt', 'a') as f:
f.write(
'{} at {} —--------- FINISHED\n'.format(tender_id, arrow.now().format('DD.MM.YYYY HH:mm:ss')))
f.close()
get_tender_status = requests.get('https://lb.api-sandbox.openprocurement.org/api/2.3/tenders/{}'.format(tender_id))
tender_status = get_tender_status.json()['data']['status']
while tender_status != "active.tendering":
sleep(20)
r = requests.get('https://lb.api-sandbox.openprocurement.org/api/2.3/tenders/{}'.format(tender_id))
if r.status_code == 200:
tender_status = r.json()['data']['status']
print tender_status
with self.client.post(login_uri,
data=login_provider1_json,
headers=headers,
catch_response=True) as login_provider1_response:
if login_provider1_response.status_code == 200:
login_provider1_response.success()
else:
login_provider1_response.failure("Not authorized")
with self.client.get('/api/v1/user/contact/default',
headers=headers,
catch_response=True) as prepare_bid_response:
if prepare_bid_response.status_code == 200:
prepare_bid_response.success()
else:
prepare_bid_response.failure("Fail")
with self.client.post('/api/v1/tender/{}/bid'.format(doc_id),
json=bid_json,
headers=headers,
catch_response=True) as bid_draft_response:
if bid_draft_response.status_code == 200:
bid_draft_response.success()
else:
bid_draft_response.failure("Fail")
api_bid_id = bid_draft_response.json()['data']['$id']
with self.client.post('/api/v1/bid/{}/active'.format(api_bid_id),
headers=headers,
catch_response=True) as submit_bid_response:
if submit_bid_response.status_code == 200:
submit_bid_response.success()
else:
submit_bid_response.failure("Fail")
with self.client.post(login_uri,
data=login_provider2_json,
headers=headers,
catch_response=True) as login_provider2_response:
if login_provider2_response.status_code == 200:
login_provider2_response.success()
else:
login_provider2_response.failure("Not authorized")
with self.client.get('/api/v1/user/contact/default',
headers=headers,
catch_response=True) as prepare_bid2_response:
if prepare_bid2_response.status_code == 200:
prepare_bid2_response.success()
else:
prepare_bid2_response.failure("Fail")
with self.client.post('/api/v1/tender/{}/bid'.format(doc_id),
json=bid_json,
headers=headers,
catch_response=True) as bid2_draft_response:
if bid2_draft_response.status_code == 200:
bid2_draft_response.success()
else:
bid2_draft_response.failure("Fail")
api_bid_id_2 = bid_draft_response.json()['data']['$id']
with self.client.post('/api/v1/bid/{}/active'.format(api_bid_id_2),
headers=headers,
catch_response=True) as submit_bid2_response:
if submit_bid2_response.status_code == 200:
submit_bid2_response.success()
else:
submit_bid2_response.failure("Fail")
| 54.725806
| 247
| 0.490028
| 1,804
| 20,358
| 5.35643
| 0.133592
| 0.037669
| 0.033323
| 0.039325
| 0.838352
| 0.812377
| 0.795198
| 0.745835
| 0.728759
| 0.728759
| 0
| 0.034925
| 0.375528
| 20,358
| 371
| 248
| 54.873315
| 0.725006
| 0.003635
| 0
| 0.605351
| 0
| 0
| 0.236034
| 0.035748
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.010033
| 0.023411
| null | null | 0.010033
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
92c0ef3262cd87441bb1e9057bf815ecd9eb45d9
| 11,759
|
py
|
Python
|
nscl/nn/monet/scene_graph.py
|
mooninrain/NSCL-PyTorch-Release
|
ab38e91703e46b70efaa3afc05654f30483258a0
|
[
"MIT"
] | null | null | null |
nscl/nn/monet/scene_graph.py
|
mooninrain/NSCL-PyTorch-Release
|
ab38e91703e46b70efaa3afc05654f30483258a0
|
[
"MIT"
] | null | null | null |
nscl/nn/monet/scene_graph.py
|
mooninrain/NSCL-PyTorch-Release
|
ab38e91703e46b70efaa3afc05654f30483258a0
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : scene_graph.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 07/19/2018
#
# This file is part of NSCL-PyTorch.
# Distributed under terms of the MIT license.
"""
Scene Graph generation.
"""
import os
import torch
import torch.nn as nn
import jactorch
import jactorch.nn as jacnn
from . import monet
from . import module
from nscl.nn.utils import get_memory
__all__ = ['scene_graph_with_monet']
class scene_graph_with_monet_v1(nn.Module):
    """Scene-graph feature extractor driven by MONet slot masks.

    From a backbone feature map and the input image, MONet predicts one
    mask per object slot (ground-truth masks may be supplied instead).
    The masks gate the backbone features to produce one L2-normalized
    embedding per slot (objects) and per ordered slot pair (relations).
    """

    def __init__(self, feature_dim, output_dims, loss_type, pretrained_monet=None, _freeze_=False):
        super().__init__()
        self.loss_type = loss_type
        # Spatial sizes: backbone feature map (h_f, w_f), MONet working
        # resolution (h_m, w_m), resized input image (h_raw, w_raw), raw
        # ground-truth mask resolution (h_raw_raw, w_raw_raw).
        self.h_f, self.w_f = 16, 24
        self.h_m, self.w_m = 64, 64
        self.h_raw, self.w_raw = 256, 384
        self.h_raw_raw, self.w_raw_raw = 320, 480
        self.slot_num = 11
        self.feature_dim = feature_dim
        self.output_dims = output_dims
        self.image_resize = module.resize_module_cv2(h1=self.h_raw, w1=self.w_raw, h2=self.h_m, w2=self.w_m)
        self.monet_mask_extract = monet.MONet(pretrained_monet, _freeze_)
        self.true_mask_resize = module.resize_module_cv2(h1=self.h_raw_raw, w1=self.w_raw_raw, h2=self.h_m, w2=self.w_m)
        self.mask_resize = module.resize_module(h1=self.h_m, w1=self.w_m, h2=self.h_f, w2=self.w_f)
        self.context_feature_extract = nn.Conv2d(feature_dim, feature_dim, 1)
        self.relation_feature_extract = nn.Conv2d(feature_dim, feature_dim // 2 * 3, 1)
        self.object_feature_fuse = nn.Conv2d(feature_dim * 2, output_dims[1], 1)
        self.relation_feature_fuse = nn.Conv2d(feature_dim // 2 * 3 + output_dims[1] * 2, output_dims[2], 1)
        self.object_feature_fc = nn.Sequential(nn.ReLU(True), nn.Linear(output_dims[1] * self.h_f * self.w_f, output_dims[1]))
        self.relation_feature_fc = nn.Sequential(nn.ReLU(True), nn.Linear(output_dims[2] * self.h_f * self.w_f, output_dims[2]))
        self.reset_parameters()

    def reset_parameters(self):
        """Kaiming-initialize conv/linear weights and zero their biases."""
        for m in self.modules():
            if isinstance(m, (nn.Conv2d, nn.Linear)):
                nn.init.kaiming_normal_(m.weight.data)
                # BUGFIX: nn.Module stores Parameters in m._parameters, not
                # m.__dict__, so the original `'bias' in m.__dict__` check
                # was always False and biases were never zeroed.
                if m.bias is not None:
                    m.bias.data.zero_()

    def forward(self, input, image, masks=None):
        """Compute per-object and per-relation embeddings.

        Args:
            input: backbone feature map [batch, feature_dim, h_f, w_f].
            image: input image batch (resized internally before MONet).
            masks: optional ground-truth masks
                [batch, slot_num, h_raw_raw, w_raw_raw]; when given they
                replace the MONet prediction (MONet still runs so its
                internal state/monitor remains valid).

        Returns:
            list of length batch; each entry is
            [None, object_embeddings [slot_num, output_dims[1]],
             relation_embeddings [slot_num, slot_num, output_dims[2]]].
        """
        object_features = input  # [batch, feature_dim, h_f, w_f]
        context_features = self.context_feature_extract(input)  # [batch, feature_dim, h_f, w_f]
        relation_features = self.relation_feature_extract(input)  # [batch, feature_dim//2*3, h_f, w_f]
        masks_monet = self.monet_mask_extract(self.image_resize(image))  # [batch, slot_num, h_m, w_m]
        if masks is None:
            masks = masks_monet
        else:
            masks = self.true_mask_resize(masks.view(input.shape[0] * self.slot_num, 1, self.h_raw_raw, self.w_raw_raw))
            masks = masks.view(input.shape[0], self.slot_num, self.h_m, self.w_m)
            # Stash the ground-truth masks on the MONet module (read elsewhere).
            self.monet_mask_extract.m = masks
        if self.loss_type == 'separate':
            # Do not backpropagate the scene-graph loss into MONet.
            masks = masks.detach()
        masks = self.mask_resize(masks.view(-1, 1, self.h_m, self.w_m)).view(input.shape[0], -1, self.h_f, self.w_f)
        # All ordered (subject, object) slot pairs.
        sub_id, obj_id = jactorch.meshgrid(torch.arange(self.slot_num, dtype=torch.long, device=input.device), dim=0)
        sub_id, obj_id = sub_id.contiguous().view(-1), obj_id.contiguous().view(-1)
        masked_object_features = object_features.unsqueeze(1) * masks.unsqueeze(2)  # [batch, slot_num, feature_dim, h_f, w_f]
        masked_context_features = context_features.unsqueeze(1) * masks.unsqueeze(2)
        masked_relation_features = relation_features.unsqueeze(1) * (masks[:, sub_id] + masks[:, obj_id]).unsqueeze(2)
        x_context, y_context = masked_context_features.chunk(2, dim=2)
        combined_object_features = torch.cat([masked_object_features, x_context, y_context * masks.unsqueeze(2)], dim=2)
        combined_object_features = combined_object_features.view(-1, self.feature_dim * 2, self.h_f, self.w_f)
        combined_object_features = self.object_feature_fuse(combined_object_features)
        combined_object_features = combined_object_features.view(input.shape[0], self.slot_num, self.output_dims[1], self.h_f, self.w_f)
        x_relation, y_relation, z_relation = masked_relation_features.chunk(3, dim=2)
        combined_relation_features = torch.cat([combined_object_features[:, sub_id], combined_object_features[:, obj_id],
                                                x_relation, y_relation * masks[:, sub_id].unsqueeze(2), z_relation * masks[:, obj_id].unsqueeze(2)], dim=2)
        combined_relation_features = combined_relation_features.view(-1, self.feature_dim // 2 * 3 + self.output_dims[1] * 2, self.h_f, self.w_f)
        combined_relation_features = self.relation_feature_fuse(combined_relation_features)
        combined_object_features = combined_object_features.view(masks.shape[0] * masks.shape[1], -1)
        combined_object_features = self._norm(self.object_feature_fc(combined_object_features))
        combined_object_features = combined_object_features.view(masks.shape[0], masks.shape[1], -1)
        combined_relation_features = combined_relation_features.view(masks.shape[0] * masks.shape[1] ** 2, -1)
        # BUGFIX: relations must go through relation_feature_fc (its Linear
        # expects output_dims[2]*h_f*w_f inputs); the original mistakenly
        # reused object_feature_fc here, leaving relation_feature_fc unused
        # and mismatching dimensions unless output_dims[1] == output_dims[2].
        combined_relation_features = self._norm(self.relation_feature_fc(combined_relation_features))
        combined_relation_features = combined_relation_features.view(masks.shape[0], masks.shape[1], masks.shape[1], -1)
        outputs = []
        for i in range(input.shape[0]):
            outputs.append([
                None,
                combined_object_features[i],
                combined_relation_features[i]
            ])
        return outputs

    def get_monitor(self):
        """Expose the MONet sub-module's training monitor."""
        return self.monet_mask_extract.get_monitor()

    def _norm(self, x):
        # L2-normalize along the last dimension.
        return x / x.norm(2, dim=-1, keepdim=True)
class scene_graph_with_monet_v2(nn.Module):
    """Variant of scene_graph_with_monet_v1 that also embeds the masks.

    In addition to gating backbone features with MONet slot masks, the
    resized masks are embedded by a 1x1 conv (mask_feature_extract) and
    those mask features are concatenated into both the object and the
    relation fusion inputs.
    """

    def __init__(self, feature_dim, output_dims, loss_type, pretrained_monet=None, _freeze_=False):
        super().__init__()
        self.loss_type = loss_type
        # Spatial sizes: backbone feature map (h_f, w_f), MONet working
        # resolution (h_m, w_m), (h_fc, w_fc) unused in this class,
        # resized image (h_raw, w_raw), raw GT masks (h_raw_raw, w_raw_raw).
        self.h_f, self.w_f = 16, 24
        self.h_m, self.w_m = 64, 64
        self.h_fc, self.w_fc = 7, 7
        self.h_raw, self.w_raw = 256, 384
        self.h_raw_raw, self.w_raw_raw = 320, 480
        self.slot_num = 11
        self.mask_feature_dim = 32
        self.feature_dim = feature_dim
        self.output_dims = output_dims
        self.image_resize = module.resize_module_cv2(h1=self.h_raw, w1=self.w_raw, h2=self.h_m, w2=self.w_m)
        self.monet_mask_extract = monet.MONet(pretrained_monet, _freeze_)
        self.true_mask_resize = module.resize_module_cv2(h1=self.h_raw_raw, w1=self.w_raw_raw, h2=self.h_m, w2=self.w_m)
        self.mask_resize = module.resize_module(h1=self.h_m, w1=self.w_m, h2=self.h_f, w2=self.w_f)
        self.mask_feature_extract = nn.Conv2d(1, self.mask_feature_dim, 1)
        self.context_feature_extract = nn.Conv2d(feature_dim, feature_dim, 1)
        self.relation_feature_extract = nn.Conv2d(feature_dim, feature_dim // 2 * 3, 1)
        self.object_feature_fuse = nn.Conv2d(feature_dim * 2 + self.mask_feature_dim, output_dims[1], 1)
        self.relation_feature_fuse = nn.Conv2d(feature_dim // 2 * 3 + output_dims[1] * 2 + self.mask_feature_dim * 2, output_dims[2], 1)
        self.object_feature_fc = nn.Sequential(nn.ReLU(True), nn.Linear(output_dims[1] * self.h_f * self.w_f, output_dims[1]))
        self.relation_feature_fc = nn.Sequential(nn.ReLU(True), nn.Linear(output_dims[2] * self.h_f * self.w_f, output_dims[2]))
        self.reset_parameters()

    def reset_parameters(self):
        """Kaiming-initialize conv/linear weights and zero their biases."""
        for m in self.modules():
            if isinstance(m, (nn.Conv2d, nn.Linear)):
                nn.init.kaiming_normal_(m.weight.data)
                # BUGFIX: nn.Module stores Parameters in m._parameters, not
                # m.__dict__, so the original `'bias' in m.__dict__` check
                # was always False and biases were never zeroed.
                if m.bias is not None:
                    m.bias.data.zero_()

    def forward(self, input, image, masks=None):
        """Compute per-object and per-relation embeddings (with mask features).

        Args:
            input: backbone feature map [batch, feature_dim, h_f, w_f].
            image: input image batch (resized internally before MONet).
            masks: optional ground-truth masks; when given, MONet is skipped.

        Returns:
            list of length batch; each entry is
            [None, object_embeddings [slot_num, output_dims[1]],
             relation_embeddings [slot_num, slot_num, output_dims[2]]].
        """
        object_features = input  # [batch, feature_dim, h_f, w_f]
        context_features = self.context_feature_extract(input)  # [batch, feature_dim, h_f, w_f]
        relation_features = self.relation_feature_extract(input)  # [batch, feature_dim//2*3, h_f, w_f]
        if masks is None:
            masks = self.monet_mask_extract(self.image_resize(image))  # [batch, slot_num, h_m, w_m]
        else:
            # NOTE(review): unlike v1, this feeds [batch, slot_num, H, W]
            # (not [batch*slot_num, 1, H, W]) to true_mask_resize — confirm
            # the resize module accepts multi-channel input.
            masks = self.true_mask_resize(masks.view(-1, self.slot_num, self.h_raw_raw, self.w_raw_raw))
        if self.loss_type == 'separate':
            # Do not backpropagate the scene-graph loss into MONet.
            masks = masks.detach()
        masks = self.mask_resize(masks.view(input.shape[0] * self.slot_num, 1, self.h_m, self.w_m)).view(input.shape[0], self.slot_num, self.h_f, self.w_f)
        mask_features = self.mask_feature_extract(masks.view(input.shape[0] * self.slot_num, 1, self.h_f, self.w_f))
        mask_features = mask_features.view(input.shape[0], self.slot_num, self.mask_feature_dim, self.h_f, self.w_f)
        # All ordered (subject, object) slot pairs.
        sub_id, obj_id = jactorch.meshgrid(torch.arange(self.slot_num, dtype=torch.long, device=input.device), dim=0)
        sub_id, obj_id = sub_id.contiguous().view(-1), obj_id.contiguous().view(-1)
        masked_object_features = object_features.unsqueeze(1) * masks.unsqueeze(2)  # [batch, slot_num, feature_dim, h_f, w_f]
        masked_context_features = context_features.unsqueeze(1) * masks.unsqueeze(2)
        masked_relation_features = relation_features.unsqueeze(1) * (masks[:, sub_id] + masks[:, obj_id]).unsqueeze(2)
        x_context, y_context = masked_context_features.chunk(2, dim=2)
        combined_object_features = torch.cat([masked_object_features, x_context, y_context * masks.unsqueeze(2), mask_features], dim=2)
        combined_object_features = combined_object_features.view(input.shape[0] * self.slot_num, -1, self.h_f, self.w_f)
        combined_object_features = self.object_feature_fuse(combined_object_features)
        combined_object_features = combined_object_features.view(input.shape[0], self.slot_num, -1, self.h_f, self.w_f)
        x_relation, y_relation, z_relation = masked_relation_features.chunk(3, dim=2)
        combined_relation_features = torch.cat([combined_object_features[:, sub_id], combined_object_features[:, obj_id],
                                                x_relation, y_relation * masks[:, sub_id].unsqueeze(2), z_relation * masks[:, obj_id].unsqueeze(2),
                                                mask_features[:, sub_id], mask_features[:, obj_id]], dim=2)
        combined_relation_features = combined_relation_features.view(input.shape[0] * self.slot_num ** 2, -1, self.h_f, self.w_f)
        combined_relation_features = self.relation_feature_fuse(combined_relation_features)
        combined_object_features = combined_object_features.view(masks.shape[0] * masks.shape[1], -1)
        combined_object_features = self._norm(self.object_feature_fc(combined_object_features))
        combined_object_features = combined_object_features.view(masks.shape[0], masks.shape[1], -1)
        combined_relation_features = combined_relation_features.view(masks.shape[0] * masks.shape[1] ** 2, -1)
        # BUGFIX: relations must go through relation_feature_fc (its Linear
        # expects output_dims[2]*h_f*w_f inputs); the original mistakenly
        # reused object_feature_fc here, leaving relation_feature_fc unused
        # and mismatching dimensions unless output_dims[1] == output_dims[2].
        combined_relation_features = self._norm(self.relation_feature_fc(combined_relation_features))
        combined_relation_features = combined_relation_features.view(masks.shape[0], masks.shape[1], masks.shape[1], -1)
        outputs = []
        for i in range(input.shape[0]):
            outputs.append([
                None,
                combined_object_features[i],
                combined_relation_features[i]
            ])
        return outputs

    def get_monitor(self):
        """Expose the MONet sub-module's training monitor."""
        return self.monet_mask_extract.get_monitor()

    def _norm(self, x):
        # L2-normalize along the last dimension.
        return x / x.norm(2, dim=-1, keepdim=True)
| 50.252137
| 147
| 0.693171
| 1,794
| 11,759
| 4.214604
| 0.088071
| 0.026452
| 0.093109
| 0.021161
| 0.929639
| 0.910594
| 0.910462
| 0.902791
| 0.882555
| 0.847242
| 0
| 0.025052
| 0.185305
| 11,759
| 234
| 148
| 50.252137
| 0.764196
| 0.049834
| 0
| 0.764706
| 0
| 0
| 0.004844
| 0.001973
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.047059
| 0.023529
| 0.152941
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
92ea6e8b44bb31d67e99ba5a0d2e1e68b803aef5
| 5,753
|
py
|
Python
|
ws2812b/paoma.py
|
tonghs/RaspberryPi
|
efb1be9f5425705107a9ef0f65094157db61bbfe
|
[
"MIT"
] | null | null | null |
ws2812b/paoma.py
|
tonghs/RaspberryPi
|
efb1be9f5425705107a9ef0f65094157db61bbfe
|
[
"MIT"
] | null | null | null |
ws2812b/paoma.py
|
tonghs/RaspberryPi
|
efb1be9f5425705107a9ef0f65094157db61bbfe
|
[
"MIT"
] | null | null | null |
from rpi_ws281x import PixelStrip, Color
from signal import pause
import time
import numpy as np
# Panel geometry: 32 columns x 8 rows of LEDs wired as a single strip
# (see xy_to_arrayIndex for the serpentine column ordering).
COLUMN = 32
ROW = 8
# LED strip configuration:
LED_COUNT = ROW*COLUMN # Number of LED pixels.
LED_PIN = 18 # GPIO pin connected to the pixels (18 uses PWM!).
# LED_PIN = 10 # GPIO pin connected to the pixels (10 uses SPI /dev/spidev0.0).
LED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)
LED_DMA = 10 # DMA channel to use for generating signal (try 10)
LED_BRIGHTNESS = 10 # Set to 0 for darkest and 255 for brightest
# True to invert the signal (when using NPN transistor level shift)
LED_INVERT = False
LED_CHANNEL = 0 # set to '1' for GPIOs 13, 19, 41, 45 or 53
# Create NeoPixel object with appropriate configuration.
strip = PixelStrip(
    LED_COUNT,
    LED_PIN,
    LED_FREQ_HZ,
    LED_DMA,
    LED_INVERT,
    LED_BRIGHTNESS,
    LED_CHANNEL,
)
def xy_to_arrayIndex(x, y):
    """Map (column x, row y) to the LED index on the serpentine strip.

    Columns hold 8 LEDs each; even columns count rows forward, odd
    columns count them backward (zig-zag wiring).
    """
    base = x * 8
    if x % 2:
        # Odd column: rows run in reverse.
        return base + (8 - y - 1)
    return base + y
def drawBMP(matrix, width, height):
    """Render a height x width matrix of (r, g, b) tuples onto the panel."""
    for y in range(height):
        for x in range(width):
            r, g, b = matrix[y][x]
            strip.setPixelColor(xy_to_arrayIndex(x, y), Color(r, g, b))
    strip.show()
    # color-block matrix (debug)
    # print(matrix)
def move_matrix(matrix, step=1):
    """Return the matrix with every row rotated right by `step` columns."""
    shifted = np.roll(matrix, step, axis=1)
    return shifted.tolist()
def bmp_to_matrix(bmp, width, height):
    """Unpack a flat list of 32-bit pixels into rows of channel tuples.

    The top three bytes of each pixel become the (r, g, b) tuple
    (bits 31-24, 23-16, 15-8); the lowest byte is discarded.
    """
    return [
        [
            (
                (bmp[y * width + x] >> 24) & 0xFF,
                (bmp[y * width + x] >> 16) & 0xFF,
                (bmp[y * width + x] >> 8) & 0xFF,
            )
            for x in range(width)
        ]
        for y in range(height)
    ]
# 8x32 bitmap (row-major, 32 pixels per row) spelling "YOU" in pixels whose
# top byte is 0xff (rendered as the first channel by bmp_to_matrix).
youtubeBMP = [
    0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
    0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0x00000000, 0x00000000, 0xff000000, 0x00000000, 0x00000000, 0x00000000, 0xff000000, 0x00000000, 0x00000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
    0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xff000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xff000000, 0x00000000, 0x00000000, 0x00000000, 0xff000000, 0x00000000, 0x00000000, 0xff000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
    0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xff000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0x00000000, 0x00000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
    0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xff000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xff000000, 0x00000000, 0x00000000, 0x00000000, 0xff000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xff000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
    0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xff000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xff000000, 0x00000000, 0x00000000, 0x00000000, 0xff000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xff000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
    0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xff000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xff000000, 0x00000000, 0x00000000, 0x00000000, 0xff000000, 0x00000000, 0x00000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
    0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000
]
# Define functions which animate LEDs in various ways.
def colorWipe(strip, color, wait_ms=20):
    """Wipe color across display a pixel at a time."""
    delay = wait_ms / 1000.0
    for idx in range(strip.numPixels()):
        strip.setPixelColor(idx, color)
        strip.show()
        time.sleep(delay)
def init():
    """Initialize the LED strip driver; call once before any drawing."""
    # Initialize the library (must be called once before other functions).
    strip.begin()
def main():
    """Scroll the bitmap across the panel forever, one column per frame."""
    matrix = bmp_to_matrix(youtubeBMP, COLUMN, ROW)
    while True:
        drawBMP(matrix, COLUMN, ROW)
        time.sleep(100/1000)  # ~100 ms per frame (about 10 fps)
        matrix = move_matrix(matrix)
# Main program logic follows:
if __name__ == "__main__":
    try:
        init()
        main()
    except KeyboardInterrupt:
        # On Ctrl-C, blank every LED before exiting.
        for i in range(LED_COUNT):
            strip.setPixelColor(i, Color(0, 0, 0))
        strip.show()
| 51.828829
| 388
| 0.73353
| 639
| 5,753
| 6.524257
| 0.241002
| 0.921084
| 1.208923
| 1.429599
| 0.64284
| 0.64284
| 0.62557
| 0.614056
| 0.614056
| 0.614056
| 0
| 0.485769
| 0.175561
| 5,753
| 110
| 389
| 52.3
| 0.393211
| 0.12098
| 0
| 0.063291
| 0
| 0
| 0.001589
| 0
| 0
| 0
| 0.510723
| 0
| 0
| 1
| 0.088608
| false
| 0
| 0.050633
| 0.012658
| 0.189873
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
132660a23820a0455d8a72afb53d43aa1276a3ce
| 116
|
py
|
Python
|
build/api/utils.py
|
m-housh/cnx_base_microservice
|
d72ad4a480078730729745d61744601fdcb3a72a
|
[
"MIT"
] | 1
|
2017-03-28T08:53:57.000Z
|
2017-03-28T08:53:57.000Z
|
build/api/utils.py
|
m-housh/cnx_base_microservice
|
d72ad4a480078730729745d61744601fdcb3a72a
|
[
"MIT"
] | null | null | null |
build/api/utils.py
|
m-housh/cnx_base_microservice
|
d72ad4a480078730729745d61744601fdcb3a72a
|
[
"MIT"
] | null | null | null |
from .db import DbModel, engine
from .orm import *
def create_all():
    """Create every table registered on DbModel's metadata using `engine`."""
    DbModel.metadata.create_all(bind=engine)
| 16.571429
| 44
| 0.741379
| 17
| 116
| 4.941176
| 0.647059
| 0.214286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.155172
| 116
| 6
| 45
| 19.333333
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
13266bb3cfffcaa367c11686ceb6b3c3caa6625a
| 328
|
py
|
Python
|
icevision/utils/__init__.py
|
bluseking/-first-agnostic-computer-vision-framework-to-offer-a-curated-collection-with-hundreds-of-high-qualit
|
2d91eacfab7fcaf09c93352f1e7816ccb2c252b9
|
[
"Apache-2.0"
] | null | null | null |
icevision/utils/__init__.py
|
bluseking/-first-agnostic-computer-vision-framework-to-offer-a-curated-collection-with-hundreds-of-high-qualit
|
2d91eacfab7fcaf09c93352f1e7816ccb2c252b9
|
[
"Apache-2.0"
] | null | null | null |
icevision/utils/__init__.py
|
bluseking/-first-agnostic-computer-vision-framework-to-offer-a-curated-collection-with-hundreds-of-high-qualit
|
2d91eacfab7fcaf09c93352f1e7816ccb2c252b9
|
[
"Apache-2.0"
] | 1
|
2021-05-29T12:21:37.000Z
|
2021-05-29T12:21:37.000Z
|
from icevision.utils.utils import *
from icevision.utils.torch_utils import *
from icevision.utils.imageio import *
from icevision.utils.get_files import *
from icevision.utils.download_utils import *
from icevision.utils.data_dir import *
from icevision.utils.capture_stdout import *
from icevision.utils.logger_utils import *
| 36.444444
| 44
| 0.829268
| 46
| 328
| 5.782609
| 0.304348
| 0.390977
| 0.541353
| 0.631579
| 0.327068
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097561
| 328
| 8
| 45
| 41
| 0.898649
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
132fa32135959e0dcd3abb5e607254a1448a99c0
| 164
|
py
|
Python
|
app/route/route.py
|
Pondok-Diya/bank-soal-backend
|
331b9be21594b939118a699e203d0581d79ca6a0
|
[
"MIT"
] | null | null | null |
app/route/route.py
|
Pondok-Diya/bank-soal-backend
|
331b9be21594b939118a699e203d0581d79ca6a0
|
[
"MIT"
] | null | null | null |
app/route/route.py
|
Pondok-Diya/bank-soal-backend
|
331b9be21594b939118a699e203d0581d79ca6a0
|
[
"MIT"
] | 1
|
2019-11-27T01:31:29.000Z
|
2019-11-27T01:31:29.000Z
|
from app import api
from app.controller.soal import Soal
from app.controller.soal import Jawab
# Register the question (Soal) and answer (Jawab) resources on the shared
# api object (presumably Flask-RESTful — verify against app/__init__.py).
api.add_resource(Soal,'/soal')
api.add_resource(Jawab,'/soal/jawab')
| 27.333333
| 37
| 0.79878
| 27
| 164
| 4.777778
| 0.333333
| 0.162791
| 0.263566
| 0.325581
| 0.418605
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085366
| 164
| 6
| 38
| 27.333333
| 0.86
| 0
| 0
| 0
| 0
| 0
| 0.09697
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
132faec196560b9eb89a62cda0ff62133bf015fb
| 21,577
|
py
|
Python
|
smc_no_gui/src/twitter_tweets.py
|
joaopfonseca/social_media_crawler
|
3abce6c850d203805f705e82612d98abf2eb4a47
|
[
"MIT"
] | 1
|
2021-11-14T18:45:01.000Z
|
2021-11-14T18:45:01.000Z
|
smc_no_gui/src/twitter_tweets.py
|
joaopfonseca/social_media_crawler
|
3abce6c850d203805f705e82612d98abf2eb4a47
|
[
"MIT"
] | 2
|
2021-06-08T19:27:42.000Z
|
2021-06-08T19:32:43.000Z
|
smc_no_gui/src/twitter_tweets.py
|
joaopfonseca/social_media_crawler
|
3abce6c850d203805f705e82612d98abf2eb4a47
|
[
"MIT"
] | null | null | null |
import tweepy
import csv
import datetime
import pandas as pd
import os
def twitter_csvcreatefile_header(keyword):
    """Create data/<keyword>/<keyword>_tweets.csv and write its header row.

    Overwrites any existing file for the keyword ('w' mode). Columns are
    the flattened tweet fields used by update_tweets/get_tweets, plus a
    trailing crawl-timestamp column.
    """
    directory = 'data/' + keyword
    os.makedirs(directory, exist_ok=True)
    # newline='' is the documented open() mode for the csv module; it
    # prevents spurious blank lines on platforms that translate newlines.
    with open(directory + '/%s_tweets.csv' % keyword, 'w', newline='') as file:
        w = csv.writer(file)
        w.writerow([
            'contributors', 'coordinates', 'created_at',
            'entities_hashtags', 'entities_symbols', 'entities_urls', 'entities_user_mentions',
            'favorite_count', 'favorited', 'geo', 'id', 'id_str',
            'in_reply_to_screen_name', 'in_reply_to_status_id', 'in_reply_to_status_id_str',
            'in_reply_to_user_id_iso_language_code', 'in_reply_to_user_id_str_result_type',
            'is_quote_status', 'lang',
            'metadata_iso_language_code', 'metadata_result_type',
            'place', 'retweet_count', 'retweeted',
            'retweeted_status_contributors', 'retweeted_status_coordinates', 'retweeted_status_created_at',
            'retweeted_status_entities', 'retweeted_status_favorite_count', 'retweeted_status_favorited',
            'retweeted_status_geo', 'retweeted_status_id', 'retweeted_status_id_str',
            'retweeted_status_in_reply_to_screen_name', 'retweeted_status_in_reply_to_status_id',
            'retweeted_status_in_reply_to_status_id_str', 'retweeted_status_in_reply_to_user_id',
            'retweeted_status_in_reply_to_user_id_str', 'retweeted_status_is_quote_status',
            'retweeted_status_lang', 'retweeted_status_metadata', 'retweeted_status_place',
            'retweeted_status_retweet_count', 'retweeted_status_retweeted', 'retweeted_status_source',
            'retweeted_status_text', 'retweeted_status_truncated', 'retweeted_status_user',
            'source', 'text', 'truncated',
            'user_contributors_enabled', 'user_created_at', 'user_default_profile',
            'user_default_profile_image', 'user_description', 'user_favourites_count',
            'user_follow_request_sent', 'user_followers_count', 'user_following',
            'user_friends_count', 'user_geo_enabled', 'user_has_extended_profile',
            'user_id', 'user_id_str', 'user_is_translation_enabled', 'user_is_translator',
            'user_lang', 'user_listed_count', 'user_location', 'user_name', 'user_notifications',
            'user_profile_background_color', 'user_profile_background_image_url',
            'user_profile_background_image_url_https', 'user_profile_background_tile',
            'user_profile_banner_url', 'user_profile_image_url', 'user_profile_image_url_https',
            'user_profile_link_color', 'user_profile_sidebar_border_color',
            'user_profile_sidebar_fill_color', 'user_profile_text_color',
            'user_profile_use_background_image', 'user_protected', 'user_screen_name',
            'user_statuses_count', 'user_time_zone', 'user_translator_type',
            'user_url', 'user_utc_offset', 'user_verified',
            'time_crawled'
        ])
def update_tweets(keyword, twitter_creds):
    """Crawl up to 2000 tweets matching `keyword`, append them to the CSV,
    then deduplicate the file by tweet id (keeping the latest crawl).

    Args:
        keyword: search term; rows go to data/<keyword>/<keyword>_tweets.csv.
        twitter_creds: dict with API_KEY, API_SECRET, ACCESS_TOKEN,
            ACCESS_TOKEN_SECRET.
    """
    def if_empty(json_input):
        # Identity passthrough ('' maps to ''); kept for compatibility.
        if json_input == '':
            return ''
        else:
            return json_input

    def json_check_keys(jsono):
        # Debug helper: dump the keys of a tweet JSON payload.
        print(jsono.keys())

    # Top-level tweet fields, in CSV column order.
    jsono = ['contributors', 'coordinates', 'created_at', 'entities', 'favorite_count', 'favorited',
             'geo', 'id', 'id_str', 'in_reply_to_screen_name', 'in_reply_to_status_id',
             'in_reply_to_status_id_str', 'in_reply_to_user_id', 'in_reply_to_user_id_str',
             'is_quote_status', 'lang', 'metadata', 'place', 'retweet_count', 'retweeted',
             'retweeted_status', 'source', 'text', 'truncated', 'user']
    # Fields whose value is itself a mapping, flattened via `subfields`.
    fields_with_subfields = ['entities', 'in_reply_to_user_id', 'in_reply_to_user_id_str',
                             'metadata', 'retweeted_status', 'user']
    subfields = {
        'entities': ['hashtags', 'symbols', 'urls', 'user_mentions'],
        'in_reply_to_user_id': ['iso_language_code'],
        'in_reply_to_user_id_str': ['result_type'],
        'metadata': ['iso_language_code', 'result_type'],
        'retweeted_status': ['contributors', 'coordinates', 'created_at', 'entities',
                             'favorite_count', 'favorited', 'geo', 'id', 'id_str',
                             'in_reply_to_screen_name', 'in_reply_to_status_id',
                             'in_reply_to_status_id_str', 'in_reply_to_user_id',
                             'in_reply_to_user_id_str', 'is_quote_status', 'lang',
                             'metadata', 'place', 'retweet_count', 'retweeted',
                             'source', 'text', 'truncated', 'user'],
        'user': ['contributors_enabled', 'created_at', 'default_profile',
                 'default_profile_image', 'description', 'favourites_count',
                 'follow_request_sent', 'followers_count', 'following', 'friends_count',
                 'geo_enabled', 'has_extended_profile', 'id', 'id_str',
                 'is_translation_enabled', 'is_translator', 'lang', 'listed_count', 'location',
                 'name', 'notifications', 'profile_background_color',
                 'profile_background_image_url', 'profile_background_image_url_https',
                 'profile_background_tile', 'profile_banner_url', 'profile_image_url',
                 'profile_image_url_https', 'profile_link_color',
                 'profile_sidebar_border_color', 'profile_sidebar_fill_color',
                 'profile_text_color', 'profile_use_background_image', 'protected',
                 'screen_name', 'statuses_count', 'time_zone', 'translator_type', 'url',
                 'utc_offset', 'verified'],
    }
    API_KEY = twitter_creds['API_KEY']
    API_SECRET = twitter_creds['API_SECRET']
    ACCESS_TOKEN = twitter_creds['ACCESS_TOKEN']
    ACCESS_TOKEN_SECRET = twitter_creds['ACCESS_TOKEN_SECRET']
    auth = tweepy.OAuthHandler(API_KEY, API_SECRET)
    auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
    api = tweepy.API(auth)
    max_tweets = 2000
    print('Processing %s Tweets containing the term \"%s\": %s' % (max_tweets, keyword, datetime.datetime.now()))
    try:
        searched_tweets = [status for status in tweepy.Cursor(api.search, q=keyword).items(max_tweets)]
        directory = 'data/' + keyword
        os.makedirs(directory, exist_ok=True)
        with open(directory + '/%s_tweets.csv' % keyword, 'a') as file:
            i = 0
            w = csv.writer(file)
            for tweet in searched_tweets:
                i = i + 1
                data_row = []
                for field in jsono:
                    if field in tweet._json.keys():
                        if field in fields_with_subfields:
                            for subfield in subfields[field]:
                                try:
                                    data_row.append(tweet._json[field][subfield])
                                except Exception:
                                    # Missing/unsubscriptable subfield -> blank cell.
                                    data_row.append('')
                        else:
                            # BUGFIX: originally if_empty(data_row.append(...)),
                            # which called the helper on append's None result.
                            data_row.append(if_empty(tweet._json[field]))
                    else:
                        data_row.append('')
                if 'retweeted_status' not in tweet._json.keys():
                    # Pad the 23 retweeted_status_* columns so rows stay aligned
                    # (replaces 23 copy-pasted insert calls).
                    for _ in range(23):
                        data_row.insert(25, '')
                data_row.append(datetime.datetime.now())
                w.writerow(data_row)
        # Deduplicate by tweet id, keeping the most recently crawled copy.
        df = pd.read_csv(directory + '/%s_tweets.csv' % keyword)
        df['id'] = df['id'].apply(str)
        df.sort_values(['time_crawled'], ascending=False).drop_duplicates(['id'], keep='first').sort_values(['created_at'], ascending=False).to_csv(directory + '/%s_tweets.csv' % keyword, index=False)
        print('Done! %s Tweets processed: %s' % (i, datetime.datetime.now()))
    except Exception:
        # BUGFIX: was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit. Message kept verbatim for compatibility.
        print('Failed to send request: Read timed out.')
def get_tweets(keyword, twitter_creds):
    """Crawl up to 2000 tweets matching `keyword` and append them to the CSV.

    Like update_tweets but without the try/except wrapper and without the
    post-crawl deduplication pass.

    Args:
        keyword: search term; rows go to data/<keyword>/<keyword>_tweets.csv.
        twitter_creds: dict with API_KEY, API_SECRET, ACCESS_TOKEN,
            ACCESS_TOKEN_SECRET.
    """
    def if_empty(json_input):
        # Identity passthrough ('' maps to ''); kept for compatibility.
        if json_input == '':
            return ''
        else:
            return json_input

    def json_check_keys(jsono):
        # Debug helper: dump the keys of a tweet JSON payload.
        print(jsono.keys())

    # Top-level tweet fields, in CSV column order.
    jsono = ['contributors', 'coordinates', 'created_at', 'entities', 'favorite_count', 'favorited',
             'geo', 'id', 'id_str', 'in_reply_to_screen_name', 'in_reply_to_status_id',
             'in_reply_to_status_id_str', 'in_reply_to_user_id', 'in_reply_to_user_id_str',
             'is_quote_status', 'lang', 'metadata', 'place', 'retweet_count', 'retweeted',
             'retweeted_status', 'source', 'text', 'truncated', 'user']
    # Fields whose value is itself a mapping, flattened via `subfields`.
    fields_with_subfields = ['entities', 'in_reply_to_user_id', 'in_reply_to_user_id_str',
                             'metadata', 'retweeted_status', 'user']
    subfields = {
        'entities': ['hashtags', 'symbols', 'urls', 'user_mentions'],
        'in_reply_to_user_id': ['iso_language_code'],
        'in_reply_to_user_id_str': ['result_type'],
        'metadata': ['iso_language_code', 'result_type'],
        'retweeted_status': ['contributors', 'coordinates', 'created_at', 'entities',
                             'favorite_count', 'favorited', 'geo', 'id', 'id_str',
                             'in_reply_to_screen_name', 'in_reply_to_status_id',
                             'in_reply_to_status_id_str', 'in_reply_to_user_id',
                             'in_reply_to_user_id_str', 'is_quote_status', 'lang',
                             'metadata', 'place', 'retweet_count', 'retweeted',
                             'source', 'text', 'truncated', 'user'],
        'user': ['contributors_enabled', 'created_at', 'default_profile',
                 'default_profile_image', 'description', 'favourites_count',
                 'follow_request_sent', 'followers_count', 'following', 'friends_count',
                 'geo_enabled', 'has_extended_profile', 'id', 'id_str',
                 'is_translation_enabled', 'is_translator', 'lang', 'listed_count', 'location',
                 'name', 'notifications', 'profile_background_color',
                 'profile_background_image_url', 'profile_background_image_url_https',
                 'profile_background_tile', 'profile_banner_url', 'profile_image_url',
                 'profile_image_url_https', 'profile_link_color',
                 'profile_sidebar_border_color', 'profile_sidebar_fill_color',
                 'profile_text_color', 'profile_use_background_image', 'protected',
                 'screen_name', 'statuses_count', 'time_zone', 'translator_type', 'url',
                 'utc_offset', 'verified'],
    }
    API_KEY = twitter_creds['API_KEY']
    API_SECRET = twitter_creds['API_SECRET']
    ACCESS_TOKEN = twitter_creds['ACCESS_TOKEN']
    ACCESS_TOKEN_SECRET = twitter_creds['ACCESS_TOKEN_SECRET']
    auth = tweepy.OAuthHandler(API_KEY, API_SECRET)
    auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
    api = tweepy.API(auth)
    max_tweets = 2000
    print('Processing %s Tweets containing the term \"%s\": %s' % (max_tweets, keyword, datetime.datetime.now()))
    searched_tweets = [status for status in tweepy.Cursor(api.search, q=keyword).items(max_tweets)]
    directory = 'data/' + keyword
    os.makedirs(directory, exist_ok=True)
    with open(directory + '/%s_tweets.csv' % keyword, 'a') as file:
        i = 0
        w = csv.writer(file)
        for tweet in searched_tweets:
            i = i + 1
            data_row = []
            for field in jsono:
                if field in tweet._json.keys():
                    if field in fields_with_subfields:
                        for subfield in subfields[field]:
                            try:
                                data_row.append(tweet._json[field][subfield])
                            except Exception:
                                # Missing/unsubscriptable subfield -> blank cell.
                                data_row.append('')
                    else:
                        # BUGFIX: originally if_empty(data_row.append(...)),
                        # which called the helper on append's None result.
                        data_row.append(if_empty(tweet._json[field]))
                else:
                    data_row.append('')
            if 'retweeted_status' not in tweet._json.keys():
                # Pad the 23 retweeted_status_* columns so rows stay aligned
                # (replaces 23 copy-pasted insert calls).
                for _ in range(23):
                    data_row.insert(25, '')
            data_row.append(datetime.datetime.now())
            w.writerow(data_row)
    print('Done! %s Tweets processed: %s' % (i, datetime.datetime.now()))
def update_tweets_gui(keyword, twitter_creds):
    """Search Twitter for *keyword* and update data/<keyword>/<keyword>_tweets.csv.

    Fetches up to 2000 recent tweets, appends one CSV row per tweet, then
    rewrites the file with duplicates (by tweet id) removed, keeping the
    most recently crawled copy of each.

    Parameters
    ----------
    keyword : str
        Search term for the Twitter API; also names the output directory/file.
    twitter_creds : dict
        Must contain 'API_KEY', 'API_SECRET', 'ACCESS_TOKEN' and
        'ACCESS_TOKEN_SECRET'.

    Errors are reported with print() rather than raised, so a failure does
    not take down the calling GUI.
    """
    # Top-level tweet JSON fields, in CSV column order.
    jsono = ['contributors','coordinates','created_at','entities','favorite_count','favorited',
             'geo','id','id_str','in_reply_to_screen_name','in_reply_to_status_id',
             'in_reply_to_status_id_str','in_reply_to_user_id','in_reply_to_user_id_str',
             'is_quote_status','lang','metadata','place','retweet_count','retweeted',
             'retweeted_status','source','text','truncated','user']
    # Fields whose value is itself an object: each listed subfield gets its own
    # column instead of the raw field.
    fields_with_subfields = ['entities','in_reply_to_user_id','in_reply_to_user_id_str',
                             'metadata','retweeted_status','user']
    # NOTE(review): the subfield lists for 'in_reply_to_user_id' and
    # 'in_reply_to_user_id_str' look like copy-paste from 'metadata' — those
    # API fields are plain ids, so the inner lookup will fail and yield '' —
    # preserved as-is; confirm against the expected CSV schema.
    subfields = {'entities':['hashtags','symbols','urls','user_mentions'],
                 'in_reply_to_user_id':['iso_language_code'],
                 'in_reply_to_user_id_str':['result_type'],
                 'metadata':['iso_language_code','result_type'],
                 'retweeted_status': ['contributors','coordinates','created_at','entities',
                                      'favorite_count','favorited','geo','id','id_str',
                                      'in_reply_to_screen_name','in_reply_to_status_id',
                                      'in_reply_to_status_id_str','in_reply_to_user_id',
                                      'in_reply_to_user_id_str','is_quote_status','lang',
                                      'metadata','place','retweet_count','retweeted',
                                      'source','text','truncated','user'],
                 'user':['contributors_enabled','created_at','default_profile',
                         'default_profile_image','description','favourites_count',
                         'follow_request_sent','followers_count','following','friends_count',
                         'geo_enabled','has_extended_profile','id','id_str',
                         'is_translation_enabled','is_translator','lang','listed_count','location',
                         'name','notifications','profile_background_color',
                         'profile_background_image_url','profile_background_image_url_https',
                         'profile_background_tile','profile_banner_url','profile_image_url',
                         'profile_image_url_https','profile_link_color',
                         'profile_sidebar_border_color','profile_sidebar_fill_color',
                         'profile_text_color','profile_use_background_image','protected',
                         'screen_name','statuses_count','time_zone','translator_type','url',
                         'utc_offset','verified']}
    API_KEY = twitter_creds['API_KEY']
    API_SECRET = twitter_creds['API_SECRET']
    ACCESS_TOKEN = twitter_creds['ACCESS_TOKEN']
    ACCESS_TOKEN_SECRET = twitter_creds['ACCESS_TOKEN_SECRET']
    directory = 'data/' + keyword
    if not os.path.exists(directory):
        os.makedirs(directory)
    auth = tweepy.OAuthHandler(API_KEY, API_SECRET)
    auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
    api = tweepy.API(auth)
    max_tweets = 2000
    print('Processing %s Tweets containing the term \"%s\": %s' % (max_tweets, keyword, datetime.datetime.now()))
    try:
        searched_tweets = [status for status in tweepy.Cursor(api.search, q=keyword).items(max_tweets)]
        processed = 0
        # Append mode keeps previously crawled rows; duplicates are dropped below.
        with open(directory + '/%s_tweets.csv' % keyword, 'a') as csv_file:
            writer = csv.writer(csv_file)
            for tweet in searched_tweets:
                processed += 1
                payload = tweet._json
                data_row = []
                for field in jsono:
                    if field not in payload:
                        data_row.append('')
                    elif field in fields_with_subfields:
                        for subfield in subfields[field]:
                            # Missing subfield, or a value that is not a dict,
                            # becomes a blank column.
                            try:
                                data_row.append(payload[field][subfield])
                            except (KeyError, TypeError):
                                data_row.append('')
                    else:
                        # BUG FIX: the original wrote
                        # if_empty(data_row.append(...)) — list.append returns
                        # None, so if_empty was always called on None and its
                        # result discarded. Append the value directly.
                        data_row.append(payload[field])
                if 'retweeted_status' not in payload:
                    # Pad so every row has the same width: the loop above added
                    # one '' for the absent field; 23 more blanks complete the
                    # 24 retweeted_status subfield columns starting at index 25.
                    for _ in range(23):
                        data_row.insert(25, '')
                data_row.append(datetime.datetime.now())  # crawl timestamp column
                writer.writerow(data_row)
        # Deduplicate by tweet id, keeping the most recently crawled copy, and
        # rewrite sorted by creation time (newest first).
        # NOTE(review): relies on the CSV having 'time_crawled'/'created_at'/'id'
        # headers, presumably written when the file was first created elsewhere.
        df = pd.read_csv(directory + '/%s_tweets.csv' % keyword)
        df['id'] = df['id'].apply(str)
        df.sort_values(['time_crawled'], ascending=False).drop_duplicates(['id'], keep='first').sort_values(['created_at'], ascending=False).to_csv(directory + '/%s_tweets.csv' % keyword, index=False)
        print('Done! %s Tweets processed: %s' % (processed, datetime.datetime.now()))
    except Exception as e:
        # Best-effort GUI path: report the failure instead of crashing the UI.
        print(e)
| 46.006397
| 198
| 0.520693
| 2,152
| 21,577
| 4.813662
| 0.08039
| 0.060817
| 0.086591
| 0.099913
| 0.869968
| 0.850082
| 0.845835
| 0.836953
| 0.830196
| 0.830196
| 0
| 0.011343
| 0.362608
| 21,577
| 468
| 199
| 46.104701
| 0.741875
| 0
| 0
| 0.740291
| 0
| 0
| 0.315985
| 0.128609
| 0
| 0
| 0
| 0
| 0
| 1
| 0.024272
| false
| 0
| 0.012136
| 0
| 0.050971
| 0.026699
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
13a42b9b27d4e8edf9a258a5eb14414191237f48
| 87
|
py
|
Python
|
turf/point_to_line_distance/__init__.py
|
malroc/pyturf
|
c89b6ea7094bd5ca26cf589d9dcd15bd819d82e9
|
[
"MIT"
] | 11
|
2020-08-26T11:04:55.000Z
|
2022-01-26T14:53:10.000Z
|
turf/point_to_line_distance/__init__.py
|
malroc/pyturf
|
c89b6ea7094bd5ca26cf589d9dcd15bd819d82e9
|
[
"MIT"
] | 36
|
2020-04-09T16:49:05.000Z
|
2020-06-01T14:39:37.000Z
|
turf/point_to_line_distance/__init__.py
|
malroc/pyturf
|
c89b6ea7094bd5ca26cf589d9dcd15bd819d82e9
|
[
"MIT"
] | 5
|
2020-07-30T23:37:35.000Z
|
2021-08-24T08:10:28.000Z
|
from turf.point_to_line_distance._point_to_line_distance import point_to_line_distance
| 43.5
| 86
| 0.931034
| 15
| 87
| 4.733333
| 0.466667
| 0.295775
| 0.464789
| 0.802817
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045977
| 87
| 1
| 87
| 87
| 0.855422
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
13c89ea14b9b755b6b9f2f8295dbf7ba4ca1b4c1
| 1,547
|
py
|
Python
|
flow/networks/i24_subnetwork.py
|
kumarms1/flow
|
6869f3c14838a1edec3694c3eb85281e71b7cd31
|
[
"MIT"
] | null | null | null |
flow/networks/i24_subnetwork.py
|
kumarms1/flow
|
6869f3c14838a1edec3694c3eb85281e71b7cd31
|
[
"MIT"
] | null | null | null |
flow/networks/i24_subnetwork.py
|
kumarms1/flow
|
6869f3c14838a1edec3694c3eb85281e71b7cd31
|
[
"MIT"
] | null | null | null |
from flow.networks.base import Network
# Ordered edge ids for the main-highway stretch of the I-24 subnetwork.
# Vehicles are distributed over these edges, and the route in
# I24SubNetwork.specify_routes traverses them in this exact order.
EDGES_DISTRIBUTION = [
    # Main Highway
    "108162443",
    "108162396",
    "108162303",
    "634155184",
    "108162054",
    "108161914#0",
    "108161914#1",
    "108161914#2",
    "108161914#3",
    "108161914#4",
    "108161914#5",
    "635462234",
    "173720363#0",
    "173720363#1",
    "173720363#2",
    "173720363#3",
    "173720363#4",
    "173720351",
    "173720358#0",
    "173720358#1",
    "173720350",
    "173720361",
    "173720354",
    "173720360",
]
class I24SubNetwork(Network):
    """Network for the I-24 subnetwork with a single main-highway route."""

    def specify_routes(self, net_params):
        """Map the entry edge to the full main-highway edge sequence.

        The original duplicated EDGES_DISTRIBUTION verbatim (same 24 edge
        ids, same order); reuse the module constant so there is only one
        copy of the edge list to maintain. A fresh list is returned each
        call, matching the original's behaviour and protecting the
        constant from caller mutation.

        ``net_params`` is unused here but required by the base-class
        interface.
        """
        return {"108162443": list(EDGES_DISTRIBUTION)}
| 25.783333
| 41
| 0.38203
| 95
| 1,547
| 6.189474
| 0.410526
| 0.061224
| 0.091837
| 0.122449
| 0.778912
| 0.778912
| 0.778912
| 0.778912
| 0.778912
| 0.778912
| 0
| 0.601282
| 0.495798
| 1,547
| 60
| 42
| 25.783333
| 0.152564
| 0.007757
| 0
| 0.8
| 0
| 0
| 0.321382
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018182
| false
| 0
| 0.018182
| 0.018182
| 0.072727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
b9f11b4f6dfee4d444087f2f2d29b685bdc24e98
| 273
|
py
|
Python
|
test.py
|
MD-Levitan/hash_extender
|
a6ff56c1620e8f94986f7725f98d0cc48daa3f3a
|
[
"BSD-3-Clause"
] | null | null | null |
test.py
|
MD-Levitan/hash_extender
|
a6ff56c1620e8f94986f7725f98d0cc48daa3f3a
|
[
"BSD-3-Clause"
] | null | null | null |
test.py
|
MD-Levitan/hash_extender
|
a6ff56c1620e8f94986f7725f98d0cc48daa3f3a
|
[
"BSD-3-Clause"
] | null | null | null |
import hash_extender

# Length-extension demo: extend the 4-byte message "data" (known only via its
# 32-character md5 digest) with the 6-byte string "append".
# NOTE(review): the trailing 6 is presumably the secret/key length — confirm
# against hash_extender_default's signature.
v = hash_extender.hash_extender_default("data", 4, "append", 6, "6036708eba0d11f6ef52ad44e8b74d5b", 32, "md5", 6)
print(v)
# Same call for md4, kept for reference:
# v = hash_extender.hash_extender_default("data", 4, "append", 6, "6036708eba0d11f6ef52ad44e8b74d5b", 32, "md4", 6)
# print(v)
| 34.125
| 115
| 0.736264
| 35
| 273
| 5.542857
| 0.4
| 0.309278
| 0.134021
| 0.175258
| 0.804124
| 0.804124
| 0.804124
| 0.804124
| 0.804124
| 0.804124
| 0
| 0.204082
| 0.102564
| 273
| 7
| 116
| 39
| 0.587755
| 0.446886
| 0
| 0
| 0
| 0
| 0.304054
| 0.216216
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0.333333
| 0
| 0
| 0
| null | 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 11
|
b9feedb106be405f8bdeb0cfc44de2e804be19fa
| 9,958
|
py
|
Python
|
examples/tof-viewer/external/newton_host_driver/src/host_api/examples/python/tests/test_maxram.py
|
rick-yhchen1013/aditof-sdk-rework
|
911465dd1e05dd0b1c5107197b3b4dc3a10f77f9
|
[
"MIT"
] | 5
|
2021-09-22T10:04:47.000Z
|
2022-02-08T17:55:09.000Z
|
examples/tof-viewer/external/newton_host_driver/src/host_api/examples/python/tests/test_maxram.py
|
rick-yhchen1013/aditof-sdk-rework
|
911465dd1e05dd0b1c5107197b3b4dc3a10f77f9
|
[
"MIT"
] | 99
|
2021-02-01T12:45:09.000Z
|
2022-03-08T09:54:13.000Z
|
examples/tof-viewer/external/newton_host_driver/src/host_api/examples/python/tests/test_maxram.py
|
rick-yhchen1013/aditof-sdk-rework
|
911465dd1e05dd0b1c5107197b3b4dc3a10f77f9
|
[
"MIT"
] | 4
|
2021-08-09T12:32:55.000Z
|
2021-12-13T05:38:55.000Z
|
#!/usr/bin/env python
""" Python script example
Usage:
test_maxram.py [--target=<ram_target>][--count=<word_count>]
Options:
--help Shows this help message.
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from docopt import docopt
import sys
import io
import os
import time
import struct
import subprocess
import ctypes
from collections import OrderedDict
import threading
from newton_control_main import newton as newton
# (ram name, maximum word count) for every RAM exercised by this test,
# in the original run order. The name doubles as the data-file stem and
# the generateBootImage.py target argument.
_RAM_TARGETS = [
    ("useq_wave_ram", 2048),
    ("useq_map_ram", 128),
    ("useq_seq_ram", 4096),
    ("datapath_ram", 4096),
    ("de_ram", 512),
    ("lps1_ram", 256),
    ("lps2_ram", 256),
]


def _exercise_ram(ram_name, max_count, count, newton_target):
    """Generate, load and verify one RAM image; return the clamped count.

    The (possibly clamped) count is returned so the caller can preserve the
    original script's behaviour of carrying ``count`` over from one RAM
    section to the next. Exits the process with the failing return code on
    any error, exactly as each inlined section did before.
    """
    if count == 0 or count > max_count:
        count = max_count
    cmd_file = os.path.expanduser("~/host_api/dataFiles/" + ram_name + ".txt")
    cmd_file_bytes = cmd_file.encode(encoding='utf-8')
    print("INFO: Generating file \"" + cmd_file + "\" with count = " + str(count))
    # Quote cmd_file so paths containing spaces survive the shell. The
    # original quoted it only in the lps2_ram section; quoting everywhere is
    # the consistent form.
    rc = os.system("~/host_api/examples/python/generateBootImage.py " + ram_name + " \"" + cmd_file + "\" --seed=1 --count=" + str(count))
    if rc != 0:
        print("ERROR: Error generateBootImage.py return an error (" + str(rc) + ").")
        sys.exit(rc)
    print("INFO: Loading command file \"" + cmd_file + "\" ...")
    rc = newton.adi_load_command_file(cmd_file_bytes)
    if rc != 0:
        print("ERROR: Error newton.adi_load_command_file returned error (" + str(rc) + ").")
        sys.exit(rc)
    print("INFO: Verifying RAMs loaded by command file \"" + cmd_file + "\"")
    # The FPGA target verifies directly; any other target goes through the
    # HSP variant.
    if newton_target == "FPGA":
        rc = newton.adi_verify_command_file(cmd_file_bytes)
    else:
        rc = newton.adi_verify_command_file_hsp(cmd_file_bytes)
    if rc != 0:
        print("ERROR: Error newton.adi_verify_command_file returned error (" + str(rc) + ").")
        sys.exit(rc)
    return count


if __name__ == "__main__":
    args = docopt(__doc__, version='0.1')
    newtonTarget = os.environ["NEWTON_TARGET"]
    ram_target = args['--target'] if args['--target'] else "all"
    count = int(args['--count']) if args['--count'] else 0
    rc = newton.adi_newton_config(0)
    if rc != 0:
        print("ERROR: newton.adi_newton_config return an error (" + str(rc) + ").")
        sys.exit(rc)
    # BUG FIX: the original chained the useq_seq_ram section with `elif` off
    # the useq_map_ram `if`, so with the default target "all" the
    # useq_seq_ram pass was silently skipped. Every target is now tested
    # independently.
    for ram_name, max_count in _RAM_TARGETS:
        if ram_target == "all" or ram_target == ram_name:
            count = _exercise_ram(ram_name, max_count, count, newtonTarget)
    print("INFO: test PASSED!")
| 40.479675
| 137
| 0.597409
| 1,303
| 9,958
| 4.330008
| 0.083653
| 0.086849
| 0.068238
| 0.08933
| 0.873449
| 0.870791
| 0.861219
| 0.856434
| 0.848104
| 0.832152
| 0
| 0.0137
| 0.259691
| 9,958
| 245
| 138
| 40.644898
| 0.751628
| 0.016168
| 0
| 0.722513
| 0
| 0
| 0.347262
| 0.102677
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.005236
| 0.073298
| 0
| 0.073298
| 0.235602
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6a3994dafa887d9011ea571dc37ec25cd34ebb49
| 91
|
py
|
Python
|
imutils/utils/ResizeRight/resize_right/__init__.py
|
JacobARose/image-utils
|
aa0e005c0b4df5198d188b074f4e21f8d8f97962
|
[
"MIT"
] | null | null | null |
imutils/utils/ResizeRight/resize_right/__init__.py
|
JacobARose/image-utils
|
aa0e005c0b4df5198d188b074f4e21f8d8f97962
|
[
"MIT"
] | null | null | null |
imutils/utils/ResizeRight/resize_right/__init__.py
|
JacobARose/image-utils
|
aa0e005c0b4df5198d188b074f4e21f8d8f97962
|
[
"MIT"
] | null | null | null |
# __all__ = ['interp_methods', 'resize_right']
from . import interp_methods, resize_right
| 22.75
| 46
| 0.758242
| 11
| 91
| 5.545455
| 0.636364
| 0.42623
| 0.622951
| 0.786885
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.120879
| 91
| 3
| 47
| 30.333333
| 0.7625
| 0.483516
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
e03db806cfab1bf2129ee162325b3e2b5a3a9d9c
| 134
|
py
|
Python
|
src/spyd/server/metrics/__init__.py
|
DanSeraf/spyd
|
af893b7f9c67785613b25754eb2cf150523a9fe4
|
[
"Zlib"
] | 4
|
2015-05-05T16:44:42.000Z
|
2020-10-27T09:45:23.000Z
|
src/spyd/server/metrics/__init__.py
|
DanSeraf/spyd
|
af893b7f9c67785613b25754eb2cf150523a9fe4
|
[
"Zlib"
] | null | null | null |
src/spyd/server/metrics/__init__.py
|
DanSeraf/spyd
|
af893b7f9c67785613b25754eb2cf150523a9fe4
|
[
"Zlib"
] | 2
|
2016-12-13T22:21:08.000Z
|
2020-03-14T16:44:20.000Z
|
from spyd.server.metrics.get_metrics_service import get_metrics_service
from spyd.server.metrics.rate_aggregator import RateAggregator
| 67
| 71
| 0.902985
| 19
| 134
| 6.105263
| 0.526316
| 0.137931
| 0.241379
| 0.362069
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.052239
| 134
| 2
| 72
| 67
| 0.913386
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
e067318c11387a87d5c5d2af11f570ba41cb095f
| 202
|
py
|
Python
|
tests/core/cube/test_constants.py
|
jorisvandenbossche/kartothek
|
18b11e7b060bb778668ffc4e2f468910120e6385
|
[
"MIT"
] | 171
|
2019-05-02T15:47:20.000Z
|
2022-02-17T15:12:15.000Z
|
tests/core/cube/test_constants.py
|
jorisvandenbossche/kartothek
|
18b11e7b060bb778668ffc4e2f468910120e6385
|
[
"MIT"
] | 414
|
2019-05-03T09:24:26.000Z
|
2022-03-30T21:02:40.000Z
|
tests/core/cube/test_constants.py
|
jorisvandenbossche/kartothek
|
18b11e7b060bb778668ffc4e2f468910120e6385
|
[
"MIT"
] | 57
|
2019-05-03T08:00:18.000Z
|
2022-02-16T18:38:22.000Z
|
from kartothek.core.cube.constants import KTK_CUBE_UUID_SEPERATOR
from kartothek.core.dataset import _validate_uuid
def test_uuid_seperator_valid():
    """The cube UUID separator must itself pass kartothek's UUID validation."""
    separator_is_valid = _validate_uuid(KTK_CUBE_UUID_SEPERATOR)
    assert separator_is_valid
| 28.857143
| 65
| 0.861386
| 29
| 202
| 5.551724
| 0.517241
| 0.242236
| 0.21118
| 0.248447
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.089109
| 202
| 6
| 66
| 33.666667
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.25
| true
| 0
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 9
|
0edd63e79086966238e83d95307097f3919f2d21
| 6,178
|
py
|
Python
|
experiments/launcher/experiments_mnist_usps.py
|
mkirchmeyer/adaptation-imputation
|
7ef683f2da08699b3f877467fdb0e00d3b02bccc
|
[
"MIT"
] | 2
|
2021-09-15T14:03:11.000Z
|
2022-02-17T15:32:13.000Z
|
experiments/launcher/experiments_mnist_usps.py
|
mkirchmeyer/adaptation-imputation
|
7ef683f2da08699b3f877467fdb0e00d3b02bccc
|
[
"MIT"
] | null | null | null |
experiments/launcher/experiments_mnist_usps.py
|
mkirchmeyer/adaptation-imputation
|
7ef683f2da08699b3f877467fdb0e00d3b02bccc
|
[
"MIT"
] | 1
|
2021-09-15T14:03:14.000Z
|
2021-09-15T14:03:14.000Z
|
# Loss/adaptation toggles shared by every grid below (1 = enabled).
activate_mse = 1
activate_adaptation_imp = 1
activate_adaptation_d1 = 1
# Relative loss weights passed through to the imputation grids.
weight_d2 = 1.0
weight_mse = 1.0
# Refinement phase: enabled, run for 10 extra epochs.
refinement = 1
n_epochs_refinement = 10
# Regularisation strengths / pseudo-label threshold (single-value grids).
lambda_regul = [0.01]
lambda_regul_s = [0.01]
threshold_value = [0.95]
# When computing variance, every grid is repeated over five fixed seeds.
compute_variance = False
random_seed = [1985] if not compute_variance else [1985, 2184, 51, 12, 465]
class DannMNISTUSPS(object):
    """Launcher grid: DANN with upper bound on MNIST -> USPS."""
    MAX_NB_PROCESSES = 3
    DEBUG = False
    BINARY = "experiments/launcher/digits_binary.py"
    GRID = {
        "-mode": ["dann"],
        "-upper_bound": [1],
        "-adaptive_lr": [1],
        "-is_balanced": [1],
        "-source": ["MNIST"],
        "-target": ["USPS"],
        "-epoch_to_start_align": [11],
        "-n_epochs": [100],
        "-batch_size": [128],
        "-initialize_model": [1],
        "-init_batch_size": [32],
        "-refinement": [refinement],
        "-n_epochs_refinement": [n_epochs_refinement],
        "-lambda_regul": lambda_regul,
        "-lambda_regul_s": lambda_regul_s,
        "-threshold_value": threshold_value,
        "-random_seed": random_seed
    }
class DannIgnoreMNISTUSPS(object):
    """Launcher grid: DANN on MNIST -> USPS adapting only the first part (-adapt_only_first)."""
    MAX_NB_PROCESSES = 3
    DEBUG = False
    BINARY = "experiments/launcher/digits_binary.py"
    GRID = {
        "-mode": ["dann"],
        "-upper_bound": [1],
        "-adaptive_lr": [1],
        "-is_balanced": [1],
        "-source": ["MNIST"],
        "-target": ["USPS"],
        "-epoch_to_start_align": [11],
        "-n_epochs": [100],
        "-batch_size": [128],
        "-initialize_model": [1],
        "-init_batch_size": [32],
        "-adapt_only_first": [1],
        "-refinement": [refinement],
        "-n_epochs_refinement": [n_epochs_refinement],
        "-lambda_regul": lambda_regul,
        "-threshold_value": threshold_value,
        "-random_seed": random_seed
    }
class DannZeroImputMNISTUSPS(object):
    """Launcher grid: DANN on MNIST -> USPS without the upper bound (-upper_bound 0)."""
    MAX_NB_PROCESSES = 3
    DEBUG = False
    BINARY = "experiments/launcher/digits_binary.py"
    GRID = {
        "-mode": ["dann"],
        "-upper_bound": [0],
        "-adaptive_lr": [1],
        "-is_balanced": [1],
        "-source": ["MNIST"],
        "-target": ["USPS"],
        "-epoch_to_start_align": [11],
        "-n_epochs": [100],
        "-batch_size": [128],
        "-initialize_model": [1],
        "-init_batch_size": [32],
        "-init_lr": [10 ** -2.5],
        "-refinement": [refinement],
        "-n_epochs_refinement": [n_epochs_refinement],
        "-lambda_regul": lambda_regul,
        "-threshold_value": threshold_value,
        "-random_seed": random_seed
    }
class DannImputMNISTUSPS(object):
    """Launcher grid: DANN with imputation (-mode dann_imput) on MNIST -> USPS."""
    MAX_NB_PROCESSES = 2
    DEBUG = False
    BINARY = "experiments/launcher/digits_binary.py"
    GRID = {
        "-mode": ["dann_imput"],
        "-adaptive_lr": [1],
        "-is_balanced": [1],
        "-source": ["MNIST"],
        "-target": ["USPS"],
        "-epoch_to_start_align": [11],
        "-stop_grad": [0],
        "-n_epochs": [100],
        "-batch_size": [128],
        "-initialize_model": [1],
        "-init_batch_size": [32],
        "-weight_d2": [weight_d2],
        "-weight_mse": [weight_mse],
        "-activate_mse": [activate_mse],
        "-activate_adaptation_imp": [activate_adaptation_imp],
        "-activate_adaptation_d1": [activate_adaptation_d1],
        "-init_lr": [10 ** -2],
        "-refinement": [refinement],
        "-n_epochs_refinement": [n_epochs_refinement],
        "-lambda_regul": lambda_regul,
        "-lambda_regul_s": lambda_regul_s,
        "-threshold_value": threshold_value,
        "-random_seed": random_seed
    }
class DjdotMNISTUSPS(object):
    """Launcher grid: DJDOT with upper bound on MNIST -> USPS."""
    MAX_NB_PROCESSES = 2
    DEBUG = False
    BINARY = "experiments/launcher/digits_binary.py"
    GRID = {
        "-mode": ["djdot"],
        "-upper_bound": [1],
        "-is_balanced": [1],
        "-djdot_alpha": [0.1],
        "-adaptive_lr": [1],
        "-source": ["MNIST"],
        "-target": ["USPS"],
        "-epoch_to_start_align": [11],
        "-n_epochs": [100],
        "-batch_size": [500],
        "-initialize_model": [1],
        "-init_batch_size": [32],
        "-init_lr": [10 ** -2],
        "-random_seed": random_seed
    }
class DjdotIgnoreMNISTUSPS(object):
    """Launcher grid: DJDOT on MNIST -> USPS adapting only the first part (-adapt_only_first), with figure output."""
    MAX_NB_PROCESSES = 2
    DEBUG = False
    BINARY = "experiments/launcher/digits_binary.py"
    GRID = {
        "-mode": ["djdot"],
        "-upper_bound": [1],
        "-is_balanced": [1],
        "-djdot_alpha": [0.1],
        "-adaptive_lr": [1],
        "-source": ["MNIST"],
        "-target": ["USPS"],
        "-epoch_to_start_align": [11],
        "-n_epochs": [100],
        "-batch_size": [500],
        "-initialize_model": [1],
        "-output_fig": [1],
        "-init_batch_size": [32],
        "-init_lr": [10 ** -2],
        "-adapt_only_first": [1],
        "-random_seed": random_seed
    }
class DjdotZeroImputMNISTUSPS(object):
    """Launcher grid: DJDOT on MNIST -> USPS without the upper bound (-upper_bound 0)."""
    MAX_NB_PROCESSES = 2
    DEBUG = False
    BINARY = "experiments/launcher/digits_binary.py"
    GRID = {
        "-mode": ["djdot"],
        "-upper_bound": [0],
        "-adaptive_lr": [1],
        "-is_balanced": [1],
        "-djdot_alpha": [0.1],
        "-source": ["MNIST"],
        "-target": ["USPS"],
        "-epoch_to_start_align": [11],
        "-n_epochs": [100],
        "-batch_size": [500],
        "-initialize_model": [1],
        "-init_batch_size": [32],
        "-init_lr": [10 ** -2],
        "-random_seed": random_seed
    }
class DjdotImputMNISTUSPS(object):
    """Launcher grid: DJDOT with imputation (-mode djdot_imput) on MNIST -> USPS."""
    MAX_NB_PROCESSES = 2
    DEBUG = False
    BINARY = "experiments/launcher/digits_binary.py"
    GRID = {
        "-mode": ["djdot_imput"],
        "-adaptive_lr": [1],
        "-source": ["MNIST"],
        "-target": ["USPS"],
        "-is_balanced": [1],
        "-epoch_to_start_align": [11],
        "-stop_grad": [1],
        "-djdot_alpha": [0.1],
        "-bigger_reconstructor": [1],
        "-n_epochs": [100],
        "-batch_size": [500],
        "-initialize_model": [1],
        "-init_batch_size": [32],
        "-init_lr": [10 ** -2],
        "-activate_mse": [activate_mse],
        "-activate_adaptation_imp": [activate_adaptation_imp],
        "-activate_adaptation_d1": [activate_adaptation_d1],
        "-random_seed": random_seed
    }
| 28.869159
| 75
| 0.540952
| 651
| 6,178
| 4.786482
| 0.144393
| 0.03819
| 0.049101
| 0.051348
| 0.832157
| 0.819961
| 0.819961
| 0.79846
| 0.794929
| 0.779204
| 0
| 0.044131
| 0.277436
| 6,178
| 213
| 76
| 29.004695
| 0.653898
| 0
| 0
| 0.811224
| 0
| 0
| 0.34202
| 0.09372
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0.204082
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0ee007aab352132504806fe663ad629fdf85d09d
| 4,885
|
py
|
Python
|
tests/components/zwave_js/test_fan.py
|
tbarbette/core
|
8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c
|
[
"Apache-2.0"
] | 1
|
2017-05-30T22:21:05.000Z
|
2017-05-30T22:21:05.000Z
|
tests/components/zwave_js/test_fan.py
|
tbarbette/core
|
8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c
|
[
"Apache-2.0"
] | 58
|
2020-08-03T07:33:02.000Z
|
2022-03-31T06:02:05.000Z
|
tests/components/zwave_js/test_fan.py
|
tbarbette/core
|
8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c
|
[
"Apache-2.0"
] | 2
|
2021-03-22T21:42:48.000Z
|
2021-04-12T12:26:39.000Z
|
"""Test the Z-Wave JS fan platform."""
import pytest
from zwave_js_server.event import Event
from homeassistant.components.fan import ATTR_SPEED, SPEED_MEDIUM
# Entity id of the in-wall smart fan control fixture exercised by these tests.
FAN_ENTITY = "fan.in_wall_smart_fan_control"
async def test_fan(hass, client, in_wall_smart_fan_control, integration):
    """Test the fan entity."""
    node = in_wall_smart_fan_control

    # Expected "valueId" payload of every node.set_value command issued by
    # the fan platform in this test.  Defined once instead of three inline
    # copies; the original copies also repeated the "label" key inside
    # "metadata" — a silent duplicate-key defect in a dict literal (the
    # last occurrence wins, so removing the duplicate changes nothing).
    expected_value_id = {
        "commandClassName": "Multilevel Switch",
        "commandClass": 38,
        "endpoint": 0,
        "property": "targetValue",
        "propertyName": "targetValue",
        "metadata": {
            "label": "Target value",
            "max": 99,
            "min": 0,
            "type": "number",
            "readable": True,
            "writeable": True,
        },
    }

    state = hass.states.get(FAN_ENTITY)
    assert state
    assert state.state == "off"

    # Test turn on setting speed
    await hass.services.async_call(
        "fan",
        "turn_on",
        {"entity_id": FAN_ENTITY, "speed": SPEED_MEDIUM},
        blocking=True,
    )

    assert len(client.async_send_command_no_wait.call_args_list) == 1
    args = client.async_send_command_no_wait.call_args[0][0]
    assert args["command"] == "node.set_value"
    assert args["nodeId"] == 17
    assert args["valueId"] == expected_value_id
    # Medium speed maps to 66 on the device's 0-99 multilevel scale.
    assert args["value"] == 66

    client.async_send_command_no_wait.reset_mock()

    # Test setting unknown speed
    with pytest.raises(ValueError):
        await hass.services.async_call(
            "fan",
            "set_speed",
            {"entity_id": FAN_ENTITY, "speed": 99},
            blocking=True,
        )

    client.async_send_command_no_wait.reset_mock()

    # Test turn on no speed
    await hass.services.async_call(
        "fan",
        "turn_on",
        {"entity_id": FAN_ENTITY},
        blocking=True,
    )

    assert len(client.async_send_command_no_wait.call_args_list) == 1
    args = client.async_send_command_no_wait.call_args[0][0]
    assert args["command"] == "node.set_value"
    assert args["nodeId"] == 17
    assert args["valueId"] == expected_value_id
    # 255 asks the device to restore its previous (non-zero) level.
    assert args["value"] == 255

    client.async_send_command_no_wait.reset_mock()

    # Test turning off
    await hass.services.async_call(
        "fan",
        "turn_off",
        {"entity_id": FAN_ENTITY},
        blocking=True,
    )

    assert len(client.async_send_command_no_wait.call_args_list) == 1
    args = client.async_send_command_no_wait.call_args[0][0]
    assert args["command"] == "node.set_value"
    assert args["nodeId"] == 17
    assert args["valueId"] == expected_value_id
    assert args["value"] == 0

    client.async_send_command_no_wait.reset_mock()

    # Test speed update from value updated event
    event = Event(
        type="value updated",
        data={
            "source": "node",
            "event": "value updated",
            "nodeId": 17,
            "args": {
                "commandClassName": "Multilevel Switch",
                "commandClass": 38,
                "endpoint": 0,
                "property": "currentValue",
                "newValue": 99,
                "prevValue": 0,
                "propertyName": "currentValue",
            },
        },
    )
    node.receive_event(event)

    state = hass.states.get(FAN_ENTITY)
    assert state.state == "on"
    assert state.attributes[ATTR_SPEED] == "high"

    client.async_send_command_no_wait.reset_mock()

    # A currentValue of 0 reports the fan as off again.
    event = Event(
        type="value updated",
        data={
            "source": "node",
            "event": "value updated",
            "nodeId": 17,
            "args": {
                "commandClassName": "Multilevel Switch",
                "commandClass": 38,
                "endpoint": 0,
                "property": "currentValue",
                "newValue": 0,
                "prevValue": 0,
                "propertyName": "currentValue",
            },
        },
    )
    node.receive_event(event)

    state = hass.states.get(FAN_ENTITY)
    assert state.state == "off"
    assert state.attributes[ATTR_SPEED] == "off"
| 28.236994
| 73
| 0.544319
| 498
| 4,885
| 5.13253
| 0.190763
| 0.046948
| 0.064554
| 0.094679
| 0.850548
| 0.791862
| 0.780516
| 0.767606
| 0.738263
| 0.6741
| 0
| 0.016939
| 0.323234
| 4,885
| 172
| 74
| 28.401163
| 0.756201
| 0.034596
| 0
| 0.75
| 0
| 0
| 0.232906
| 0.006197
| 0
| 0
| 0
| 0
| 0.145833
| 1
| 0
| false
| 0
| 0.020833
| 0
| 0.020833
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
162d3cf759584feb8fcd2baef9ed9952485105b3
| 12,871
|
py
|
Python
|
pbs/filters.py
|
jawaidm/pbs
|
87f5c535c976d6a5eccbfbbf2073589b6e366d04
|
[
"Apache-2.0"
] | null | null | null |
pbs/filters.py
|
jawaidm/pbs
|
87f5c535c976d6a5eccbfbbf2073589b6e366d04
|
[
"Apache-2.0"
] | 12
|
2019-10-22T23:16:38.000Z
|
2022-03-11T23:17:45.000Z
|
pbs/filters.py
|
jawaidm/pbs
|
87f5c535c976d6a5eccbfbbf2073589b6e366d04
|
[
"Apache-2.0"
] | 5
|
2019-12-19T06:18:42.000Z
|
2022-01-07T01:16:18.000Z
|
from django.contrib.admin import filters
from django.contrib.admin.options import IncorrectLookupParameters
from django.contrib.admin.util import (get_model_from_relation,)
from django.core.exceptions import ValidationError
from django.db import models
class ExcludeListFilterMixin(object):
    """List-filter mixin that can apply ``exclude()`` lookups.

    Subclasses populate ``self.used_parameters_exclude`` with lookups that
    must be *excluded* from (rather than filtered into) the changelist
    queryset.

    Note: ``ValidationError`` and ``IncorrectLookupParameters`` were used
    here without being imported, raising ``NameError`` whenever the error
    path ran; the required imports are now added at the top of the module.
    """

    def queryset(self, request, queryset):
        queryset = super(ExcludeListFilterMixin, self).queryset(request, queryset)
        try:
            if self.used_parameters_exclude:
                return queryset.exclude(**self.used_parameters_exclude)
            return queryset
        except ValidationError as e:
            # Mirror contrib.admin's standard handling of bad lookup values.
            raise IncorrectLookupParameters(e)
class BooleanFieldListFilter(filters.BooleanFieldListFilter):
    # Extends the stock admin boolean filter so the value may also arrive
    # as "<field_path>" (lookup_kwarg1) or "<field_path>__in" (lookup_kwarg3)
    # query parameters, normalising everything to real booleans.
    def __init__(self, field, request, params, model, model_admin, field_path):
        self.lookup_kwarg1 = '%s' % field_path
        self.lookup_kwarg3 = '%s__in' % field_path
        self.lookup_val1 = request.GET.get(self.lookup_kwarg1, None)
        self.lookup_val3 = request.GET.get(self.lookup_kwarg3, None)
        # The parent __init__ populates self.lookup_kwarg, self.field and
        # self.used_parameters (presumably also lookup_kwarg2 — see
        # expected_parameters below; confirm against the Django version in use).
        super(BooleanFieldListFilter,self).__init__(field,request, params, model, model_admin, field_path)
        self.is_nullable = isinstance(self.field, models.NullBooleanField)
        # Coerce a request value to True/False, or None for "" (meaning
        # "not supplied").  NOTE: ``basestring`` makes this Python-2-only.
        to_bool = lambda v :(None if v == "" else (True if v in ("1","true","yes","on") else False)) if isinstance(v,basestring) else (True if v else False)
        # Normalise the single-value lookups; drop them when empty.
        for kwarg in (self.lookup_kwarg,self.lookup_kwarg1):
            if kwarg in self.used_parameters:
                val = to_bool(self.used_parameters[kwarg])
                if val is None:
                    del self.used_parameters[kwarg]
                else:
                    self.used_parameters[kwarg] = val
        # Normalise the __in lookup.
        if self.lookup_kwarg3 in self.used_parameters:
            if isinstance(self.used_parameters[self.lookup_kwarg3],(list,tuple)):
                # Collect distinct boolean values, skipping empty entries.
                vals = None
                for v in self.used_parameters[self.lookup_kwarg3]:
                    val = to_bool(v)
                    if val is None:
                        continue
                    if vals is None:
                        vals = [val]
                    elif val not in vals:
                        vals.append(val)
                if vals is None:
                    # Nothing usable: drop the parameter.
                    del self.used_parameters[self.lookup_kwarg3]
                elif len(vals) == 1:
                    # Single distinct value: use the plain equality lookup.
                    del self.used_parameters[self.lookup_kwarg3]
                    self.used_parameters[self.lookup_kwarg] = vals[0]
                elif self.is_nullable:
                    # Both True and False requested: only meaningful for a
                    # nullable field (it excludes NULL rows); keep the __in.
                    self.used_parameters[self.lookup_kwarg3] = vals
                else:
                    # Non-nullable field matching both values = no filter.
                    del self.used_parameters[self.lookup_kwarg3]
            else:
                # Scalar __in value: rewrite as the plain equality lookup.
                val = to_bool(self.used_parameters[self.lookup_kwarg3])
                if val is None:
                    del self.used_parameters[self.lookup_kwarg3]
                else:
                    del self.used_parameters[self.lookup_kwarg3]
                    self.used_parameters[self.lookup_kwarg] = val

    def expected_parameters(self):
        # lookup_kwarg and lookup_kwarg2 come from the parent class
        # (presumably the __exact and __isnull lookups — verify).
        return [self.lookup_kwarg,self.lookup_kwarg1, self.lookup_kwarg2,self.lookup_kwarg3]
class CrossTenureApprovedListFilter(ExcludeListFilterMixin,BooleanFieldListFilter):
    # Boolean filter that treats "False" as "exclude the True rows" rather
    # than "filter on False" — i.e. False selections are rewritten into
    # used_parameters_exclude (consumed by ExcludeListFilterMixin.queryset).
    def __init__(self, field, request, params, model, model_admin, field_path):
        super(CrossTenureApprovedListFilter,self).__init__(field,request, params, model, model_admin, field_path)
        self.used_parameters_exclude = {}
        # Single-value lookups: False becomes exclude(field=True).
        for kwarg in (self.lookup_kwarg,self.lookup_kwarg1):
            if kwarg in self.used_parameters:
                if self.used_parameters[kwarg] == False:
                    self.used_parameters_exclude[kwarg] = True
                    del self.used_parameters[kwarg]
        # __in lookup (already normalised by BooleanFieldListFilter).
        if self.lookup_kwarg3 in self.used_parameters:
            if False in self.used_parameters[self.lookup_kwarg3] :
                if True in self.used_parameters[self.lookup_kwarg3] :
                    # Both values requested: no constraint at all.
                    del self.used_parameters[self.lookup_kwarg3]
                else:
                    # Only False requested: exclude the True rows.
                    self.used_parameters_exclude[self.lookup_kwarg] = True
                    del self.used_parameters[self.lookup_kwarg3]
            else:
                # Only True requested: plain equality filter.
                self.used_parameters[self.lookup_kwarg] = True
                del self.used_parameters[self.lookup_kwarg3]
class IntChoicesFieldListFilter(filters.ChoicesFieldListFilter):
    # Choices filter that coerces "<field_path>" and "<field_path>__in"
    # query parameters to integers, dropping empty values.
    def __init__(self, field, request, params, model, model_admin, field_path):
        self.lookup_kwarg1 = '%s' % field_path
        self.lookup_val1 = request.GET.get(self.lookup_kwarg1, None)
        self.lookup_kwarg2 = '%s__in' % field_path
        self.lookup_val2 = request.GET.get(self.lookup_kwarg2, None)
        super(IntChoicesFieldListFilter,self).__init__(field,request, params, model, model_admin, field_path)
        # Empty string means "not supplied"; everything else must parse as int.
        to_int = lambda v :None if v == "" else int(v)
        # Normalise the single-value lookups.
        for kwarg in (self.lookup_kwarg,self.lookup_kwarg1):
            if kwarg in self.used_parameters:
                val = to_int(self.used_parameters[kwarg])
                if val is None:
                    del self.used_parameters[kwarg]
                else:
                    self.used_parameters[kwarg] = val
        # Normalise the __in lookup.
        if self.lookup_kwarg2 in self.used_parameters:
            if isinstance(self.used_parameters[self.lookup_kwarg2],(list,tuple)):
                # Convert each entry, skipping empty strings.
                vals = None
                for v in self.used_parameters[self.lookup_kwarg2]:
                    val = to_int(v)
                    if val is None:
                        continue
                    if vals is None:
                        vals = [val]
                    else:
                        vals.append(val)
                if vals is None:
                    # No usable values: drop the parameter.
                    del self.used_parameters[self.lookup_kwarg2]
                elif len(vals) == 1:
                    # Single value: use the plain equality lookup instead.
                    del self.used_parameters[self.lookup_kwarg2]
                    self.used_parameters[self.lookup_kwarg] = vals[0]
                else:
                    self.used_parameters[self.lookup_kwarg2] = vals
            else:
                # Scalar __in value: rewrite as the plain equality lookup.
                val = to_int(self.used_parameters[self.lookup_kwarg2])
                if val is None:
                    del self.used_parameters[self.lookup_kwarg2]
                else:
                    del self.used_parameters[self.lookup_kwarg2]
                    self.used_parameters[self.lookup_kwarg] = val

    def expected_parameters(self):
        # lookup_kwarg is defined by the parent ChoicesFieldListFilter.
        return [self.lookup_kwarg,self.lookup_kwarg1, self.lookup_kwarg2]
class RelatedFieldListFilter(filters.RelatedFieldListFilter):
    # Related-field filter that additionally accepts
    # "<field_path>__<rel_pk>" and "<field_path>__<rel_pk>__in" query
    # parameters, coercing their values to integers.
    def __init__(self, field, request, params, model, model_admin, field_path):
        other_model = get_model_from_relation(field)
        # Resolve the name of the related field used in the lookups
        # (forward relation via field.rel, otherwise the related model's pk).
        if hasattr(field, 'rel'):
            rel_name = field.rel.get_related_field().name
        else:
            rel_name = other_model._meta.pk.name
        self.lookup_kwarg1 = '%s__%s' % (field_path,rel_name)
        self.lookup_val1 = request.GET.get(self.lookup_kwarg1, None)
        self.lookup_kwarg2 = '%s__%s__in' % (field_path,rel_name)
        self.lookup_val2 = request.GET.get(self.lookup_kwarg2, None)
        super(RelatedFieldListFilter, self).__init__(field, request, params, model, model_admin, field_path)
        # Empty string means "not supplied"; everything else must parse as int.
        to_int = lambda v :None if v == "" else int(v)
        # Normalise the single-value lookups.
        for kwarg in (self.lookup_kwarg,self.lookup_kwarg1):
            if kwarg in self.used_parameters:
                val = to_int(self.used_parameters[kwarg])
                if val is None:
                    del self.used_parameters[kwarg]
                else:
                    self.used_parameters[kwarg] = val
        # Normalise the __in lookup (same scheme as IntChoicesFieldListFilter).
        if self.lookup_kwarg2 in self.used_parameters:
            if isinstance(self.used_parameters[self.lookup_kwarg2],(list,tuple)):
                vals = None
                for v in self.used_parameters[self.lookup_kwarg2]:
                    val = to_int(v)
                    if val is None:
                        continue
                    if vals is None:
                        vals = [val]
                    else:
                        vals.append(val)
                if vals is None:
                    # No usable values: drop the parameter.
                    del self.used_parameters[self.lookup_kwarg2]
                elif len(vals) == 1:
                    # Single value: use the plain equality lookup instead.
                    del self.used_parameters[self.lookup_kwarg2]
                    self.used_parameters[self.lookup_kwarg] = vals[0]
                else:
                    self.used_parameters[self.lookup_kwarg2] = vals
            else:
                # Scalar __in value: rewrite as the plain equality lookup.
                val = to_int(self.used_parameters[self.lookup_kwarg2])
                if val is None:
                    del self.used_parameters[self.lookup_kwarg2]
                else:
                    del self.used_parameters[self.lookup_kwarg2]
                    self.used_parameters[self.lookup_kwarg] = val

    def expected_parameters(self):
        # lookup_kwarg and lookup_kwarg_isnull come from the parent class.
        return [self.lookup_kwarg,self.lookup_kwarg1,self.lookup_kwarg2, self.lookup_kwarg_isnull]
class IntValuesFieldListFilter(filters.AllValuesFieldListFilter):
    # All-values filter that coerces "<field_path>" and "<field_path>__in"
    # query parameters to integers, dropping empty values.
    def __init__(self, field, request, params, model, model_admin, field_path):
        self.lookup_kwarg2 = '%s__in' % field_path
        self.lookup_val2 = request.GET.get(self.lookup_kwarg2, None)
        super(IntValuesFieldListFilter, self).__init__(field, request, params, model, model_admin, field_path)
        # Empty string means "not supplied"; everything else must parse as int.
        to_int = lambda v :None if v == "" else int(v)
        # Normalise the single-value lookup.
        for kwarg in (self.lookup_kwarg,):
            if kwarg in self.used_parameters:
                val = to_int(self.used_parameters[kwarg])
                if val is None:
                    del self.used_parameters[kwarg]
                else:
                    self.used_parameters[kwarg] = val
        # Normalise the __in lookup (same scheme as the other filters above).
        if self.lookup_kwarg2 in self.used_parameters:
            if isinstance(self.used_parameters[self.lookup_kwarg2],(list,tuple)):
                vals = None
                for v in self.used_parameters[self.lookup_kwarg2]:
                    val = to_int(v)
                    if val is None:
                        continue
                    if vals is None:
                        vals = [val]
                    else:
                        vals.append(val)
                if vals is None:
                    # No usable values: drop the parameter.
                    del self.used_parameters[self.lookup_kwarg2]
                elif len(vals) == 1:
                    # Single value: use the plain equality lookup instead.
                    del self.used_parameters[self.lookup_kwarg2]
                    self.used_parameters[self.lookup_kwarg] = vals[0]
                else:
                    self.used_parameters[self.lookup_kwarg2] = vals
            else:
                # Scalar __in value: rewrite as the plain equality lookup.
                val = to_int(self.used_parameters[self.lookup_kwarg2])
                if val is None:
                    del self.used_parameters[self.lookup_kwarg2]
                else:
                    del self.used_parameters[self.lookup_kwarg2]
                    self.used_parameters[self.lookup_kwarg] = val

    def expected_parameters(self):
        # lookup_kwarg and lookup_kwarg_isnull come from the parent class.
        return [self.lookup_kwarg,self.lookup_kwarg2, self.lookup_kwarg_isnull]
class StringValuesFieldListFilter(filters.AllValuesFieldListFilter):
    """All-values list filter that keeps its parameters as strings.

    Accepts both ``<field_path>`` (the inherited ``lookup_kwarg``) and
    ``<field_path>__in`` (multiple values) query parameters.  Empty strings
    are treated as "parameter not supplied".
    """
    def __init__(self, field, request, params, model, model_admin, field_path):
        self.lookup_kwarg2 = '%s__in' % field_path
        self.lookup_val2 = request.GET.get(self.lookup_kwarg2, None)
        super(StringValuesFieldListFilter, self).__init__(field, request, params, model, model_admin, field_path)
        # Empty string means "no value"; anything else is coerced to str.
        to_str = lambda v: None if v == "" else str(v)
        # Normalise the single-value lookup.
        for kwarg in (self.lookup_kwarg,):
            if kwarg in self.used_parameters:
                # BUG FIX: the original called the undefined name ``to_int``
                # here (copy/paste from the int-based filters above), raising
                # NameError whenever a single-value parameter was present.
                val = to_str(self.used_parameters[kwarg])
                if val is None:
                    del self.used_parameters[kwarg]
                else:
                    self.used_parameters[kwarg] = val
        # Normalise the __in lookup.
        if self.lookup_kwarg2 in self.used_parameters:
            if isinstance(self.used_parameters[self.lookup_kwarg2], (list, tuple)):
                # Convert each entry, skipping empty strings, keeping order.
                vals = None
                for v in self.used_parameters[self.lookup_kwarg2]:
                    val = to_str(v)
                    if val is None:
                        continue
                    if vals is None:
                        vals = [val]
                    else:
                        vals.append(val)
                if vals is None:
                    # No usable values: drop the parameter.
                    del self.used_parameters[self.lookup_kwarg2]
                elif len(vals) == 1:
                    # Single value: use the plain equality lookup instead.
                    del self.used_parameters[self.lookup_kwarg2]
                    self.used_parameters[self.lookup_kwarg] = vals[0]
                else:
                    self.used_parameters[self.lookup_kwarg2] = vals
            else:
                # Scalar __in value: rewrite as the plain equality lookup.
                # BUG FIX: this branch also called the undefined ``to_int``.
                val = to_str(self.used_parameters[self.lookup_kwarg2])
                if val is None:
                    del self.used_parameters[self.lookup_kwarg2]
                else:
                    del self.used_parameters[self.lookup_kwarg2]
                    self.used_parameters[self.lookup_kwarg] = val

    def expected_parameters(self):
        """Query parameters this filter consumes."""
        return [self.lookup_kwarg, self.lookup_kwarg2, self.lookup_kwarg_isnull]
| 45.641844
| 156
| 0.580374
| 1,446
| 12,871
| 4.923928
| 0.064315
| 0.161517
| 0.230056
| 0.176124
| 0.826124
| 0.82191
| 0.800281
| 0.78427
| 0.773736
| 0.76559
| 0
| 0.011759
| 0.33929
| 12,871
| 281
| 157
| 45.80427
| 0.825494
| 0
| 0
| 0.780488
| 0
| 0
| 0.004429
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04878
| false
| 0
| 0.012195
| 0.020325
| 0.117886
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
1642d59d2b6cb48a18b837cd543ca9ee9091f18d
| 19,630
|
py
|
Python
|
DegenerateArrow/home/views.py
|
Asura0528/Degenerate-arrow
|
7f624027dbfd9345ddcf51bcdc2a5e7161e44f2e
|
[
"MIT"
] | 1
|
2021-10-20T16:19:15.000Z
|
2021-10-20T16:19:15.000Z
|
DegenerateArrow/home/views.py
|
Asura0528/Degenerate-arrow
|
7f624027dbfd9345ddcf51bcdc2a5e7161e44f2e
|
[
"MIT"
] | null | null | null |
DegenerateArrow/home/views.py
|
Asura0528/Degenerate-arrow
|
7f624027dbfd9345ddcf51bcdc2a5e7161e44f2e
|
[
"MIT"
] | null | null | null |
import re
from django.contrib.auth import login
from django.db import DatabaseError
from django.http import HttpResponseRedirect
from django.shortcuts import render, redirect
from django.views import View
from django.contrib import messages
from django.urls import reverse
from django_redis import get_redis_connection
from home.models import ArticleCategory, Article, CarouselImg, Comment, PublicOffering, Tag, TagType, AgentAndTag
from django.http import HttpResponseNotFound
from django.core.paginator import Paginator, EmptyPage
# Create your views here.
from users.models import User
from users.views import ForgetView, LoginView
class IndexView(View):
    """Home page view.

    Methods:
        get
    """
    @staticmethod
    def get(request):
        # Base context shown to every visitor: the carousel image URL.
        context = {
            'carousel_img': CarouselImg.carousel_img.url
        }
        # noinspection PyBroadException
        try:
            is_login = request.COOKIES['is_login']
            if is_login:
                user = request.user
                # Assemble the logged-in user's template data.
                context_update = {
                    'username': user.username,
                    'avatar': user.avatar.url if user.avatar else None,
                    'user_desc': user.user_desc,
                }
                context.update(context_update)
            return render(request, 'index.html', context=context)
        except Exception:
            # No 'is_login' cookie (anonymous visitor) or any error while
            # reading the user: render the anonymous version of the page.
            return render(request, 'index.html', context=context)
        return render(request, 'index.html')  # NOTE(review): appears unreachable — both branches above return; confirm
class BlogView(View):
    """Blog listing page: paginated articles by category; its POST doubles
    as the registration/login endpoint for the modal form on this page."""

    @staticmethod
    def get(request):
        """Serve the blog listing page."""
        # A 'mobile' query parameter means this is actually a password-reset
        # submission routed here; delegate to ForgetView.
        mobile = request.GET.get('mobile')
        if mobile is not None:
            response = ForgetView().post(request)
            return response
        # ?cat_id=xxx&page_num=xxx&page_size=xxx
        cat_id = request.GET.get('cat_id', 1)
        page_num = request.GET.get('page_num', 1)
        page_size = request.GET.get('page_size', 3)
        # Validate the category id.
        try:
            category = ArticleCategory.objects.get(id=cat_id)
        except ArticleCategory.DoesNotExist:
            return HttpResponseNotFound('没有此分类')
        # All blog categories, for the navigation bar.
        categories = ArticleCategory.objects.all()
        # Articles of the selected category.
        articles = Article.objects.filter(
            category=category
        )
        # Paginator: page_size records per page.
        paginator = Paginator(articles, page_size)
        try:
            page_articles = paginator.page(page_num)
        except EmptyPage:
            # Out-of-range page number: 404.
            return HttpResponseNotFound('empty page')
        # Total number of pages in the listing.
        total_page = paginator.num_pages
        # noinspection PyBroadException
        try:
            is_login = request.COOKIES['is_login']
            if is_login:
                user = request.user
                # Template context for a logged-in visitor.
                context = {
                    'username': user.username,
                    'avatar': user.avatar.url if user.avatar else None,
                    'user_desc': user.user_desc,
                    'categories': categories,
                    'category': category,
                    'articles': page_articles,
                    'page_size': page_size,
                    'total_page': total_page,
                    'page_num': page_num,
                }
            return render(request, 'blog.html', context=context)
        except Exception:
            # Anonymous visitor (no cookie, or context never built above).
            context = {
                'categories': categories,
                'category': category,
                'articles': page_articles,
                'page_size': page_size,
                'total_page': total_page,
                'page_num': page_num,
            }
            return render(request, 'blog.html', context=context)
        return render(request, 'blog.html')  # NOTE(review): appears unreachable — confirm

    @staticmethod
    def post(request):
        """Handle registration (or delegate to login) posted from this page."""
        # Collect the form parameters.
        mobile = request.POST.get('mobile')
        password = request.POST.get('password')
        password2 = request.POST.get('password2')
        smscode = request.POST.get('sms_code')
        # No SMS verification code means this is a login, not a registration.
        if smscode is None:
            response = LoginView().post(request)
            return response
        # All fields must be supplied.
        if not all([mobile, password, password2, smscode]):
            messages.info(request, '注册失败,请完成填写')
            return HttpResponseRedirect(reverse('home:index'))
        # Mobile number must match the CN format 1[3-9] + 9 digits.
        if not re.match(r'^1[3-9]\d{9}$', mobile):
            messages.info(request, '请输入正确手机号')
            return HttpResponseRedirect(reverse('home:index'))
        # Password must be 8-20 alphanumeric characters.
        if not re.match(r'^[0-9A-Za-z]{8,20}$', password):
            messages.info(request, '请输入8-20位的密码')
            return HttpResponseRedirect(reverse('home:index'))
        # Both password fields must match.
        if password != password2:
            messages.info(request, '两次输入的密码不一致')
            return HttpResponseRedirect(reverse('home:index'))
        # Verify the SMS code stored in redis under 'sms:<mobile>'.
        redis_conn = get_redis_connection('default')
        sms_code_server = redis_conn.get('sms:%s' % mobile)
        if sms_code_server is None:
            messages.info(request, '短信验证码已过期')
            return HttpResponseRedirect(reverse('home:index'))
        if smscode != sms_code_server.decode():
            messages.info(request, '短信验证码错误')
            return HttpResponseRedirect(reverse('home:index'))
        try:
            user = User.objects.create_user(username=mobile, mobile=mobile, password=password)
        except DatabaseError:
            # Insert failed — the account already exists; tell the user.
            messages.info(request, '注册失败,账户已存在')
            return HttpResponseRedirect(reverse('home:index'))
        # Log the new user in (session-based state keeping).
        login(request, user)
        # Redirect to the home page.
        response = redirect(reverse('home:index'))
        # Login-state cookie: expires automatically when the session ends.
        response.set_cookie('is_login', True)
        # Username cookie valid for one month.
        response.set_cookie('username', user.username, max_age=30 * 24 * 3600)
        return response
class DetailView(View):
    """Article detail page: GET renders the article with paginated comments;
    POST posts a comment (authenticated) or registers/logs in (anonymous)."""

    @staticmethod
    def get(request):
        # A 'mobile' query parameter means a password-reset submission;
        # delegate to ForgetView.
        mobile = request.GET.get('mobile')
        if mobile is not None:
            response = ForgetView().post(request)
            return response
        # detail/?id=xxx&page_num=xxx&page_size=xxx
        # Article id.
        id = request.GET.get('id')
        page_num = request.GET.get('page_num', 1)
        page_size = request.GET.get('page_size', 5)
        # Blog categories for the navigation bar.
        categories = ArticleCategory.objects.all()
        try:
            article = Article.objects.get(id=id)
        except Article.DoesNotExist:
            messages.info(request, 'empty page')
            return HttpResponseRedirect(reverse('home:detail'))
        else:
            # Count this request as a page view.
            article.total_views += 1
            article.save()
        # Comments of this article, newest first.
        comments = Comment.objects.filter(
            article=article
        ).order_by('-created')
        # Total number of comments.
        total_count = comments.count()
        # Paginator: page_size comments per page.
        paginator = Paginator(comments, page_size)
        try:
            page_comments = paginator.page(page_num)
        except EmptyPage:
            # Invalid page_num: bounce back with a message.
            messages.info(request, 'empty page')
            return HttpResponseRedirect(reverse('home:detail'))
        # Total number of comment pages.
        total_page = paginator.num_pages
        # noinspection PyBroadException
        try:
            is_login = request.COOKIES['is_login']
            if is_login:
                user = request.user
                # Template context for a logged-in visitor.
                context = {
                    'username': user.username,
                    'avatar': user.avatar.url if user.avatar else None,
                    'user_desc': user.user_desc,
                    'categories': categories,
                    'category': article.category,
                    'article': article,
                    'total_count': total_count,
                    'comments': page_comments,
                    'page_size': page_size,
                    'total_page': total_page,
                    'page_num': page_num,
                }
            return render(request, 'detail.html', context=context)
        except Exception:
            # Anonymous visitor (no cookie, or context never built above).
            context = {
                'categories': categories,
                'category': article.category,
                'article': article,
                'total_count': total_count,
                'comments': page_comments,
                'page_size': page_size,
                'total_page': total_page,
                'page_num': page_num,
            }
            return render(request, 'detail.html', context=context)
        return render(request, 'detail.html')  # NOTE(review): appears unreachable — confirm

    @staticmethod
    def post(request):
        user = request.user
        if user and user.is_authenticated:
            # Authenticated: this POST submits a new comment.
            id = request.POST.get('id')
            content = request.POST.get('content')
            # The target article must exist.
            try:
                article = Article.objects.get(id=id)
            except Article.DoesNotExist:
                messages.info(request, '没有此文章')
                return redirect(reverse('home:blog'))
            # Persist the comment.
            Comment.objects.create(
                content=content,
                article=article,
                user=user
            )
            # Bump the article's comment counter.
            article.comments_count += 1
            article.save()
            # Redirect back to this article's detail page.
            path = reverse('home:detail') + '?id={}'.format(article.id)
            return redirect(path)
        # Anonymous: same registration/login flow as BlogView.post.
        mobile = request.POST.get('mobile')
        password = request.POST.get('password')
        password2 = request.POST.get('password2')
        smscode = request.POST.get('sms_code')
        # No SMS verification code means this is a login, not a registration.
        if smscode is None:
            response = LoginView().post(request)
            return response
        # All fields must be supplied.
        if not all([mobile, password, password2, smscode]):
            messages.info(request, '注册失败,请完成填写')
            return HttpResponseRedirect(reverse('home:index'))
        # Mobile number must match the CN format 1[3-9] + 9 digits.
        if not re.match(r'^1[3-9]\d{9}$', mobile):
            messages.info(request, '请输入正确手机号')
            return HttpResponseRedirect(reverse('home:index'))
        # Password must be 8-20 alphanumeric characters.
        if not re.match(r'^[0-9A-Za-z]{8,20}$', password):
            messages.info(request, '请输入8-20位的密码')
            return HttpResponseRedirect(reverse('home:index'))
        # Both password fields must match.
        if password != password2:
            messages.info(request, '两次输入的密码不一致')
            return HttpResponseRedirect(reverse('home:index'))
        # Verify the SMS code stored in redis under 'sms:<mobile>'.
        redis_conn = get_redis_connection('default')
        sms_code_server = redis_conn.get('sms:%s' % mobile)
        if sms_code_server is None:
            messages.info(request, '短信验证码已过期')
            return HttpResponseRedirect(reverse('home:index'))
        if smscode != sms_code_server.decode():
            messages.info(request, '短信验证码错误')
            return HttpResponseRedirect(reverse('home:index'))
        try:
            user = User.objects.create_user(username=mobile, mobile=mobile, password=password)
        except DatabaseError:
            # Insert failed — the account already exists; tell the user.
            messages.info(request, '注册失败,账户已存在')
            return HttpResponseRedirect(reverse('home:index'))
        # Log the new user in (session-based state keeping).
        login(request, user)
        # Redirect to the home page.
        response = redirect(reverse('home:index'))
        # Login-state cookie: expires automatically when the session ends.
        response.set_cookie('is_login', True)
        # Username cookie valid for one month.
        response.set_cookie('username', user.username, max_age=30 * 24 * 3600)
        return response
class ToolView(View):
    """Tools page: GET renders the tool landing page; POST registers or
    delegates to login, redirecting back to 'home:tool'."""

    @staticmethod
    def get(request):
        # Collect query parameters.
        mobile = request.GET.get('mobile')
        cat_id = request.GET.get('cat_id', 1)
        categories = ArticleCategory.objects.all()
        # noinspection PyBroadException
        try:
            category = ArticleCategory.objects.get(id=cat_id)
        except Exception:
            messages.info(request, '没有此分类信息')
            return HttpResponseRedirect(reverse('home:tool'))
        # 'mobile' present means a password-reset submission; delegate.
        if mobile is not None:
            response = ForgetView().post(request)
            return response
        context = {
            'categories': categories,
            'category': category,
        }
        # noinspection PyBroadException
        try:
            is_login = request.COOKIES['is_login']
            if is_login:
                user = request.user
                # Assemble the logged-in user's template data.
                context_update = {
                    'username': user.username,
                    'avatar': user.avatar.url if user.avatar else None,
                }
                context.update(context_update)
            return render(request, 'tool.html', context=context)
        except Exception:
            # Anonymous visitor (no cookie): render without user data.
            return render(request, 'tool.html', context=context)
        return render(request, 'tool.html')  # NOTE(review): appears unreachable — confirm

    @staticmethod
    def post(request):
        # Collect the form parameters.
        mobile = request.POST.get('mobile')
        password = request.POST.get('password')
        password2 = request.POST.get('password2')
        smscode = request.POST.get('sms_code')
        # No SMS verification code means this is a login, not a registration.
        if smscode is None:
            response = LoginView().post(request)
            return response
        # All fields must be supplied.
        if not all([mobile, password, password2, smscode]):
            messages.info(request, '注册失败,请完成填写')
            return HttpResponseRedirect(reverse('home:tool'))
        # Mobile number must match the CN format 1[3-9] + 9 digits.
        if not re.match(r'^1[3-9]\d{9}$', mobile):
            messages.info(request, '请输入正确手机号')
            return HttpResponseRedirect(reverse('home:tool'))
        # Password must be 8-20 alphanumeric characters.
        if not re.match(r'^[0-9A-Za-z]{8,20}$', password):
            messages.info(request, '请输入8-20位的密码')
            return HttpResponseRedirect(reverse('home:tool'))
        # Both password fields must match.
        if password != password2:
            messages.info(request, '两次输入的密码不一致')
            return HttpResponseRedirect(reverse('home:tool'))
        # Verify the SMS code stored in redis under 'sms:<mobile>'.
        redis_conn = get_redis_connection('default')
        sms_code_server = redis_conn.get('sms:%s' % mobile)
        if sms_code_server is None:
            messages.info(request, '短信验证码已过期')
            return HttpResponseRedirect(reverse('home:tool'))
        if smscode != sms_code_server.decode():
            messages.info(request, '短信验证码错误')
            return HttpResponseRedirect(reverse('home:tool'))
        try:
            user = User.objects.create_user(username=mobile, mobile=mobile, password=password)
        except DatabaseError:
            # Insert failed — the account already exists; tell the user.
            messages.info(request, '注册失败,账户已存在')
            return HttpResponseRedirect(reverse('home:tool'))
        # Log the new user in (session-based state keeping).
        login(request, user)
        # Redirect back to the tools page.
        response = redirect(reverse('home:tool'))
        # Login-state cookie: expires automatically when the session ends.
        response.set_cookie('is_login', True)
        # Username cookie valid for one month.
        response.set_cookie('username', user.username, max_age=30 * 24 * 3600)
        return response
class ToolDetailView(View):
    """Tool detail page: GET renders agents/tags optionally filtered by a
    'tag_jq' parameter; POST registers or delegates to login."""

    @staticmethod
    def get(request):
        # Collect query parameters and the data shown on the page.
        mobile = request.GET.get('mobile')
        cat_id = request.GET.get('cat_id', 1)
        categories = ArticleCategory.objects.all()
        tag_type = TagType.objects.all()
        tag = Tag.objects.all()
        agents = PublicOffering.objects.all()
        agents_tags = AgentAndTag.objects.all()
        # noinspection PyBroadException
        try:
            category = ArticleCategory.objects.get(id=cat_id)
        except Exception:
            messages.info(request, '没有此分类信息')
            return HttpResponseRedirect(reverse('home:tool_detail'))
        # 'mobile' present means a password-reset submission; delegate.
        if mobile is not None:
            response = ForgetView().post(request)
            return response
        # noinspection PyBroadException
        try:
            tag_jq = request.GET.get('tag_jq')
            print(tag_jq)  # NOTE(review): leftover debug print — remove
            context = {
                'tag_jq': tag_jq,
                'tag_type': tag_type,
                'tag': tag,
                'agents': agents,
                'categories': categories,
                'category': category,
                'agents_tags': agents_tags,
            }
            return render(request, 'tool_detail.html', context=context)
        except Exception:
            # Fallback context without 'tag_jq'.  NOTE(review): since
            # GET.get does not raise, this branch and everything below it
            # look unreachable — confirm the intended control flow.
            context = {
                'tag_type': tag_type,
                'tag': tag,
                'agents': agents,
                'categories': categories,
                'category': category,
                'agents_tags': agents_tags,
            }
        # noinspection PyBroadException
        try:
            is_login = request.COOKIES['is_login']
            if is_login:
                user = request.user
                # Assemble the logged-in user's template data.
                context_update = {
                    'username': user.username,
                    'avatar': user.avatar.url if user.avatar else None,
                }
                context.update(context_update)
            return render(request, 'tool_detail.html', context=context)
        except Exception:
            # Anonymous visitor: render without user data.
            return render(request, 'tool_detail.html', context=context)

    @staticmethod
    def post(request):
        # Collect the form parameters.
        mobile = request.POST.get('mobile')
        password = request.POST.get('password')
        password2 = request.POST.get('password2')
        smscode = request.POST.get('sms_code')
        # No SMS verification code means this is a login, not a registration.
        if smscode is None:
            response = LoginView().post(request)
            return response
        # All fields must be supplied.
        if not all([mobile, password, password2, smscode]):
            messages.info(request, '注册失败,请完成填写')
            return HttpResponseRedirect(reverse('home:tool_detail'))
        # Mobile number must match the CN format 1[3-9] + 9 digits.
        if not re.match(r'^1[3-9]\d{9}$', mobile):
            messages.info(request, '请输入正确手机号')
            return HttpResponseRedirect(reverse('home:tool_detail'))
        # Password must be 8-20 alphanumeric characters.
        if not re.match(r'^[0-9A-Za-z]{8,20}$', password):
            messages.info(request, '请输入8-20位的密码')
            return HttpResponseRedirect(reverse('home:tool_detail'))
        # Both password fields must match.
        if password != password2:
            messages.info(request, '两次输入的密码不一致')
            return HttpResponseRedirect(reverse('home:tool_detail'))
        # Verify the SMS code stored in redis under 'sms:<mobile>'.
        redis_conn = get_redis_connection('default')
        sms_code_server = redis_conn.get('sms:%s' % mobile)
        if sms_code_server is None:
            messages.info(request, '短信验证码已过期')
            return HttpResponseRedirect(reverse('home:tool_detail'))
        if smscode != sms_code_server.decode():
            messages.info(request, '短信验证码错误')
            return HttpResponseRedirect(reverse('home:tool_detail'))
        try:
            user = User.objects.create_user(username=mobile, mobile=mobile, password=password)
        except DatabaseError:
            # Insert failed — the account already exists; tell the user.
            messages.info(request, '注册失败,账户已存在')
            return HttpResponseRedirect(reverse('home:tool_detail'))
        # Log the new user in (session-based state keeping).
        login(request, user)
        # Redirect back to the tool detail page.
        response = redirect(reverse('home:tool_detail'))
        # Login-state cookie: expires automatically when the session ends.
        response.set_cookie('is_login', True)
        # Username cookie valid for one month.
        response.set_cookie('username', user.username, max_age=30 * 24 * 3600)
        return response
# class AjaxDealView(View):
# @staticmethod
# def ajax_get(request):
# data = request.GET.get('data')
# print(data)
# return HttpResponse(data)
| 33.555556
| 113
| 0.563525
| 1,889
| 19,630
| 5.753309
| 0.106406
| 0.038462
| 0.057692
| 0.108944
| 0.839989
| 0.820574
| 0.803828
| 0.793706
| 0.77972
| 0.763894
| 0
| 0.009402
| 0.333571
| 19,630
| 584
| 114
| 33.613014
| 0.821358
| 0.068976
| 0
| 0.811414
| 0
| 0
| 0.104132
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022333
| false
| 0.059553
| 0.034739
| 0
| 0.225806
| 0.002481
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
1683ea61bb13b07a5c1657abe6a80e43841a8728
| 24,005
|
py
|
Python
|
tests/test_cli.py
|
zalando-stups/mia
|
5521261ef0c4256bb0c0749554b75a6b2360eb25
|
[
"Apache-2.0"
] | 9
|
2015-04-07T14:27:33.000Z
|
2016-07-28T09:48:35.000Z
|
tests/test_cli.py
|
hjacobs/mai
|
5521261ef0c4256bb0c0749554b75a6b2360eb25
|
[
"Apache-2.0"
] | 25
|
2015-04-10T11:33:13.000Z
|
2016-12-14T14:29:56.000Z
|
tests/test_cli.py
|
hjacobs/mai
|
5521261ef0c4256bb0c0749554b75a6b2360eb25
|
[
"Apache-2.0"
] | 4
|
2015-09-23T11:24:25.000Z
|
2016-07-25T13:29:37.000Z
|
from click.testing import CliRunner
from unittest.mock import MagicMock
import yaml
from mai.cli import cli, login_with_profile
import mai
import os
import aws_saml_login.saml
import time
import pytest
# Fixture data for the CLI tests below.

# Two fully configured profiles, differing only in the SAML role ARN.
TEST_CONFIG = {'example-Administrator': {'saml_identity_provider_url': 'https://auth.example.com',
                                         'saml_role': ['arn:aws:iam::911:saml-provider/Shibboleth',
                                                       'arn:aws:iam::911:role/Shibboleth-Administrator',
                                                       'example'],
                                         'saml_user': 'foo.bar@example.com'},
               'example-User': {'saml_identity_provider_url': 'https://auth.example.com',
                                'saml_role': ['arn:aws:iam::911:saml-provider/Shibboleth',
                                              'arn:aws:iam::911:role/Shibboleth-User',
                                              'example'],
                                'saml_user': 'foo.bar@example.com'}}

# Canned SAML responses: each is a (assertion-xml, parsed-roles) pair with
# zero, one and two Role attributes respectively.
SAML_RESPONSE_0_ROLES = ('''<xml xmlns="urn:oasis:names:tc:SAML:2.0:assertion"><Assertion>¬
</Assertion></xml>''', [])

SAML_RESPONSE_1_ROLE = ('''<xml xmlns="urn:oasis:names:tc:SAML:2.0:assertion"><Assertion>¬
<Attribute FriendlyName="Role" Name="https://aws.amazon.com/SAML/Attributes/Role">¬
<AttributeValue>arn:aws:iam::911:saml-provider/Shibboleth,arn:aws:iam::911:role/Shibboleth-User</AttributeValue>¬
</Attribute>¬
</Assertion></xml>''',
                        [('arn:aws:iam::911:saml-provider/Shibboleth', 'arn:aws:iam::911:role/Shibboleth-User', None)])

SAML_RESPONSE_2_ROLES = ('''<xml xmlns="urn:oasis:names:tc:SAML:2.0:assertion"><Assertion>¬
<Attribute FriendlyName="Role" Name="https://aws.amazon.com/SAML/Attributes/Role">¬
<AttributeValue>arn:aws:iam::911:saml-provider/Shibboleth,arn:aws:iam::911:role/Shibboleth-User</AttributeValue>¬
</Attribute>¬
<Attribute FriendlyName="Role" Name="https://aws.amazon.com/SAML/Attributes/Role">¬
<AttributeValue>arn:aws:iam::911:saml-provider/Shibboleth,arn:aws:iam::911:role/Shibboleth-Administrator</AttributeValue>¬
</Attribute>¬
</Assertion></xml>''',
                         [('arn:aws:iam::911:saml-provider/Shibboleth',
                           'arn:aws:iam::911:role/Shibboleth-User',
                           'example'),
                          ('arn:aws:iam::911:saml-provider/Shibboleth',
                           'arn:aws:iam::911:role/Shibboleth-Administrator',
                           'example')])
def test_version():
    """The --version flag prints the Mai version string."""
    cli_runner = CliRunner()
    with cli_runner.isolated_filesystem():
        outcome = cli_runner.invoke(cli, ['--version'], catch_exceptions=False)
        expected = 'Mai {}'.format(mai.__version__)
        assert expected in outcome.output
def test_no_command():
    """Invoking the CLI without any configured profile prints a hint."""
    cli_runner = CliRunner()
    with cli_runner.isolated_filesystem():
        outcome = cli_runner.invoke(cli, ['--config-file', 'mai.yaml'], catch_exceptions=False)
        assert 'No profile configured' in outcome.output
def test_cli():
    """A profile without an identity provider URL yields a usage error."""
    cli_runner = CliRunner()
    with cli_runner.isolated_filesystem():
        with open('mai.yaml', 'w') as fd:
            yaml.safe_dump({'myprofile': {}}, fd)
        outcome = cli_runner.invoke(cli, ['--config-file', 'mai.yaml'], catch_exceptions=False)
        assert 'Usage: cli' in outcome.output
        assert 'Missing identity provider URL' in outcome.output
def test_cli_002():
    """A profile with only a provider URL still lacks the SAML username."""
    cli_runner = CliRunner()
    with cli_runner.isolated_filesystem():
        profile = {'myprofile': {'saml_identity_provider_url': 'https://auth.example.com'}}
        with open('mai.yaml', 'w') as fd:
            yaml.safe_dump(profile, fd)
        outcome = cli_runner.invoke(cli, ['--config-file', 'mai.yaml'], catch_exceptions=False)
        assert 'Usage: cli' in outcome.output
        assert 'Missing SAML username' in outcome.output
def test_cli_global():
    """The default profile named in the 'global' section is validated too."""
    cli_runner = CliRunner()
    with cli_runner.isolated_filesystem():
        config = {'global': {'default_profile': 'myprofile'}, 'myprofile': {}}
        with open('mai.yaml', 'w') as fd:
            yaml.safe_dump(config, fd)
        outcome = cli_runner.invoke(cli, ['--config-file', 'mai.yaml'], catch_exceptions=False)
        assert 'Usage: cli' in outcome.output
        assert 'Missing identity provider URL' in outcome.output
def test_cli_list():
    """The 'list' subcommand renders a table header for the profiles."""
    cli_runner = CliRunner()
    with cli_runner.isolated_filesystem():
        with open('mai.yaml', 'w') as fd:
            yaml.safe_dump({'myprofile': {}}, fd)
        outcome = cli_runner.invoke(cli, ['--config-file', 'mai.yaml', 'list'], catch_exceptions=False)
        assert 'Name' in outcome.output
def test_create_001_missing_argument():
    """'create' without a profile name reports the missing argument."""
    cli_runner = CliRunner()
    with cli_runner.isolated_filesystem():
        outcome = cli_runner.invoke(cli, ['--config-file', 'mai.yaml', 'create'], catch_exceptions=False)
        assert 'Usage: cli' in outcome.output
        assert 'Missing argument "profile-name"' in outcome.output
def test_create_002_one_role(monkeypatch):
    """'create' with a single available SAML role stores that role."""
    for target in ('keyring.get_password', 'keyring.set_password'):
        monkeypatch.setattr(target, MagicMock(return_value=''))
    monkeypatch.setattr('mai.cli.authenticate', MagicMock(return_value=SAML_RESPONSE_1_ROLE))
    cli_runner = CliRunner()
    with cli_runner.isolated_filesystem():
        prompt_input = 'auth.example.com\nfoo.bar@example.com\n1234567\n'
        outcome = cli_runner.invoke(cli, ['--config-file', 'mai.yaml', 'create', 'foobar'],
                                    catch_exceptions=False, input=prompt_input)
        cwd = os.getcwd()
        assert os.path.exists('mai.yaml')
        with open('mai.yaml') as fd:
            stored = yaml.safe_load(fd)
        profile = stored['foobar']
        assert profile['saml_identity_provider_url'] == 'https://auth.example.com'
        assert profile['saml_role'][1] == 'arn:aws:iam::911:role/Shibboleth-User'
        assert profile['saml_user'] == 'foo.bar@example.com'
        assert 'Identity provider URL: auth.example.com' in outcome.output
        assert 'SAML username: foo.bar@example.com' in outcome.output
        assert 'Authenticating against https://auth.example.com..\n OK' in outcome.output
        assert 'Storing new profile in {}.. OK'.format(os.path.join(cwd, 'mai.yaml')) in outcome.output
def test_create_003_no_roles(monkeypatch):
    """'create' aborts with exit code 1 when authentication yields no roles."""
    for target in ('keyring.get_password', 'keyring.set_password'):
        monkeypatch.setattr(target, MagicMock(return_value=''))
    monkeypatch.setattr('mai.cli.authenticate', MagicMock(return_value=SAML_RESPONSE_0_ROLES))
    cli_runner = CliRunner()
    with cli_runner.isolated_filesystem():
        outcome = cli_runner.invoke(cli, ['--config-file', 'mai.yaml', 'create', 'foobar'],
                                    catch_exceptions=False,
                                    input='auth.example.com\nfoo.bar@example.com\n1234567\n')
        assert 'No roles found' in outcome.output
        assert outcome.exit_code == 1
def test_create_004_two_roles(monkeypatch):
    """With two roles available, the role picked at the prompt is stored."""
    for target in ('keyring.get_password', 'keyring.set_password'):
        monkeypatch.setattr(target, MagicMock(return_value=''))
    monkeypatch.setattr('mai.cli.authenticate', MagicMock(return_value=SAML_RESPONSE_2_ROLES))
    cli_runner = CliRunner()
    with cli_runner.isolated_filesystem():
        # trailing '1' answers the role-selection prompt
        prompt_input = 'auth.example.com\nfoo.bar@example.com\n1234567\n1\n'
        outcome = cli_runner.invoke(cli, ['--config-file', 'mai.yaml', 'create', 'foobar'],
                                    catch_exceptions=False, input=prompt_input)
        cwd = os.getcwd()
        assert os.path.exists('mai.yaml')
        with open('mai.yaml') as fd:
            stored = yaml.safe_load(fd)
        profile = stored['foobar']
        assert profile['saml_identity_provider_url'] == 'https://auth.example.com'
        assert profile['saml_role'][1] == 'arn:aws:iam::911:role/Shibboleth-Administrator'
        assert profile['saml_user'] == 'foo.bar@example.com'
        assert 'Identity provider URL: auth.example.com' in outcome.output
        assert 'SAML username: foo.bar@example.com' in outcome.output
        assert 'Authenticating against https://auth.example.com..\n OK' in outcome.output
        assert 'Storing new profile in {}.. OK'.format(os.path.join(cwd, 'mai.yaml')) in outcome.output
def test_create_005_two_roles_options(monkeypatch):
    """--url/--user options replace the corresponding interactive prompts."""
    for target in ('keyring.get_password', 'keyring.set_password'):
        monkeypatch.setattr(target, MagicMock(return_value=''))
    monkeypatch.setattr('mai.cli.authenticate', MagicMock(return_value=SAML_RESPONSE_2_ROLES))
    cli_runner = CliRunner()
    with cli_runner.isolated_filesystem():
        args = ['--config-file', 'mai.yaml',
                'create', 'foobar',
                '--url', 'auth.example.com',
                '--user', 'foo.bar@example.com']
        outcome = cli_runner.invoke(cli, args, catch_exceptions=False, input='1234567\n1\n')
        cwd = os.getcwd()
        assert os.path.exists('mai.yaml')
        with open('mai.yaml') as fd:
            stored = yaml.safe_load(fd)
        profile = stored['foobar']
        assert profile['saml_identity_provider_url'] == 'https://auth.example.com'
        assert profile['saml_role'][1] == 'arn:aws:iam::911:role/Shibboleth-Administrator'
        assert profile['saml_user'] == 'foo.bar@example.com'
        assert 'Authenticating against https://auth.example.com..\n OK' in outcome.output
        assert 'Storing new profile in {}.. OK'.format(os.path.join(cwd, 'mai.yaml')) in outcome.output
def test_create_006_authentication_failed(monkeypatch):
    """After a failed password the CLI re-prompts and then succeeds."""
    for target in ('keyring.get_password', 'keyring.set_password'):
        monkeypatch.setattr(target, MagicMock(return_value=''))
    def fake_authenticate(url, user, saml_password):
        # the first prompted password is 'wrong' and must be rejected
        if saml_password == 'wrong':
            raise aws_saml_login.saml.AuthenticationFailed()
        return SAML_RESPONSE_2_ROLES
    monkeypatch.setattr('mai.cli.authenticate', fake_authenticate)
    cli_runner = CliRunner()
    with cli_runner.isolated_filesystem():
        prompt_input = 'auth.example.com\nfoo.bar@example.com\nwrong\n1234567\n2\n'
        outcome = cli_runner.invoke(cli, ['--config-file', 'mai.yaml', 'create', 'foobar'],
                                    catch_exceptions=False, input=prompt_input)
        cwd = os.getcwd()
        assert os.path.exists('mai.yaml')
        with open('mai.yaml') as fd:
            stored = yaml.safe_load(fd)
        profile = stored['foobar']
        assert profile['saml_identity_provider_url'] == 'https://auth.example.com'
        assert profile['saml_role'][1] == 'arn:aws:iam::911:role/Shibboleth-User'
        assert profile['saml_user'] == 'foo.bar@example.com'
        assert 'Identity provider URL: auth.example.com' in outcome.output
        assert 'SAML username: foo.bar@example.com' in outcome.output
        assert 'Authenticating against https://auth.example.com..\n Authentication Failed' in outcome.output
        assert 'Please check your username/password and try again.' in outcome.output
        assert 'Authenticating against https://auth.example.com..\n OK' in outcome.output
        assert 'Storing new profile in {}.. OK'.format(os.path.join(cwd, 'mai.yaml')) in outcome.output
def test_create_all_001(monkeypatch):
    """'create-all' writes one profile per discovered role."""
    for target in ('keyring.get_password', 'keyring.set_password'):
        monkeypatch.setattr(target, MagicMock(return_value=''))
    monkeypatch.setattr('mai.cli.authenticate', MagicMock(return_value=SAML_RESPONSE_2_ROLES))
    cli_runner = CliRunner()
    with cli_runner.isolated_filesystem():
        outcome = cli_runner.invoke(cli, ['--config-file', 'mai.yaml', 'create-all'],
                                    catch_exceptions=False,
                                    input='auth.example.com\nfoo.bar@example.com\n123456\n')
        cwd = os.getcwd()
        assert os.path.exists('mai.yaml')
        with open('mai.yaml') as fd:
            stored = yaml.safe_load(fd)
        admin = stored['example-Administrator']
        assert admin['saml_identity_provider_url'] == 'https://auth.example.com'
        assert admin['saml_role'][1] == 'arn:aws:iam::911:role/Shibboleth-Administrator'
        assert admin['saml_user'] == 'foo.bar@example.com'
        user = stored['example-User']
        assert user['saml_identity_provider_url'] == 'https://auth.example.com'
        assert user['saml_role'][1] == 'arn:aws:iam::911:role/Shibboleth-User'
        assert user['saml_user'] == 'foo.bar@example.com'
        assert 'Identity provider URL: auth.example.com' in outcome.output
        assert 'SAML username: foo.bar@example.com' in outcome.output
        assert 'Authenticating against https://auth.example.com..\n OK' in outcome.output
        assert 'Storing new profile in {}.. OK'.format(os.path.join(cwd, 'mai.yaml')) in outcome.output
def test_create_all_002_no_roles(monkeypatch):
    """'create-all' aborts with exit code 1 when no roles are available."""
    for target in ('keyring.get_password', 'keyring.set_password'):
        monkeypatch.setattr(target, MagicMock(return_value=''))
    monkeypatch.setattr('mai.cli.authenticate', MagicMock(return_value=SAML_RESPONSE_0_ROLES))
    cli_runner = CliRunner()
    with cli_runner.isolated_filesystem():
        outcome = cli_runner.invoke(cli, ['--config-file', 'mai.yaml', 'create-all'],
                                    catch_exceptions=False,
                                    input='auth.example.com\nfoo.bar@example.com\n1234567\n')
        assert 'No roles found' in outcome.output
        assert outcome.exit_code == 1
def test_create_all_003_one_role(monkeypatch):
    """'create-all' with one role (no account alias) uses 'default-<role>'."""
    for target in ('keyring.get_password', 'keyring.set_password'):
        monkeypatch.setattr(target, MagicMock(return_value=''))
    monkeypatch.setattr('mai.cli.authenticate', MagicMock(return_value=SAML_RESPONSE_1_ROLE))
    cli_runner = CliRunner()
    with cli_runner.isolated_filesystem():
        outcome = cli_runner.invoke(cli, ['--config-file', 'mai.yaml', 'create-all'],
                                    catch_exceptions=False,
                                    input='auth.example.com\nfoo.bar@example.com\n123456\n')
        cwd = os.getcwd()
        assert os.path.exists('mai.yaml')
        with open('mai.yaml') as fd:
            stored = yaml.safe_load(fd)
        profile = stored['default-User']
        assert profile['saml_identity_provider_url'] == 'https://auth.example.com'
        assert profile['saml_role'][1] == 'arn:aws:iam::911:role/Shibboleth-User'
        assert profile['saml_user'] == 'foo.bar@example.com'
        assert 'Identity provider URL: auth.example.com' in outcome.output
        assert 'SAML username: foo.bar@example.com' in outcome.output
        assert 'Authenticating against https://auth.example.com..\n OK' in outcome.output
        assert 'Storing new profile in {}.. OK'.format(os.path.join(cwd, 'mai.yaml')) in outcome.output
def test_set_default_001(monkeypatch):
    """'set-default' writes the chosen profile into the 'global' section.

    Uses yaml.safe_dump (instead of the bare yaml.dump the original used) for
    consistency with the rest of this module: the config only holds plain
    types and is read back with yaml.safe_load, so safe_dump is the matching
    writer and refuses non-plain objects early.
    """
    runner = CliRunner()
    with runner.isolated_filesystem():
        with open('mai.yaml', 'w') as fd:
            yaml.safe_dump(TEST_CONFIG, fd)
        result = runner.invoke(cli, ['--config-file', 'mai.yaml', 'set-default', 'example-User'])
        workingdir = os.getcwd()
        assert os.path.exists('mai.yaml')
        with open('mai.yaml') as fd:
            generated_config = yaml.safe_load(fd)
        assert generated_config['global']['default_profile'] == 'example-User'
        assert 'Storing configuration in {}.. OK'.format(os.path.join(workingdir, 'mai.yaml')) in result.output
def test_set_default_002_unknown_profile(monkeypatch):
    """'set-default' with an unknown profile name exits with code 2."""
    cli_runner = CliRunner()
    with cli_runner.isolated_filesystem():
        with open('mai.yaml', 'w') as fd:
            yaml.dump(TEST_CONFIG, fd)
        outcome = cli_runner.invoke(cli, ['--config-file', 'mai.yaml', 'set-default', 'foobar-User'])
        assert 'Profile "foobar-User" does not exist' in outcome.output
        assert outcome.exit_code == 2
def test_delete_profile_001(monkeypatch):
    """'delete' removes the named profile from the configuration file."""
    cli_runner = CliRunner()
    with cli_runner.isolated_filesystem():
        with open('mai.yaml', 'w') as fd:
            yaml.dump(TEST_CONFIG, fd)
        outcome = cli_runner.invoke(cli, ['--config-file', 'mai.yaml', 'delete', 'example-User'])
        cwd = os.getcwd()
        assert os.path.exists('mai.yaml')
        with open('mai.yaml') as fd:
            stored = yaml.safe_load(fd)
        assert 'example-User' not in stored
        assert 'Deleting profile from {}.. OK'.format(os.path.join(cwd, 'mai.yaml')) in outcome.output
def test_delete_profile_002_unknown_profile(monkeypatch):
    """'delete' with an unknown profile name exits with code 2."""
    cli_runner = CliRunner()
    with cli_runner.isolated_filesystem():
        with open('mai.yaml', 'w') as fd:
            yaml.dump(TEST_CONFIG, fd)
        outcome = cli_runner.invoke(cli, ['--config-file', 'mai.yaml', 'delete', 'foobar-User'])
        assert 'Profile "foobar-User" does not exist' in outcome.output
        assert outcome.exit_code == 2
def test_login_001(monkeypatch):
    # End-to-end 'login' flow: the keyring mock supplies the stored password,
    # the SAML mock returns two roles, and assume_role yields fixed
    # temporary credentials.
    monkeypatch.setattr('keyring.get_password', MagicMock(return_value='123456'))
    monkeypatch.setattr('keyring.set_password', MagicMock(return_value=''))
    monkeypatch.setattr('mai.cli.authenticate', MagicMock(return_value=SAML_RESPONSE_2_ROLES))
    monkeypatch.setattr('mai.cli.assume_role', MagicMock(return_value=('KEYID', 'SECRET', 'SESSION_TOKEN')))
    monkeypatch.setattr('mai.cli.write_aws_credentials', MagicMock)
    class sleep_counter:
        # shared mutable state for my_sleep below; the class is never
        # instantiated, its class attribute is used as a plain counter
        count = 1
    sleep_backup = time.sleep
    def my_sleep(sec):
        # Intercepts the 120-second wait used between --refresh rounds:
        # shortens it to 0.1s and aborts the loop with KeyboardInterrupt
        # after three rounds. All other sleep durations pass through.
        if sec == 120:
            if sleep_counter.count > 3:
                raise KeyboardInterrupt
            sleep_counter.count += 1
            sleep_backup(0.1)
        else:
            sleep_backup(sec)
    monkeypatch.setattr('time.sleep', my_sleep)
    data = TEST_CONFIG
    runner = CliRunner()
    with runner.isolated_filesystem():
        with open('mai.yaml', 'w') as fd:
            yaml.dump(data, fd)
        # 1) login with an explicit profile name
        result = runner.invoke(cli, ['--config-file', 'mai.yaml', 'login', 'example-User'],
                               catch_exceptions=False)
        assert 'Authenticating against https://auth.example.com..\n OK' in result.output
        assert 'Assuming role AWS Account 911 (example): Shibboleth-User.. OK' in result.output
        assert 'Writing temporary AWS credentials.. OK' in result.output
        # 2) same profile again
        result = runner.invoke(cli, ['--config-file', 'mai.yaml', 'login', 'example-User'],
                               catch_exceptions=False)
        assert 'Authenticating against https://auth.example.com..\n OK' in result.output
        assert 'Assuming role AWS Account 911 (example): Shibboleth-User.. OK' in result.output
        assert 'Writing temporary AWS credentials.. OK' in result.output
        # 3) no profile name: the previously used profile is picked up again
        result = runner.invoke(cli, ['--config-file', 'mai.yaml', 'login'],
                               catch_exceptions=False)
        assert 'Authenticating against https://auth.example.com..\n OK' in result.output
        assert 'Assuming role AWS Account 911 (example): Shibboleth-User.. OK' in result.output
        assert 'Writing temporary AWS credentials.. OK' in result.output
        # 4) --refresh loops until my_sleep raises KeyboardInterrupt
        result = runner.invoke(cli, ['--config-file', 'mai.yaml', 'login', '--refresh'],
                               catch_exceptions=False)
        assert 'Authenticating against https://auth.example.com..\n OK' in result.output
        assert 'Assuming role AWS Account 911 (example): Shibboleth-User.. OK' in result.output
        assert 'Writing temporary AWS credentials.. OK' in result.output
        assert 'Waiting 54 minutes before refreshing credentials.. . . . OK' in result.output
        # re-arm the fake sleep for the second --refresh run below
        sleep_counter.count = 1
        # 5) repeat the sequence with the Administrator profile
        result = runner.invoke(cli, ['--config-file', 'mai.yaml', 'login', 'example-Administrator'],
                               catch_exceptions=False)
        assert 'Authenticating against https://auth.example.com..\n OK' in result.output
        assert 'Assuming role AWS Account 911 (example): Shibboleth-Administrator.. OK' in result.output
        assert 'Writing temporary AWS credentials.. OK' in result.output
        result = runner.invoke(cli, ['--config-file', 'mai.yaml', 'login'],
                               catch_exceptions=False)
        assert 'Authenticating against https://auth.example.com..\n OK' in result.output
        assert 'Assuming role AWS Account 911 (example): Shibboleth-Administrator.. OK' in result.output
        assert 'Writing temporary AWS credentials.. OK' in result.output
        result = runner.invoke(cli, ['--config-file', 'mai.yaml', 'login', '--refresh'],
                               catch_exceptions=False)
        assert 'Authenticating against https://auth.example.com..\n OK' in result.output
        assert 'Assuming role AWS Account 911 (example): Shibboleth-Administrator.. OK' in result.output
        assert 'Writing temporary AWS credentials.. OK' in result.output
        assert 'Waiting 54 minutes before refreshing credentials.. . . . OK' in result.output
def test_login_002_unknown_profile(monkeypatch):
    """'login' with an unknown profile name exits with code 2."""
    cli_runner = CliRunner()
    with cli_runner.isolated_filesystem():
        with open('mai.yaml', 'w') as fd:
            yaml.dump(TEST_CONFIG, fd)
        outcome = cli_runner.invoke(cli, ['--config-file', 'mai.yaml', 'login', 'foobar-User'])
        assert 'Profile "foobar-User" does not exist' in outcome.output
        assert outcome.exit_code == 2
def test_assume_role_failed(monkeypatch):
    """A failing assume_role is reported through Action.fatal_error."""
    saml_login_mock = MagicMock(return_value=('xml', []))
    monkeypatch.setattr('mai.cli.saml_login', saml_login_mock)
    assume_role_mock = MagicMock(side_effect=aws_saml_login.saml.AssumeRoleFailed('Test'))
    monkeypatch.setattr('mai.cli.assume_role', assume_role_mock)
    fatal_error_mock = MagicMock(side_effect=SystemExit(1))
    monkeypatch.setattr('mai.cli.Action.fatal_error', fatal_error_mock)
    profile_config = {'saml_identity_provider_url': 'example.com',
                      'saml_user': 'test_user',
                      'saml_role': ('provider_arn',
                                    'arn:aws:iam::911:saml-provider/Shibboleth',
                                    'name')}
    with pytest.raises(SystemExit):
        login_with_profile(None, None, profile_config, None)
    fatal_error_mock.assert_called_once_with('Assuming role failed: Test')
def test_require(monkeypatch):
    """'require' assumes the role only when the credentials are stale.

    First invocation must assume the role, the immediate second one must
    reuse the cached credentials, and after the stored timestamp is aged by
    one hour a third invocation must assume the role again.
    """
    monkeypatch.setattr('keyring.get_password', MagicMock(return_value='123456'))
    monkeypatch.setattr('keyring.set_password', MagicMock(return_value=''))
    monkeypatch.setattr('mai.cli.authenticate', MagicMock(return_value=SAML_RESPONSE_2_ROLES))
    monkeypatch.setattr('mai.cli.assume_role', MagicMock(return_value=('KEYID', 'SECRET', 'SESSION_TOKEN')))
    monkeypatch.setattr('mai.cli.write_aws_credentials', MagicMock)
    runner = CliRunner()
    data = TEST_CONFIG
    with runner.isolated_filesystem():
        with open('mai.yaml', 'w') as fd:
            yaml.dump(data, fd)
        result = runner.invoke(cli, ['--config-file', 'mai.yaml', 'require', 'example-User'])
        assert 'Assuming role' in result.output
        result = runner.invoke(cli, ['--config-file', 'mai.yaml', 'require', 'example-User'])
        assert 'Assuming role' not in result.output
        # mock 1 hour later
        with open('last_update.yaml', 'r+') as fd:
            # safe_load instead of the bare yaml.load the test used before:
            # PyYAML >= 5.1 requires an explicit Loader for yaml.load(), and
            # the file only contains plain scalars anyway.
            last_updated = yaml.safe_load(fd)
            last_updated['timestamp'] = last_updated['timestamp'] - 3600
            fd.seek(0)
            yaml.dump(last_updated, fd)
            fd.truncate()
        result = runner.invoke(cli, ['--config-file', 'mai.yaml', 'require', 'example-User'])
        assert 'Assuming role' in result.output
| 45.12218
| 150
| 0.64924
| 2,897
| 24,005
| 5.248533
| 0.070763
| 0.035778
| 0.062611
| 0.05656
| 0.87925
| 0.875304
| 0.862085
| 0.851694
| 0.84413
| 0.84413
| 0
| 0.013785
| 0.211248
| 24,005
| 531
| 151
| 45.207156
| 0.788634
| 0.000708
| 0
| 0.679389
| 0
| 0.025445
| 0.35133
| 0.089927
| 0
| 0
| 0
| 0
| 0.282443
| 1
| 0.063613
| false
| 0.058524
| 0.022901
| 0
| 0.094148
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
16d2698764481c0a01397f5bf6e99014ebd4698f
| 4,790
|
py
|
Python
|
z2/part2/interactive/jm/random_fuzzy_arrows_1/516629538.py
|
kozakusek/ipp-2020-testy
|
09aa008fa53d159672cc7cbf969a6b237e15a7b8
|
[
"MIT"
] | 1
|
2020-04-16T12:13:47.000Z
|
2020-04-16T12:13:47.000Z
|
z2/part2/interactive/jm/random_fuzzy_arrows_1/516629538.py
|
kozakusek/ipp-2020-testy
|
09aa008fa53d159672cc7cbf969a6b237e15a7b8
|
[
"MIT"
] | 18
|
2020-03-06T17:50:15.000Z
|
2020-05-19T14:58:30.000Z
|
z2/part2/interactive/jm/random_fuzzy_arrows_1/516629538.py
|
kozakusek/ipp-2020-testy
|
09aa008fa53d159672cc7cbf969a6b237e15a7b8
|
[
"MIT"
] | 18
|
2020-03-06T17:45:13.000Z
|
2020-06-09T19:18:31.000Z
|
from part1 import (
gamma_board,
gamma_busy_fields,
gamma_delete,
gamma_free_fields,
gamma_golden_move,
gamma_golden_possible,
gamma_move,
gamma_new,
)
"""
scenario: test_random_actions
uuid: 516629538
"""
"""
random actions, total chaos
"""
board = gamma_new(7, 4, 3, 12)
assert board is not None
assert gamma_move(board, 1, 0, 2) == 1
assert gamma_move(board, 1, 4, 1) == 1
assert gamma_move(board, 2, 3, 2) == 1
assert gamma_move(board, 2, 5, 0) == 1
assert gamma_golden_possible(board, 2) == 1
assert gamma_move(board, 3, 3, 6) == 0
assert gamma_move(board, 1, 2, 4) == 0
assert gamma_move(board, 1, 4, 0) == 1
assert gamma_move(board, 2, 2, 2) == 1
assert gamma_move(board, 2, 5, 2) == 1
assert gamma_move(board, 3, 3, 2) == 0
assert gamma_move(board, 3, 0, 3) == 1
assert gamma_move(board, 1, 0, 0) == 1
assert gamma_free_fields(board, 1) == 19
assert gamma_move(board, 2, 3, 1) == 1
assert gamma_move(board, 3, 3, 5) == 0
assert gamma_move(board, 1, 0, 1) == 1
assert gamma_free_fields(board, 1) == 17
assert gamma_move(board, 2, 3, 1) == 0
assert gamma_move(board, 2, 2, 2) == 0
assert gamma_move(board, 3, 3, 5) == 0
assert gamma_move(board, 1, 0, 1) == 0
assert gamma_move(board, 1, 4, 2) == 1
assert gamma_busy_fields(board, 1) == 6
assert gamma_free_fields(board, 1) == 16
assert gamma_move(board, 2, 1, 6) == 0
assert gamma_move(board, 2, 3, 2) == 0
assert gamma_move(board, 3, 3, 2) == 0
assert gamma_move(board, 1, 3, 0) == 1
assert gamma_move(board, 2, 1, 3) == 1
assert gamma_move(board, 3, 3, 4) == 0
assert gamma_move(board, 3, 0, 3) == 0
assert gamma_golden_possible(board, 3) == 1
assert gamma_move(board, 1, 0, 0) == 0
assert gamma_move(board, 2, 0, 6) == 0
assert gamma_move(board, 2, 4, 3) == 1
assert gamma_move(board, 3, 6, 2) == 1
assert gamma_move(board, 1, 5, 0) == 0
assert gamma_move(board, 1, 3, 1) == 0
assert gamma_busy_fields(board, 1) == 7
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 3, 3) == 1
assert gamma_move(board, 2, 2, 3) == 1
assert gamma_move(board, 3, 2, 1) == 1
assert gamma_move(board, 3, 2, 2) == 0
assert gamma_golden_move(board, 1, 1, 2) == 0
assert gamma_busy_fields(board, 2) == 9
assert gamma_move(board, 3, 1, 1) == 1
assert gamma_move(board, 3, 3, 2) == 0
assert gamma_move(board, 1, 5, 3) == 1
assert gamma_move(board, 2, 5, 2) == 0
assert gamma_move(board, 3, 0, 2) == 0
assert gamma_golden_move(board, 3, 3, 5) == 0
assert gamma_move(board, 1, 3, 6) == 0
assert gamma_move(board, 2, 1, 2) == 1
assert gamma_move(board, 2, 4, 0) == 0
assert gamma_move(board, 3, 3, 6) == 0
assert gamma_busy_fields(board, 3) == 4
assert gamma_move(board, 1, 1, 3) == 0
assert gamma_move(board, 2, 0, 6) == 0
assert gamma_move(board, 2, 0, 3) == 0
assert gamma_move(board, 3, 1, 5) == 0
assert gamma_move(board, 1, 1, 6) == 0
assert gamma_move(board, 2, 0, 2) == 0
assert gamma_golden_possible(board, 2) == 1
assert gamma_move(board, 3, 1, 1) == 0
assert gamma_free_fields(board, 3) == 6
board394073312 = gamma_board(board)
assert board394073312 is not None
assert board394073312 == ("322221.\n"
"1222123\n"
"13321..\n"
"1..112.\n")
del board394073312
board394073312 = None
assert gamma_move(board, 2, 0, 1) == 0
assert gamma_move(board, 2, 6, 2) == 0
assert gamma_move(board, 3, 0, 6) == 0
assert gamma_busy_fields(board, 3) == 4
assert gamma_free_fields(board, 3) == 6
assert gamma_move(board, 1, 1, 5) == 0
assert gamma_move(board, 1, 3, 1) == 0
assert gamma_move(board, 2, 4, 1) == 0
assert gamma_move(board, 2, 3, 1) == 0
assert gamma_move(board, 3, 0, 6) == 0
assert gamma_move(board, 3, 1, 3) == 0
board687338617 = gamma_board(board)
assert board687338617 is not None
assert board687338617 == ("322221.\n"
"1222123\n"
"13321..\n"
"1..112.\n")
del board687338617
board687338617 = None
assert gamma_move(board, 1, 3, 6) == 0
assert gamma_move(board, 2, 0, 6) == 0
assert gamma_move(board, 2, 6, 0) == 1
assert gamma_free_fields(board, 2) == 5
assert gamma_move(board, 3, 3, 6) == 0
assert gamma_move(board, 1, 0, 2) == 0
assert gamma_busy_fields(board, 1) == 8
assert gamma_move(board, 2, 1, 5) == 0
assert gamma_move(board, 3, 1, 5) == 0
assert gamma_move(board, 3, 6, 1) == 1
assert gamma_move(board, 1, 4, 2) == 0
assert gamma_move(board, 2, 2, 3) == 0
assert gamma_move(board, 2, 6, 0) == 0
assert gamma_busy_fields(board, 2) == 11
assert gamma_move(board, 3, 1, 5) == 0
assert gamma_busy_fields(board, 3) == 5
assert gamma_move(board, 1, 3, 6) == 0
assert gamma_move(board, 1, 2, 3) == 0
assert gamma_move(board, 2, 1, 5) == 0
assert gamma_move(board, 2, 6, 3) == 1
assert gamma_busy_fields(board, 2) == 12
assert gamma_golden_possible(board, 2) == 1
gamma_delete(board)
| 32.808219
| 46
| 0.661169
| 878
| 4,790
| 3.447608
| 0.05467
| 0.363396
| 0.386521
| 0.515362
| 0.847043
| 0.83515
| 0.765775
| 0.511067
| 0.420548
| 0.403039
| 0
| 0.131391
| 0.183299
| 4,790
| 145
| 47
| 33.034483
| 0.642382
| 0
| 0
| 0.294574
| 0
| 0
| 0.015313
| 0
| 0
| 0
| 0
| 0
| 0.813953
| 1
| 0
| false
| 0
| 0.007752
| 0
| 0.007752
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
16d31c1f0f9ebe2f8d677db11e6889388e12081f
| 39,863
|
py
|
Python
|
pysteps/timeseries/autoregression.py
|
AFansGH/pysteps
|
ee5cd10ed9058808f934cb1992913055fbcbb3d2
|
[
"BSD-3-Clause"
] | null | null | null |
pysteps/timeseries/autoregression.py
|
AFansGH/pysteps
|
ee5cd10ed9058808f934cb1992913055fbcbb3d2
|
[
"BSD-3-Clause"
] | null | null | null |
pysteps/timeseries/autoregression.py
|
AFansGH/pysteps
|
ee5cd10ed9058808f934cb1992913055fbcbb3d2
|
[
"BSD-3-Clause"
] | null | null | null |
"""
pysteps.timeseries.autoregression
=================================
Methods related to autoregressive AR(p) models.
.. autosummary::
:toctree: ../generated/
adjust_lag2_corrcoef1
adjust_lag2_corrcoef2
ar_acf
estimate_ar_params_ols
estimate_ar_params_ols_localized
estimate_ar_params_yw
estimate_ar_params_yw_localized
estimate_var_params_ols
estimate_var_params_ols_localized
estimate_var_params_yw
iterate_ar_model
iterate_var_model
"""
import numpy as np
from scipy.special import binom
from scipy import linalg as la
from scipy import ndimage
def adjust_lag2_corrcoef1(gamma_1, gamma_2):
    """A simple adjustment of lag-2 temporal autocorrelation coefficient to
    ensure that the resulting AR(2) process is stationary when the parameters
    are estimated from the Yule-Walker equations.

    Parameters
    ----------
    gamma_1 : float
        Lag-1 temporal autocorrelation coeffient.
    gamma_2 : float
        Lag-2 temporal autocorrelation coeffient.

    Returns
    -------
    out : float
        The adjusted lag-2 correlation coefficient.
    """
    # Stationarity requires gamma_2 > 2*gamma_1^2 - 1; the small epsilon
    # keeps the bound strict. The upper bound keeps the coefficient
    # strictly below 1.
    lower_bound = 2 * gamma_1 * gamma_1 - 1 + 1e-10
    upper_bound = 1 - 1e-10
    return np.minimum(np.maximum(gamma_2, lower_bound), upper_bound)
def adjust_lag2_corrcoef2(gamma_1, gamma_2):
    """A more advanced adjustment of lag-2 temporal autocorrelation coefficient
    to ensure that the resulting AR(2) process is stationary when
    the parameters are estimated from the Yule-Walker equations.

    Parameters
    ----------
    gamma_1 : float
        Lag-1 temporal autocorrelation coeffient.
    gamma_2 : float
        Lag-2 temporal autocorrelation coeffient.

    Returns
    -------
    out : float
        The adjusted lag-2 correlation coefficient.
    """
    # Raise gamma_2 to the larger of two stationarity floors derived from
    # gamma_1; divides by gamma_1^2, so gamma_1 must be non-zero.
    floor_a = 2 * gamma_1 * gamma_2 - 1
    floor_b = (3 * gamma_1 ** 2 - 2 + 2 * (1 - gamma_1 ** 2) ** 1.5) / gamma_1 ** 2
    return np.maximum(np.maximum(gamma_2, floor_a), floor_b)
def ar_acf(gamma, n=None):
    """Compute theoretical autocorrelation function (ACF) from the AR(p) model
    with lag-l, l=1,2,...,p temporal autocorrelation coefficients.

    Parameters
    ----------
    gamma : array-like
        Array of length p containing the lag-l, l=1,2,...p, temporal
        autocorrelation coefficients.
        The correlation coefficients are assumed to be in ascending
        order with respect to time lag.
    n : int
        Desired length of ACF array. Must be greater than len(gamma).

    Returns
    -------
    out : array-like
        Array containing the ACF values.
    """
    ar_order = len(gamma)
    # Nothing to extend: requested length equals (or defaults to) the input
    # length, so the input coefficients already are the ACF.
    if n == ar_order or n is None:
        return gamma
    elif n < ar_order:
        raise ValueError(
            "n=%i, but must be larger than the order of the AR process %i"
            % (n, ar_order)
        )
    # AR(p) coefficients phi_1..phi_p; the trailing element returned by
    # estimate_ar_params_yw (the innovation parameter) is dropped.
    phi = estimate_ar_params_yw(gamma)[:-1]
    # NOTE(review): acf.append() below requires a Python list, so gamma is
    # presumably a list rather than an ndarray -- confirm with callers.
    acf = gamma.copy()
    # Extrapolate the ACF one lag at a time via the AR recursion.
    for t in range(0, n - ar_order):
        # Retrieve gammas (in reverse order)
        gammas = acf[t : t + ar_order][::-1]
        # Compute next gamma
        gamma_ = np.sum(gammas * phi)
        acf.append(gamma_)
    return acf
def estimate_ar_params_ols(
    x, p, d=0, check_stationarity=True, include_constant_term=False, h=0, lam=0.0
):
    r"""Estimate the parameters of an autoregressive AR(p) model
    :math:`x_{k+1}=c+\phi_1 x_k+\phi_2 x_{k-1}+\dots+\phi_p x_{k-p}+\phi_{p+1}\epsilon`
    by using ordinary least squares (OLS). If :math:`d\geq 1`, the parameters
    are estimated for a d times differenced time series that is integrated back
    to the original one by summation of the differences.

    Parameters
    ----------
    x : array_like
        Array of shape (n,...) containing a time series of length n=p+d+h+1.
        The remaining dimensions are flattened. The rows and columns of x
        represent time steps and samples, respectively.
    p : int
        The order of the model.
    d : {0,1}
        The order of differencing to apply to the time series.
    check_stationarity : bool
        Check the stationarity of the estimated model.
    include_constant_term : bool
        Include the constant term :math:`c` to the model.
    h : int
        If h>0, the fitting is done by using a history of length h in addition
        to the minimal required number of time steps n=p+d+1.
    lam : float
        If lam>0, the regression is regularized by adding a penalty term
        (i.e. ridge regression).

    Returns
    -------
    out : list
        The estimated parameter matrices :math:`\mathbf{\Phi}_1,\mathbf{\Phi}_2,
        \dots,\mathbf{\Phi}_{p+1}`. If include_constant_term is True, the
        constant term :math:`c` is added to the beginning of the list.

    Notes
    -----
    Estimation of the innovation term parameter :math:`\phi_{p+1}` is currently
    implemented for p<=2. If p > 2, :math:`\phi_{p+1}` is set to zero.
    """
    n = x.shape[0]
    if n != p + d + h + 1:
        raise ValueError(
            "n = %d, p = %d, d = %d, h = %d, but n = p+d+h+1 = %d required"
            % (n, p, d, h, p + d + h + 1)
        )
    # Flatten all non-time dimensions into a single sample axis.
    if len(x.shape) > 1:
        x = x.reshape((n, np.prod(x.shape[1:])))
    if d not in [0, 1]:
        raise ValueError("d = %d, but 0 or 1 required" % d)
    if d == 1:
        x = np.diff(x, axis=0)
        n -= d
    x_lhs = x[p:, :]
    # Build the design matrix: one column per (sample, time window) pair,
    # each holding the p lagged values, optionally preceded by a constant 1.
    Z = []
    for i in range(x.shape[1]):
        for j in range(p - 1, n - 1 - h):
            z_ = np.hstack([x[j - k, i] for k in range(p)])
            if include_constant_term:
                z_ = np.hstack([[1], z_])
            Z.append(z_)
    Z = np.column_stack(Z)
    # Regularized normal equations: b = x_lhs Z^T (Z Z^T + lam I)^-1.
    b = np.dot(
        np.dot(x_lhs, Z.T), np.linalg.inv(np.dot(Z, Z.T) + lam * np.eye(Z.shape[0]))
    )
    b = b.flatten()
    if include_constant_term:
        c = b[0]
        phi = list(b[1:])
    else:
        phi = list(b)
    # Innovation (noise) term parameter: closed-form expressions exist for
    # p<=2; set to zero otherwise (see Notes above).
    if p == 1:
        phi_pert = np.sqrt(1.0 - phi[0] * phi[0])
    elif p == 2:
        phi_pert = np.sqrt(
            (1.0 + phi[1]) * ((1.0 - phi[1]) ** 2.0 - phi[0] ** 2.0) / (1.0 - phi[1])
        )
    else:
        phi_pert = 0.0
    if check_stationarity:
        if not test_ar_stationarity(phi):
            # Fixed error message: this is the OLS estimator; the message
            # previously blamed estimate_ar_params_yw (copy-paste error).
            raise RuntimeError(
                "Error in estimate_ar_params_ols: nonstationary AR(p) process"
            )
    if d == 1:
        # Convert the parameters of the differenced model back to the
        # original (integrated) series.
        phi_out = _compute_differenced_model_params(phi, p, 1, 1)
    else:
        phi_out = phi
    phi_out.append(phi_pert)
    if include_constant_term:
        phi_out.insert(0, c)
    return phi_out
def estimate_ar_params_ols_localized(
    x,
    p,
    window_radius,
    d=0,
    include_constant_term=False,
    h=0,
    lam=0.0,
    window="gaussian",
):
    r"""Estimate the parameters of a localized AR(p) model

    :math:`x_{k+1,i}=c_i+\phi_{1,i}x_{k,i}+\phi_{2,i}x_{k-1,i}+\dots+\phi_{p,i}x_{k-p,i}+\phi_{p+1,i}\epsilon`

    by using ordinary least squares (OLS), where :math:`i` denote spatial
    coordinates with arbitrary dimension. If :math:`d\geq 1`, the parameters
    are estimated for a d times differenced time series that is integrated back
    to the original one by summation of the differences.

    Parameters
    ----------
    x : array_like
        Array of shape (n,...) containing a time series of length n=p+d+h+1.
        The remaining dimensions are flattened. The rows and columns of x
        represent time steps and samples, respectively.
    p : int
        The order of the model.
    window_radius : float
        Radius of the moving window. If window is 'gaussian', window_radius is
        the standard deviation of the Gaussian filter. If window is 'uniform',
        the size of the window is 2*window_radius+1.
    d : {0,1}
        The order of differencing to apply to the time series.
    include_constant_term : bool
        Include the constant term :math:`c_i` to the model.
    h : int
        If h>0, the fitting is done by using a history of length h in addition
        to the minimal required number of time steps n=p+d+1.
    lam : float
        If lam>0, the regression is regularized by adding a penalty term
        (i.e. ridge regression).
    window : {"gaussian", "uniform"}
        The weight function to use for the moving window. Applicable if
        window_radius < np.inf. Defaults to 'gaussian'.

    Returns
    -------
    out : list
        List of length p+1 containing the AR(p) parameter fields for the
        lag-p terms and the innovation term. The parameter fields have the
        same spatial shape as the input x. NaN values are assigned where the
        local linear system cannot be solved. If include_constant_term is
        True, the constant term :math:`c_i` is added to the beginning of the
        list.

    Notes
    -----
    Estimation of the innovation term parameter :math:`\phi_{p+1}` is currently
    implemented for p<=2. If p > 2, :math:`\phi_{p+1}` is set to a zero array.
    """
    n = x.shape[0]
    if n != p + d + h + 1:
        raise ValueError(
            "n = %d, p = %d, d = %d, h = %d, but n = p+d+h+1 = %d required"
            % (n, p, d, h, p + d + h + 1)
        )
    # Fit on the differenced series; the differencing is undone via
    # _compute_differenced_model_params at the end.
    if d == 1:
        x = np.diff(x, axis=0)
        n -= d
    if window == "gaussian":
        convol_filter = ndimage.gaussian_filter
    else:
        convol_filter = ndimage.uniform_filter
    # For the uniform filter the size argument is the full window width;
    # for the Gaussian filter it is the standard deviation.
    if window == "uniform":
        window_size = 2 * window_radius + 1
    else:
        window_size = window_radius
    # XZ accumulates the spatially smoothed cross-moments between the
    # predictand x_{k+1} and each lagged predictor (one row per lag).
    XZ = np.zeros(np.hstack([[p], x.shape[1:]]))
    for i in range(p):
        for j in range(h + 1):
            tmp = convol_filter(
                x[p + j, :] * x[p - 1 - i + j, :], window_size, mode="constant"
            )
            XZ[i, :] += tmp
    if include_constant_term:
        # Smoothed first moment of the predictand for the intercept column.
        v = 0.0
        for i in range(h + 1):
            v += convol_filter(x[p + i, :], window_size, mode="constant")
        XZ = np.vstack([v[np.newaxis, :], XZ])
    # Z2 is the local (smoothed) Gram matrix of the predictors; with the
    # constant term it gains an extra leading row/column.
    if not include_constant_term:
        Z2 = np.zeros(np.hstack([[p, p], x.shape[1:]]))
        for i in range(p):
            for j in range(p):
                for k in range(h + 1):
                    tmp = convol_filter(
                        x[p - 1 - i + k, :] * x[p - 1 - j + k, :],
                        window_size,
                        mode="constant",
                    )
                    Z2[i, j, :] += tmp
    else:
        Z2 = np.zeros(np.hstack([[p + 1, p + 1], x.shape[1:]]))
        Z2[0, 0, :] = convol_filter(np.ones(x.shape[1:]), window_size, mode="constant")
        for i in range(p):
            for j in range(h + 1):
                tmp = convol_filter(x[p - 1 - i + j, :], window_size, mode="constant")
                Z2[0, i + 1, :] += tmp
                Z2[i + 1, 0, :] += tmp
        for i in range(p):
            for j in range(p):
                for k in range(h + 1):
                    tmp = convol_filter(
                        x[p - 1 - i + k, :] * x[p - 1 - j + k, :],
                        window_size,
                        mode="constant",
                    )
                    Z2[i + 1, j + 1, :] += tmp
    m = np.prod(x.shape[1:])
    phi = np.empty(np.hstack([[p], m]))
    if include_constant_term:
        c = np.empty(m)
    XZ = XZ.reshape(np.hstack([[XZ.shape[0]], m]))
    Z2 = Z2.reshape(np.hstack([[Z2.shape[0], Z2.shape[1]], m]))
    # Solve the (optionally ridge-regularized) normal equations at each pixel.
    for i in range(m):
        try:
            b = np.dot(XZ[:, i], np.linalg.inv(Z2[:, :, i] + lam * np.eye(Z2.shape[0])))
            if not include_constant_term:
                phi[:, i] = b
            else:
                phi[:, i] = b[1:]
                c[i] = b[0]
        except np.linalg.LinAlgError:
            # Singular local Gram matrix: mark the pixel as undefined.
            phi[:, i] = np.nan
            if include_constant_term:
                c[i] = np.nan
    # Closed-form innovation-term parameter, implemented for p <= 2 only.
    if p == 1:
        phi_pert = np.sqrt(1.0 - phi[0, :] * phi[0, :])
    elif p == 2:
        phi_pert = np.sqrt(
            (1.0 + phi[1, :])
            * ((1.0 - phi[1, :]) ** 2.0 - phi[0, :] ** 2.0)
            / (1.0 - phi[1, :])
        )
    else:
        phi_pert = np.zeros(m)
    phi = list(phi.reshape(np.hstack([[phi.shape[0]], x.shape[1:]])))
    if d == 1:
        phi = _compute_differenced_model_params(phi, p, 1, 1)
    phi.append(phi_pert.reshape(x.shape[1:]))
    if include_constant_term:
        phi.insert(0, c.reshape(x.shape[1:]))
    return phi
def estimate_ar_params_yw(gamma, d=0, check_stationarity=True):
    r"""Estimate the parameters of an AR(p) model

    :math:`x_{k+1}=\phi_1 x_k+\phi_2 x_{k-1}+\dots+\phi_p x_{k-p}+\phi_{p+1}\epsilon`

    by solving the Yule-Walker equations for the given autocorrelation
    coefficients.

    Parameters
    ----------
    gamma : array_like
        Array of length p containing the lag-l temporal autocorrelation
        coefficients for l=1,2,...p, in ascending order of time lag.
    d : {0,1}
        The order of differencing. If d=1, the coefficients gamma are assumed
        to be computed from the differenced time series, and the estimated
        parameters are transformed accordingly.
    check_stationarity : bool
        If True, the stationarity of the resulting AR(p) process is tested,
        and an exception is thrown if it is not stationary.

    Returns
    -------
    out : ndarray
        Array of length p+1 containing the AR(p) parameters for the lag-p
        terms and the innovation term.

    Notes
    -----
    To estimate the parameters of an integrated ARI(p,d) model, compute the
    correlation coefficients gamma by calling
    :py:func:`pysteps.timeseries.correlation.temporal_autocorrelation` with d>0.
    """
    if d not in (0, 1):
        raise ValueError("d = %d, but 0 or 1 required" % d)
    p = len(gamma)
    # Yule-Walker system: G is the Toeplitz matrix of autocorrelations for
    # lags 0..p-1, and the right-hand side holds lags 1..p.
    g = np.concatenate(([1.0], np.asarray(gamma).flatten()))
    G = np.array([np.roll(g[:-1], lag) for lag in range(p)])
    phi = np.linalg.solve(G, g[1:])
    # The model is stationary iff the characteristic roots lie inside the
    # unit circle.
    if check_stationarity and not test_ar_stationarity(phi):
        raise RuntimeError(
            "Error in estimate_ar_params_yw: nonstationary AR(p) process"
        )
    # Innovation-term parameter from the lag-0 Yule-Walker equation.
    c = 1.0 - np.dot(g[1:], phi)
    phi_pert = np.sqrt(c)
    # A negative value under the square root yields NaN; fall back to zero.
    if not np.isfinite(phi_pert):
        phi_pert = 0.0
    if d == 1:
        phi = _compute_differenced_model_params(phi, p, 1, 1)
    return np.hstack([phi, [phi_pert]])
def estimate_ar_params_yw_localized(gamma, d=0):
    r"""Estimate the parameters of a localized AR(p) model

    :math:`x_{k+1,i}=\phi_{1,i}x_{k,i}+\phi_{2,i}x_{k-1,i}+\dots+\phi_{p,i}x_{k-p,i}+\phi_{p+1}\epsilon`

    from the Yule-Walker equations using the given set of autocorrelation
    coefficients :math:`\gamma_{l,i}`, where :math:`l` denotes time lag and
    :math:`i` denote spatial coordinates with arbitrary dimension.

    Parameters
    ----------
    gamma : array_like
        A list containing the lag-l temporal autocorrelation coefficient fields
        for l=1,2,...p. The correlation coefficients are assumed to be in
        ascending order with respect to time lag.
    d : {0,1}
        The order of differencing. If d=1, the correlation coefficients gamma
        are assumed to be computed from the differenced time series, which is
        also done for the resulting parameter estimates.

    Returns
    -------
    out : list
        List of length p+1 containing the AR(p) parameter fields for the
        lag-p terms and the innovation term. The parameter fields have the same
        shape as the elements of gamma.

    Notes
    -----
    To estimate the parameters of an integrated ARI(p,d) model, compute the
    correlation coefficients gamma by calling
    :py:func:`pysteps.timeseries.correlation.temporal_autocorrelation` with d>0
    and window_radius<np.inf.
    """
    # All autocorrelation fields must share the same spatial shape.
    for i in range(1, len(gamma)):
        if gamma[i].shape != gamma[0].shape:
            raise ValueError(
                "the correlation coefficient fields gamma have mismatching shapes"
            )
    if d not in [0, 1]:
        raise ValueError("d = %d, but 0 or 1 required" % d)
    p = len(gamma)
    n = np.prod(gamma[0].shape)
    gamma_1d = [gamma[i].flatten() for i in range(len(gamma))]
    phi = np.empty((p, n))
    # Solve the p x p Yule-Walker system independently at each grid point.
    for i in range(n):
        g = np.hstack([[1.0], [gamma_1d[k][i] for k in range(len(gamma_1d))]])
        G = []
        for k in range(p):
            G.append(np.roll(g[:-1], k))
        G = np.array(G)
        try:
            phi_ = np.linalg.solve(G, g[1:].flatten())
        except np.linalg.LinAlgError:
            # Singular system at this grid point: mark parameters undefined.
            phi_ = np.ones(p) * np.nan
        phi[:, i] = phi_
    # Innovation-term parameter field from the lag-0 Yule-Walker equation;
    # negative values under the square root become NaN.
    c = 1.0
    for i in range(p):
        c -= gamma_1d[i] * phi[i]
    phi_pert = np.sqrt(c)
    # NOTE(review): q=1 here treats the (p, n) array elementwise per lag row,
    # which matches the scalar code path of the helper — confirm intended.
    if d == 1:
        phi = _compute_differenced_model_params(phi, p, 1, 1)
    phi_out = np.empty((len(phi) + 1, n))
    phi_out[: len(phi), :] = phi
    phi_out[-1, :] = phi_pert
    return list(phi_out.reshape(np.hstack([[len(phi_out)], gamma[0].shape])))
def estimate_var_params_ols(
    x, p, d=0, check_stationarity=True, include_constant_term=False, h=0, lam=0.0
):
    r"""Estimate the parameters of a vector autoregressive VAR(p) model

    :math:`\mathbf{x}_{k+1}=\mathbf{c}+\mathbf{\Phi}_1\mathbf{x}_k+
    \mathbf{\Phi}_2\mathbf{x}_{k-1}+\dots+\mathbf{\Phi}_p\mathbf{x}_{k-p}+
    \mathbf{\Phi}_{p+1}\mathbf{\epsilon}`

    by using ordinary least squares (OLS). If :math:`d\geq 1`, the parameters
    are estimated for a d times differenced time series that is integrated back
    to the original one by summation of the differences.

    Parameters
    ----------
    x : array_like
        Array of shape (n, q, :) containing a time series of length n=p+d+h+1
        with q-dimensional variables. The remaining dimensions are flattened.
        The remaining dimensions starting from the third one represent the
        samples.
    p : int
        The order of the model.
    d : {0,1}
        The order of differencing to apply to the time series.
    check_stationarity : bool
        If True, the stationarity of the resulting VAR(p) process is tested. An
        exception is thrown if the process is not stationary.
    include_constant_term : bool
        Include the constant term :math:`\mathbf{c}` to the model.
    h : int
        If h>0, the fitting is done by using a history of length h in addition
        to the minimal required number of time steps n=p+d+1.
    lam : float
        If lam>0, the regression is regularized by adding a penalty term
        (i.e. ridge regression).

    Returns
    -------
    out : list
        The estimated parameter matrices :math:`\mathbf{\Phi}_1,\mathbf{\Phi}_2,
        \dots,\mathbf{\Phi}_{p+1}`. If include_constant_term is True, the
        constant term :math:`\mathbf{c}` is added to the beginning of the list.

    Notes
    -----
    Estimation of the innovation parameter :math:`\mathbf{\Phi}_{p+1}` is not
    currently implemented, and it is set to a zero matrix.
    """
    q = x.shape[1]
    n = x.shape[0]
    if n != p + d + h + 1:
        raise ValueError(
            "n = %d, p = %d, d = %d, h = %d, but n = p+d+h+1 = %d required"
            % (n, p, d, h, p + d + h + 1)
        )
    if d not in [0, 1]:
        raise ValueError("d = %d, but 0 or 1 required" % d)
    # Fit on the differenced series; the differencing is undone at the end.
    if d == 1:
        x = np.diff(x, axis=0)
        n -= d
    x = x.reshape((n, q, np.prod(x.shape[2:])))
    # X holds the predictand vectors (one column per sample/time step).
    X = []
    for i in range(x.shape[2]):
        for j in range(p + h, n):
            x_ = x[j, :, i]
            X.append(x_.reshape((q, 1)))
    X = np.hstack(X)
    # Z holds the stacked lagged predictor vectors, with an optional leading
    # 1-row for the constant term.
    Z = []
    for i in range(x.shape[2]):
        for j in range(p - 1, n - 1 - h):
            z_ = np.vstack([x[j - k, :, i].reshape((q, 1)) for k in range(p)])
            if include_constant_term:
                z_ = np.vstack([[1], z_])
            Z.append(z_)
    Z = np.column_stack(Z)
    # Ridge-regularized OLS solution of X = B Z.
    B = np.dot(np.dot(X, Z.T), np.linalg.inv(np.dot(Z, Z.T) + lam * np.eye(Z.shape[0])))
    phi = []
    if include_constant_term:
        c = B[:, 0]
        for i in range(p):
            phi.append(B[:, i * q + 1 : (i + 1) * q + 1])
    else:
        for i in range(p):
            phi.append(B[:, i * q : (i + 1) * q])
    if check_stationarity:
        # Stationarity via the eigenvalues of the companion (block) matrix.
        # NOTE(review): this inlines the check with a 0.999 threshold instead
        # of calling test_var_stationarity (strict 1.0) — confirm intended.
        M = np.zeros((p * q, p * q))
        for i in range(p):
            M[0:q, i * q : (i + 1) * q] = phi[i]
        for i in range(1, p):
            M[i * q : (i + 1) * q, (i - 1) * q : i * q] = np.eye(q, q)
        r, v = np.linalg.eig(M)
        if np.any(np.abs(r) > 0.999):
            raise RuntimeError(
                "Error in estimate_var_params_ols: " "nonstationary VAR(p) process"
            )
    if d == 1:
        phi = _compute_differenced_model_params(phi, p, q, 1)
    if include_constant_term:
        phi.insert(0, c)
    # Innovation parameter estimation is not implemented; append zeros.
    phi.append(np.zeros((q, q)))
    return phi
def estimate_var_params_ols_localized(
    x,
    p,
    window_radius,
    d=0,
    include_constant_term=False,
    h=0,
    lam=0.0,
    window="gaussian",
):
    r"""Estimate the parameters of a vector autoregressive VAR(p) model

    :math:`\mathbf{x}_{k+1,i}=\mathbf{c}_i+\mathbf{\Phi}_{1,i}\mathbf{x}_{k,i}+
    \mathbf{\Phi}_{2,i}\mathbf{x}_{k-1,i}+\dots+\mathbf{\Phi}_{p,i}
    \mathbf{x}_{k-p,i}+\mathbf{\Phi}_{p+1,i}\mathbf{\epsilon}`

    by using ordinary least squares (OLS), where :math:`i` denote spatial
    coordinates with arbitrary dimension. If :math:`d\geq 1`, the parameters
    are estimated for a d times differenced time series that is integrated back
    to the original one by summation of the differences.

    Parameters
    ----------
    x : array_like
        Array of shape (n, q, :) containing a time series of length n=p+d+h+1
        with q-dimensional variables. The remaining dimensions are flattened.
        The remaining dimensions starting from the third one represent the
        samples.
    p : int
        The order of the model.
    window_radius : float
        Radius of the moving window. If window is 'gaussian', window_radius is
        the standard deviation of the Gaussian filter. If window is 'uniform',
        the size of the window is 2*window_radius+1.
    d : {0,1}
        The order of differencing to apply to the time series.
    include_constant_term : bool
        Include the constant term :math:`\mathbf{c}` to the model.
    h : int
        If h>0, the fitting is done by using a history of length h in addition
        to the minimal required number of time steps n=p+d+1.
    lam : float
        If lam>0, the regression is regularized by adding a penalty term
        (i.e. ridge regression).
    window : {"gaussian", "uniform"}
        The weight function to use for the moving window. Applicable if
        window_radius < np.inf. Defaults to 'gaussian'.

    Returns
    -------
    out : list
        The estimated parameter matrices :math:`\mathbf{\Phi}_{1,i},
        \mathbf{\Phi}_{2,i},\dots,\mathbf{\Phi}_{p+1,i}`. If
        include_constant_term is True, the constant term :math:`\mathbf{c}_i` is
        added to the beginning of the list. Each element of the list is a matrix
        of shape (x.shape[2:], q, q).

    Notes
    -----
    Estimation of the innovation parameter :math:`\mathbf{\Phi}_{p+1}` is not
    currently implemented, and it is set to a zero matrix.
    """
    q = x.shape[1]
    n = x.shape[0]
    if n != p + d + h + 1:
        raise ValueError(
            "n = %d, p = %d, d = %d, h = %d, but n = p+d+h+1 = %d required"
            % (n, p, d, h, p + d + h + 1)
        )
    # Fit on the differenced series; differencing is undone at the end.
    if d == 1:
        x = np.diff(x, axis=0)
        n -= d
    if window == "gaussian":
        convol_filter = ndimage.gaussian_filter
    else:
        convol_filter = ndimage.uniform_filter
    # For the uniform filter the size argument is the full window width;
    # for the Gaussian filter it is the standard deviation.
    if window == "uniform":
        window_size = 2 * window_radius + 1
    else:
        window_size = window_radius
    # XZ accumulates the spatially smoothed cross-moments between the
    # predictand components and the stacked lagged predictor components.
    XZ = np.zeros(np.hstack([[q, p * q], x.shape[2:]]))
    for i in range(q):
        for k in range(p):
            for j in range(q):
                for l in range(h + 1):
                    tmp = convol_filter(
                        x[p + l, i, :] * x[p - 1 - k + l, j, :],
                        window_size,
                        mode="constant",
                    )
                    XZ[i, k * q + j, :] += tmp
    if include_constant_term:
        # Smoothed first moments of the predictand for the intercept column.
        v = np.zeros(np.hstack([[q], x.shape[2:]]))
        for i in range(q):
            for j in range(h + 1):
                v[i, :] += convol_filter(x[p + j, i, :], window_size, mode="constant")
        XZ = np.hstack([v[:, np.newaxis, :], XZ])
    # Z2 is the local (smoothed) Gram matrix of the stacked predictors; with
    # the constant term it gains an extra leading row/column.
    if not include_constant_term:
        Z2 = np.zeros(np.hstack([[p * q, p * q], x.shape[2:]]))
        for i in range(p):
            for j in range(q):
                for k in range(p):
                    for l in range(q):
                        for m in range(h + 1):
                            tmp = convol_filter(
                                x[p - 1 - i + m, j, :] * x[p - 1 - k + m, l, :],
                                window_size,
                                mode="constant",
                            )
                            Z2[i * q + j, k * q + l, :] += tmp
    else:
        Z2 = np.zeros(np.hstack([[p * q + 1, p * q + 1], x.shape[2:]]))
        Z2[0, 0, :] = convol_filter(np.ones(x.shape[2:]), window_size, mode="constant")
        for i in range(p):
            for j in range(q):
                for k in range(h + 1):
                    tmp = convol_filter(
                        x[p - 1 - i + k, j, :], window_size, mode="constant"
                    )
                    Z2[0, i * q + j + 1, :] += tmp
                    Z2[i * q + j + 1, 0, :] += tmp
        for i in range(p):
            for j in range(q):
                for k in range(p):
                    for l in range(q):
                        for m in range(h + 1):
                            tmp = convol_filter(
                                x[p - 1 - i + m, j, :] * x[p - 1 - k + m, l, :],
                                window_size,
                                mode="constant",
                            )
                            Z2[i * q + j + 1, k * q + l + 1, :] += tmp
    m = np.prod(x.shape[2:])
    if include_constant_term:
        c = np.empty((m, q))
    XZ = XZ.reshape((XZ.shape[0], XZ.shape[1], m))
    Z2 = Z2.reshape((Z2.shape[0], Z2.shape[1], m))
    phi = np.empty((p, m, q, q))
    # Solve the (optionally ridge-regularized) normal equations per pixel.
    for i in range(m):
        try:
            B = np.dot(
                XZ[:, :, i], np.linalg.inv(Z2[:, :, i] + lam * np.eye(Z2.shape[0]))
            )
            for k in range(p):
                if not include_constant_term:
                    phi[k, i, :, :] = B[:, k * q : (k + 1) * q]
                else:
                    phi[k, i, :, :] = B[:, k * q + 1 : (k + 1) * q + 1]
            if include_constant_term:
                c[i, :] = B[:, 0]
        except np.linalg.LinAlgError:
            # Singular local Gram matrix: mark the pixel as undefined.
            phi[:, i, :, :] = np.nan
            if include_constant_term:
                c[i, :] = np.nan
    phi_out = [
        phi[i].reshape(np.hstack([x.shape[2:], [q, q]])) for i in range(len(phi))
    ]
    if d == 1:
        phi_out = _compute_differenced_model_params(phi_out, p, q, 1)
    # Innovation parameter estimation is not implemented; append zeros.
    phi_out.append(np.zeros(phi_out[0].shape))
    if include_constant_term:
        phi_out.insert(0, c.reshape(np.hstack([x.shape[2:], [q]])))
    return phi_out
def estimate_var_params_yw(gamma, d=0, check_stationarity=True):
    r"""Estimate the parameters of a VAR(p) model

    :math:`\mathbf{x}_{k+1}=\mathbf{\Phi}_1\mathbf{x}_k+
    \mathbf{\Phi}_2\mathbf{x}_{k-1}+\dots+\mathbf{\Phi}_p\mathbf{x}_{k-p}+
    \mathbf{\Phi}_{p+1}\mathbf{\epsilon}`

    by solving the Yule-Walker equations for the given correlation matrices
    :math:`\mathbf{\Gamma}_0,\mathbf{\Gamma}_1,\dots,\mathbf{\Gamma}_n`, where
    n=p.

    Parameters
    ----------
    gamma : list
        List of correlation matrices
        :math:`\mathbf{\Gamma}_0,\mathbf{\Gamma}_1,\dots,\mathbf{\Gamma}_n`.
        To obtain these matrices, use
        :py:func:`pysteps.timeseries.correlation.temporal_autocorrelation_multivariate`
        with window_radius=np.inf.
    d : {0,1}
        The order of differencing. If d=1, the correlation matrices gamma are
        assumed to be computed from the differenced time series, and the
        estimated parameters are transformed accordingly.
    check_stationarity : bool
        If True, the stationarity of the resulting VAR(p) process is tested,
        and an exception is thrown if it is not stationary.

    Returns
    -------
    out : list
        List of VAR(p) coefficient matrices :math:`\mathbf{\Phi}_1,
        \mathbf{\Phi}_2,\dots\mathbf{\Phi}_{p+1}`, where the last matrix
        corresponds to the innovation term.

    Notes
    -----
    To estimate the parameters of an integrated VARI(p,d) model, compute the
    correlation coefficients gamma by calling
    :py:func:`pysteps.timeseries.correlation.temporal_autocorrelation_multivariate`
    with d>0. Estimation of the innovation parameter
    :math:`\mathbf{\Phi}_{p+1}` is not currently implemented, and it is set
    to a zero matrix.
    """
    p = len(gamma) - 1
    q = gamma[0].shape[0]
    for i in range(len(gamma)):
        if gamma[i].shape[0] != q or gamma[i].shape[1] != q:
            raise ValueError(
                "dimension mismatch: gamma[%d].shape=%s, but (%d,%d) expected"
                % (i, str(gamma[i].shape), q, q)
            )
    if d not in (0, 1):
        raise ValueError("d = %d, but 0 or 1 required" % d)
    # Block-Toeplitz coefficient matrix of the multivariate Yule-Walker
    # system: block (i, j) is Gamma_{|i-j|}, transposed below the diagonal.
    a = np.block(
        [
            [gamma[abs(i - j)].T if i > j else gamma[abs(i - j)] for j in range(p)]
            for i in range(p)
        ]
    )
    b = np.concatenate([g.T for g in gamma[1 : p + 1]], axis=0)
    sol = np.linalg.solve(a, b)
    phi = [sol[k * q : (k + 1) * q, :] for k in range(p)]
    if check_stationarity and not test_var_stationarity(phi):
        raise RuntimeError(
            "Error in estimate_var_params_yw: nonstationary VAR(p) process"
        )
    if d == 1:
        phi = _compute_differenced_model_params(phi, p, q, 1)
    # Innovation parameter estimation is not implemented; append zeros.
    phi.append(np.zeros(phi[0].shape))
    return phi
def estimate_var_params_yw_localized(gamma, d=0):
    r"""Estimate the parameters of a localized vector autoregressive VAR(p)
    model

    :math:`\mathbf{x}_{k+1,i}=\mathbf{\Phi}_{1,i}\mathbf{x}_{k,i}+
    \mathbf{\Phi}_{2,i}\mathbf{x}_{k-1,i}+\dots+\mathbf{\Phi}_{p,i}
    \mathbf{x}_{k-p,i}+\mathbf{\Phi}_{p+1,i}\mathbf{\epsilon}`

    from the Yule-Walker equations by using the given correlation matrices,
    where :math:`i` denote spatial coordinates with arbitrary dimension.

    Parameters
    ----------
    gamma : list
        List of correlation matrix fields
        :math:`\mathbf{\Gamma}_0,\mathbf{\Gamma}_1,\dots,\mathbf{\Gamma}_n`
        of shape (..., q, q), where the leading dimensions are spatial.
        To obtain these, use
        :py:func:`pysteps.timeseries.correlation.temporal_autocorrelation_multivariate`
        with window_radius<np.inf.
    d : {0,1}
        The order of differencing. If d=1, the correlation coefficients gamma
        are assumed to be computed from the differenced time series, which is
        also done for the resulting parameter estimates.

    Returns
    -------
    out : list
        The estimated parameter matrices :math:`\mathbf{\Phi}_{1,i},
        \mathbf{\Phi}_{2,i},\dots,\mathbf{\Phi}_{p+1,i}`. Each element of the
        list has the same shape as those in gamma.

    Notes
    -----
    To estimate the parameters of an integrated VARI(p,d) model, compute the
    correlation coefficients gamma by calling
    :py:func:`pysteps.timeseries.correlation.temporal_autocorrelation_multivariate`
    with d>0 and window_radius<np.inf. Estimation of the innovation parameter
    :math:`\mathbf{\Phi}_{p+1}` is not currently implemented, and it is set to
    a zero matrix.
    """
    p = len(gamma) - 1
    # BUG FIX: read q from the last axis instead of a hard-coded index 2, so
    # that any number of leading spatial dimensions is supported (consistent
    # with the shape[:-2] flattening below).
    q = gamma[0].shape[-1]
    n = np.prod(gamma[0].shape[:-2])
    for i in range(1, len(gamma)):
        if gamma[i].shape != gamma[0].shape:
            raise ValueError(
                "dimension mismatch: gamma[%d].shape=%s, but %s expected"
                % (i, str(gamma[i].shape), str(gamma[0].shape))
            )
    if d not in [0, 1]:
        raise ValueError("d = %d, but 0 or 1 required" % d)
    gamma_1d = [g.reshape((n, q, q)) for g in gamma]
    phi_out = [np.zeros([n, q, q]) for _ in range(p)]
    for k in range(n):
        # Assemble and solve the block-Toeplitz Yule-Walker system for grid
        # point k; block (i, j) is Gamma_{|i-j|}, transposed below the
        # diagonal.
        a = np.empty((p * q, p * q))
        for i in range(p):
            for j in range(p):
                a_tmp = gamma_1d[abs(i - j)][k, :]
                if i > j:
                    a_tmp = a_tmp.T
                a[i * q : (i + 1) * q, j * q : (j + 1) * q] = a_tmp
        b = np.vstack([gamma_1d[i][k, :].T for i in range(1, p + 1)])
        x = np.linalg.solve(a, b)
        for i in range(p):
            phi_out[i][k, :, :] = x[i * q : (i + 1) * q, :]
    for i in range(len(phi_out)):
        phi_out[i] = phi_out[i].reshape(np.hstack([gamma[0].shape[:-2], [q, q]]))
    if d == 1:
        # BUG FIX: pass the true variable dimension q (previously hard-coded
        # to 1), so the differencing correction subtracts identity matrices
        # instead of scalars, matching estimate_var_params_ols_localized.
        phi_out = _compute_differenced_model_params(phi_out, p, q, 1)
    # Innovation parameter estimation is not implemented; append zeros.
    phi_out.append(np.zeros(gamma[0].shape))
    return phi_out
def iterate_ar_model(x, phi, eps=None):
    r"""Apply an AR(p) model

    :math:`x_{k+1}=\phi_1 x_k+\phi_2 x_{k-1}+\dots+\phi_p x_{k-p}+\phi_{p+1}\epsilon`

    to a time series :math:`x_k`.

    Parameters
    ----------
    x : array_like
        Array of shape (n,...), n>=p, containing a time series of an input
        variable x. The elements of x along the first dimension are assumed
        to be in ascending order by time, and the time intervals are assumed
        to be regular.
    phi : list
        List or array of length p+1 specifying the parameters of the AR(p)
        model. The parameters are in ascending order by increasing time lag,
        and the last element is the parameter corresponding to the innovation
        term eps.
    eps : scalar or array_like
        Optional innovation term for the AR(p) process. Expected to be a
        scalar or an array of shape x.shape[1:] if len(x.shape)>1. If eps is
        None, the innovation term is not added.

    Returns
    -------
    out : ndarray
        Array of the same shape as x, where the oldest time step has been
        dropped and the new iterate appended as the last time step.

    Raises
    ------
    ValueError
        If x has fewer than p time steps or eps has an incompatible shape.
    """
    if x.shape[0] < len(phi) - 1:
        raise ValueError(
            "dimension mismatch between x and phi: x.shape[0]=%d, len(phi)=%d"
            % (x.shape[0], len(phi))
        )
    if len(x.shape) == 1:
        x_simple_shape = True
        x = x[:, np.newaxis]
    else:
        x_simple_shape = False
    # BUG FIX: the docstring allows a scalar eps, but the previous check
    # unconditionally accessed eps.shape (AttributeError for plain Python
    # numbers) and rejected 0-d values. Validate the shape only for
    # array-valued eps.
    if eps is not None and np.ndim(eps) > 0 and np.shape(eps) != x.shape[1:]:
        raise ValueError(
            "dimension mismatch between x and eps: x.shape=%s, eps.shape=%s"
            % (str(x.shape), str(np.shape(eps)))
        )
    x_new = 0.0
    p = len(phi) - 1
    for i in range(p):
        x_new += phi[i] * x[-(i + 1), :]
    if eps is not None:
        x_new += phi[-1] * eps
    if x_simple_shape:
        # BUG FIX: strip the axis added above before concatenating
        # (previously np.hstack([x[1:], [x_new]]) mixed a (n-1,1) array with
        # a (1,1) array along axis 1, which is shape-broken for n>2).
        # This mirrors the axis handling in iterate_var_model.
        return np.hstack([x[1:, 0], x_new])
    else:
        return np.concatenate([x[1:, :], x_new[np.newaxis, :]])
def iterate_var_model(x, phi, eps=None):
    r"""Apply a VAR(p) model

    :math:`\mathbf{x}_{k+1}=\mathbf{\Phi}_1\mathbf{x}_k+\mathbf{\Phi}_2
    \mathbf{x}_{k-1}+\dots+\mathbf{\Phi}_p\mathbf{x}_{k-p}+
    \mathbf{\Phi}_{p+1}\mathbf{\epsilon}`

    to a q-variate time series :math:`\mathbf{x}_k`.

    Parameters
    ----------
    x : array_like
        Array of shape (n,q,...), n>=p, containing a q-variate time series of
        an input variable x. The elements of x along the first dimension are
        assumed to be in ascending order by time, and the time intervals are
        assumed to be regular.
    phi : list
        List of parameter matrices :math:`\mathbf{\Phi}_1,\mathbf{\Phi}_2,
        \dots,\mathbf{\Phi}_{p+1}`.
    eps : array_like
        Optional innovation term for the VAR(p) process. The shape of eps is
        expected to be (x.shape[1],) or (x.shape[1],x.shape[2:]) if
        len(x.shape)>2. If eps is None, the innovation term is not added.

    Returns
    -------
    out : ndarray
        Array of the same shape as x, where the oldest time step has been
        dropped and the new iterate appended as the last time step.
    """
    if x.shape[0] < len(phi) - 1:
        # BUG FIX: report x.shape[0] (previously x.shape[1] was printed under
        # the x.shape[0] label).
        raise ValueError(
            "dimension mismatch between x and phi: x.shape[0]=%d, len(phi)=%d"
            % (x.shape[0], len(phi))
        )
    phi_shape = phi[0].shape
    if phi_shape[-1] != phi_shape[-2]:
        raise ValueError(
            "phi[0].shape = %s, but the last two dimensions are expected to be equal"
            % str(phi_shape)
        )
    for i in range(1, len(phi)):
        if phi[i].shape != phi_shape:
            raise ValueError("dimension mismatch between parameter matrices phi")
    if len(x.shape) == 2:
        x_simple_shape = True
        x = x[:, :, np.newaxis]
    else:
        x_simple_shape = False
    x_new = np.zeros(x.shape[1:])
    p = len(phi) - 1
    for l in range(p):
        x_new += np.einsum("...ij,j...->i...", phi[l], x[-(l + 1), :])
    if eps is not None:
        eps = np.asarray(eps)
        # A plain q-vector gets a trailing sample axis so that it broadcasts
        # against x_new, which always carries one at this point.
        if eps.ndim == 1:
            eps = eps[:, np.newaxis]
        # BUG FIX: apply the innovation matrix Phi_{p+1} once, as in the model
        # equation and in iterate_ar_model (previously it was applied twice
        # via np.dot(phi[-1], phi[-1])).
        x_new += np.einsum("...ij,j...->i...", phi[-1], eps)
    if x_simple_shape:
        return np.vstack([x[1:, :, 0], x_new[:, 0]])
    else:
        x_new = x_new.reshape(x.shape[1:])
        return np.concatenate([x[1:, :], x_new[np.newaxis, :, :]], axis=0)
def test_ar_stationarity(phi):
r"""Test stationarity of an AR(p) process. That is, test that the roots of
the equation :math:`x^p-\phi_1*x^{p-1}-\dots-\phi_p` lie inside the unit
circle.
Parameters
----------
phi : list
List of AR(p) parameters :math:`\phi_1,\phi_2,\dots,\phi_p`.
Returns
-------
out : bool
True/False if the process is/is not stationary.
"""
r = np.array(
[
np.abs(r_)
for r_ in np.roots([1.0 if i == 0 else -phi[i] for i in range(len(phi))])
]
)
return False if np.any(r >= 1) else True
def test_var_stationarity(phi):
r"""Test stationarity of an AR(p) process. That is, test that the moduli of
the eigenvalues of the companion matrix lie inside the unit circle.
Parameters
----------
phi : list
List of VAR(p) parameter matrices :math:`\mathbf{\Phi}_1,\mathbf{\Phi}_2,
\dots,\mathbf{\Phi}_p`.
Returns
-------
out : bool
True/False if the process is/is not stationary.
"""
q = phi[0].shape
for i in range(1, len(phi)):
if phi[i].shape != q:
raise ValueError("dimension mismatch between parameter matrices phi")
p = len(phi)
q = phi[0].shape[0]
M = np.zeros((p * q, p * q))
for i in range(p):
M[0:q, i * q : (i + 1) * q] = phi[i]
for i in range(1, p):
M[i * q : (i + 1) * q, (i - 1) * q : i * q] = np.eye(q, q)
r = np.linalg.eig(M)[0]
return False if np.any(np.abs(r) >= 1) else True
def _compute_differenced_model_params(phi, p, q, d):
    """Transform parameters estimated from a d times differenced series into
    parameters of the corresponding integrated model on the original series.

    Parameters
    ----------
    phi : list or array_like
        The p parameters estimated from the differenced series. Elements may
        be scalars (q=1), (q,q) matrices, or parameter fields with trailing
        (q,q) dimensions.
    p : int
        The order of the model.
    q : int
        The dimension of the model variable (q=1 for the univariate case).
    d : int
        The order of differencing.

    Returns
    -------
    out : list
        List of length p+d containing the transformed parameters.
    """
    phi_out = []
    # Initialize p+d zero parameters with the same shape as the inputs.
    for i in range(p + d):
        if q > 1:
            if len(phi[0].shape) == 2:
                phi_out.append(np.zeros((q, q)))
            else:
                phi_out.append(np.zeros(phi[0].shape))
        else:
            phi_out.append(0.0)
    # Binomial-expansion terms of the d-th order difference operator (for
    # q>1 these contribute scaled identity matrices).
    for i in range(1, d + 1):
        if q > 1:
            phi_out[i - 1] -= binom(d, i) * (-1) ** i * np.eye(q)
        else:
            phi_out[i - 1] -= binom(d, i) * (-1) ** i
    # Add the parameters estimated from the differenced series...
    for i in range(1, p + 1):
        phi_out[i - 1] += phi[i - 1]
    # ...and their cross terms with the difference-operator expansion.
    for i in range(1, p + 1):
        for j in range(1, d + 1):
            phi_out[i + j - 1] += phi[i - 1] * binom(d, j) * (-1) ** j
    return phi_out
| 32.64783
| 110
| 0.555904
| 6,096
| 39,863
| 3.543307
| 0.058071
| 0.025926
| 0.012222
| 0.022407
| 0.85037
| 0.81463
| 0.77912
| 0.749769
| 0.72625
| 0.694444
| 0
| 0.022967
| 0.308607
| 39,863
| 1,220
| 111
| 32.67459
| 0.760749
| 0.43642
| 0
| 0.527119
| 1
| 0.00678
| 0.067608
| 0.004373
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027119
| false
| 0
| 0.00678
| 0
| 0.066102
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
bc4aba728f28f7c57f8139b67de061a513693c30
| 5,920
|
py
|
Python
|
src/encoded/tests/test_types_functional_characterization_experiment.py
|
procha2/encoded
|
e9f122362b71f3b8641023b8d2d5ad531d3484b7
|
[
"MIT"
] | null | null | null |
src/encoded/tests/test_types_functional_characterization_experiment.py
|
procha2/encoded
|
e9f122362b71f3b8641023b8d2d5ad531d3484b7
|
[
"MIT"
] | null | null | null |
src/encoded/tests/test_types_functional_characterization_experiment.py
|
procha2/encoded
|
e9f122362b71f3b8641023b8d2d5ad531d3484b7
|
[
"MIT"
] | null | null | null |
import pytest
def test_fcc_replicate_type(
    testapp, functional_characterization_experiment, pooled_clone_sequencing
):
    """An FCC experiment without replicates indexes as unreplicated; pooled
    clone sequencing carries no replication_type at all."""
    experiment_view = testapp.get(
        functional_characterization_experiment['@id'] + '@@index-data'
    )
    assert experiment_view.json['object']['replication_type'] == 'unreplicated'
    sequencing_view = testapp.get(
        pooled_clone_sequencing['@id'] + '@@index-data'
    )
    assert 'replication_type' not in sequencing_view.json['object']
def test_fcc_crispr_assay_perturbation(testapp, functional_characterization_experiment_disruption_screen, biosample_1, biosample_2, library_1, library_2, replicate_1_fce, replicate_2_fce, disruption_genetic_modification, activation_genetic_modification, binding_genetic_modification, ctcf):
    """The calculated perturbation_type follows the CRISPR genetic
    modifications of the replicates' biosamples: present when all replicates
    agree on one modification category, absent otherwise."""
    # Wire both replicates to biosamples carrying the same (disruption)
    # CRISPR modification.
    testapp.patch_json(biosample_1['@id'], {'genetic_modifications': [disruption_genetic_modification['@id']]})
    testapp.patch_json(biosample_2['@id'], {'genetic_modifications': [disruption_genetic_modification['@id']]})
    testapp.patch_json(library_1['@id'], {'biosample': biosample_1['@id']})
    testapp.patch_json(library_2['@id'], {'biosample': biosample_2['@id']})
    testapp.patch_json(replicate_1_fce['@id'], {'library': library_1['@id']})
    testapp.patch_json(replicate_2_fce['@id'], {'library': library_2['@id']})
    res = testapp.get(functional_characterization_experiment_disruption_screen['@id']+'@@index-data')
    assert res.json['object']['perturbation_type'] == 'disruption'
    # more than one CRISPR characterization genetic modification
    testapp.patch_json(biosample_1['@id'], {'genetic_modifications': [disruption_genetic_modification['@id']]})
    testapp.patch_json(biosample_2['@id'], {'genetic_modifications': [disruption_genetic_modification['@id'], activation_genetic_modification['@id']]})
    res = testapp.get(functional_characterization_experiment_disruption_screen['@id']+'@@index-data')
    assert 'perturbation_type' not in res.json['object']
    # binding CRISPR genetic modification
    testapp.patch_json(biosample_1['@id'], {'genetic_modifications': [binding_genetic_modification['@id']]})
    testapp.patch_json(biosample_2['@id'], {'genetic_modifications': [binding_genetic_modification['@id']]})
    res = testapp.get(functional_characterization_experiment_disruption_screen['@id']+'@@index-data')
    assert res.json['object']['perturbation_type'] == 'binding'
def test_fcc_crispr_assay_readout_method(testapp, functional_characterization_experiment_disruption_screen, biosample_1, biosample_2, library_1, library_2, replicate_1_fce, replicate_2_fce, disruption_genetic_modification, activation_genetic_modification, binding_genetic_modification, ctcf):
    """The calculated crispr_screen_readout is 'proliferation' when no
    examined_loci are present, mirrors a single shared
    expression_measurement_method, and is omitted when methods disagree or
    the experiment is a control."""
    # Wire both replicates to biosamples carrying the disruption modification.
    testapp.patch_json(biosample_1['@id'], {'genetic_modifications': [disruption_genetic_modification['@id']]})
    testapp.patch_json(biosample_2['@id'], {'genetic_modifications': [disruption_genetic_modification['@id']]})
    testapp.patch_json(library_1['@id'], {'biosample': biosample_1['@id']})
    testapp.patch_json(library_2['@id'], {'biosample': biosample_2['@id']})
    testapp.patch_json(replicate_1_fce['@id'], {'library': library_1['@id']})
    testapp.patch_json(replicate_2_fce['@id'], {'library': library_2['@id']})
    res = testapp.get(functional_characterization_experiment_disruption_screen['@id']+'@@index-data')
    assert res.json['object']['crispr_screen_readout'] == 'proliferation'
    # patch with basic examined_loci, no method
    testapp.patch_json(functional_characterization_experiment_disruption_screen['@id'], {'examined_loci': [{'gene': ctcf['uuid'], 'expression_percentile': 100}]})
    res = testapp.get(functional_characterization_experiment_disruption_screen['@id']+'@@index-data')
    assert 'crispr_screen_readout' not in res.json['object']
    # expression_measurement_method in examined_loci
    testapp.patch_json(functional_characterization_experiment_disruption_screen['@id'], {'examined_loci': [{'gene': ctcf['uuid'], 'expression_percentile': 100, 'expression_measurement_method': 'HCR-FlowFISH'}]})
    res = testapp.get(functional_characterization_experiment_disruption_screen['@id']+'@@index-data')
    assert res.json['object']['crispr_screen_readout'] == 'HCR-FlowFISH'
    # multiple different expression_measurement_method in examined_loci
    testapp.patch_json(
        functional_characterization_experiment_disruption_screen['@id'],
        {'examined_loci': [{'gene': ctcf['uuid'], 'expression_percentile': 100, 'expression_measurement_method': 'HCR-FlowFISH'},
        {'gene': ctcf['uuid'], 'expression_percentile': 10, 'expression_measurement_method': 'PrimeFlow'}]})
    res = testapp.get(functional_characterization_experiment_disruption_screen['@id']+'@@index-data')
    assert 'crispr_screen_readout' not in res.json['object']
    # controls should not receive crispr_screen_readout property
    testapp.patch_json(functional_characterization_experiment_disruption_screen['@id'], {'control_type': 'control'})
    res = testapp.get(functional_characterization_experiment_disruption_screen['@id']+'@@index-data')
    assert 'crispr_screen_readout' not in res.json['object']
def test_fcc_replication_count_0(testapp, functional_characterization_experiment_disruption_screen):
    """A disruption screen with no replicates reports zero bio/tech replicate counts."""
    index_object = testapp.get(
        functional_characterization_experiment_disruption_screen['@id'] + '@@index-data'
    ).json['object']
    assert index_object['bio_replicate_count'] == 0
    assert index_object['tech_replicate_count'] == 0
def test_fcc_replication_count_2(testapp, functional_characterization_experiment_disruption_screen, library_1, library_2, replicate_1_fce, replicate_2_fce):
    """Attaching two replicates with distinct libraries yields counts of 2/2."""
    for replicate, library in ((replicate_1_fce, library_1), (replicate_2_fce, library_2)):
        testapp.patch_json(replicate['@id'], {'library': library['@id']})
    index_object = testapp.get(
        functional_characterization_experiment_disruption_screen['@id'] + '@@index-data'
    ).json['object']
    assert index_object['bio_replicate_count'] == 2
    assert index_object['tech_replicate_count'] == 2
| 75.897436
| 292
| 0.759459
| 691
| 5,920
| 6.127352
| 0.109986
| 0.062352
| 0.083137
| 0.19556
| 0.885215
| 0.86018
| 0.814596
| 0.788144
| 0.779169
| 0.762636
| 0
| 0.011351
| 0.09223
| 5,920
| 77
| 293
| 76.883117
| 0.776517
| 0.052027
| 0
| 0.491803
| 0
| 0
| 0.22248
| 0.079215
| 0
| 0
| 0
| 0
| 0.196721
| 1
| 0.081967
| false
| 0
| 0.016393
| 0
| 0.098361
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
bc4e20e407a125e8db054cdc715603ca155902e0
| 1,034
|
py
|
Python
|
temboo/core/Library/Twitter/Users/__init__.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | 7
|
2016-03-07T02:07:21.000Z
|
2022-01-21T02:22:41.000Z
|
temboo/core/Library/Twitter/Users/__init__.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | null | null | null |
temboo/core/Library/Twitter/Users/__init__.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | 8
|
2016-06-14T06:01:11.000Z
|
2020-04-22T09:21:44.000Z
|
from temboo.Library.Twitter.Users.GetAccountSettings import GetAccountSettings, GetAccountSettingsInputSet, GetAccountSettingsResultSet, GetAccountSettingsChoreographyExecution
from temboo.Library.Twitter.Users.Lookup import Lookup, LookupInputSet, LookupResultSet, LookupChoreographyExecution
from temboo.Library.Twitter.Users.Search import Search, SearchInputSet, SearchResultSet, SearchChoreographyExecution
from temboo.Library.Twitter.Users.Show import Show, ShowInputSet, ShowResultSet, ShowChoreographyExecution
from temboo.Library.Twitter.Users.UpdateAccountSettings import UpdateAccountSettings, UpdateAccountSettingsInputSet, UpdateAccountSettingsResultSet, UpdateAccountSettingsChoreographyExecution
from temboo.Library.Twitter.Users.UpdateProfile import UpdateProfile, UpdateProfileInputSet, UpdateProfileResultSet, UpdateProfileChoreographyExecution
from temboo.Library.Twitter.Users.VerifyCredentials import VerifyCredentials, VerifyCredentialsInputSet, VerifyCredentialsResultSet, VerifyCredentialsChoreographyExecution
| 129.25
| 191
| 0.905222
| 77
| 1,034
| 12.155844
| 0.441558
| 0.074786
| 0.127137
| 0.179487
| 0.21688
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.047389
| 1,034
| 7
| 192
| 147.714286
| 0.950254
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
bc70659378b0bf34e46d6340ab865971e3bf2f6f
| 31,342
|
py
|
Python
|
main_code.py
|
sjshtura/Code_Snippets
|
4f0a3083966c02849fdbeb9f4272013892f08cbf
|
[
"MIT"
] | null | null | null |
main_code.py
|
sjshtura/Code_Snippets
|
4f0a3083966c02849fdbeb9f4272013892f08cbf
|
[
"MIT"
] | null | null | null |
main_code.py
|
sjshtura/Code_Snippets
|
4f0a3083966c02849fdbeb9f4272013892f08cbf
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Functions for estimating electricity prices, eeg levies, remunerations and other components, based on customer type and annual demand
@author: Abuzar and Shakhawat
"""
from typing import ValuesView
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from scipy import interpolate
from scipy.interpolate import InterpolatedUnivariateSpline
def calculate_mean_price(customer_type, val_yearly_demand):
    """
    Interactively estimate and plot next-year electricity prices for a customer.

    Parameters
    ----------
    customer_type : int
        0 for household customers, 1 for industrial customers.
    val_yearly_demand : float
        Yearly electricity demand; KWh/y for households, MWh/y for industry.

    Returns
    -------
    None. Side effects only: prompts via input(), prints the extended price
    series, and saves plots under images/.

    Notes
    -----
    The original implementation repeated the same interactive dialog once per
    customer segment; the copies had drifted apart. In particular the
    2000-20000 MWh branch applied the HT/NT tariff factor twice (once when the
    series was pre-computed, and again inside the branch), and plot titles for
    the same action differed between segments. This version selects the price
    history per segment and runs one shared dialog, applying each factor
    exactly once.
    """

    def plotting(x, y, title, x_label, y_label, name_plot):
        # Render a simple line plot of the price series and save it as a PNG.
        fig = plt.figure()
        plt.plot(x, y)
        plt.title(title)
        plt.xlabel(x_label)
        plt.ylabel(y_label)
        plt.xticks(x, x)
        plt.xticks(rotation=45)
        fig.savefig(name_plot, dpi=fig.dpi)

    _WORKDAYS = ['Wednesday', 'Thursday', 'Friday', 'Monday', 'Tuesday']

    def haupt_tarif(data):
        # Ratio of the mean price in the Hauptarif window (workdays, hours
        # 8-19) to the yearly mean price.
        df_with_data = pd.read_excel(data)
        yearly_mean = df_with_data.price.mean()
        window = df_with_data[
            df_with_data["hour"].isin([8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19])
            & df_with_data["Day"].isin(_WORKDAYS)
        ]
        return window.price.mean() / yearly_mean

    def neben_tarif(data):
        # Ratio of the mean price in the Nebentarif window (workday off-hours
        # plus all weekend hours) to the yearly mean price.
        df_with_data = pd.read_excel(data)
        yearly_mean = df_with_data.price.mean()
        window = df_with_data[
            (df_with_data["hour"].isin([1, 2, 3, 4, 5, 6, 7, 20, 21, 22, 23, 24])
             & df_with_data["Day"].isin(_WORKDAYS))
            | df_with_data["Day"].isin(['Saturday', 'Sunday'])
        ]
        return window.price.mean() / yearly_mean

    ht_factor = haupt_tarif("ht_nt_price.xlsx")
    nt_factor = neben_tarif("ht_nt_price.xlsx")

    def load_destatis_prices(sheet_name):
        # Load one yearly mean price series (year index, 'price' column) from
        # the Destatis workbook. Shared by the industrial and household sheets.
        prices = pd.read_excel(r'Energiepreisentwicklung.xlsx', sheet_name=sheet_name,
                               skiprows=5, nrows=26, index_col=0)
        prices = prices.iloc[:, 0].reset_index()
        prices["index"] = prices["index"].str.slice(start=5)
        prices.columns = ["year", "price"]
        prices = prices.set_index("year")
        prices.index = prices.index.astype(str)
        prices.index = pd.to_datetime(prices.index, errors='ignore')
        prices = prices.astype(float)
        prices = prices.resample('12M').mean()
        prices.index = prices.index.astype(str)
        prices.index = prices.index.str.slice(start=0, stop=-6)
        return prices

    # Industrial customers, 2000 - 20000 MWh/y.
    industrie_prices_without_VAT = load_destatis_prices('5.8.3 Strom - € - Industrie').reset_index()
    industrie_prices_without_VAT = industrie_prices_without_VAT[industrie_prices_without_VAT.year >= str(2016)]

    # Industrial prices > 150000 MWh/y (BDEW figures).
    v_big_industrial_prices_BDEW = pd.DataFrame(data={'year': range(2019, 2021), 'price': [3.77, 3.05]})

    # Industrial prices between 70000-150000 MWh/y (BDEW figures).
    big_industrial_prices_BDEW = pd.DataFrame(data={'year': range(2016, 2021),
                                                    'price': [8.37, 9.96, 8.96, 9.28, 10.07]})

    # Industrial prices between 20000-70000 MWh/y.
    mid_industrie_prices = pd.read_excel(r'mid_size_industrial_prices.xlsx')
    mid_industrie_prices.columns = ['year', 'price']

    # Household electricity prices between 2500-5000 KWh/y.
    household_prices_without_VAT = load_destatis_prices('5.8.2 Strom - € - Haushalte')
    household_prices_without_VAT = household_prices_without_VAT[6:].reset_index()
    household_prices_without_VAT = household_prices_without_VAT[household_prices_without_VAT.year >= str(2016)]

    def segment_dialog(history):
        # Shared interactive dialog: extend *history* (DataFrame with 'year'
        # and 'price' columns) by a 2021 value that is either user-supplied
        # (known price, optionally split into HT/NT) or extrapolated, then
        # print and plot the extended series.
        history["year"] = history["year"].astype(int)
        year = history["year"]
        price = history["price"]
        print("Do you already know your electricty price?")
        print("Yes = 0 / No = 1")
        val = int(input("Enter your value: "))
        if val == 0:
            print("Do you have a fixed electricity price or HT/NT price structure?")
            val_ht_nt = int(input("Enter 0 (zero) for yearly mean price and Enter 1 for HT/NT price structure: "))
            if val_ht_nt == 1:
                val1 = float(input("Enter HT value: "))
                val2 = float(input("Enter NT value: "))
                # Apply each tariff factor exactly once to the historical series.
                ht_new_year = np.append(year, 2021)
                ht_new_price = np.append(price * ht_factor, val1)
                print(ht_new_year)
                print(ht_new_price)
                plotting(ht_new_year, ht_new_price, "HT Price", "Year", "Price", "images/HT Price.png")
                nt_new_year = np.append(year, 2021)
                nt_new_price = np.append(price * nt_factor, val2)
                print(nt_new_year)
                print(nt_new_price)
                plotting(nt_new_year, nt_new_price, "NT Price", "Year", "Price", "images/NT Price.png")
            elif val_ht_nt == 0:
                val1 = float(input("Enter yearly mean price for electricity: "))
                new_year = np.append(year, 2021)
                new_price = np.append(price, val1)
                print(new_year)
                print(new_price)
                plotting(new_year, new_price, "Price", "Year", "Price", "images/Price.png")
        elif val == 1:
            # Price unknown: extrapolate 2021 from the historical trend.
            f = interpolate.interp1d(year, price, fill_value="extrapolate")
            new_year = np.append(year, 2021)
            new_price = np.append(price, f(2021))
            print(new_year)
            print(new_price)
            plotting(new_year, new_price, "Price", "Year", "Price", "images/Price.png")

    # Segment dispatch: pick the price history matching customer type and demand.
    if customer_type == 0 and 2500 <= val_yearly_demand <= 5000:
        segment_dialog(household_prices_without_VAT)
    elif customer_type == 1 and 0 < val_yearly_demand < 2000:
        # Small industrial customers fall back to the household series,
        # as in the original implementation.
        segment_dialog(household_prices_without_VAT)
    elif customer_type == 1 and 2000 <= val_yearly_demand <= 20000:
        segment_dialog(industrie_prices_without_VAT)
    elif customer_type == 1 and 20000 < val_yearly_demand <= 70000:
        segment_dialog(mid_industrie_prices)
    elif customer_type == 1 and 70000 < val_yearly_demand <= 150000:
        segment_dialog(big_industrial_prices_BDEW)
    elif customer_type == 1 and val_yearly_demand > 150000:
        segment_dialog(v_big_industrial_prices_BDEW)
    else:
        # Previously this case fell through silently; make it visible.
        print("No price data available for this customer type / demand range.")
def _main():
    """Prompt for customer category and yearly demand, then run the estimate.

    Fixes two defects of the original top-level script: (1) an unexpected
    category answer left ``val2`` unbound and crashed with NameError on the
    final call; (2) the dialog ran as a module-import side effect — it is now
    guarded by ``__main__``.
    """
    print("Which type of Customer category you have?")
    print("Enter 0 (zero) for Household customers and 1 (one) for Industrial customers.")
    val1 = int(input("Please enter your value: "))
    if val1 == 0:
        print("What is your yearly electricty demand (in KWh/y)?")
        val2 = float(input("Please enter your value: "))
    elif val1 == 1:
        val2 = float(input("What is your yearly electricty demand in (MWh/y)? "))
    else:
        raise SystemExit("Unknown customer category; expected 0 or 1.")
    calculate_mean_price(val1, val2)


if __name__ == "__main__":
    _main()
| 52.236667
| 244
| 0.628039
| 4,150
| 31,342
| 4.365301
| 0.053976
| 0.144955
| 0.178406
| 0.194579
| 0.904063
| 0.890042
| 0.864981
| 0.839865
| 0.806635
| 0.781906
| 0
| 0.02431
| 0.26632
| 31,342
| 599
| 245
| 52.323873
| 0.763427
| 0.124466
| 0
| 0.773893
| 0
| 0
| 0.133785
| 0.003184
| 0
| 0
| 0
| 0
| 0
| 1
| 0.009324
| false
| 0
| 0.013986
| 0
| 0.027972
| 0.160839
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
4c15bc3ae6d27342af3401b95744c85e8ac312fb
| 2,334
|
py
|
Python
|
tests/api_test.py
|
silvermullet/dinghy-ping
|
5ad10c20f19eadcf840748e65bef5b9368e9512a
|
[
"MIT"
] | 20
|
2018-10-19T21:12:38.000Z
|
2022-02-15T20:45:49.000Z
|
tests/api_test.py
|
silvermullet/dinghy-ping
|
5ad10c20f19eadcf840748e65bef5b9368e9512a
|
[
"MIT"
] | 51
|
2018-10-19T21:09:01.000Z
|
2022-03-25T01:03:18.000Z
|
tests/api_test.py
|
silvermullet/dinghy-ping
|
5ad10c20f19eadcf840748e65bef5b9368e9512a
|
[
"MIT"
] | 7
|
2018-10-29T09:41:25.000Z
|
2019-07-19T18:58:51.000Z
|
import json
import pytest
import sys
sys.path.insert(0, './dinghy_ping/')
import services.api as service
import models.data as data
with open('tests/multiple_domains.json') as f:
multiple_domains = json.load(f)
@pytest.fixture
def api():
    # The dinghy-ping responder API object under test.
    return service.api
@pytest.fixture
def session(api):
    # Convenience fixture: the test client attached to the API under test.
    return api.requests
def test_dinghy_ping_google_http(api):
    # Plain HTTP ping of google.com returns 200.
    # NOTE(review): this function is shadowed by an identically named test
    # defined right below it, so pytest never collects this one — one of the
    # two should be renamed.
    r = api.requests.get("/ping/http/google.com")
    assert r.status_code == 200
def test_dinghy_ping_google_tcp_connection(api):
    """TCP connection-test form endpoint returns 200 for google.com:443.

    Renamed from ``test_dinghy_ping_google_http``: the duplicate name shadowed
    the HTTP ping test defined above, so pytest silently ran only one of the
    two. The new name also describes what this test actually exercises.
    """
    r = api.requests.get("/form-input-tcp-connection-test?tcp-endpoint=google.com&tcp-port=443")
    assert r.status_code == 200
def test_dinghy_ping_google_https_and_query_params(api):
    # The ping endpoint must tolerate a target URL that itself carries a long
    # query string (everything after /ping/https/ is part of the target).
    r = api.requests.get("/ping/https/www.google.com/search?source=hp&ei=aIHTW9mLNuOJ0gK8g624Ag&q=dinghy&btnK=Google+Search&oq=dinghy&gs_l=psy-ab.3..35i39l2j0i131j0i20i264j0j0i20i264j0l4.4754.5606..6143...1.0..0.585.957.6j5-1......0....1..gws-wiz.....6..0i67j0i131i20i264.oe0qJ9brs-8")
    assert r.status_code == 200
def test_dinghy_ping_google_no_proto_set_and_query_params(api):
    # Same as the https test but with an empty protocol segment (/ping//...);
    # the service is expected to cope with the missing scheme.
    r = api.requests.get("/ping//www.google.com/search?source=hp&ei=aIHTW9mLNuOJ0gK8g624Ag&q=dinghy&btnK=Google+Search&oq=dinghy&gs_l=psy-ab.3..35i39l2j0i131j0i20i264j0j0i20i264j0l4.4754.5606..6143...1.0..0.585.957.6j5-1......0....1..gws-wiz.....6..0i67j0i131i20i264.oe0qJ9brs-8")
    assert r.status_code == 200
"""
def test_multiple_domains_request_for_google(api):
r = api.requests.post(api.url_for("ping_multiple_domains"), json=multiple_domains)
response_json = r.json()
assert response_json['domains_response_results'][0]['domain_response_code'] == 200
def test_multiple_domains_request_for_google_with_params(api):
r = api.requests.post(api.url_for("ping_multiple_domains"), json=multiple_domains)
response_json = r.json()
assert response_json['domains_response_results'][1]['domain_response_code'] == 200
def test_multiple_domains_request_for_microsoft(api):
r = api.requests.post(api.url_for("ping_multiple_domains"), json=multiple_domains)
response_json = r.json()
assert response_json['domains_response_results'][2]['domain_response_code'] == 200
"""
def test_ping_saved_results(api):
    """Pinging a URL should record it among the saved pinged URLs."""
    api.requests.get("/ping/http/www.google.com")
    pinged = service._get_all_pinged_urls()
    assert "http://www.google.com/" in pinged
| 35.907692
| 285
| 0.751928
| 356
| 2,334
| 4.685393
| 0.264045
| 0.098921
| 0.029377
| 0.06295
| 0.747602
| 0.728417
| 0.709233
| 0.709233
| 0.709233
| 0.643885
| 0
| 0.082818
| 0.099829
| 2,334
| 64
| 286
| 36.46875
| 0.71109
| 0
| 0
| 0.266667
| 0
| 0.1
| 0.443726
| 0.42044
| 0
| 0
| 0
| 0
| 0.166667
| 1
| 0.233333
| false
| 0
| 0.166667
| 0.066667
| 0.466667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4c1b53c9854de75f596ad6c9700b82a607f9c70b
| 48
|
py
|
Python
|
lunguka/__init__.py
|
pyzi/funniest
|
5a62792b119f887542a121bb56198cb212789eed
|
[
"MIT"
] | null | null | null |
lunguka/__init__.py
|
pyzi/funniest
|
5a62792b119f887542a121bb56198cb212789eed
|
[
"MIT"
] | 1
|
2018-01-19T13:11:09.000Z
|
2018-01-19T13:11:09.000Z
|
lunguka/__init__.py
|
pyzi/funniest
|
5a62792b119f887542a121bb56198cb212789eed
|
[
"MIT"
] | null | null | null |
def joke():
    """Return the package's canned joke string."""
    text = u'Isto aqui e um joke'
    return text
| 16
| 35
| 0.604167
| 9
| 48
| 3.222222
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 48
| 2
| 36
| 24
| 0.805556
| 0
| 0
| 0
| 0
| 0
| 0.395833
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
4c66480ff7fe507853e348dd1b75faef7494f86c
| 15,380
|
py
|
Python
|
crawler/getAllData.py
|
zp1008611/housePrice
|
fd5a6322ff6960f64fdea797902debde074a88b5
|
[
"MIT"
] | 1
|
2022-01-23T05:23:54.000Z
|
2022-01-23T05:23:54.000Z
|
crawler/getAllData.py
|
zp1008611/housePrice
|
fd5a6322ff6960f64fdea797902debde074a88b5
|
[
"MIT"
] | null | null | null |
crawler/getAllData.py
|
zp1008611/housePrice
|
fd5a6322ff6960f64fdea797902debde074a88b5
|
[
"MIT"
] | null | null | null |
# encoding: utf-8
import pymysql
from getURL import getDayUrl,getUrl
from getJingtaiData import get_jingtai_data
from getCommunityData import get_community_data
from getIpPool import makeIp
import time
import traceback
import random
import datetime
import sys
from conn import get_conn,close_conn
def update_count():
    """Refresh the `sevenDay` listing-count table from today's crawl.

    Drops rows older than seven days, scrapes the day's listing URLs via
    getDayUrl(), and inserts listings not already stored for their release
    date. Progress is appended to crawlerlog.txt; failures to errorlog.txt.
    """
    try:
        cursor = None
        conn = None
        conn, cursor = get_conn()
        # Short random pause to avoid hammering the target site.
        stopTime = random.uniform(0,1)
        time.sleep(stopTime)
        DayUrl_dic = getDayUrl()
        # {house ID: {'releaseTime': release time, 'area': district, 'price': total price}}
        try:
            time_sql = "SELECT DISTINCT releaseTime FROM sevenDay ORDER BY releaseTime ASC LIMIT 1"
            cursor.execute(time_sql)
            data=cursor.fetchall()
            # Purge the oldest day once it falls outside the 7-day window.
            if int((datetime.datetime.now()-data[0].get('releaseTime')).days) > 7:
                del_sql="DELETE FROM sevenDay WHERE releaseTime=%s"
                cursor.execute(del_sql,data[0].get('releaseTime'))
                conn.commit()
        except:
            # Best-effort cleanup: an empty table or failed lookup is ignored.
            pass
        logFile = open('crawlerlog.txt', 'a')
        logFile.write(f"================{time.asctime()}=================")
        logFile.write("\n")
        logFile.write(f'{time.asctime()}数量数据开始更新')
        logFile.write("\n")
        logFile.close()
        conn, cursor = get_conn()
        insert_sql = "insert into sevenDay values(%s, %s, %s, %s, %s, %s)"
        sql_query = "select houseID from sevenDay where releaseTime=%(releaseTime)s and houseID=%(id)s"
        for key,value in DayUrl_dic.items():
            # Only insert listings not already stored for this release date
            # (cursor.execute returns the matched row count).
            if not cursor.execute(sql_query, {'releaseTime':value.get('releaseTime'),'id':key}):
                cursor.execute(insert_sql, [key, value.get('releaseTime'), value.get('area'), value.get('price'), value.get('avePrice'), value.get('houseurl')])
                conn.commit()  # commit the update/delete/insert operations
        logFile = open('crawlerlog.txt', 'a')
        logFile.write(f"================{time.asctime()}=================\n")
        logFile.write(f'{time.asctime()}数量数据更新完毕')
        logFile.write("\n")
        logFile.close()
    except:
        # traceback.print_exc()
        errorFile = open('errorlog.txt', 'a')
        errorFile.write(f"================{time.asctime()}=================")
        errorFile.write(traceback.format_exc())
        errorFile.close()
    finally:
        close_conn(conn, cursor)
def update_seven():
    """Migrate listings that are exactly seven days old into `eachhouse`.

    Takes the oldest day recorded in `sevenDay`; for each of its listings
    scrapes the static house page (and the community page when the community
    is not yet stored) and inserts the combined row into `eachhouse`.
    Progress goes to crawlerlog.txt; failures to errorlog.txt.
    """
    try:
        cursor = None
        conn = None
        conn, cursor = get_conn()
        time_sql = "SELECT DISTINCT releaseTime FROM sevenDay ORDER BY releaseTime ASC LIMIT 1"
        cursor.execute(time_sql)
        data=cursor.fetchall()
        # Only act when the oldest recorded day is exactly 7 days old.
        if int((datetime.datetime.now()-data[0].get('releaseTime')).days) == 7:
            seven_sql="SELECT * FROM sevenDay WHERE releaseTime=%s"
            cursor.execute(seven_sql,data[0].get('releaseTime'))
            url_lis=cursor.fetchall()
            logFile = open('crawlerlog.txt', 'a')
            logFile.write(f"================{time.asctime()}=================\n")
            logFile.write(f'{time.asctime()}七天前数据开始更新')
            logFile.write("\n")
            logFile.close()
            # Build "insert into eachhouse values(%s,...)" with one placeholder
            # per column, reading the schema from information_schema.
            houseFieldName=[]
            houseField_sql = "SELECT COLUMN_NAME FROM information_schema.`COLUMNS` WHERE TABLE_NAME = 'eachhouse'"
            cursor.execute(houseField_sql)
            houseField_data=cursor.fetchall()
            houseInsert = "insert into eachhouse values("
            for i in range(len(houseField_data)):
                houseFieldName.append(houseField_data[i].get('COLUMN_NAME'))
                if i!=len(houseField_data)-1:
                    houseInsert += '%s,'
                else:
                    houseInsert += '%s'
            houseInsert += ')'
            # Same dynamic placeholder construction for the community table.
            communityFieldName=[]
            communityField_sql = "SELECT COLUMN_NAME FROM information_schema.`COLUMNS` WHERE TABLE_NAME = 'community'"
            cursor.execute(communityField_sql)
            communityField_data=cursor.fetchall()
            communityInsert = "insert into community values("
            # communityInsert = "insert into community values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
            for i in range(len(communityField_data)):
                communityFieldName.append(communityField_data[i].get('COLUMN_NAME'))
                if i!=len(communityField_data)-1:
                    communityInsert += '%s,'
                else:
                    communityInsert += '%s'
            communityInsert += ')'
            community_sql = "select communityID from community where communityID=%s"
            house_sql = "select houseID from eachhouse where houseID=%s"
            for value in url_lis:
                # Skip houses already present in eachhouse.
                if not cursor.execute(house_sql, value.get('houseID')):
                    stopTime = random.uniform(1,2)
                    time.sleep(stopTime)
                    jingtaiRes = get_jingtai_data([value.get('houseurl'),value.get('area')])
                    if not jingtaiRes:  # some houses lack a total price or a scrapable community ID; the scraper returns empty for those, so skip
                        continue
                    communitylis=[]
                    if not cursor.execute(community_sql, jingtaiRes['communityID']):  # insert the community when it is not stored yet
                        stopTime = random.uniform(1,2)
                        time.sleep(stopTime)
                        communityRes = get_community_data(jingtaiRes['小区详情url'])
                        try:
                            for Name in communityFieldName:
                                communitylis.append(communityRes.get(Name,'NULL'))
                            cursor.execute(communityInsert, communitylis)
                            # Drop the first two columns before appending to the
                            # house row (mirrors the pop of 'crawlingTime' and
                            # 'communityID' in the else branch below).
                            del communitylis[0]
                            del communitylis[0]
                            conn.commit()
                        except:
                            errorFile = open('errorlog.txt', 'a')
                            errorFile.write(f"================{time.asctime()}=================")
                            errorFile.write(traceback.format_exc())
                            errorFile.close()
                    else :
                        # Community already stored: reuse its row (minus crawl
                        # time and ID) for the house insert below.
                        get_community_sql = "select * from community where communityID=%s"
                        cursor.execute(get_community_sql, jingtaiRes['communityID'])
                        communityRes=cursor.fetchall()[0]
                        communityRes.pop('crawlingTime')
                        communityRes.pop('communityID')
                        for Name in list(communityRes.keys()):
                            communitylis.append(communityRes.get(Name,'NULL'))
                    if communitylis==[]:
                        continue
                    # House attributes (Chinese keys are the scraped field
                    # names) followed by the community columns.
                    cursor.execute(houseInsert, [jingtaiRes.get('houseID','NULL'),jingtaiRes.get('communityID','NULL'),
                        datetime.datetime.strptime(jingtaiRes.get('挂牌时间',datetime.datetime.now()), "%Y-%m-%d"),
                        jingtaiRes.get('房屋总价','NULL'),jingtaiRes.get('房屋每平米价','NULL'),
                        jingtaiRes.get('建楼时间','NULL'),
                        jingtaiRes.get('房屋户型','NULL'),jingtaiRes.get('所在楼层','NULL'),
                        jingtaiRes.get('建筑面积','NULL'),jingtaiRes.get('户型结构','NULL'),
                        jingtaiRes.get('套内面积','NULL'),jingtaiRes.get('建筑类型','NULL'),
                        jingtaiRes.get('房屋朝向','NULL'),jingtaiRes.get('建筑结构','NULL'),
                        jingtaiRes.get('装修情况','NULL'),jingtaiRes.get('梯户比例','NULL'),
                        jingtaiRes.get('配备电梯','NULL'),
                        jingtaiRes.get('交易权属','NULL'),jingtaiRes.get('上次交易','NULL'),
                        jingtaiRes.get('房屋用途','NULL'),jingtaiRes.get('房屋年限','NULL'),
                        jingtaiRes.get('产权所属','NULL'),jingtaiRes.get('抵押信息','NULL'),
                        jingtaiRes.get('房本备件','NULL'),jingtaiRes.get('户型分间','NULL'),
                        jingtaiRes.get('小区详情url','NULL'),jingtaiRes.get('area','NULL')]+communitylis)
                    conn.commit()
            logFile = open('crawlerlog.txt', 'a')
            logFile.write(f"================{time.asctime()}=================\n")
            logFile.write(f'{time.asctime()}七天前数据更新完毕')
            logFile.write("\n")
            logFile.close()
    except:
        errorFile = open('errorlog.txt', 'a')
        errorFile.write(f"================{time.asctime()}=================")
        errorFile.write(traceback.format_exc())
        errorFile.close()
    finally:
        close_conn(conn, cursor)
def update_data():
    """Refresh the `eachhouse` table from the current listing URLs.

    Deletes listings older than 90 days, then scrapes every URL returned by
    getUrl() — static house data plus (when missing) community data — and
    inserts the combined row. Progress goes to crawlerlog.txt; failures to
    errorlog.txt.
    """
    try:
        cursor = None
        conn = None
        conn, cursor = get_conn()
        time_sql = "SELECT DISTINCT listingTime FROM eachhouse ORDER BY listingTime ASC LIMIT 1"
        cursor.execute(time_sql)
        data=cursor.fetchall()
        try:
            # Purge listings older than 90 days (best effort; errors ignored).
            if int((datetime.datetime.now()-data[0].get('listingTime')).days) > 90:
                del_sql="DELETE FROM eachhouse WHERE listingTime=%s"
                cursor.execute(del_sql,data[0].get('listingTime'))
                conn.commit()
        except:
            pass
        url_lis = getUrl()
        logFile = open('crawlerlog.txt', 'a')
        logFile.write(f"================{time.asctime()}=================\n")
        logFile.write(f'{time.asctime()}房屋数据开始更新')
        logFile.write("\n")
        logFile.close()
        # Build "insert into eachhouse values(%s,...)" with one placeholder per
        # column, reading the schema from information_schema.
        houseFieldName=[]
        houseField_sql = "SELECT COLUMN_NAME FROM information_schema.`COLUMNS` WHERE TABLE_NAME = 'eachhouse'"
        cursor.execute(houseField_sql)
        houseField_data=cursor.fetchall()
        houseInsert = "insert into eachhouse values("
        for i in range(len(houseField_data)):
            houseFieldName.append(houseField_data[i].get('COLUMN_NAME'))
            if i!=len(houseField_data)-1:
                houseInsert += '%s,'
            else:
                houseInsert += '%s'
        houseInsert += ')'
        # Same dynamic placeholder construction for the community table.
        communityFieldName=[]
        communityField_sql = "SELECT COLUMN_NAME FROM information_schema.`COLUMNS` WHERE TABLE_NAME = 'community'"
        cursor.execute(communityField_sql)
        communityField_data=cursor.fetchall()
        communityInsert = "insert into community values("
        # communityInsert = "insert into community values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
        for i in range(len(communityField_data)):
            communityFieldName.append(communityField_data[i].get('COLUMN_NAME'))
            if i!=len(communityField_data)-1:
                communityInsert += '%s,'
            else:
                communityInsert += '%s'
        communityInsert += ')'
        community_sql = "select communityID from community where communityID=%s"
        for value in url_lis:
            jingtaiRes = get_jingtai_data([value.get('houseurl'),value.get('area')])
            if not jingtaiRes:  # some houses lack a total price or a scrapable community ID; the scraper returns empty for those, so skip
                continue
            stopTime = random.uniform(1,2)
            time.sleep(stopTime)
            communitylis=[]
            if not cursor.execute(community_sql, jingtaiRes['communityID']):  # insert the community when it is not stored yet
                stopTime = random.uniform(1,2)
                time.sleep(stopTime)
                communityRes = get_community_data(jingtaiRes['小区详情url'])
                try:
                    for Name in communityFieldName:
                        communitylis.append(communityRes.get(Name,'NULL'))
                    cursor.execute(communityInsert, communitylis)
                    # Drop the first two columns before appending to the house
                    # row (mirrors the pop of 'crawlingTime' and 'communityID'
                    # in the else branch below).
                    del communitylis[0]
                    del communitylis[0]
                    conn.commit()
                except:
                    errorFile = open('errorlog.txt', 'a')
                    errorFile.write(f"================{time.asctime()}=================")
                    errorFile.write(traceback.format_exc())
                    errorFile.close()
            else :
                # Community already stored: reuse its row (minus crawl time and
                # ID) for the house insert below.
                get_community_sql = "select * from community where communityID=%s"
                cursor.execute(get_community_sql, jingtaiRes['communityID'])
                communityRes=cursor.fetchall()[0]
                communityRes.pop('crawlingTime')
                communityRes.pop('communityID')
                for Name in list(communityRes.keys()):
                    communitylis.append(communityRes.get(Name,'NULL'))
            if communitylis==[]:
                continue
            # House attributes (Chinese keys are the scraped field names)
            # followed by the community columns.
            cursor.execute(houseInsert, [jingtaiRes.get('houseID','NULL'),jingtaiRes.get('communityID','NULL'),
                datetime.datetime.strptime(jingtaiRes.get('挂牌时间',datetime.datetime.now()), "%Y-%m-%d"),
                jingtaiRes.get('房屋总价','NULL'),jingtaiRes.get('房屋每平米价','NULL'),
                jingtaiRes.get('建楼时间','NULL'),
                jingtaiRes.get('房屋户型','NULL'),jingtaiRes.get('所在楼层','NULL'),
                jingtaiRes.get('建筑面积','NULL'),jingtaiRes.get('户型结构','NULL'),
                jingtaiRes.get('套内面积','NULL'),jingtaiRes.get('建筑类型','NULL'),
                jingtaiRes.get('房屋朝向','NULL'),jingtaiRes.get('建筑结构','NULL'),
                jingtaiRes.get('装修情况','NULL'),jingtaiRes.get('梯户比例','NULL'),
                jingtaiRes.get('配备电梯','NULL'),
                jingtaiRes.get('交易权属','NULL'),jingtaiRes.get('上次交易','NULL'),
                jingtaiRes.get('房屋用途','NULL'),jingtaiRes.get('房屋年限','NULL'),
                jingtaiRes.get('产权所属','NULL'),jingtaiRes.get('抵押信息','NULL'),
                jingtaiRes.get('房本备件','NULL'),jingtaiRes.get('户型分间','NULL'),
                jingtaiRes.get('小区详情url','NULL'),jingtaiRes.get('area','NULL')]+communitylis)
            conn.commit()
        logFile = open('crawlerlog.txt', 'a')
        logFile.write(f"================{time.asctime()}=================\n")
        logFile.write(f'{time.asctime()}房屋数据更新完毕')
        logFile.write("\n")
        logFile.close()
    except:
        errorFile = open('errorlog.txt', 'a')
        errorFile.write(f"================{time.asctime()}=================")
        errorFile.write(traceback.format_exc())
        errorFile.close()
    finally:
        close_conn(conn, cursor)
def crawlerRun():
    """Reset both log files, then run the three crawl stages in order."""
    # Truncate the logs so every run starts with a clean slate.
    for logname in ("errorlog.txt", "crawlerlog.txt"):
        with open(logname, 'r+') as file:
            file.truncate(0)
    # Refresh the IP pool before each stage.
    makeIp()
    update_data()
    makeIp()
    update_count()
    makeIp()
    update_seven()
    # Record that the cron-driven crawl actually executed.
    with open('/www/wwwroot/test/crawler/crontabtest.log', 'a+') as logfile:
        logfile.write(f"================{time.asctime()}=================")
        logfile.write("执行了爬虫脚本")
        logfile.write("\n")
# Entry point: run the full crawl when executed as a script.
if __name__=='__main__':
    crawlerRun()
| 52.491468
| 161
| 0.509428
| 1,401
| 15,380
| 5.508922
| 0.132049
| 0.094325
| 0.105727
| 0.01192
| 0.835709
| 0.828194
| 0.816662
| 0.793859
| 0.772091
| 0.767168
| 0
| 0.003546
| 0.339922
| 15,380
| 293
| 162
| 52.491468
| 0.756698
| 0.025878
| 0
| 0.809187
| 0
| 0.003534
| 0.20361
| 0.062943
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014134
| false
| 0.007067
| 0.038869
| 0
| 0.053004
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d5b2cad7ea79e1346f51cf6aa1a6bf482e29bbd3
| 5,791
|
py
|
Python
|
exercise_3/exercise_code/rnn/rnn_nn.py
|
dilaraadem/I2DL
|
c9398edfe9978842b40f4dfa08c4b5b0c70714a2
|
[
"RSA-MD"
] | null | null | null |
exercise_3/exercise_code/rnn/rnn_nn.py
|
dilaraadem/I2DL
|
c9398edfe9978842b40f4dfa08c4b5b0c70714a2
|
[
"RSA-MD"
] | 6
|
2020-03-24T18:12:41.000Z
|
2022-03-12T00:14:26.000Z
|
exercise_3/exercise_code/rnn/rnn_nn.py
|
dilaraadem/I2DL
|
c9398edfe9978842b40f4dfa08c4b5b0c70714a2
|
[
"RSA-MD"
] | null | null | null |
import torch
import torch.nn as nn
class RNN(nn.Module):
    """Exercise skeleton for a single-layer vanilla RNN (not implemented).

    Constructor arguments:
    - input_size: Number of features in input vector
    - hidden_size: Dimension of hidden vector
    - activation: Nonlinearity in cell; 'tanh' or 'relu'
    """

    def __init__(self, input_size=1, hidden_size=20, activation="tanh"):
        super().__init__()
        # TODO: Build a simple one-layer RNN with an activation, using the
        # attributes above and the forward function below. Use nn.Linear()
        # for the linear layers. Initialise h as 0 when not given.

    def forward(self, x, h=None):
        """Run the (not yet implemented) RNN over a sequence.

        Inputs:
        - x: Input tensor (seq_len, batch_size, input_size)
        - h: Optional hidden vector (nr_layers, batch_size, hidden_size)
        Outputs:
        - h_seq: Hidden vector along sequence
                 (seq_len, batch_size, hidden_size)
        - h: Final hidden vector of sequence (1, batch_size, hidden_size)
        """
        h_seq = []
        # TODO: iterate over timesteps, collecting each hidden state in h_seq.
        return h_seq, h
class LSTM(nn.Module):
    """Exercise skeleton for a single-layer LSTM (not implemented)."""

    def __init__(self, input_size=1, hidden_size=20):
        super().__init__()
        # TODO: Build a one-layer LSTM with the attributes defined above and
        # a forward function below. Use nn.Linear() for the linear layers.
        # Initialise h and c as 0 when not given.

    def forward(self, x, h=None, c=None):
        """Run the (not yet implemented) LSTM over a sequence.

        Inputs:
        - x: Input tensor (seq_len, batch_size, input_size)
        - h: Hidden vector (nr_layers, batch_size, hidden_size)
        - c: Cell state vector (nr_layers, batch_size, hidden_size)
        Outputs:
        - h_seq: Hidden vector along sequence
                 (seq_len, batch_size, hidden_size)
        - h: Final hidden vector of sequence (1, batch_size, hidden_size)
        - c: Final cell state vector of sequence (1, batch_size, hidden_size)
        """
        h_seq = None
        # TODO: iterate over timesteps, updating h and c and filling h_seq.
        return h_seq, (h, c)
class RNN_Classifier(torch.nn.Module):
    """Exercise skeleton for an RNN sequence classifier (not implemented)."""

    def __init__(self, classes=10, input_size=28, hidden_size=128,
                 activation="relu"):
        super().__init__()
        # TODO: Build an RNN classifier.

    def save(self, path):
        """Save model with its parameters to the given path.

        Conventionally the path should end with "*.model".

        Inputs:
        - path: path string
        """
        print('Saving model... %s' % path)
        torch.save(self, path)
class LSTM_Classifier(torch.nn.Module):
    """Exercise skeleton for an LSTM sequence classifier (not implemented)."""

    def __init__(self, classes=10, input_size=28, hidden_size=128):
        super().__init__()
        # TODO: Build an LSTM classifier.

    def save(self, path):
        """Save model with its parameters to the given path.

        Conventionally the path should end with "*.model".

        Inputs:
        - path: path string
        """
        print('Saving model... %s' % path)
        torch.save(self, path)
| 44.546154
| 79
| 0.331894
| 427
| 5,791
| 4.316159
| 0.220141
| 0.070537
| 0.065111
| 0.082474
| 0.817146
| 0.816061
| 0.785676
| 0.785676
| 0.761259
| 0.761259
| 0
| 0.006345
| 0.319634
| 5,791
| 129
| 80
| 44.891473
| 0.461421
| 0.372302
| 0
| 0.296296
| 0
| 0
| 0.031052
| 0
| 0
| 0
| 0
| 0.031008
| 0
| 1
| 0.296296
| false
| 0
| 0.074074
| 0
| 0.592593
| 0.074074
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 10
|
d5b44aa720df48b794f17d9be64c73f0afa046d2
| 109
|
py
|
Python
|
tests/test_config.py
|
weiyangliu/QuLab
|
f3ff8ff2120be96f57c1d293d9be15df17717526
|
[
"MIT"
] | null | null | null |
tests/test_config.py
|
weiyangliu/QuLab
|
f3ff8ff2120be96f57c1d293d9be15df17717526
|
[
"MIT"
] | null | null | null |
tests/test_config.py
|
weiyangliu/QuLab
|
f3ff8ff2120be96f57c1d293d9be15df17717526
|
[
"MIT"
] | null | null | null |
import pytest
from qulab.config import *
def test_config_dir():
    """config_dir() should return a pathlib Path instance."""
    result = config_dir()
    assert isinstance(result, Path)
| 13.625
| 41
| 0.743119
| 15
| 109
| 5.2
| 0.733333
| 0.230769
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.165138
| 109
| 7
| 42
| 15.571429
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.25
| true
| 0
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
91007b6dd0ef1de7cfce527b9069624a0337dcd5
| 153
|
py
|
Python
|
boxy/core/views.py
|
MattyPy/boxy
|
6082dbc45512a5fbc2a2d7664613b1e04ba40ddb
|
[
"MIT"
] | null | null | null |
boxy/core/views.py
|
MattyPy/boxy
|
6082dbc45512a5fbc2a2d7664613b1e04ba40ddb
|
[
"MIT"
] | 1
|
2020-04-30T12:50:58.000Z
|
2020-04-30T12:50:58.000Z
|
boxy/core/views.py
|
MattyPy/boxy
|
6082dbc45512a5fbc2a2d7664613b1e04ba40ddb
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
# Create your views here.
def add_subscription(request):
    """Render the add-subscription page."""
    template = 'core/add_subscription.html'
    return render(request, template)
| 19.125
| 56
| 0.777778
| 20
| 153
| 5.85
| 0.8
| 0.25641
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137255
| 153
| 7
| 57
| 21.857143
| 0.886364
| 0.150327
| 0
| 0
| 0
| 0
| 0.203125
| 0.203125
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
9108cf873ec18fba62ad4f29dfaf083a393118f5
| 8,394
|
py
|
Python
|
pypeln/task/api/flat_map_task_test.py
|
quarckster/pypeln
|
f4160d0f4d4718b67f79a0707d7261d249459a4b
|
[
"MIT"
] | 1,281
|
2018-09-20T05:35:27.000Z
|
2022-03-30T01:29:48.000Z
|
pypeln/task/api/flat_map_task_test.py
|
webclinic017/pypeln
|
5231806f2cac9d2019dacbbcf913484fd268b8c1
|
[
"MIT"
] | 78
|
2018-09-18T20:38:12.000Z
|
2022-03-30T20:16:02.000Z
|
pypeln/task/api/flat_map_task_test.py
|
webclinic017/pypeln
|
5231806f2cac9d2019dacbbcf913484fd268b8c1
|
[
"MIT"
] | 88
|
2018-09-24T10:46:14.000Z
|
2022-03-28T09:34:50.000Z
|
import asyncio
import sys
import time
import typing as tp
from unittest import TestCase
import cytoolz as cz
import hypothesis as hp
from hypothesis import strategies as st
import pypeln as pl
# Cap hypothesis examples per test to keep the suite fast.
MAX_EXAMPLES = 10
T = tp.TypeVar("T")
@hp.given(nums=st.lists(st.integers()))
@hp.settings(max_examples=MAX_EXAMPLES)
def test_flat_map_square(nums: tp.List[int]):
    """flat_map over squared values matches cytoolz.mapcat."""
    def _generator(x):
        yield x
        yield x + 1
        yield x + 2

    nums_py = map(lambda x: x ** 2, nums)
    nums_py = cz.mapcat(_generator, nums_py)
    nums_py = list(nums_py)
    nums_pl = pl.task.map(lambda x: x ** 2, nums)
    nums_pl = pl.task.flat_map(_generator, nums_pl)
    nums_pl = list(nums_pl)
    assert nums_pl == nums_py
@hp.given(nums=st.lists(st.integers()))
@hp.settings(max_examples=MAX_EXAMPLES)
def test_flat_map_square_async_1(nums: tp.List[int]):
    """flat_map accepts an async generator in a sync pipeline."""
    def _generator(x):
        yield x
        yield x + 1
        yield x + 2

    async def _generator_async(x):
        yield x
        yield x + 1
        yield x + 2

    nums_py = map(lambda x: x ** 2, nums)
    nums_py = cz.mapcat(_generator, nums_py)
    nums_py = list(nums_py)
    nums_pl = pl.task.map(lambda x: x ** 2, nums)
    nums_pl = pl.task.flat_map(_generator_async, nums_pl)
    nums_pl = list(nums_pl)
    assert nums_pl == nums_py
@hp.given(nums=st.lists(st.integers()))
@hp.settings(max_examples=MAX_EXAMPLES)
@pl.task.utils.run_test_async
async def test_flat_map_square_async_2(nums: tp.List[int]):
    """flat_map with an async generator, awaited from an async test."""
    def _generator(x):
        yield x
        yield x + 1
        yield x + 2

    async def _generator_async(x):
        yield x
        yield x + 1
        yield x + 2

    nums_py = map(lambda x: x ** 2, nums)
    nums_py = cz.mapcat(_generator, nums_py)
    nums_py = list(nums_py)
    nums_pl = pl.task.map(lambda x: x ** 2, nums)
    nums_pl = pl.task.flat_map(_generator_async, nums_pl)
    nums_pl = await nums_pl
    assert nums_pl == nums_py
@hp.given(nums=st.lists(st.integers()))
@hp.settings(max_examples=MAX_EXAMPLES)
@pl.task.utils.run_test_async
async def test_flat_map_square_async_3(nums: tp.List[int]):
    """flat_map with an async generator fed by an async input source."""
    def _generator(x):
        yield x
        yield x + 1
        yield x + 2

    async def _generator_async(x):
        yield x
        yield x + 1
        yield x + 2

    async def nums_generator():
        for x in nums:
            yield x

    nums_py = map(lambda x: x ** 2, nums)
    nums_py = cz.mapcat(_generator, nums_py)
    nums_py = list(nums_py)
    nums_pl = pl.task.map(lambda x: x ** 2, nums_generator())
    nums_pl = pl.task.flat_map(_generator_async, nums_pl)
    nums_pl = await nums_pl
    assert nums_pl == nums_py
@hp.given(nums=st.lists(st.integers()))
@hp.settings(max_examples=MAX_EXAMPLES)
@pl.task.utils.run_test_async
async def test_flat_map_square_async_4(nums: tp.List[int]):
    """flat_map also accepts a coroutine that returns a list."""
    def _generator(x):
        yield x
        yield x + 1
        yield x + 2

    async def _generator_async(x):
        return [x, x + 1, x + 2]

    async def nums_generator():
        for x in nums:
            yield x

    nums_py = map(lambda x: x ** 2, nums)
    nums_py = cz.mapcat(_generator, nums_py)
    nums_py = list(nums_py)
    nums_pl = pl.task.map(lambda x: x ** 2, nums_generator())
    nums_pl = pl.task.flat_map(_generator_async, nums_pl)
    nums_pl = await nums_pl
    assert nums_pl == nums_py
@hp.given(nums=st.lists(st.integers()))
@hp.settings(max_examples=MAX_EXAMPLES)
def test_flat_map_square_workers(nums: tp.List[int]):
    """With workers=3 order is not guaranteed, so compare sorted results."""
    def _generator(x):
        yield x
        yield x + 1
        yield x + 2

    nums_py = map(lambda x: x ** 2, nums)
    nums_py = cz.mapcat(_generator, nums_py)
    nums_py = list(nums_py)
    nums_pl = pl.task.map(lambda x: x ** 2, nums)
    nums_pl = pl.task.flat_map(_generator, nums_pl, workers=3)
    nums_pl = list(nums_pl)
    assert sorted(nums_pl) == sorted(nums_py)
@hp.given(nums=st.lists(st.integers()))
@hp.settings(max_examples=MAX_EXAMPLES)
def test_flat_map_square_workers_async_1(nums: tp.List[int]):
    """workers=3 with an async generator in a sync pipeline."""
    def _generator(x):
        yield x
        yield x + 1
        yield x + 2

    async def _generator_async(x):
        yield x
        yield x + 1
        yield x + 2

    nums_py = map(lambda x: x ** 2, nums)
    nums_py = cz.mapcat(_generator, nums_py)
    nums_py = list(nums_py)
    nums_pl = pl.task.map(lambda x: x ** 2, nums)
    nums_pl = pl.task.flat_map(_generator_async, nums_pl, workers=3)
    nums_pl = list(nums_pl)
    assert sorted(nums_pl) == sorted(nums_py)
@hp.given(nums=st.lists(st.integers()))
@hp.settings(max_examples=MAX_EXAMPLES)
@pl.task.utils.run_test_async
async def test_flat_map_square_workers_async_2(nums: tp.List[int]):
    """workers=3 with an async generator, awaited from an async test."""
    def _generator(x):
        yield x
        yield x + 1
        yield x + 2

    async def _generator_async(x):
        yield x
        yield x + 1
        yield x + 2

    nums_py = map(lambda x: x ** 2, nums)
    nums_py = cz.mapcat(_generator, nums_py)
    nums_py = list(nums_py)
    nums_pl = pl.task.map(lambda x: x ** 2, nums)
    nums_pl = pl.task.flat_map(_generator_async, nums_pl, workers=3)
    nums_pl = await nums_pl
    assert sorted(nums_pl) == sorted(nums_py)
@hp.given(nums=st.lists(st.integers()))
@hp.settings(max_examples=MAX_EXAMPLES)
@pl.task.utils.run_test_async
async def test_flat_map_square_workers_async_3(nums: tp.List[int]):
    """workers=3 with an async generator and an async input source."""
    def _generator(x):
        yield x
        yield x + 1
        yield x + 2

    async def _generator_async(x):
        yield x
        yield x + 1
        yield x + 2

    async def nums_generator():
        for x in nums:
            yield x

    nums_py = map(lambda x: x ** 2, nums)
    nums_py = cz.mapcat(_generator, nums_py)
    nums_py = list(nums_py)
    nums_pl = pl.task.map(lambda x: x ** 2, nums_generator())
    nums_pl = pl.task.flat_map(_generator_async, nums_pl, workers=3)
    nums_pl = await nums_pl
    assert sorted(nums_pl) == sorted(nums_py)
@hp.given(nums=st.lists(st.integers()))
@hp.settings(max_examples=MAX_EXAMPLES)
@pl.task.utils.run_test_async
async def test_flat_map_square_workers_async_timeout(nums: tp.List[int]):
    """A 0.01s timeout with a 0.1s sleep per element should drop results.

    Renamed: this was a second definition of
    `test_flat_map_square_workers_async_3`, which shadowed the identically
    named test above so pytest never collected this one.
    """
    def _generator(x):
        yield x
        yield x + 1
        yield x + 2

    async def _generator_async(x):
        yield x
        yield x + 1
        # Sleep longer than the flat_map timeout so items get cancelled.
        await asyncio.sleep(0.1)
        yield x + 2

    async def nums_generator():
        for x in nums:
            yield x

    nums_py = map(lambda x: x ** 2, nums)
    nums_py = cz.mapcat(_generator, nums_py)
    nums_py = list(nums_py)
    nums_pl = pl.task.map(lambda x: x ** 2, nums_generator())
    nums_pl = pl.task.flat_map(_generator_async, nums_pl, workers=3, timeout=0.01)
    nums_pl = await nums_pl
    # Either the input was empty, or the timed-out elements are missing.
    assert nums_py == [] or sorted(nums_pl) != sorted(nums_py)
@hp.given(nums=st.lists(st.integers()))
@hp.settings(max_examples=MAX_EXAMPLES)
@pl.task.utils.run_test_async
async def test_flat_map_square_workers_async_4(nums: tp.List[int]):
    """A 0.1s timeout comfortably exceeds the 0.01s sleep (maxsize=2)."""
    def _generator(x):
        yield x
        yield x + 1
        yield x + 2

    async def _generator_async(x):
        yield x
        yield x + 1
        await asyncio.sleep(0.01)
        yield x + 2

    async def nums_generator():
        for x in nums:
            yield x

    nums_py = map(lambda x: x ** 2, nums)
    nums_py = cz.mapcat(_generator, nums_py)
    nums_py = list(nums_py)
    nums_pl = pl.task.map(lambda x: x ** 2, nums_generator())
    nums_pl = pl.task.flat_map(
        _generator_async, nums_pl, workers=3, timeout=0.1, maxsize=2
    )
    nums_pl = await nums_pl
    assert nums_py == [] or sorted(nums_pl) == sorted(nums_py)
@hp.given(nums=st.lists(st.integers()))
@hp.settings(max_examples=MAX_EXAMPLES)
@pl.task.utils.run_test_async
async def test_flat_map_square_workers_async_5(nums: tp.List[int]):
    """Same as async_4 but with an unbounded queue (maxsize=0)."""
    def _generator(x):
        yield x
        yield x + 1
        yield x + 2

    async def _generator_async(x):
        yield x
        yield x + 1
        await asyncio.sleep(0.01)
        yield x + 2

    async def nums_generator():
        for x in nums:
            yield x

    nums_py = map(lambda x: x ** 2, nums)
    nums_py = cz.mapcat(_generator, nums_py)
    nums_py = list(nums_py)
    nums_pl = pl.task.map(lambda x: x ** 2, nums_generator())
    nums_pl = pl.task.flat_map(
        _generator_async, nums_pl, workers=3, timeout=0.1, maxsize=0
    )
    nums_pl = await nums_pl
    assert nums_py == [] or sorted(nums_pl) == sorted(nums_py)
| 25.436364
| 82
| 0.637241
| 1,375
| 8,394
| 3.642182
| 0.045818
| 0.089856
| 0.058706
| 0.052716
| 0.960264
| 0.960264
| 0.960264
| 0.960264
| 0.960264
| 0.960264
| 0
| 0.016516
| 0.249821
| 8,394
| 329
| 83
| 25.513678
| 0.778784
| 0
| 0
| 0.876
| 0
| 0
| 0.000119
| 0
| 0
| 0
| 0
| 0
| 0.048
| 1
| 0.064
| false
| 0
| 0.036
| 0
| 0.104
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
91532b9c85050bde280f2d35728d9cfc6ed19fc7
| 230
|
py
|
Python
|
histnd/__init__.py
|
6-6-6/histnd
|
6c69ed532ff4670a6eadc8c49f28ab798e9bb85a
|
[
"MIT"
] | null | null | null |
histnd/__init__.py
|
6-6-6/histnd
|
6c69ed532ff4670a6eadc8c49f28ab798e9bb85a
|
[
"MIT"
] | null | null | null |
histnd/__init__.py
|
6-6-6/histnd
|
6c69ed532ff4670a6eadc8c49f28ab798e9bb85a
|
[
"MIT"
] | null | null | null |
from .wrapper import histnd_serial, histnd_parallel
from .histnd import histnd_serial_f64, histnd_parallel_f64
from .histnd import histnd_serial_i64, histnd_parallel_i64
from .histnd import histnd_serial_u64, histnd_parallel_u64
| 38.333333
| 58
| 0.873913
| 34
| 230
| 5.5
| 0.264706
| 0.256684
| 0.385027
| 0.352941
| 0.449198
| 0
| 0
| 0
| 0
| 0
| 0
| 0.057416
| 0.091304
| 230
| 6
| 59
| 38.333333
| 0.837321
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
e66c0a9c3fbb2673a54dc509df7eaf8548faa5de
| 27,198
|
py
|
Python
|
decora_wifi/models/person.py
|
balloob/python-decora_wifi
|
47900ad67002f3655fc4c799518bc4e73293ceb4
|
[
"MIT"
] | 33
|
2017-09-02T16:37:15.000Z
|
2021-12-28T15:24:39.000Z
|
decora_wifi/models/person.py
|
balloob/python-decora_wifi
|
47900ad67002f3655fc4c799518bc4e73293ceb4
|
[
"MIT"
] | 17
|
2017-09-12T04:53:07.000Z
|
2022-01-25T03:31:45.000Z
|
decora_wifi/models/person.py
|
balloob/python-decora_wifi
|
47900ad67002f3655fc4c799518bc4e73293ceb4
|
[
"MIT"
] | 21
|
2018-01-29T22:50:06.000Z
|
2022-01-06T02:30:47.000Z
|
# Leviton Cloud Services API model Person.
# Auto-generated by api_scraper.py.
#
# Copyright 2017 Tim Lyakhovetskiy <tlyakhov@gmail.com>
#
# This code is released under the terms of the MIT license. See the LICENSE
# file for more details.
from decora_wifi.base_model import BaseModel
class Person(BaseModel):
    """Leviton Cloud Services ``Person`` resource wrapper.

    Follows the package-wide auto-generated REST pattern:

    * ``@classmethod`` operations address collection-level endpoints and
      require an explicit ``session``.
    * Instance operations interpolate ``self._id`` into the URL and issue
      the request through ``self._session``.
    * ``attribs`` (optional dict) is forwarded verbatim to
      ``Session.call_api`` as the request parameters/body.
    * Related-model lookups import their model class lazily, which avoids
      circular imports between the generated model modules.
    """

    def __init__(self, session, model_id=None):
        super(Person, self).__init__(session, model_id)

    @classmethod
    def apply_password(cls, session, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/applyPassword"
        return session.call_api(endpoint, attribs, 'post')

    @classmethod
    def confirm(cls, session, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/confirm"
        return session.call_api(endpoint, attribs, 'get')

    @classmethod
    def count(cls, session, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/count"
        return session.call_api(endpoint, attribs, 'get')

    def count_access_tokens(self, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/accessTokens/count".format(self._id)
        return self._session.call_api(endpoint, attribs, 'get')

    def count_invitations(self, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/invitations/count".format(self._id)
        return self._session.call_api(endpoint, attribs, 'get')

    def count_management_tiers(self, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/managementTiers/count".format(self._id)
        return self._session.call_api(endpoint, attribs, 'get')

    def count_notification_subscriptions(self, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/notificationSubscriptions/count".format(self._id)
        return self._session.call_api(endpoint, attribs, 'get')

    def count_notification_triggers(self, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/notificationTriggers/count".format(self._id)
        return self._session.call_api(endpoint, attribs, 'get')

    def count_permissions(self, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/permissions/count".format(self._id)
        return self._session.call_api(endpoint, attribs, 'get')

    def count_preferences(self, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/preferences/count".format(self._id)
        return self._session.call_api(endpoint, attribs, 'get')

    def count_residential_permissions(self, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/residentialPermissions/count".format(self._id)
        return self._session.call_api(endpoint, attribs, 'get')

    def count_role_mappings(self, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/roleMappings/count".format(self._id)
        return self._session.call_api(endpoint, attribs, 'get')

    def count_user_feedbacks(self, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/userFeedbacks/count".format(self._id)
        return self._session.call_api(endpoint, attribs, 'get')

    @classmethod
    def create(cls, session, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person"
        return session.call_api(endpoint, attribs, 'post')

    def create_access_tokens(self, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/accessTokens".format(self._id)
        return self._session.call_api(endpoint, attribs, 'post')

    def create_invitations(self, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/invitations".format(self._id)
        return self._session.call_api(endpoint, attribs, 'post')

    def create_management_tiers(self, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/managementTiers".format(self._id)
        return self._session.call_api(endpoint, attribs, 'post')

    @classmethod
    def create_many(cls, session, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person"
        return session.call_api(endpoint, attribs, 'post')

    def create_notification_subscriptions(self, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/notificationSubscriptions".format(self._id)
        return self._session.call_api(endpoint, attribs, 'post')

    def create_notification_triggers(self, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/notificationTriggers".format(self._id)
        return self._session.call_api(endpoint, attribs, 'post')

    def create_preferences(self, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/preferences".format(self._id)
        return self._session.call_api(endpoint, attribs, 'post')

    def create_residential_permissions(self, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/residentialPermissions".format(self._id)
        return self._session.call_api(endpoint, attribs, 'post')

    def create_role_mappings(self, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/roleMappings".format(self._id)
        return self._session.call_api(endpoint, attribs, 'post')

    def create_user_feedbacks(self, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/userFeedbacks".format(self._id)
        return self._session.call_api(endpoint, attribs, 'post')

    def delete_access_tokens(self, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/accessTokens".format(self._id)
        return self._session.call_api(endpoint, attribs, 'delete')

    def delete_by_id(self, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}".format(self._id)
        return self._session.call_api(endpoint, attribs, 'delete')

    def delete_invitations(self, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/invitations".format(self._id)
        return self._session.call_api(endpoint, attribs, 'delete')

    def delete_management_tiers(self, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/managementTiers".format(self._id)
        return self._session.call_api(endpoint, attribs, 'delete')

    def delete_notification_subscriptions(self, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/notificationSubscriptions".format(self._id)
        return self._session.call_api(endpoint, attribs, 'delete')

    def delete_notification_triggers(self, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/notificationTriggers".format(self._id)
        return self._session.call_api(endpoint, attribs, 'delete')

    def delete_permissions(self, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/permissions".format(self._id)
        return self._session.call_api(endpoint, attribs, 'delete')

    def delete_preferences(self, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/preferences".format(self._id)
        return self._session.call_api(endpoint, attribs, 'delete')

    def delete_residential_permissions(self, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/residentialPermissions".format(self._id)
        return self._session.call_api(endpoint, attribs, 'delete')

    def delete_role_mappings(self, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/roleMappings".format(self._id)
        return self._session.call_api(endpoint, attribs, 'delete')

    def delete_user_feedbacks(self, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/userFeedbacks".format(self._id)
        return self._session.call_api(endpoint, attribs, 'delete')

    def destroy_by_id_access_tokens(self, access_token_id, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/accessTokens/{1}".format(self._id, access_token_id)
        return self._session.call_api(endpoint, attribs, 'delete')

    def destroy_by_id_invitations(self, invitation_id, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/invitations/{1}".format(self._id, invitation_id)
        return self._session.call_api(endpoint, attribs, 'delete')

    def destroy_by_id_management_tiers(self, management_tier_id, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/managementTiers/{1}".format(self._id, management_tier_id)
        return self._session.call_api(endpoint, attribs, 'delete')

    def destroy_by_id_notification_subscriptions(self, notification_subscription_id, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/notificationSubscriptions/{1}".format(self._id, notification_subscription_id)
        return self._session.call_api(endpoint, attribs, 'delete')

    def destroy_by_id_notification_triggers(self, notification_trigger_id, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/notificationTriggers/{1}".format(self._id, notification_trigger_id)
        return self._session.call_api(endpoint, attribs, 'delete')

    def destroy_by_id_permissions(self, permission_id, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/permissions/{1}".format(self._id, permission_id)
        return self._session.call_api(endpoint, attribs, 'delete')

    def destroy_by_id_preferences(self, preference_id, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/preferences/{1}".format(self._id, preference_id)
        return self._session.call_api(endpoint, attribs, 'delete')

    def destroy_by_id_residential_permissions(self, residential_permission_id, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/residentialPermissions/{1}".format(self._id, residential_permission_id)
        return self._session.call_api(endpoint, attribs, 'delete')

    def destroy_by_id_role_mappings(self, role_mapping_id, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/roleMappings/{1}".format(self._id, role_mapping_id)
        return self._session.call_api(endpoint, attribs, 'delete')

    def destroy_by_id_user_feedbacks(self, user_feedback_id, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/userFeedbacks/{1}".format(self._id, user_feedback_id)
        return self._session.call_api(endpoint, attribs, 'delete')

    def exists(self, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/exists".format(self._id)
        return self._session.call_api(endpoint, attribs, 'get')

    def exists_management_tiers(self, management_tier_id, attribs=None):
        # Relation-existence check uses HTTP HEAD on the rel endpoint.
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/managementTiers/rel/{1}".format(self._id, management_tier_id)
        return self._session.call_api(endpoint, attribs, 'head')

    @classmethod
    def find(cls, session, attribs=None):
        # Returns a list of Person instances wrapping each result row.
        attribs = {} if attribs is None else attribs
        endpoint = "/Person"
        found = session.call_api(endpoint, attribs, 'get')
        wrapped = []
        if found is not None:
            for entry in found:
                person = Person(session, entry['id'])
                person.data = entry
                wrapped.append(person)
        return wrapped

    def find_by_id(self, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}".format(self._id)
        fetched = self._session.call_api(endpoint, attribs, 'get')
        self.data.update(fetched)
        return self

    def find_by_id_access_tokens(self, access_token_id, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/accessTokens/{1}".format(self._id, access_token_id)
        return self._session.call_api(endpoint, attribs, 'get')

    def find_by_id_invitations(self, invitation_id, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/invitations/{1}".format(self._id, invitation_id)
        fetched = self._session.call_api(endpoint, attribs, 'get')
        from .invitation import Invitation
        wrapped = Invitation(self._session, fetched['id'])
        wrapped.data = fetched
        return wrapped

    def find_by_id_management_tiers(self, management_tier_id, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/managementTiers/{1}".format(self._id, management_tier_id)
        fetched = self._session.call_api(endpoint, attribs, 'get')
        from .management_tier import ManagementTier
        wrapped = ManagementTier(self._session, fetched['id'])
        wrapped.data = fetched
        return wrapped

    def find_by_id_notification_subscriptions(self, notification_subscription_id, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/notificationSubscriptions/{1}".format(self._id, notification_subscription_id)
        return self._session.call_api(endpoint, attribs, 'get')

    def find_by_id_notification_triggers(self, notification_trigger_id, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/notificationTriggers/{1}".format(self._id, notification_trigger_id)
        return self._session.call_api(endpoint, attribs, 'get')

    def find_by_id_permissions(self, permission_id, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/permissions/{1}".format(self._id, permission_id)
        fetched = self._session.call_api(endpoint, attribs, 'get')
        from .permission import Permission
        wrapped = Permission(self._session, fetched['id'])
        wrapped.data = fetched
        return wrapped

    def find_by_id_preferences(self, preference_id, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/preferences/{1}".format(self._id, preference_id)
        fetched = self._session.call_api(endpoint, attribs, 'get')
        from .preference import Preference
        wrapped = Preference(self._session, fetched['id'])
        wrapped.data = fetched
        return wrapped

    def find_by_id_residential_permissions(self, residential_permission_id, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/residentialPermissions/{1}".format(self._id, residential_permission_id)
        fetched = self._session.call_api(endpoint, attribs, 'get')
        from .residential_permission import ResidentialPermission
        wrapped = ResidentialPermission(self._session, fetched['id'])
        wrapped.data = fetched
        return wrapped

    def find_by_id_role_mappings(self, role_mapping_id, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/roleMappings/{1}".format(self._id, role_mapping_id)
        return self._session.call_api(endpoint, attribs, 'get')

    def find_by_id_user_feedbacks(self, user_feedback_id, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/userFeedbacks/{1}".format(self._id, user_feedback_id)
        fetched = self._session.call_api(endpoint, attribs, 'get')
        from .user_feedback import UserFeedback
        wrapped = UserFeedback(self._session, fetched['id'])
        wrapped.data = fetched
        return wrapped

    @classmethod
    def find_one(cls, session, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/findOne"
        return session.call_api(endpoint, attribs, 'get')

    def refresh(self):
        # Re-fetch this person's record and merge it into local state.
        endpoint = "/Person/{0}".format(self._id)
        fetched = self._session.call_api(endpoint, {}, 'get')
        if fetched is not None:
            self.data.update(fetched)
        return self

    def get_access_tokens(self, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/accessTokens".format(self._id)
        return self._session.call_api(endpoint, attribs, 'get')

    def get_current(self, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}".format(self._id)
        fetched = self._session.call_api(endpoint, attribs, 'get')
        self.data.update(fetched)
        return self

    def get_invitations(self, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/invitations".format(self._id)
        found = self._session.call_api(endpoint, attribs, 'get')
        from .invitation import Invitation
        wrapped = []
        if found is not None:
            for entry in found:
                item = Invitation(self._session, entry['id'])
                item.data = entry
                wrapped.append(item)
        return wrapped

    def get_management_tiers(self, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/managementTiers".format(self._id)
        found = self._session.call_api(endpoint, attribs, 'get')
        from .management_tier import ManagementTier
        wrapped = []
        if found is not None:
            for entry in found:
                item = ManagementTier(self._session, entry['id'])
                item.data = entry
                wrapped.append(item)
        return wrapped

    def get_notification_subscriptions(self, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/notificationSubscriptions".format(self._id)
        return self._session.call_api(endpoint, attribs, 'get')

    def get_notification_triggers(self, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/notificationTriggers".format(self._id)
        return self._session.call_api(endpoint, attribs, 'get')

    def get_permissions(self, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/permissions".format(self._id)
        found = self._session.call_api(endpoint, attribs, 'get')
        from .permission import Permission
        wrapped = []
        if found is not None:
            for entry in found:
                item = Permission(self._session, entry['id'])
                item.data = entry
                wrapped.append(item)
        return wrapped

    def get_preferences(self, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/preferences".format(self._id)
        found = self._session.call_api(endpoint, attribs, 'get')
        from .preference import Preference
        wrapped = []
        if found is not None:
            for entry in found:
                item = Preference(self._session, entry['id'])
                item.data = entry
                wrapped.append(item)
        return wrapped

    def get_residential_permissions(self, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/residentialPermissions".format(self._id)
        found = self._session.call_api(endpoint, attribs, 'get')
        from .residential_permission import ResidentialPermission
        wrapped = []
        if found is not None:
            for entry in found:
                item = ResidentialPermission(self._session, entry['id'])
                item.data = entry
                wrapped.append(item)
        return wrapped

    def get_role_mappings(self, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/roleMappings".format(self._id)
        return self._session.call_api(endpoint, attribs, 'get')

    def get_user_feedbacks(self, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/userFeedbacks".format(self._id)
        found = self._session.call_api(endpoint, attribs, 'get')
        from .user_feedback import UserFeedback
        wrapped = []
        if found is not None:
            for entry in found:
                item = UserFeedback(self._session, entry['id'])
                item.data = entry
                wrapped.append(item)
        return wrapped

    def link_management_tiers(self, management_tier_id, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/managementTiers/rel/{1}".format(self._id, management_tier_id)
        fetched = self._session.call_api(endpoint, attribs, 'put')
        from .management_tier import ManagementTier
        wrapped = ManagementTier(self._session, fetched['id'])
        wrapped.data = fetched
        return wrapped

    @classmethod
    def login(cls, session, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/login"
        return session.call_api(endpoint, attribs, 'post')

    @classmethod
    def logout(cls, session, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/logout"
        return session.call_api(endpoint, attribs, 'post')

    def notify(self, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/notify".format(self._id)
        return self._session.call_api(endpoint, attribs, 'post')

    def replace_by_id(self, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/replace".format(self._id)
        return self._session.call_api(endpoint, attribs, 'post')

    @classmethod
    def replace_or_create(cls, session, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/replaceOrCreate"
        return session.call_api(endpoint, attribs, 'post')

    @classmethod
    def reset_password(cls, session, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/reset"
        return session.call_api(endpoint, attribs, 'post')

    def unlink_management_tiers(self, management_tier_id, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/managementTiers/rel/{1}".format(self._id, management_tier_id)
        return self._session.call_api(endpoint, attribs, 'delete')

    def update_attributes(self, attribs=None):
        # NOTE: local state is merged from the *sent* attribs, not from the
        # server response (matches the original generated behavior).
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}".format(self._id)
        self._session.call_api(endpoint, attribs, 'put')
        self.data.update(attribs)
        return self

    def update_by_id_access_tokens(self, access_token_id, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/accessTokens/{1}".format(self._id, access_token_id)
        return self._session.call_api(endpoint, attribs, 'put')

    def update_by_id_invitations(self, invitation_id, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/invitations/{1}".format(self._id, invitation_id)
        fetched = self._session.call_api(endpoint, attribs, 'put')
        from .invitation import Invitation
        wrapped = Invitation(self._session, fetched['id'])
        wrapped.data = fetched
        return wrapped

    def update_by_id_management_tiers(self, management_tier_id, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/managementTiers/{1}".format(self._id, management_tier_id)
        fetched = self._session.call_api(endpoint, attribs, 'put')
        from .management_tier import ManagementTier
        wrapped = ManagementTier(self._session, fetched['id'])
        wrapped.data = fetched
        return wrapped

    def update_by_id_notification_subscriptions(self, notification_subscription_id, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/notificationSubscriptions/{1}".format(self._id, notification_subscription_id)
        return self._session.call_api(endpoint, attribs, 'put')

    def update_by_id_notification_triggers(self, notification_trigger_id, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/notificationTriggers/{1}".format(self._id, notification_trigger_id)
        return self._session.call_api(endpoint, attribs, 'put')

    def update_by_id_permissions(self, permission_id, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/permissions/{1}".format(self._id, permission_id)
        fetched = self._session.call_api(endpoint, attribs, 'put')
        from .permission import Permission
        wrapped = Permission(self._session, fetched['id'])
        wrapped.data = fetched
        return wrapped

    def update_by_id_preferences(self, preference_id, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/preferences/{1}".format(self._id, preference_id)
        fetched = self._session.call_api(endpoint, attribs, 'put')
        from .preference import Preference
        wrapped = Preference(self._session, fetched['id'])
        wrapped.data = fetched
        return wrapped

    def update_by_id_residential_permissions(self, residential_permission_id, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/residentialPermissions/{1}".format(self._id, residential_permission_id)
        fetched = self._session.call_api(endpoint, attribs, 'put')
        from .residential_permission import ResidentialPermission
        wrapped = ResidentialPermission(self._session, fetched['id'])
        wrapped.data = fetched
        return wrapped

    def update_by_id_role_mappings(self, role_mapping_id, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/roleMappings/{1}".format(self._id, role_mapping_id)
        return self._session.call_api(endpoint, attribs, 'put')

    def update_by_id_user_feedbacks(self, user_feedback_id, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/userFeedbacks/{1}".format(self._id, user_feedback_id)
        fetched = self._session.call_api(endpoint, attribs, 'put')
        from .user_feedback import UserFeedback
        wrapped = UserFeedback(self._session, fetched['id'])
        wrapped.data = fetched
        return wrapped

    @classmethod
    def upsert(cls, session, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person"
        fetched = session.call_api(endpoint, attribs, 'put')
        person = Person(session, fetched['id'])
        person.data = fetched
        return person

    @classmethod
    def upsert_with_where(cls, session, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/upsertWithWhere"
        return session.call_api(endpoint, attribs, 'post')

    def verify_email(self, attribs=None):
        attribs = {} if attribs is None else attribs
        endpoint = "/Person/{0}/verifyEmail".format(self._id)
        return self._session.call_api(endpoint, attribs, 'post')
| 37.105048
| 104
| 0.617545
| 3,217
| 27,198
| 5.030463
| 0.041654
| 0.068652
| 0.08132
| 0.098746
| 0.955323
| 0.954026
| 0.94933
| 0.948959
| 0.932893
| 0.919051
| 0
| 0.005903
| 0.265056
| 27,198
| 732
| 105
| 37.155738
| 0.803692
| 0.008273
| 0
| 0.788079
| 1
| 0
| 0.107106
| 0.083667
| 0
| 0
| 0
| 0
| 0
| 1
| 0.157285
| false
| 0.004967
| 0.033113
| 0
| 0.347682
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
e6c9ce81bb07d8d29954a57b28226dffd4ba2fc0
| 202
|
py
|
Python
|
orlov/libs/adb/__init__.py
|
coppelia517/orlov
|
d7ed6c061432b99ab2b75e0262db293e444fe6be
|
[
"MIT"
] | null | null | null |
orlov/libs/adb/__init__.py
|
coppelia517/orlov
|
d7ed6c061432b99ab2b75e0262db293e444fe6be
|
[
"MIT"
] | null | null | null |
orlov/libs/adb/__init__.py
|
coppelia517/orlov
|
d7ed6c061432b99ab2b75e0262db293e444fe6be
|
[
"MIT"
] | null | null | null |
""" Orlov is Multi-Platform Automation Testing Framework. """
from orlov.libs.adb.module import Android
from orlov.libs.adb.module import PROFILE_PATH
from orlov.libs.adb.factory import AndroidFactory
| 33.666667
| 61
| 0.811881
| 29
| 202
| 5.62069
| 0.586207
| 0.165644
| 0.239264
| 0.294479
| 0.343558
| 0.343558
| 0
| 0
| 0
| 0
| 0
| 0
| 0.10396
| 202
| 5
| 62
| 40.4
| 0.900552
| 0.262376
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
e6f37c318ecbe88153dbdc550c3e6708b755fd9b
| 72,401
|
py
|
Python
|
src/appservice-kube/azext_appservice_kube/vendored_sdks/azure_mgmt_web/v2020_06_01/operations/_app_service_certificate_orders_operations.py
|
AzureK8SE/azure-cli-extensions
|
71bcff04baeffac02fe3afd7e007c3f6676d62e5
|
[
"MIT"
] | 2
|
2021-03-24T21:06:20.000Z
|
2021-03-24T21:07:58.000Z
|
src/appservice-kube/azext_appservice_kube/vendored_sdks/azure_mgmt_web/v2020_06_01/operations/_app_service_certificate_orders_operations.py
|
AzureK8SE/azure-cli-extensions
|
71bcff04baeffac02fe3afd7e007c3f6676d62e5
|
[
"MIT"
] | 3
|
2020-05-27T20:16:26.000Z
|
2020-07-23T19:46:49.000Z
|
src/appservice-kube/azext_appservice_kube/vendored_sdks/azure_mgmt_web/v2020_06_01/operations/_app_service_certificate_orders_operations.py
|
AzureK8SE/azure-cli-extensions
|
71bcff04baeffac02fe3afd7e007c3f6676d62e5
|
[
"MIT"
] | 5
|
2020-05-09T17:47:09.000Z
|
2020-10-01T19:52:06.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class AppServiceCertificateOrdersOperations(object):
"""AppServiceCertificateOrdersOperations operations.
You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: API Version. Constant value: "2020-06-01".
"""
models = models
    def __init__(self, client, config, serializer, deserializer):
        """Store the pipeline client, configuration, and (de)serializers.

        :param client: Pipeline client used to build and send HTTP requests.
        :param config: Configuration of the service client (subscription id,
            accept-language, request-id generation flags, ...).
        :param serializer: An object model serializer.
        :param deserializer: An object model deserializer.
        """
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # Constant API version used by every operation in this group.
        self.api_version = "2020-06-01"
        self.config = config
    def list(
            self, custom_headers=None, raw=False, **operation_config):
        """List all certificate orders in a subscription.
        Description for List all certificate orders in a subscription.
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of AppServiceCertificateOrder
        :rtype:
         ~azure.mgmt.web.v2020_06_01.models.AppServiceCertificateOrderPaged[~azure.mgmt.web.v2020_06_01.models.AppServiceCertificateOrder]
        :raises:
         :class:`DefaultErrorResponseException<azure.mgmt.web.v2020_06_01.models.DefaultErrorResponseException>`
        """
        # Build either the first-page request (from the operation's URL
        # template) or a request for the server-supplied next_link.
        def prepare_request(next_link=None):
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                # next_link is a complete URL; no extra query parameters.
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Accept'] = 'application/json'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters, header_parameters)
            return request
        # Callback invoked by the paged iterator to fetch each page.
        def internal_paging(next_link=None):
            request = prepare_request(next_link)
            response = self._client.send(request, stream=False, **operation_config)
            if response.status_code not in [200]:
                raise models.DefaultErrorResponseException(self._deserialize, response)
            return response
        # Deserialize response
        header_dict = None
        if raw:
            # Paged iterator populates this dict with response headers.
            header_dict = {}
        deserialized = models.AppServiceCertificateOrderPaged(internal_paging, self._deserialize.dependencies, header_dict)
        return deserialized
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.CertificateRegistration/certificateOrders'}
    def validate_purchase_information(
            self, app_service_certificate_order, custom_headers=None, raw=False, **operation_config):
        """Validate information for a certificate order.
        Description for Validate information for a certificate order.
        :param app_service_certificate_order: Information for a certificate
         order.
        :type app_service_certificate_order:
         ~azure.mgmt.web.v2020_06_01.models.AppServiceCertificateOrder
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: None or ClientRawResponse if raw=true
        :rtype: None or ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`DefaultErrorResponseException<azure.mgmt.web.v2020_06_01.models.DefaultErrorResponseException>`
        """
        # Construct URL
        url = self.validate_purchase_information.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(app_service_certificate_order, 'AppServiceCertificateOrder')
        # Construct and send request
        request = self._client.post(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)
        # Service signals success with 204 No Content; anything else is an error.
        if response.status_code not in [204]:
            raise models.DefaultErrorResponseException(self._deserialize, response)
        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    validate_purchase_information.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.CertificateRegistration/validateCertificateRegistrationInformation'}
    def list_by_resource_group(
            self, resource_group_name, custom_headers=None, raw=False, **operation_config):
        """Get certificate orders in a resource group.
        Description for Get certificate orders in a resource group.
        :param resource_group_name: Name of the resource group to which the
         resource belongs.
        :type resource_group_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of AppServiceCertificateOrder
        :rtype:
         ~azure.mgmt.web.v2020_06_01.models.AppServiceCertificateOrderPaged[~azure.mgmt.web.v2020_06_01.models.AppServiceCertificateOrder]
        :raises:
         :class:`DefaultErrorResponseException<azure.mgmt.web.v2020_06_01.models.DefaultErrorResponseException>`
        """
        # Build either the first-page request (URL template + validated path
        # arguments) or a request for the server-supplied next_link.
        def prepare_request(next_link=None):
            if not next_link:
                # Construct URL
                url = self.list_by_resource_group.metadata['url']
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                # next_link is a complete URL; no extra query parameters.
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Accept'] = 'application/json'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters, header_parameters)
            return request
        # Callback invoked by the paged iterator to fetch each page.
        def internal_paging(next_link=None):
            request = prepare_request(next_link)
            response = self._client.send(request, stream=False, **operation_config)
            if response.status_code not in [200]:
                raise models.DefaultErrorResponseException(self._deserialize, response)
            return response
        # Deserialize response
        header_dict = None
        if raw:
            # Paged iterator populates this dict with response headers.
            header_dict = {}
        deserialized = models.AppServiceCertificateOrderPaged(internal_paging, self._deserialize.dependencies, header_dict)
        return deserialized
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders'}
def get(
        self, resource_group_name, certificate_order_name, custom_headers=None, raw=False, **operation_config):
    """Get a certificate order.

    Description for Get a certificate order.

    :param resource_group_name: Name of the resource group to which the
     resource belongs.
    :type resource_group_name: str
    :param certificate_order_name: Name of the certificate order.
    :type certificate_order_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: AppServiceCertificateOrder or ClientRawResponse if raw=true
    :rtype: ~azure.mgmt.web.v2020_06_01.models.AppServiceCertificateOrder
     or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`DefaultErrorResponseException<azure.mgmt.web.v2020_06_01.models.DefaultErrorResponseException>`
    """
    # Fill the URL template from the operation metadata.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
        'certificateOrderName': self._serialize.url("certificate_order_name", certificate_order_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(self.get.metadata['url'], **path_format_arguments)

    # Query string carries only the API version.
    query_parameters = {
        'api-version': self._serialize.query("self.api_version", self.api_version, 'str'),
    }

    # Headers: accept-language is applied last so it takes precedence
    # over anything the caller passed in custom_headers.
    header_parameters = {'Accept': 'application/json'}
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Issue the GET and validate the status code.
    request = self._client.get(url, query_parameters, header_parameters)
    response = self._client.send(request, stream=False, **operation_config)

    if response.status_code not in [200]:
        raise models.DefaultErrorResponseException(self._deserialize, response)

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('AppServiceCertificateOrder', response)

    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}'}
def _create_or_update_initial(
        self, resource_group_name, certificate_order_name, certificate_distinguished_name, custom_headers=None, raw=False, **operation_config):
    # Initial PUT for the create_or_update long-running operation.
    # Fill the URL template from the public method's metadata.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
        'certificateOrderName': self._serialize.url("certificate_order_name", certificate_order_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(self.create_or_update.metadata['url'], **path_format_arguments)

    query_parameters = {
        'api-version': self._serialize.query("self.api_version", self.api_version, 'str'),
    }

    # accept-language is applied after custom_headers so it wins.
    header_parameters = {
        'Accept': 'application/json',
        'Content-Type': 'application/json; charset=utf-8',
    }
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Serialize the body and send the PUT.
    body_content = self._serialize.body(certificate_distinguished_name, 'AppServiceCertificateOrder')
    request = self._client.put(url, query_parameters, header_parameters, body_content)
    response = self._client.send(request, stream=False, **operation_config)

    if response.status_code not in [200, 201]:
        raise models.DefaultErrorResponseException(self._deserialize, response)

    # Both 200 (updated) and 201 (created) carry the same payload model.
    deserialized = None
    if response.status_code in (200, 201):
        deserialized = self._deserialize('AppServiceCertificateOrder', response)

    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def create_or_update(
        self, resource_group_name, certificate_order_name, certificate_distinguished_name, custom_headers=None, raw=False, polling=True, **operation_config):
    """Create or update a certificate purchase order.

    Description for Create or update a certificate purchase order.

    :param resource_group_name: Name of the resource group to which the
     resource belongs.
    :type resource_group_name: str
    :param certificate_order_name: Name of the certificate order.
    :type certificate_order_name: str
    :param certificate_distinguished_name: Distinguished name to use for
     the certificate order.
    :type certificate_distinguished_name:
     ~azure.mgmt.web.v2020_06_01.models.AppServiceCertificateOrder
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: The poller return type is ClientRawResponse, the
     direct response alongside the deserialized response
    :param polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :return: An instance of LROPoller that returns
     AppServiceCertificateOrder or
     ClientRawResponse<AppServiceCertificateOrder> if raw==True
    :rtype:
     ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.web.v2020_06_01.models.AppServiceCertificateOrder]
     or
     ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.web.v2020_06_01.models.AppServiceCertificateOrder]]
    :raises:
     :class:`DefaultErrorResponseException<azure.mgmt.web.v2020_06_01.models.DefaultErrorResponseException>`
    """
    # Kick off the initial PUT; raw=True so the poller gets the raw response.
    raw_result = self._create_or_update_initial(
        resource_group_name,
        certificate_order_name,
        certificate_distinguished_name,
        custom_headers=custom_headers,
        raw=True,
        **operation_config
    )

    def get_long_running_output(response):
        # Final deserialization once the LRO completes.
        deserialized = self._deserialize('AppServiceCertificateOrder', response)
        if raw:
            return ClientRawResponse(deserialized, response)
        return deserialized

    lro_delay = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)

    # Select the polling strategy: default ARM polling, none, or custom.
    if polling is True:
        polling_method = ARMPolling(lro_delay, **operation_config)
    elif polling is False:
        polling_method = NoPolling()
    else:
        polling_method = polling

    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}'}
def delete(
        self, resource_group_name, certificate_order_name, custom_headers=None, raw=False, **operation_config):
    """Delete an existing certificate order.

    Description for Delete an existing certificate order.

    :param resource_group_name: Name of the resource group to which the
     resource belongs.
    :type resource_group_name: str
    :param certificate_order_name: Name of the certificate order.
    :type certificate_order_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`DefaultErrorResponseException<azure.mgmt.web.v2020_06_01.models.DefaultErrorResponseException>`
    """
    # Fill the URL template from the operation metadata.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
        'certificateOrderName': self._serialize.url("certificate_order_name", certificate_order_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(self.delete.metadata['url'], **path_format_arguments)

    query_parameters = {
        'api-version': self._serialize.query("self.api_version", self.api_version, 'str'),
    }

    # No Accept header: the successful response has no body to deserialize.
    header_parameters = {}
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    request = self._client.delete(url, query_parameters, header_parameters)
    response = self._client.send(request, stream=False, **operation_config)

    # 200 and 204 both indicate successful deletion.
    if response.status_code not in [200, 204]:
        raise models.DefaultErrorResponseException(self._deserialize, response)

    if raw:
        return ClientRawResponse(None, response)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}'}
def update(
        self, resource_group_name, certificate_order_name, certificate_distinguished_name, custom_headers=None, raw=False, **operation_config):
    """Create or update a certificate purchase order.

    Description for Create or update a certificate purchase order.

    :param resource_group_name: Name of the resource group to which the
     resource belongs.
    :type resource_group_name: str
    :param certificate_order_name: Name of the certificate order.
    :type certificate_order_name: str
    :param certificate_distinguished_name: Distinguished name to use for
     the certificate order.
    :type certificate_distinguished_name:
     ~azure.mgmt.web.v2020_06_01.models.AppServiceCertificateOrderPatchResource
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: AppServiceCertificateOrder or ClientRawResponse if raw=true
    :rtype: ~azure.mgmt.web.v2020_06_01.models.AppServiceCertificateOrder
     or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`DefaultErrorResponseException<azure.mgmt.web.v2020_06_01.models.DefaultErrorResponseException>`
    """
    # Fill the URL template from the operation metadata.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
        'certificateOrderName': self._serialize.url("certificate_order_name", certificate_order_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(self.update.metadata['url'], **path_format_arguments)

    query_parameters = {
        'api-version': self._serialize.query("self.api_version", self.api_version, 'str'),
    }

    # accept-language is applied after custom_headers so it wins.
    header_parameters = {
        'Accept': 'application/json',
        'Content-Type': 'application/json; charset=utf-8',
    }
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Serialize the PATCH body and send.
    body_content = self._serialize.body(certificate_distinguished_name, 'AppServiceCertificateOrderPatchResource')
    request = self._client.patch(url, query_parameters, header_parameters, body_content)
    response = self._client.send(request, stream=False, **operation_config)

    if response.status_code not in [200, 201]:
        raise models.DefaultErrorResponseException(self._deserialize, response)

    # 200 and 201 both carry the same payload model.
    deserialized = None
    if response.status_code in (200, 201):
        deserialized = self._deserialize('AppServiceCertificateOrder', response)

    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}'}
def list_certificates(
        self, resource_group_name, certificate_order_name, custom_headers=None, raw=False, **operation_config):
    """List all certificates associated with a certificate order.

    Description for List all certificates associated with a certificate
    order.

    :param resource_group_name: Name of the resource group to which the
     resource belongs.
    :type resource_group_name: str
    :param certificate_order_name: Name of the certificate order.
    :type certificate_order_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: An iterator like instance of AppServiceCertificateResource
    :rtype:
     ~azure.mgmt.web.v2020_06_01.models.AppServiceCertificateResourcePaged[~azure.mgmt.web.v2020_06_01.models.AppServiceCertificateResource]
    :raises:
     :class:`DefaultErrorResponseException<azure.mgmt.web.v2020_06_01.models.DefaultErrorResponseException>`
    """
    def prepare_request(next_link=None):
        # First page is built from the URL template; subsequent pages
        # follow the server-provided next_link verbatim.
        if next_link:
            url = next_link
            query_parameters = {}
        else:
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
                'certificateOrderName': self._serialize.url("certificate_order_name", certificate_order_name, 'str'),
                'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
            }
            url = self._client.format_url(self.list_certificates.metadata['url'], **path_format_arguments)
            query_parameters = {
                'api-version': self._serialize.query("self.api_version", self.api_version, 'str'),
            }

        # accept-language is applied after custom_headers so it wins.
        header_parameters = {'Accept': 'application/json'}
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        return self._client.get(url, query_parameters, header_parameters)

    def internal_paging(next_link=None):
        # Fetch one page and surface any non-200 as the typed error.
        response = self._client.send(
            prepare_request(next_link), stream=False, **operation_config)
        if response.status_code not in [200]:
            raise models.DefaultErrorResponseException(self._deserialize, response)
        return response

    # When raw is requested the paged iterator collects response headers.
    header_dict = {} if raw else None
    return models.AppServiceCertificateResourcePaged(
        internal_paging, self._deserialize.dependencies, header_dict)
list_certificates.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/certificates'}
def get_certificate(
        self, resource_group_name, certificate_order_name, name, custom_headers=None, raw=False, **operation_config):
    """Get the certificate associated with a certificate order.

    Description for Get the certificate associated with a certificate
    order.

    :param resource_group_name: Name of the resource group to which the
     resource belongs.
    :type resource_group_name: str
    :param certificate_order_name: Name of the certificate order.
    :type certificate_order_name: str
    :param name: Name of the certificate.
    :type name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: AppServiceCertificateResource or ClientRawResponse if
     raw=true
    :rtype:
     ~azure.mgmt.web.v2020_06_01.models.AppServiceCertificateResource or
     ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`DefaultErrorResponseException<azure.mgmt.web.v2020_06_01.models.DefaultErrorResponseException>`
    """
    # Fill the URL template from the operation metadata.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
        'certificateOrderName': self._serialize.url("certificate_order_name", certificate_order_name, 'str'),
        'name': self._serialize.url("name", name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(self.get_certificate.metadata['url'], **path_format_arguments)

    query_parameters = {
        'api-version': self._serialize.query("self.api_version", self.api_version, 'str'),
    }

    # accept-language is applied after custom_headers so it wins.
    header_parameters = {'Accept': 'application/json'}
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    request = self._client.get(url, query_parameters, header_parameters)
    response = self._client.send(request, stream=False, **operation_config)

    if response.status_code not in [200]:
        raise models.DefaultErrorResponseException(self._deserialize, response)

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('AppServiceCertificateResource', response)

    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
get_certificate.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/certificates/{name}'}
def _create_or_update_certificate_initial(
        self, resource_group_name, certificate_order_name, name, key_vault_certificate, custom_headers=None, raw=False, **operation_config):
    # Initial PUT for the create_or_update_certificate long-running operation.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
        'certificateOrderName': self._serialize.url("certificate_order_name", certificate_order_name, 'str'),
        'name': self._serialize.url("name", name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(self.create_or_update_certificate.metadata['url'], **path_format_arguments)

    query_parameters = {
        'api-version': self._serialize.query("self.api_version", self.api_version, 'str'),
    }

    # accept-language is applied after custom_headers so it wins.
    header_parameters = {
        'Accept': 'application/json',
        'Content-Type': 'application/json; charset=utf-8',
    }
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Serialize the body and send the PUT.
    body_content = self._serialize.body(key_vault_certificate, 'AppServiceCertificateResource')
    request = self._client.put(url, query_parameters, header_parameters, body_content)
    response = self._client.send(request, stream=False, **operation_config)

    if response.status_code not in [200, 201]:
        raise models.DefaultErrorResponseException(self._deserialize, response)

    # Both 200 (updated) and 201 (created) carry the same payload model.
    deserialized = None
    if response.status_code in (200, 201):
        deserialized = self._deserialize('AppServiceCertificateResource', response)

    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def create_or_update_certificate(
        self, resource_group_name, certificate_order_name, name, key_vault_certificate, custom_headers=None, raw=False, polling=True, **operation_config):
    """Creates or updates a certificate and associates with key vault secret.

    Description for Creates or updates a certificate and associates with
    key vault secret.

    :param resource_group_name: Name of the resource group to which the
     resource belongs.
    :type resource_group_name: str
    :param certificate_order_name: Name of the certificate order.
    :type certificate_order_name: str
    :param name: Name of the certificate.
    :type name: str
    :param key_vault_certificate: Key vault certificate resource Id.
    :type key_vault_certificate:
     ~azure.mgmt.web.v2020_06_01.models.AppServiceCertificateResource
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: The poller return type is ClientRawResponse, the
     direct response alongside the deserialized response
    :param polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :return: An instance of LROPoller that returns
     AppServiceCertificateResource or
     ClientRawResponse<AppServiceCertificateResource> if raw==True
    :rtype:
     ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.web.v2020_06_01.models.AppServiceCertificateResource]
     or
     ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.web.v2020_06_01.models.AppServiceCertificateResource]]
    :raises:
     :class:`DefaultErrorResponseException<azure.mgmt.web.v2020_06_01.models.DefaultErrorResponseException>`
    """
    # Kick off the initial PUT; raw=True so the poller gets the raw response.
    raw_result = self._create_or_update_certificate_initial(
        resource_group_name,
        certificate_order_name,
        name,
        key_vault_certificate,
        custom_headers=custom_headers,
        raw=True,
        **operation_config
    )

    def get_long_running_output(response):
        # Final deserialization once the LRO completes.
        deserialized = self._deserialize('AppServiceCertificateResource', response)
        if raw:
            return ClientRawResponse(deserialized, response)
        return deserialized

    lro_delay = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)

    # Select the polling strategy: default ARM polling, none, or custom.
    if polling is True:
        polling_method = ARMPolling(lro_delay, **operation_config)
    elif polling is False:
        polling_method = NoPolling()
    else:
        polling_method = polling

    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
create_or_update_certificate.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/certificates/{name}'}
def delete_certificate(
        self, resource_group_name, certificate_order_name, name, custom_headers=None, raw=False, **operation_config):
    """Delete the certificate associated with a certificate order.

    Description for Delete the certificate associated with a certificate
    order.

    :param resource_group_name: Name of the resource group to which the
     resource belongs.
    :type resource_group_name: str
    :param certificate_order_name: Name of the certificate order.
    :type certificate_order_name: str
    :param name: Name of the certificate.
    :type name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`DefaultErrorResponseException<azure.mgmt.web.v2020_06_01.models.DefaultErrorResponseException>`
    """
    # Fill the URL template from the operation metadata.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
        'certificateOrderName': self._serialize.url("certificate_order_name", certificate_order_name, 'str'),
        'name': self._serialize.url("name", name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(self.delete_certificate.metadata['url'], **path_format_arguments)

    query_parameters = {
        'api-version': self._serialize.query("self.api_version", self.api_version, 'str'),
    }

    # No Accept header: the successful response has no body to deserialize.
    header_parameters = {}
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    request = self._client.delete(url, query_parameters, header_parameters)
    response = self._client.send(request, stream=False, **operation_config)

    # 200 and 204 both indicate successful deletion.
    if response.status_code not in [200, 204]:
        raise models.DefaultErrorResponseException(self._deserialize, response)

    if raw:
        return ClientRawResponse(None, response)
delete_certificate.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/certificates/{name}'}
def update_certificate(
        self, resource_group_name, certificate_order_name, name, key_vault_certificate, custom_headers=None, raw=False, **operation_config):
    """Creates or updates a certificate and associates with key vault secret.

    Description for Creates or updates a certificate and associates with
    key vault secret.

    :param resource_group_name: Name of the resource group to which the
     resource belongs.
    :type resource_group_name: str
    :param certificate_order_name: Name of the certificate order.
    :type certificate_order_name: str
    :param name: Name of the certificate.
    :type name: str
    :param key_vault_certificate: Key vault certificate resource Id.
    :type key_vault_certificate:
     ~azure.mgmt.web.v2020_06_01.models.AppServiceCertificatePatchResource
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: AppServiceCertificateResource or ClientRawResponse if
     raw=true
    :rtype:
     ~azure.mgmt.web.v2020_06_01.models.AppServiceCertificateResource or
     ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`DefaultErrorResponseException<azure.mgmt.web.v2020_06_01.models.DefaultErrorResponseException>`
    """
    # Fill the URL template from the operation metadata.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
        'certificateOrderName': self._serialize.url("certificate_order_name", certificate_order_name, 'str'),
        'name': self._serialize.url("name", name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(self.update_certificate.metadata['url'], **path_format_arguments)

    query_parameters = {
        'api-version': self._serialize.query("self.api_version", self.api_version, 'str'),
    }

    # accept-language is applied after custom_headers so it wins.
    header_parameters = {
        'Accept': 'application/json',
        'Content-Type': 'application/json; charset=utf-8',
    }
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Serialize the PATCH body and send.
    body_content = self._serialize.body(key_vault_certificate, 'AppServiceCertificatePatchResource')
    request = self._client.patch(url, query_parameters, header_parameters, body_content)
    response = self._client.send(request, stream=False, **operation_config)

    if response.status_code not in [200, 201]:
        raise models.DefaultErrorResponseException(self._deserialize, response)

    # 200 and 201 both carry the same payload model.
    deserialized = None
    if response.status_code in (200, 201):
        deserialized = self._deserialize('AppServiceCertificateResource', response)

    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
update_certificate.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/certificates/{name}'}
def reissue(
        self, resource_group_name, certificate_order_name, reissue_certificate_order_request, custom_headers=None, raw=False, **operation_config):
    """Reissue an existing certificate order.

    Description for Reissue an existing certificate order.

    :param resource_group_name: Name of the resource group to which the
     resource belongs.
    :type resource_group_name: str
    :param certificate_order_name: Name of the certificate order.
    :type certificate_order_name: str
    :param reissue_certificate_order_request: Parameters for the reissue.
    :type reissue_certificate_order_request:
     ~azure.mgmt.web.v2020_06_01.models.ReissueCertificateOrderRequest
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`DefaultErrorResponseException<azure.mgmt.web.v2020_06_01.models.DefaultErrorResponseException>`
    """
    # Expand the URL template attached to this operation's metadata.
    url = self._client.format_url(
        self.reissue.metadata['url'],
        **{
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
            'certificateOrderName': self._serialize.url("certificate_order_name", certificate_order_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        })

    # Query string carries only the service API version.
    query_parameters = {
        'api-version': self._serialize.query("self.api_version", self.api_version, 'str'),
    }

    # Standard headers plus any caller-supplied overrides.
    header_parameters = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Serialize the body and issue the POST.
    body_content = self._serialize.body(reissue_certificate_order_request, 'ReissueCertificateOrderRequest')
    request = self._client.post(url, query_parameters, header_parameters, body_content)
    response = self._client.send(request, stream=False, **operation_config)

    # Success is signalled by 204 No Content; anything else is a service error.
    if response.status_code not in [204]:
        raise models.DefaultErrorResponseException(self._deserialize, response)

    if raw:
        return ClientRawResponse(None, response)
reissue.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/reissue'}
def renew(
        self, resource_group_name, certificate_order_name, renew_certificate_order_request, custom_headers=None, raw=False, **operation_config):
    """Renew an existing certificate order.

    Description for Renew an existing certificate order.

    :param resource_group_name: Name of the resource group to which the
     resource belongs.
    :type resource_group_name: str
    :param certificate_order_name: Name of the certificate order.
    :type certificate_order_name: str
    :param renew_certificate_order_request: Renew parameters
    :type renew_certificate_order_request:
     ~azure.mgmt.web.v2020_06_01.models.RenewCertificateOrderRequest
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`DefaultErrorResponseException<azure.mgmt.web.v2020_06_01.models.DefaultErrorResponseException>`
    """
    # Expand the operation's URL template with the validated path pieces.
    url = self._client.format_url(
        self.renew.metadata['url'],
        **{
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
            'certificateOrderName': self._serialize.url("certificate_order_name", certificate_order_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        })

    # Only the API version goes on the query string.
    query_parameters = {
        'api-version': self._serialize.query("self.api_version", self.api_version, 'str'),
    }

    # Assemble request headers, honoring client configuration.
    header_parameters = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Serialize the renewal parameters and POST them.
    body_content = self._serialize.body(renew_certificate_order_request, 'RenewCertificateOrderRequest')
    request = self._client.post(url, query_parameters, header_parameters, body_content)
    response = self._client.send(request, stream=False, **operation_config)

    # 204 No Content indicates success; any other status raises.
    if response.status_code not in [204]:
        raise models.DefaultErrorResponseException(self._deserialize, response)

    if raw:
        return ClientRawResponse(None, response)
renew.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/renew'}
def resend_email(
        self, resource_group_name, certificate_order_name, custom_headers=None, raw=False, **operation_config):
    """Resend certificate email.

    Description for Resend certificate email.

    :param resource_group_name: Name of the resource group to which the
     resource belongs.
    :type resource_group_name: str
    :param certificate_order_name: Name of the certificate order.
    :type certificate_order_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`DefaultErrorResponseException<azure.mgmt.web.v2020_06_01.models.DefaultErrorResponseException>`
    """
    # Fill in the URL template for this operation.
    url = self._client.format_url(
        self.resend_email.metadata['url'],
        **{
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
            'certificateOrderName': self._serialize.url("certificate_order_name", certificate_order_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        })

    query_parameters = {
        'api-version': self._serialize.query("self.api_version", self.api_version, 'str'),
    }

    # No body is sent, so no Content-Type header is needed.
    header_parameters = {}
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # POST with an empty body; success is 204 No Content.
    request = self._client.post(url, query_parameters, header_parameters)
    response = self._client.send(request, stream=False, **operation_config)

    if response.status_code not in [204]:
        raise models.DefaultErrorResponseException(self._deserialize, response)

    if raw:
        return ClientRawResponse(None, response)
resend_email.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/resendEmail'}
def resend_request_emails(
        self, resource_group_name, certificate_order_name, name=None, custom_headers=None, raw=False, **operation_config):
    """Resend domain verification ownership email for this certificate order.

    Description for Resend domain verification ownership email for this
    certificate order. (The original docstring said "Verify domain
    ownership", which describes a different operation; this endpoint is
    ``/resendRequestEmails`` and takes a domain name identifier.)

    :param resource_group_name: Name of the resource group to which the
     resource belongs.
    :type resource_group_name: str
    :param certificate_order_name: Name of the certificate order.
    :type certificate_order_name: str
    :param name: Name of the object.
    :type name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`DefaultErrorResponseException<azure.mgmt.web.v2020_06_01.models.DefaultErrorResponseException>`
    """
    # The wire payload is a NameIdentifier wrapping the optional name.
    name_identifier = models.NameIdentifier(name=name)

    # Construct URL
    url = self.resend_request_emails.metadata['url']
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
        'certificateOrderName': self._serialize.url("certificate_order_name", certificate_order_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct body
    body_content = self._serialize.body(name_identifier, 'NameIdentifier')

    # Construct and send request; 204 No Content signals success.
    request = self._client.post(url, query_parameters, header_parameters, body_content)
    response = self._client.send(request, stream=False, **operation_config)

    if response.status_code not in [204]:
        raise models.DefaultErrorResponseException(self._deserialize, response)

    if raw:
        client_raw_response = ClientRawResponse(None, response)
        return client_raw_response
resend_request_emails.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/resendRequestEmails'}
def retrieve_site_seal(
        self, resource_group_name, certificate_order_name, light_theme=None, locale=None, custom_headers=None, raw=False, **operation_config):
    """Retrieve the site seal information for an issued certificate.

    Description for retrieving the site seal of a certificate order. (The
    original docstring said "Verify domain ownership", which describes a
    different operation; this endpoint is ``/retrieveSiteSeal`` and
    returns a SiteSeal.)

    :param resource_group_name: Name of the resource group to which the
     resource belongs.
    :type resource_group_name: str
    :param certificate_order_name: Name of the certificate order.
    :type certificate_order_name: str
    :param light_theme: If <code>true</code> use the light color theme for
     site seal; otherwise, use the default color theme.
    :type light_theme: bool
    :param locale: Locale of site seal.
    :type locale: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: SiteSeal or ClientRawResponse if raw=true
    :rtype: ~azure.mgmt.web.v2020_06_01.models.SiteSeal or
     ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`DefaultErrorResponseException<azure.mgmt.web.v2020_06_01.models.DefaultErrorResponseException>`
    """
    # Wrap the theme/locale options in the request model.
    site_seal_request = models.SiteSealRequest(light_theme=light_theme, locale=locale)

    # Construct URL
    url = self.retrieve_site_seal.metadata['url']
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
        'certificateOrderName': self._serialize.url("certificate_order_name", certificate_order_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Accept'] = 'application/json'
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct body
    body_content = self._serialize.body(site_seal_request, 'SiteSealRequest')

    # Construct and send request
    request = self._client.post(url, query_parameters, header_parameters, body_content)
    response = self._client.send(request, stream=False, **operation_config)

    if response.status_code not in [200]:
        raise models.DefaultErrorResponseException(self._deserialize, response)

    # Deserialize the SiteSeal payload on success.
    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('SiteSeal', response)

    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response

    return deserialized
retrieve_site_seal.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/retrieveSiteSeal'}
def verify_domain_ownership(
        self, resource_group_name, certificate_order_name, custom_headers=None, raw=False, **operation_config):
    """Verify domain ownership for this certificate order.

    Description for Verify domain ownership for this certificate order.

    :param resource_group_name: Name of the resource group to which the
     resource belongs.
    :type resource_group_name: str
    :param certificate_order_name: Name of the certificate order.
    :type certificate_order_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`DefaultErrorResponseException<azure.mgmt.web.v2020_06_01.models.DefaultErrorResponseException>`
    """
    # Resolve the templated URL for this operation.
    url = self._client.format_url(
        self.verify_domain_ownership.metadata['url'],
        **{
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
            'certificateOrderName': self._serialize.url("certificate_order_name", certificate_order_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        })

    query_parameters = {
        'api-version': self._serialize.query("self.api_version", self.api_version, 'str'),
    }

    # Body-less POST, so headers carry only client plumbing.
    header_parameters = {}
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Send and expect 204 No Content on success.
    request = self._client.post(url, query_parameters, header_parameters)
    response = self._client.send(request, stream=False, **operation_config)

    if response.status_code not in [204]:
        raise models.DefaultErrorResponseException(self._deserialize, response)

    if raw:
        return ClientRawResponse(None, response)
verify_domain_ownership.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/verifyDomainOwnership'}
def retrieve_certificate_actions(
        self, resource_group_name, name, custom_headers=None, raw=False, **operation_config):
    """Retrieve the list of certificate actions.

    Description for Retrieve the list of certificate actions.

    :param resource_group_name: Name of the resource group to which the
     resource belongs.
    :type resource_group_name: str
    :param name: Name of the certificate order.
    :type name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: list or ClientRawResponse if raw=true
    :rtype:
     list[~azure.mgmt.web.v2020_06_01.models.CertificateOrderAction] or
     ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`DefaultErrorResponseException<azure.mgmt.web.v2020_06_01.models.DefaultErrorResponseException>`
    """
    # Resolve the templated URL for this operation.
    url = self._client.format_url(
        self.retrieve_certificate_actions.metadata['url'],
        **{
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
            'name': self._serialize.url("name", name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        })

    query_parameters = {
        'api-version': self._serialize.query("self.api_version", self.api_version, 'str'),
    }

    # JSON response expected; no request body is sent.
    header_parameters = {'Accept': 'application/json'}
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    request = self._client.post(url, query_parameters, header_parameters)
    response = self._client.send(request, stream=False, **operation_config)

    if response.status_code not in [200]:
        raise models.DefaultErrorResponseException(self._deserialize, response)

    # Deserialize the action list on success.
    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('[CertificateOrderAction]', response)

    if raw:
        return ClientRawResponse(deserialized, response)

    return deserialized
retrieve_certificate_actions.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{name}/retrieveCertificateActions'}
def retrieve_certificate_email_history(
        self, resource_group_name, name, custom_headers=None, raw=False, **operation_config):
    """Retrieve email history.

    Description for Retrieve email history.

    :param resource_group_name: Name of the resource group to which the
     resource belongs.
    :type resource_group_name: str
    :param name: Name of the certificate order.
    :type name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: list or ClientRawResponse if raw=true
    :rtype: list[~azure.mgmt.web.v2020_06_01.models.CertificateEmail] or
     ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`DefaultErrorResponseException<azure.mgmt.web.v2020_06_01.models.DefaultErrorResponseException>`
    """
    # Resolve the templated URL for this operation.
    url = self._client.format_url(
        self.retrieve_certificate_email_history.metadata['url'],
        **{
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
            'name': self._serialize.url("name", name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        })

    query_parameters = {
        'api-version': self._serialize.query("self.api_version", self.api_version, 'str'),
    }

    # JSON response expected; no request body is sent.
    header_parameters = {'Accept': 'application/json'}
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    request = self._client.post(url, query_parameters, header_parameters)
    response = self._client.send(request, stream=False, **operation_config)

    if response.status_code not in [200]:
        raise models.DefaultErrorResponseException(self._deserialize, response)

    # Deserialize the email history list on success.
    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('[CertificateEmail]', response)

    if raw:
        return ClientRawResponse(deserialized, response)

    return deserialized
retrieve_certificate_email_history.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{name}/retrieveEmailHistory'}
| 50.843399
| 226
| 0.691579
| 7,626
| 72,401
| 6.337136
| 0.034881
| 0.044696
| 0.03377
| 0.029797
| 0.940054
| 0.932771
| 0.924204
| 0.913796
| 0.908767
| 0.90227
| 0
| 0.01002
| 0.215632
| 72,401
| 1,423
| 227
| 50.879129
| 0.840973
| 0.291405
| 0
| 0.81686
| 0
| 0.005814
| 0.191231
| 0.116092
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045058
| false
| 0
| 0.007267
| 0
| 0.114826
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fc8a7223224308f390bb42ff02190edd6c82d30f
| 1,661
|
py
|
Python
|
tests/python/test_stop_grad.py
|
ppwwyyxx/taichi
|
ef0c3367bb06ad78b3457b8f93b5370f14b1d9c4
|
[
"MIT"
] | 1
|
2020-08-04T05:43:54.000Z
|
2020-08-04T05:43:54.000Z
|
tests/python/test_stop_grad.py
|
zf38473013/taichi
|
ad4d7ae04f4e559e84f6dee4a64ad57c3cf0c7fb
|
[
"MIT"
] | null | null | null |
tests/python/test_stop_grad.py
|
zf38473013/taichi
|
ad4d7ae04f4e559e84f6dee4a64ad57c3cf0c7fb
|
[
"MIT"
] | null | null | null |
import taichi as ti
@ti.all_archs
def test_normal_grad():
    """Gradient of sum(x[i]**2) w.r.t. x[i] should be 2*i when x[i] = i."""
    x = ti.var(ti.f32)
    loss = ti.var(ti.f32)
    n = 128

    @ti.layout
    def place():
        ti.root.dense(ti.i, n).place(x)
        ti.root.place(loss)
        ti.root.lazy_grad()

    @ti.kernel
    def func():
        for i in range(n):
            ti.atomic_add(loss, x[i]**2)

    for i in range(n):
        x[i] = i

    # Record the forward pass and backpropagate through it.
    with ti.Tape(loss):
        func()

    # d(loss)/d(x[i]) = 2 * x[i] = 2 * i
    for i in range(n):
        assert x.grad[i] == i * 2
@ti.all_archs
def test_stop_grad():
    """ti.core.stop_grad on x's SNode should zero out x's gradients."""
    x = ti.var(ti.f32)
    loss = ti.var(ti.f32)
    n = 128

    @ti.layout
    def place():
        ti.root.dense(ti.i, n).place(x)
        ti.root.place(loss)
        ti.root.lazy_grad()

    @ti.kernel
    def func():
        for i in range(n):
            # Cut the gradient flow through x before accumulating loss.
            ti.core.stop_grad(x.snode().ptr)
            ti.atomic_add(loss, x[i]**2)

    for i in range(n):
        x[i] = i

    with ti.Tape(loss):
        func()

    # With gradients stopped, nothing should propagate back to x.
    for i in range(n):
        assert x.grad[i] == 0
@ti.all_archs
def test_stop_grad2():
    """Only the loop without stop_grad should contribute to x's gradient."""
    x = ti.var(ti.f32)
    loss = ti.var(ti.f32)
    n = 128

    @ti.layout
    def place():
        ti.root.dense(ti.i, n).place(x)
        ti.root.place(loss)
        ti.root.lazy_grad()

    @ti.kernel
    def func():
        # Two loops: one with stop_grad, one without.
        for i in range(n):
            ti.stop_grad(x)
            ti.atomic_add(loss, x[i]**2)
        for i in range(n):
            ti.atomic_add(loss, x[i]**2)

    for i in range(n):
        x[i] = i

    with ti.Tape(loss):
        func()

    # Without stop_grad both loops would give x.grad[i] = i * 4;
    # with it, only the second loop contributes, i.e. i * 2.
    for i in range(n):
        assert x.grad[i] == i * 2
| 18.054348
| 50
| 0.499097
| 280
| 1,661
| 2.896429
| 0.164286
| 0.049322
| 0.073983
| 0.135635
| 0.8582
| 0.837238
| 0.768187
| 0.768187
| 0.768187
| 0.768187
| 0
| 0.027675
| 0.347381
| 1,661
| 91
| 51
| 18.252747
| 0.72048
| 0.048164
| 0
| 0.892308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.046154
| 1
| 0.138462
| false
| 0
| 0.015385
| 0
| 0.153846
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5db6d55089e8a5e00249c05e08335f434b160220
| 3,730
|
py
|
Python
|
poem/Newpoem.py
|
AllisonW2323/creativecode
|
e7c276de46d6208505f2bd1c2922a2f0e0bd1ecd
|
[
"MIT"
] | null | null | null |
poem/Newpoem.py
|
AllisonW2323/creativecode
|
e7c276de46d6208505f2bd1c2922a2f0e0bd1ecd
|
[
"MIT"
] | null | null | null |
poem/Newpoem.py
|
AllisonW2323/creativecode
|
e7c276de46d6208505f2bd1c2922a2f0e0bd1ecd
|
[
"MIT"
] | null | null | null |
# Poem generator: picks adjectives/nouns/verbs from an uploaded text and
# prints them interleaved with words that rhyme with "dead"/"like"/"love".
# NOTE(review): indentation was lost in this dump, so loop/conditional
# bodies below are not syntactically nested as the author intended;
# restore indentation from the original file before running.
import pronouncing
import random
# NOTE(review): `files` is not defined here — presumably this ran in
# Google Colab (`from google.colab import files`); confirm before reuse.
uploaded = files.upload()
for fn in uploaded.keys():
text = uploaded[fn].decode()
wordlist= text.split(" ")
from textblob import TextBlob
blob= TextBlob (text)
# NOTE(review): this randint result is discarded; purpose unclear.
random.randint(1,6)
blob= TextBlob (text)
import nltk
nltk.download('averaged_perceptron_tagger')
import nltk
nltk.download('punkt')
# Slur filter: matched words are printed as "bleep" instead.
bannedword=["chink", "Chink", "Niger", "niger"] #had to write them so my bot would not
# Target syllable count for rhyme candidates (1-3).
syll = random.randint(1,3)
deads = []
likes= []
loves= []
adjlist=[]
# Collect adjectives (POS tag "JJ") from the text.
for word,tag in blob.tags:
if tag == "JJ":
adjlist.append(word)
for i in range (1):
word1=(adjlist[random.randint(0,len(adjlist)-1)])
# NOTE(review): 12787 is presumably the corpus word count — verify;
# it will IndexError on shorter texts.
word2= ( blob.words[random.randint(0,12787)])
#poetry code borrowed from Whalen
dead = random.choice(pronouncing.rhymes("dead"))
# Keep only rhymes whose syllable count matches `syll`.
for r in pronouncing.rhymes("dead"):
rsyl = pronouncing.phones_for_word(r)
if (pronouncing.syllable_count(rsyl[0]) is syll):
deads.append(r)
print(dead)
#end of borrowed
if word1 not in bannedword:
print(word1)
else:
print("bleep")
print(" ")
nounlist=[]
# Collect nouns (POS tag "NN") and print one plus a "dead" rhyme.
for word,tag in blob.tags:
if tag == "NN":
nounlist.append(word)
for i in range (1):
word1=(nounlist[random.randint(0,len(nounlist)-1)])
#poetry code borrowed from Whalen
dead = random.choice(pronouncing.rhymes("dead"))
for r in pronouncing.rhymes("dead"):
rsyl = pronouncing.phones_for_word(r)
if (pronouncing.syllable_count(rsyl[0]) is syll):
deads.append(r)
print(dead)
#end of borrowed
if word1 not in bannedword:
print(word1)
else:
print("bleep")
print(" ")
verblist=[]
# Collect verbs (POS tag "VB") and print one plus a "like" rhyme.
for word,tag in blob.tags:
if tag == "VB":
verblist.append(word)
for i in range (1):
word1=(verblist[random.randint(0,len(verblist)-1)])
print( blob.words[random.randint(0,12787)])
#poetry code borrowed from Whalen
like = random.choice(pronouncing.rhymes("like"))
for r in pronouncing.rhymes("like"):
rsyl = pronouncing.phones_for_word(r)
if (pronouncing.syllable_count(rsyl[0]) is syll):
likes.append(r)
print(like)
#end of borrowed
if word1 not in bannedword:
print(word1)
else:
print("bleep")
print(" ")
# Print a random full sentence from the source text.
for i in range(1):
print (blob.sentences[random.randint (0,len(blob.sentences))])
#poetry code borrowed from whalen
like = random.choice(pronouncing.rhymes("like"))
for r in pronouncing.rhymes("like"):
rsyl = pronouncing.phones_for_word(r)
if (pronouncing.syllable_count(rsyl[0]) is syll):
likes.append(r)
print(like)
#end of borrowed
verblist=[]
for word,tag in blob.tags:
if tag == "VB":
verblist.append(word)
for i in range (1):
word1=(verblist[random.randint(0,len(verblist)-1)])
print( blob.words[random.randint(0,12787)])
#poetry code borrowed from Whalen
love = random.choice(pronouncing.rhymes("love"))
for r in pronouncing.rhymes("love"):
rsyl = pronouncing.phones_for_word(r)
if (pronouncing.syllable_count(rsyl[0]) is syll):
loves.append(r)
print(love)
#end of borrowed
if word1 not in bannedword:
print(word1)
else:
print("bleep")
print(" ")
for i in range(1):
print (blob.sentences[random.randint (0,len(blob.sentences))])
nounlist=[]
for word,tag in blob.tags:
if tag == "NN":
nounlist.append(word)
for i in range (1):
word1=(nounlist[random.randint(0,len(nounlist)-1)])
#poetry code borrowed from Whalen
love = random.choice(pronouncing.rhymes("love"))
for r in pronouncing.rhymes("love"):
rsyl = pronouncing.phones_for_word(r)
if (pronouncing.syllable_count(rsyl[0]) is syll):
loves.append(r)
print(love)
#end of borrowed
if word1 not in bannedword:
print(word1)
else:
print("bleep")
| 20.053763
| 88
| 0.670241
| 534
| 3,730
| 4.644195
| 0.155431
| 0.062903
| 0.056452
| 0.031048
| 0.840323
| 0.840323
| 0.840323
| 0.840323
| 0.819355
| 0.819355
| 0
| 0.020778
| 0.187131
| 3,730
| 185
| 89
| 20.162162
| 0.797164
| 0.085523
| 0
| 0.827586
| 0
| 0
| 0.062132
| 0.007656
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.043103
| 0
| 0.043103
| 0.206897
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5dd783401f7d4b9a0737306ade4ee0e2021cabd5
| 10,226
|
py
|
Python
|
Algorithm.Python/stubs/QuantConnect/Data/Custom/SEC.py
|
gaoxiaojun/Lean
|
9dca43bccb720d0df91e4bfc1d363b71e3a36cb5
|
[
"Apache-2.0"
] | 2
|
2020-12-08T11:27:20.000Z
|
2021-04-06T13:21:15.000Z
|
Algorithm.Python/stubs/QuantConnect/Data/Custom/SEC.py
|
gaoxiaojun/Lean
|
9dca43bccb720d0df91e4bfc1d363b71e3a36cb5
|
[
"Apache-2.0"
] | null | null | null |
Algorithm.Python/stubs/QuantConnect/Data/Custom/SEC.py
|
gaoxiaojun/Lean
|
9dca43bccb720d0df91e4bfc1d363b71e3a36cb5
|
[
"Apache-2.0"
] | 1
|
2020-10-13T00:49:17.000Z
|
2020-10-13T00:49:17.000Z
|
from .__SEC_1 import *
import typing
import System.IO
import System
import QuantConnect.Data.Custom.SEC
import QuantConnect.Data
import QuantConnect
import datetime
# no functions
# classes
class ISECReport(QuantConnect.Data.IBaseData):
    """
    Base interface for all SEC report types.

    Using an interface, we can retrieve all report types with a single
    call to QuantConnect.Data.Slice.Get
    """
    # Stub attribute declaration: the parsed SEC submission for this report.
    Report: QuantConnect.Data.Custom.SEC.SECReportSubmission
class SECReport10K(QuantConnect.Data.BaseData, QuantConnect.Data.Custom.SEC.ISECReport, QuantConnect.Data.IBaseData):
    """
    SEC 10-K report (annual earnings) QuantConnect.Data.BaseData implementation.
    Using this class, you can retrieve SEC report data for a security if it exists.
    If the ticker you want no longer trades, you can also use the CIK of the company
    you want data for as well except for currently traded stocks. This may change in the future.

    SECReport10K()
    SECReport10K(report: SECReportSubmission)
    """
    # Type-stub methods: bodies are intentionally `pass`.

    @typing.overload
    def Clone(self) -> QuantConnect.Data.BaseData:
        pass

    @typing.overload
    def Clone(self, fillForward: bool) -> QuantConnect.Data.BaseData:
        pass

    def Clone(self, *args) -> QuantConnect.Data.BaseData:
        pass

    def DefaultResolution(self) -> QuantConnect.Resolution:
        pass

    @typing.overload
    def GetSource(self, config: QuantConnect.Data.SubscriptionDataConfig, date: datetime.datetime, isLiveMode: bool) -> QuantConnect.Data.SubscriptionDataSource:
        pass

    @typing.overload
    def GetSource(self, config: QuantConnect.Data.SubscriptionDataConfig, date: datetime.datetime, datafeed: QuantConnect.DataFeedEndpoint) -> str:
        pass

    def GetSource(self, *args) -> str:
        pass

    @typing.overload
    def Reader(self, config: QuantConnect.Data.SubscriptionDataConfig, line: str, date: datetime.datetime, isLiveMode: bool) -> QuantConnect.Data.BaseData:
        pass

    @typing.overload
    def Reader(self, config: QuantConnect.Data.SubscriptionDataConfig, stream: System.IO.StreamReader, date: datetime.datetime, isLiveMode: bool) -> QuantConnect.Data.BaseData:
        pass

    @typing.overload
    def Reader(self, config: QuantConnect.Data.SubscriptionDataConfig, line: str, date: datetime.datetime, datafeed: QuantConnect.DataFeedEndpoint) -> QuantConnect.Data.BaseData:
        pass

    def Reader(self, *args) -> QuantConnect.Data.BaseData:
        pass

    def RequiresMapping(self) -> bool:
        pass

    def SupportedResolutions(self) -> typing.List[QuantConnect.Resolution]:
        pass

    @typing.overload
    def __init__(self) -> QuantConnect.Data.Custom.SEC.SECReport10K:
        pass

    @typing.overload
    def __init__(self, report: QuantConnect.Data.Custom.SEC.SECReportSubmission) -> QuantConnect.Data.Custom.SEC.SECReport10K:
        pass

    def __init__(self, *args) -> QuantConnect.Data.Custom.SEC.SECReport10K:
        pass

    # Stub attribute declaration: the parsed SEC submission for this report.
    Report: QuantConnect.Data.Custom.SEC.SECReportSubmission
class SECReport10Q(QuantConnect.Data.BaseData, QuantConnect.Data.Custom.SEC.ISECReport, QuantConnect.Data.IBaseData):
    """
    SEC 10-Q report (quarterly earnings) QuantConnect.Data.BaseData implementation.
    Using this class, you can retrieve SEC report data for a security if it exists.
    If the ticker you want no longer trades, you can also use the CIK of the company
    you want data for as well except for currently traded stocks. This may change in the future.
    SECReport10Q()
    SECReport10Q(report: SECReportSubmission)
    """
    # Auto-generated stub: signatures only, bodies live in the .NET assembly.
    @typing.overload
    def Clone(self) -> QuantConnect.Data.BaseData:
        pass
    @typing.overload
    def Clone(self, fillForward: bool) -> QuantConnect.Data.BaseData:
        pass
    # Runtime dispatcher covering both Clone overloads above.
    def Clone(self, *args) -> QuantConnect.Data.BaseData:
        pass
    def DefaultResolution(self) -> QuantConnect.Resolution:
        pass
    @typing.overload
    def GetSource(self, config: QuantConnect.Data.SubscriptionDataConfig, date: datetime.datetime, isLiveMode: bool) -> QuantConnect.Data.SubscriptionDataSource:
        pass
    @typing.overload
    def GetSource(self, config: QuantConnect.Data.SubscriptionDataConfig, date: datetime.datetime, datafeed: QuantConnect.DataFeedEndpoint) -> str:
        pass
    # Runtime dispatcher covering both GetSource overloads above.
    def GetSource(self, *args) -> str:
        pass
    @typing.overload
    def Reader(self, config: QuantConnect.Data.SubscriptionDataConfig, line: str, date: datetime.datetime, isLiveMode: bool) -> QuantConnect.Data.BaseData:
        pass
    @typing.overload
    def Reader(self, config: QuantConnect.Data.SubscriptionDataConfig, stream: System.IO.StreamReader, date: datetime.datetime, isLiveMode: bool) -> QuantConnect.Data.BaseData:
        pass
    @typing.overload
    def Reader(self, config: QuantConnect.Data.SubscriptionDataConfig, line: str, date: datetime.datetime, datafeed: QuantConnect.DataFeedEndpoint) -> QuantConnect.Data.BaseData:
        pass
    # Runtime dispatcher covering the three Reader overloads above.
    def Reader(self, *args) -> QuantConnect.Data.BaseData:
        pass
    def RequiresMapping(self) -> bool:
        pass
    def SupportedResolutions(self) -> typing.List[QuantConnect.Resolution]:
        pass
    @typing.overload
    def __init__(self) -> QuantConnect.Data.Custom.SEC.SECReport10Q:
        pass
    @typing.overload
    def __init__(self, report: QuantConnect.Data.Custom.SEC.SECReportSubmission) -> QuantConnect.Data.Custom.SEC.SECReport10Q:
        pass
    def __init__(self, *args) -> QuantConnect.Data.Custom.SEC.SECReport10Q:
        pass
    # The parsed SEC filing submission backing this 10-Q report.
    Report: QuantConnect.Data.Custom.SEC.SECReportSubmission
class SECReport8K(QuantConnect.Data.BaseData, QuantConnect.Data.Custom.SEC.ISECReport, QuantConnect.Data.IBaseData):
    """
    SEC 8-K report (important investor notices) QuantConnect.Data.BaseData implementation.
    Using this class, you can retrieve SEC report data for a security if it exists.
    If the ticker you want no longer trades, you can also use the CIK of the company
    you want data for as well except for currently traded stocks. This may change in the future.
    SECReport8K()
    SECReport8K(report: SECReportSubmission)
    """
    # Auto-generated stub: signatures only, bodies live in the .NET assembly.
    @typing.overload
    def Clone(self) -> QuantConnect.Data.BaseData:
        pass
    @typing.overload
    def Clone(self, fillForward: bool) -> QuantConnect.Data.BaseData:
        pass
    # Runtime dispatcher covering both Clone overloads above.
    def Clone(self, *args) -> QuantConnect.Data.BaseData:
        pass
    def DefaultResolution(self) -> QuantConnect.Resolution:
        pass
    @typing.overload
    def GetSource(self, config: QuantConnect.Data.SubscriptionDataConfig, date: datetime.datetime, isLiveMode: bool) -> QuantConnect.Data.SubscriptionDataSource:
        pass
    @typing.overload
    def GetSource(self, config: QuantConnect.Data.SubscriptionDataConfig, date: datetime.datetime, datafeed: QuantConnect.DataFeedEndpoint) -> str:
        pass
    # Runtime dispatcher covering both GetSource overloads above.
    def GetSource(self, *args) -> str:
        pass
    @typing.overload
    def Reader(self, config: QuantConnect.Data.SubscriptionDataConfig, line: str, date: datetime.datetime, isLiveMode: bool) -> QuantConnect.Data.BaseData:
        pass
    @typing.overload
    def Reader(self, config: QuantConnect.Data.SubscriptionDataConfig, stream: System.IO.StreamReader, date: datetime.datetime, isLiveMode: bool) -> QuantConnect.Data.BaseData:
        pass
    @typing.overload
    def Reader(self, config: QuantConnect.Data.SubscriptionDataConfig, line: str, date: datetime.datetime, datafeed: QuantConnect.DataFeedEndpoint) -> QuantConnect.Data.BaseData:
        pass
    # Runtime dispatcher covering the three Reader overloads above.
    def Reader(self, *args) -> QuantConnect.Data.BaseData:
        pass
    def RequiresMapping(self) -> bool:
        pass
    def SupportedResolutions(self) -> typing.List[QuantConnect.Resolution]:
        pass
    @typing.overload
    def __init__(self) -> QuantConnect.Data.Custom.SEC.SECReport8K:
        pass
    @typing.overload
    def __init__(self, report: QuantConnect.Data.Custom.SEC.SECReportSubmission) -> QuantConnect.Data.Custom.SEC.SECReport8K:
        pass
    def __init__(self, *args) -> QuantConnect.Data.Custom.SEC.SECReport8K:
        pass
    # The parsed SEC filing submission backing this 8-K report.
    Report: QuantConnect.Data.Custom.SEC.SECReportSubmission
class SECReportBusinessAddress(System.object):
    """ SECReportBusinessAddress() """
    # Business address fields from the filing header (all plain strings).
    City: str
    Phone: str
    State: str
    StreetOne: str
    StreetTwo: str
    Zip: str
class SECReportCompanyData(System.object):
    """ SECReportCompanyData() """
    # Company identification data from the filing header.
    AssignedSic: str
    # CIK identifies the filing company (see the SECReport10K docstring above).
    Cik: str
    ConformedName: str
    FiscalYearEnd: str
    IrsNumber: str
    StateOfIncorporation: str
# NOTE(review): the Newtonsoft base class below is not imported in this stub's
# visible import block — confirm it is brought in via `from .__SEC_1 import *`.
class SECReportDateTimeConverter(Newtonsoft.Json.Converters.IsoDateTimeConverter):
    """
    Specifies format for parsing System.DateTime values from SEC data
    SECReportDateTimeConverter()
    """
class SECReportDocument(System.object):
    """ SECReportDocument() """
    # A single document attachment within an SEC filing.
    Description: str
    Filename: str
    FormType: str
    # Ordering index of this document within the filing.
    Sequence: int
    Text: str
class SECReportFactory(System.object):
    """ SECReportFactory() """
    def CreateSECReport(self, xmlText: str) -> QuantConnect.Data.Custom.SEC.ISECReport:
        # Builds a concrete ISECReport from the raw filing XML text;
        # presumably dispatches on the form type — stub only, body in .NET.
        pass
class SECReportFiler(System.object):
    """ SECReportFiler() """
    # One filer entry of a submission; address/company lists per filing header.
    BusinessAddress: typing.List[QuantConnect.Data.Custom.SEC.SECReportBusinessAddress]
    CompanyData: QuantConnect.Data.Custom.SEC.SECReportCompanyData
    FormerCompanies: typing.List[QuantConnect.Data.Custom.SEC.SECReportFormerCompany]
    MailingAddress: typing.List[QuantConnect.Data.Custom.SEC.SECReportMailAddress]
    Values: typing.List[QuantConnect.Data.Custom.SEC.SECReportFilingValues]
class SECReportFilingValues(System.object):
    """ SECReportFilingValues() """
    # Filing registration values (act, file/film numbers, form type).
    Act: str
    FileNumber: str
    FilmNumber: str
    FormType: str
class SECReportFormerCompany(System.object):
    """ SECReportFormerCompany() """
    # Date the company name change took effect.
    Changed: datetime.datetime
    FormerConformedName: str
class SECReportIndexDirectory(System.object):
    """ SECReportIndexDirectory() """
    # Directory listing of an SEC EDGAR index: contained items plus names.
    Items: typing.List[QuantConnect.Data.Custom.SEC.SECReportIndexItem]
    Name: str
    ParentDirectory: str
| 34.086667
| 178
| 0.711813
| 1,065
| 10,226
| 6.798122
| 0.14554
| 0.172376
| 0.082044
| 0.093232
| 0.773619
| 0.768785
| 0.730111
| 0.722514
| 0.698066
| 0.679834
| 0
| 0.004375
| 0.195384
| 10,226
| 299
| 179
| 34.200669
| 0.875547
| 0.17211
| 0
| 0.668508
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.270718
| false
| 0.270718
| 0.044199
| 0
| 0.580111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 7
|
5def3b33e30f7c47d28fb11e7f095a41815bd14e
| 142,783
|
py
|
Python
|
python/src/ties/test/schema_validation_tests.py
|
Noblis/ties-lib
|
e7c6165ebcd80e11b792fd4bcddf6ce634da0c60
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2020-04-10T19:02:27.000Z
|
2020-04-10T19:02:27.000Z
|
python/src/ties/test/schema_validation_tests.py
|
Noblis/ties-lib
|
e7c6165ebcd80e11b792fd4bcddf6ce634da0c60
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
python/src/ties/test/schema_validation_tests.py
|
Noblis/ties-lib
|
e7c6165ebcd80e11b792fd4bcddf6ce634da0c60
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
################################################################################
# Copyright 2019 Noblis, Inc #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
################################################################################
from __future__ import unicode_literals
import json
import os
import unittest
from tempfile import mkstemp
from unittest import TestCase
from ties.schema_validation import SchemaValidator, TiesSchemaValidator, load_schema, object_relationship_pointer
test_input_str = """\
{
"version": "0.9",
"securityTag": "UNCLASSIFIED",
"objectItems": [
{
"objectId": "a",
"sha256Hash": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"md5Hash": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"authorityInformation": {
"securityTag": "UNCLASSIFIED"
}
}
]
}"""
class SchemaValidatorTests(TestCase):
    """Exercises SchemaValidator with the same fixture as str, file and dict."""

    def setUp(self):
        # Expose the shared fixture in all three input forms the validator accepts.
        self._test_input_str = test_input_str
        fd, self._test_input_file_path = mkstemp()
        with os.fdopen(fd, 'w') as out:
            out.write(self._test_input_str)
        self._test_input_file = open(self._test_input_file_path, 'r', encoding='utf-8')
        self._test_input_dict = json.loads(self._test_input_str)
        self._schema_validator = SchemaValidator()

    def tearDown(self):
        self._test_input_file.close()
        try:
            os.remove(self._test_input_file_path)
        except Exception:  # pylint: disable=broad-except
            # Best-effort temp-file cleanup; failure here must not fail the test.
            pass

    def test_load_schema_ties(self):
        expected = {'version', 'id', 'system', 'organization', 'time', 'description', 'type',
                    'securityTag', 'objectItems', 'objectGroups', 'objectRelationships',
                    'otherInformation'}
        self.assertSetEqual(set(load_schema()['properties'].keys()), expected)

    def test_load_schema_sub_schema(self):
        expected = {'linkageMemberIds', 'linkageDirectionality', 'linkageType',
                    'linkageAssertionId', 'otherInformation'}
        sub_schema = load_schema(json_pointer=object_relationship_pointer)
        self.assertSetEqual(set(sub_schema['properties'].keys()), expected)

    def test_validate_json_str(self):
        self._schema_validator.validate(self._test_input_str)

    def test_validate_json_file(self):
        self._schema_validator.validate(self._test_input_file)

    def test_validate_json_dict(self):
        self._schema_validator.validate(self._test_input_dict)

    def test_all_errors_json_str(self):
        self.assertEqual(self._schema_validator.all_errors(self._test_input_str), [])

    def test_all_errors_json_file(self):
        self.assertEqual(self._schema_validator.all_errors(self._test_input_file), [])

    def test_all_errors_json_dict(self):
        self.assertEqual(self._schema_validator.all_errors(self._test_input_dict), [])
class AnnotationSchemaTests(TestCase):
    """Validates the annotation assertion sub-schema of a TIES document."""

    # JSON location of the single annotation in the fixture document.
    _LOCATION = '/objectItems[0]/objectAssertions/annotations[0]'

    def setUp(self):
        # Annotation with every schema field populated; securityTag may be empty.
        self.annotation = {
            'assertionId': 'a',
            'assertionReferenceId': 'a',
            'assertionReferenceIdLabel': 'a',
            'time': 'a',
            'annotationType': 'a',
            'key': 'a',
            'value': 'a',
            'itemAction': 'a',
            'itemActionTime': 'a',
            'creator': 'a',
            'system': 'a',
            'securityTag': '',
        }
        self.object_item = {
            'objectId': 'a',
            'sha256Hash': 'a' * 64,
            'md5Hash': 'a' * 32,
            'authorityInformation': {
                'securityTag': 'a'
            },
            'objectAssertions': {
                'annotations': [self.annotation]
            }
        }
        self.ties = {
            'version': '0.9',
            'securityTag': 'a',
            'objectItems': [self.object_item]
        }

    def _errors(self):
        # Serialize the current fixture and collect every schema error.
        return TiesSchemaValidator().all_errors(json.dumps(self.ties))

    def _assert_valid(self):
        self.assertEqual(len(self._errors()), 0)

    def _assert_one_error(self, message, location):
        errors = self._errors()
        self.assertEqual(len(errors), 1)
        self.assertEqual(errors[0].message, message)
        self.assertEqual(errors[0].location, location)

    def test_all_fields(self):
        self._assert_valid()

    def test_only_required_fields(self):
        optional = ('assertionReferenceId', 'assertionReferenceIdLabel', 'time', 'key',
                    'itemAction', 'itemActionTime', 'creator', 'system')
        for name in optional:
            del self.annotation[name]
        self._assert_valid()

    def test_all_required_fields_missing(self):
        for name in ('assertionId', 'annotationType', 'value', 'securityTag'):
            del self.annotation[name]
        self._assert_one_error(
            'required properties [annotationType, assertionId, securityTag, value] are missing',
            self._LOCATION)

    def test_additional_field(self):
        self.annotation['foo'] = 'a'
        self._assert_one_error('additional property foo is not allowed', self._LOCATION)

    def test_additional_fields(self):
        self.annotation['foo'] = 'a'
        self.annotation['bar'] = 'a'
        self._assert_one_error('additional properties [bar, foo] are not allowed', self._LOCATION)

    def test_assertion_id_missing(self):
        del self.annotation['assertionId']
        self._assert_one_error('required property assertionId is missing', self._LOCATION)

    def test_assertion_id_too_short(self):
        self.annotation['assertionId'] = ''
        self._assert_one_error(
            "property value '' for assertionId property is too short, minimum length 1",
            self._LOCATION + '/assertionId')

    def test_assertion_id_too_long(self):
        self.annotation['assertionId'] = 'a' * 257
        self._assert_one_error(
            "property value '{}' for assertionId property is too long, maximum length 256".format('a' * 257),
            self._LOCATION + '/assertionId')

    def test_assertion_reference_id_missing(self):
        del self.annotation['assertionReferenceId']
        self._assert_valid()

    def test_assertion_reference_id_too_short(self):
        self.annotation['assertionReferenceId'] = ''
        self._assert_one_error(
            "property value '' for assertionReferenceId property is too short, minimum length 1",
            self._LOCATION + '/assertionReferenceId')

    def test_assertion_reference_id_label_missing(self):
        del self.annotation['assertionReferenceIdLabel']
        self._assert_valid()

    def test_assertion_reference_id_label_too_short(self):
        self.annotation['assertionReferenceIdLabel'] = ''
        self._assert_one_error(
            "property value '' for assertionReferenceIdLabel property is too short, minimum length 1",
            self._LOCATION + '/assertionReferenceIdLabel')

    def test_time_missing(self):
        del self.annotation['time']
        self._assert_valid()

    def test_annotationType_missing(self):
        del self.annotation['annotationType']
        self._assert_one_error('required property annotationType is missing', self._LOCATION)

    def test_annotationType_too_short(self):
        self.annotation['annotationType'] = ''
        self._assert_one_error(
            "property value '' for annotationType property is too short, minimum length 1",
            self._LOCATION + '/annotationType')

    def test_key_missing(self):
        del self.annotation['key']
        self._assert_valid()

    def test_key_too_short(self):
        self.annotation['key'] = ''
        self._assert_one_error(
            "property value '' for key property is too short, minimum length 1",
            self._LOCATION + '/key')

    def test_value_missing(self):
        del self.annotation['value']
        self._assert_one_error('required property value is missing', self._LOCATION)

    def test_value_too_short(self):
        self.annotation['value'] = ''
        self._assert_one_error(
            "property value '' for value property is too short, minimum length 1",
            self._LOCATION + '/value')

    def test_item_action_missing(self):
        del self.annotation['itemAction']
        self._assert_valid()

    def test_item_action_too_short(self):
        self.annotation['itemAction'] = ''
        self._assert_one_error(
            "property value '' for itemAction property is too short, minimum length 1",
            self._LOCATION + '/itemAction')

    def test_item_action_time_missing(self):
        del self.annotation['itemActionTime']
        self._assert_valid()

    def test_creator_missing(self):
        del self.annotation['creator']
        self._assert_valid()

    def test_creator_too_short(self):
        self.annotation['creator'] = ''
        self._assert_one_error(
            "property value '' for creator property is too short, minimum length 1",
            self._LOCATION + '/creator')

    def test_system_missing(self):
        del self.annotation['system']
        self._assert_valid()

    def test_system_too_short(self):
        self.annotation['system'] = ''
        self._assert_one_error(
            "property value '' for system property is too short, minimum length 1",
            self._LOCATION + '/system')

    def test_security_tag_missing(self):
        del self.annotation['securityTag']
        self._assert_one_error('required property securityTag is missing', self._LOCATION)
class AuthorityInformationSchemaTests(TestCase):
    """Validates the authorityInformation sub-schema of a TIES document."""

    # JSON location of the authority information block in the fixture document.
    _LOCATION = '/objectItems[0]/authorityInformation'

    def setUp(self):
        # Authority information with every schema field populated.
        self.authority_information = {
            'collectionId': 'a',
            'collectionIdLabel': 'a',
            'collectionIdAlias': 'a',
            'collectionDescription': 'a',
            'subCollectionId': 'a',
            'subCollectionIdLabel': 'a',
            'subCollectionIdAlias': 'a',
            'subCollectionDescription': 'a',
            'registrationDate': 'a',
            'expirationDate': 'a',
            'securityTag': '',
            'owner': 'a',
        }
        self.object_item = {
            'objectId': 'a',
            'sha256Hash': 'a' * 64,
            'md5Hash': 'a' * 32,
            'authorityInformation': self.authority_information,
        }
        self.ties = {
            'version': '0.9',
            'securityTag': 'a',
            'objectItems': [self.object_item]
        }

    def _errors(self):
        # Serialize the current fixture and collect every schema error.
        return TiesSchemaValidator().all_errors(json.dumps(self.ties))

    def _assert_valid(self):
        self.assertEqual(len(self._errors()), 0)

    def _assert_one_error(self, message, location):
        errors = self._errors()
        self.assertEqual(len(errors), 1)
        self.assertEqual(errors[0].message, message)
        self.assertEqual(errors[0].location, location)

    def test_all_fields(self):
        self._assert_valid()

    def test_only_required_fields(self):
        optional = ('collectionId', 'collectionIdLabel', 'collectionIdAlias',
                    'collectionDescription', 'subCollectionId', 'subCollectionIdLabel',
                    'subCollectionIdAlias', 'subCollectionDescription',
                    'registrationDate', 'expirationDate', 'owner')
        for name in optional:
            del self.authority_information[name]
        self._assert_valid()

    def test_all_required_fields_missing(self):
        del self.authority_information['securityTag']
        self._assert_one_error('required property securityTag is missing', self._LOCATION)

    def test_additional_field(self):
        self.authority_information['foo'] = 'a'
        self._assert_one_error('additional property foo is not allowed', self._LOCATION)

    def test_additional_fields(self):
        self.authority_information['foo'] = 'a'
        self.authority_information['bar'] = 'a'
        self._assert_one_error('additional properties [bar, foo] are not allowed', self._LOCATION)

    def test_collection_id_missing(self):
        del self.authority_information['collectionId']
        self._assert_valid()

    def test_collection_id_too_short(self):
        self.authority_information['collectionId'] = ''
        self._assert_one_error(
            "property value '' for collectionId property is too short, minimum length 1",
            self._LOCATION + '/collectionId')

    def test_collection_id_label_missing(self):
        del self.authority_information['collectionIdLabel']
        self._assert_valid()

    def test_collection_id_label_too_short(self):
        self.authority_information['collectionIdLabel'] = ''
        self._assert_one_error(
            "property value '' for collectionIdLabel property is too short, minimum length 1",
            self._LOCATION + '/collectionIdLabel')

    def test_collection_id_alias_missing(self):
        del self.authority_information['collectionIdAlias']
        self._assert_valid()

    def test_collection_id_alias_too_short(self):
        self.authority_information['collectionIdAlias'] = ''
        self._assert_one_error(
            "property value '' for collectionIdAlias property is too short, minimum length 1",
            self._LOCATION + '/collectionIdAlias')

    def test_collection_description_missing(self):
        del self.authority_information['collectionDescription']
        self._assert_valid()

    def test_collection_description_too_short(self):
        self.authority_information['collectionDescription'] = ''
        self._assert_one_error(
            "property value '' for collectionDescription property is too short, minimum length 1",
            self._LOCATION + '/collectionDescription')

    def test_sub_collection_id_missing(self):
        del self.authority_information['subCollectionId']
        self._assert_valid()

    def test_sub_collection_id_too_short(self):
        self.authority_information['subCollectionId'] = ''
        self._assert_one_error(
            "property value '' for subCollectionId property is too short, minimum length 1",
            self._LOCATION + '/subCollectionId')

    def test_sub_collection_id_label_missing(self):
        del self.authority_information['subCollectionIdLabel']
        self._assert_valid()

    def test_sub_collection_id_label_too_short(self):
        self.authority_information['subCollectionIdLabel'] = ''
        self._assert_one_error(
            "property value '' for subCollectionIdLabel property is too short, minimum length 1",
            self._LOCATION + '/subCollectionIdLabel')

    def test_sub_collection_id_alias_missing(self):
        del self.authority_information['subCollectionIdAlias']
        self._assert_valid()

    def test_sub_collection_id_alias_too_short(self):
        self.authority_information['subCollectionIdAlias'] = ''
        self._assert_one_error(
            "property value '' for subCollectionIdAlias property is too short, minimum length 1",
            self._LOCATION + '/subCollectionIdAlias')

    def test_sub_collection_description_missing(self):
        del self.authority_information['subCollectionDescription']
        self._assert_valid()

    def test_sub_collection_description_too_short(self):
        self.authority_information['subCollectionDescription'] = ''
        self._assert_one_error(
            "property value '' for subCollectionDescription property is too short, minimum length 1",
            self._LOCATION + '/subCollectionDescription')

    def test_registration_date_missing(self):
        del self.authority_information['registrationDate']
        self._assert_valid()

    def test_expiration_date_missing(self):
        del self.authority_information['expirationDate']
        self._assert_valid()

    def test_security_tag_missing(self):
        del self.authority_information['securityTag']
        self._assert_one_error('required property securityTag is missing', self._LOCATION)

    def test_security_tag_too_short(self):
        # securityTag has no minimum length, so an empty value is still valid.
        self.authority_information['securityTag'] = ''
        self._assert_valid()

    def test_owner_missing(self):
        del self.authority_information['owner']
        self._assert_valid()

    def test_owner_too_short(self):
        self.authority_information['owner'] = ''
        self._assert_one_error(
            "property value '' for owner property is too short, minimum length 1",
            self._LOCATION + '/owner')
class ObjectAssertionsSchemaTests(TestCase):
def setUp(self):
self.annotation = {
'assertionId': 'a',
'annotationType': 'a',
'value': 'a',
'securityTag': '',
}
self.supplemental_description = {
'assertionId': 'a',
'informationType': 'a',
'sha256DataHash': 'a' * 64,
'dataSize': 0,
'securityTag': '',
}
self.object_assertions = {
'annotations': [self.annotation],
'supplementalDescriptions': [self.supplemental_description],
}
self.object_item = {
'objectId': 'a',
'sha256Hash': 'a' * 64,
'md5Hash': 'a' * 32,
'authorityInformation': {
'securityTag': '',
},
'objectAssertions': self.object_assertions,
}
self.ties = {
'version': '0.9',
'securityTag': 'a',
'objectItems': [self.object_item]
}
def test_all_fields(self):
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 0)
def test_only_required_fields(self):
del self.object_assertions['annotations']
del self.object_assertions['supplementalDescriptions']
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 0)
def test_all_required_fields_missing(self):
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 0)
def test_additional_field(self):
self.object_assertions['foo'] = 'a'
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].message, 'additional property foo is not allowed')
self.assertEqual(errors[0].location, '/objectItems[0]/objectAssertions')
def test_additional_fields(self):
self.object_assertions['foo'] = 'a'
self.object_assertions['bar'] = 'a'
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].message, 'additional properties [bar, foo] are not allowed')
self.assertEqual(errors[0].location, '/objectItems[0]/objectAssertions')
def test_annotations_missing(self):
del self.object_assertions['annotations']
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 0)
def test_annotations_list_too_short(self):
self.object_assertions['annotations'] = []
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 0)
def test_annotations_list_duplicate_items(self):
self.object_assertions['annotations'].append(self.object_assertions['annotations'][0])
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].message, 'array property annotations has duplicate items at index [0, 1]')
self.assertEqual(errors[0].location, '/objectItems[0]/objectAssertions/annotations')
def test_supplemental_descriptions_missing(self):
del self.object_assertions['supplementalDescriptions']
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 0)
def test_supplemental_descriptions_list_too_short(self):
    """An empty supplementalDescriptions array is allowed by the schema."""
    self.object_assertions['supplementalDescriptions'] = []
    errs = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(len(errs), 0)
def test_supplemental_descriptions_list_duplicate_items(self):
    """Duplicate supplementalDescriptions entries are reported with both indexes."""
    descriptions = self.object_assertions['supplementalDescriptions']
    descriptions.append(descriptions[0])
    errs = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(len(errs), 1)
    err = errs[0]
    self.assertEqual(err.message, 'array property supplementalDescriptions has duplicate items at index [0, 1]')
    self.assertEqual(err.location, '/objectItems[0]/objectAssertions/supplementalDescriptions')
class ObjectItemSchemaTests(TestCase):
    """Schema validation tests for a single entry of the objectItems array."""

    def setUp(self):
        """Build a fully-populated object item inside a minimal TIES document.

        Individual tests remove or corrupt fields before validating.
        """
        self.object_item = {
            'objectId': 'a',
            'mimeType': 'a',
            'sha256Hash': 'a' * 64,
            'md5Hash': 'a' * 32,
            'size': 0,
            'originalPath': 'a',
            'relativeUri': 'a',
            'authorityInformation': {
                'securityTag': '',
            },
            'objectAssertions': {},
            'otherInformation': [],
        }
        self.ties = {
            'version': '0.9',
            'securityTag': 'a',
            'objectItems': [self.object_item],
        }

    def _validate(self):
        """Serialize the current document and return all schema errors."""
        return TiesSchemaValidator().all_errors(json.dumps(self.ties))

    def _assert_valid(self):
        """Assert that the current document produces no schema errors."""
        self.assertEqual(len(self._validate()), 0)

    def _assert_single_error(self, message, location):
        """Assert exactly one error with the given message and location."""
        errs = self._validate()
        self.assertEqual(len(errs), 1)
        self.assertEqual(errs[0].message, message)
        self.assertEqual(errs[0].location, location)

    def test_all_fields(self):
        self._assert_valid()

    def test_only_required_fields(self):
        # Everything except objectId, sha256Hash, md5Hash, authorityInformation is optional.
        for field in ('mimeType', 'size', 'originalPath', 'relativeUri',
                      'objectAssertions', 'otherInformation'):
            del self.object_item[field]
        self._assert_valid()

    def test_all_required_fields_missing(self):
        for field in ('objectId', 'sha256Hash', 'md5Hash', 'authorityInformation'):
            del self.object_item[field]
        self._assert_single_error(
            'required properties [authorityInformation, md5Hash, objectId, sha256Hash] are missing',
            '/objectItems[0]')

    def test_additional_field(self):
        self.object_item['foo'] = 'a'
        self._assert_single_error('additional property foo is not allowed', '/objectItems[0]')

    def test_additional_fields(self):
        self.object_item['foo'] = 'a'
        self.object_item['bar'] = 'a'
        self._assert_single_error('additional properties [bar, foo] are not allowed', '/objectItems[0]')

    def test_object_id_missing(self):
        del self.object_item['objectId']
        self._assert_single_error('required property objectId is missing', '/objectItems[0]')

    def test_object_id_too_short(self):
        self.object_item['objectId'] = ''
        self._assert_single_error(
            "property value '' for objectId property is too short, minimum length 1",
            '/objectItems[0]/objectId')

    def test_object_id_too_long(self):
        self.object_item['objectId'] = 'a' * 257
        self._assert_single_error(
            "property value '{}' for objectId property is too long, maximum length 256".format('a' * 257),
            '/objectItems[0]/objectId')

    def test_mime_type_missing(self):
        del self.object_item['mimeType']
        self._assert_valid()

    def test_mime_type_too_short(self):
        self.object_item['mimeType'] = ''
        self._assert_single_error(
            "property value '' for mimeType property is too short, minimum length 1",
            '/objectItems[0]/mimeType')

    def test_sha256_hash_missing(self):
        del self.object_item['sha256Hash']
        self._assert_single_error('required property sha256Hash is missing', '/objectItems[0]')

    def test_sha256_hash_too_short(self):
        # A 63-char value violates both minLength and the hex pattern.
        value = 'a' * 63
        self.object_item['sha256Hash'] = value
        errs = self._validate()
        self.assertEqual(len(errs), 2)
        self.assertEqual(errs[0].message,
                         "property value '{}' for sha256Hash property is too short, minimum length 64".format(value))
        self.assertEqual(errs[0].location, '/objectItems[0]/sha256Hash')
        self.assertEqual(errs[1].message,
                         "property value '{}' for sha256Hash property does not match the pattern '^[a-fA-F0-9]{{64}}$'".format(value))
        self.assertEqual(errs[1].location, '/objectItems[0]/sha256Hash')

    def test_sha256_hash_too_long(self):
        # A 65-char value violates both maxLength and the hex pattern.
        value = 'a' * 65
        self.object_item['sha256Hash'] = value
        errs = self._validate()
        self.assertEqual(len(errs), 2)
        self.assertEqual(errs[0].message,
                         "property value '{}' for sha256Hash property is too long, maximum length 64".format(value))
        self.assertEqual(errs[0].location, '/objectItems[0]/sha256Hash')
        self.assertEqual(errs[1].message,
                         "property value '{}' for sha256Hash property does not match the pattern '^[a-fA-F0-9]{{64}}$'".format(value))
        self.assertEqual(errs[1].location, '/objectItems[0]/sha256Hash')

    def test_sha256_hash_bad_format(self):
        # Correct length but non-hex characters.
        value = 'z' * 64
        self.object_item['sha256Hash'] = value
        self._assert_single_error(
            "property value '{}' for sha256Hash property does not match the pattern '^[a-fA-F0-9]{{64}}$'".format(value),
            '/objectItems[0]/sha256Hash')

    def test_md5_hash_missing(self):
        del self.object_item['md5Hash']
        self._assert_single_error('required property md5Hash is missing', '/objectItems[0]')

    def test_md5_hash_too_short(self):
        # A 31-char value violates both minLength and the hex pattern.
        value = 'a' * 31
        self.object_item['md5Hash'] = value
        errs = self._validate()
        self.assertEqual(len(errs), 2)
        self.assertEqual(errs[0].message,
                         "property value '{}' for md5Hash property is too short, minimum length 32".format(value))
        self.assertEqual(errs[0].location, '/objectItems[0]/md5Hash')
        self.assertEqual(errs[1].message,
                         "property value '{}' for md5Hash property does not match the pattern '^[a-fA-F0-9]{{32}}$'".format(value))
        self.assertEqual(errs[1].location, '/objectItems[0]/md5Hash')

    def test_md5_hash_too_long(self):
        # A 33-char value violates both maxLength and the hex pattern.
        value = 'a' * 33
        self.object_item['md5Hash'] = value
        errs = self._validate()
        self.assertEqual(len(errs), 2)
        self.assertEqual(errs[0].message,
                         "property value '{}' for md5Hash property is too long, maximum length 32".format(value))
        self.assertEqual(errs[0].location, '/objectItems[0]/md5Hash')
        self.assertEqual(errs[1].message,
                         "property value '{}' for md5Hash property does not match the pattern '^[a-fA-F0-9]{{32}}$'".format(value))
        self.assertEqual(errs[1].location, '/objectItems[0]/md5Hash')

    def test_md5_hash_bad_format(self):
        # Correct length but non-hex characters.
        value = 'z' * 32
        self.object_item['md5Hash'] = value
        self._assert_single_error(
            "property value '{}' for md5Hash property does not match the pattern '^[a-fA-F0-9]{{32}}$'".format(value),
            '/objectItems[0]/md5Hash')

    def test_size_missing(self):
        del self.object_item['size']
        self._assert_valid()

    def test_size_too_small(self):
        self.object_item['size'] = -1
        self._assert_single_error(
            'property value -1 for size property is less than the minimum value of 0',
            '/objectItems[0]/size')

    def test_original_path_missing(self):
        del self.object_item['originalPath']
        self._assert_valid()

    def test_original_path_too_short(self):
        self.object_item['originalPath'] = ''
        self._assert_single_error(
            "property value '' for originalPath property is too short, minimum length 1",
            '/objectItems[0]/originalPath')

    def test_relative_uri_missing(self):
        del self.object_item['relativeUri']
        self._assert_valid()

    def test_relative_uri_too_short(self):
        self.object_item['relativeUri'] = ''
        self._assert_single_error(
            "property value '' for relativeUri property is too short, minimum length 1",
            '/objectItems[0]/relativeUri')

    def test_authority_information_missing(self):
        del self.object_item['authorityInformation']
        self._assert_single_error('required property authorityInformation is missing', '/objectItems[0]')

    def test_object_assertions_missing(self):
        del self.object_item['objectAssertions']
        self._assert_valid()

    def test_other_information_missing(self):
        del self.object_item['otherInformation']
        self._assert_valid()

    def test_other_information_empty_list(self):
        self.object_item['otherInformation'] = []
        self._assert_valid()

    def test_other_information_duplicate_items(self):
        entry = {'key': 'a', 'value': 'a'}
        self.object_item['otherInformation'] = [entry, entry]
        self._assert_single_error(
            'array property otherInformation has duplicate items at index [0, 1]',
            '/objectItems[0]/otherInformation')
class ObjectRelationshipSchemaTests(TestCase):
    """Schema validation tests for entries of the objectRelationships array."""

    def setUp(self):
        """Build a valid two-member relationship inside a minimal TIES document."""
        self.object_relationship = {
            'linkageMemberIds': ['a', 'b'],
            'linkageDirectionality': 'DIRECTED',
            'linkageType': 'a',
            'linkageAssertionId': 'a',
            'otherInformation': [],
        }
        self.object_item = {
            'objectId': 'a',
            'sha256Hash': 'a' * 64,
            'md5Hash': 'a' * 32,
            'authorityInformation': {
                'securityTag': '',
            },
        }
        self.ties = {
            'version': '0.9',
            'securityTag': 'a',
            'objectItems': [self.object_item],
            'objectRelationships': [self.object_relationship],
        }

    def _validate(self):
        """Serialize the current document and return all schema errors."""
        return TiesSchemaValidator().all_errors(json.dumps(self.ties))

    def _assert_valid(self):
        """Assert that the current document produces no schema errors."""
        self.assertEqual(len(self._validate()), 0)

    def _assert_single_error(self, message, location):
        """Assert exactly one error with the given message and location."""
        errs = self._validate()
        self.assertEqual(len(errs), 1)
        self.assertEqual(errs[0].message, message)
        self.assertEqual(errs[0].location, location)

    def test_all_fields(self):
        self._assert_valid()

    def test_only_required_fields(self):
        # Only linkageMemberIds and linkageDirectionality are required.
        for field in ('linkageType', 'linkageAssertionId', 'otherInformation'):
            del self.object_relationship[field]
        self._assert_valid()

    def test_all_required_fields_missing(self):
        del self.object_relationship['linkageMemberIds']
        del self.object_relationship['linkageDirectionality']
        self._assert_single_error(
            'required properties [linkageDirectionality, linkageMemberIds] are missing',
            '/objectRelationships[0]')

    def test_additional_field(self):
        self.object_relationship['foo'] = 'a'
        self._assert_single_error('additional property foo is not allowed', '/objectRelationships[0]')

    def test_additional_fields(self):
        self.object_relationship['foo'] = 'a'
        self.object_relationship['bar'] = 'a'
        self._assert_single_error('additional properties [bar, foo] are not allowed', '/objectRelationships[0]')

    def test_linkage_member_ids_missing(self):
        del self.object_relationship['linkageMemberIds']
        self._assert_single_error('required property linkageMemberIds is missing', '/objectRelationships[0]')

    def test_linkage_member_ids_too_small(self):
        self.object_relationship['linkageMemberIds'] = ['a']
        self._assert_single_error(
            'array property linkageMemberIds with 1 items is too small, minimum size 2',
            '/objectRelationships[0]/linkageMemberIds')

    def test_linkage_member_ids_too_large(self):
        self.object_relationship['linkageMemberIds'] = ['a', 'b', 'c']
        self._assert_single_error(
            'array property linkageMemberIds with 3 items is too large, maximum size 2',
            '/objectRelationships[0]/linkageMemberIds')

    def test_linkage_member_id_too_short(self):
        self.object_relationship['linkageMemberIds'] = ['a', '']
        self._assert_single_error(
            "property value '' for element at index 1 in linkageMemberIds is too short, minimum length 1",
            '/objectRelationships[0]/linkageMemberIds[1]')

    def test_linkage_member_id_too_long(self):
        self.object_relationship['linkageMemberIds'] = ['a', 'a' * 257]
        self._assert_single_error(
            "property value '{}' for element at index 1 in linkageMemberIds is too long, maximum length 256".format('a' * 257),
            '/objectRelationships[0]/linkageMemberIds[1]')

    def test_linkage_directionality_missing(self):
        del self.object_relationship['linkageDirectionality']
        self._assert_single_error('required property linkageDirectionality is missing', '/objectRelationships[0]')

    def test_linkage_directionality_invalid_value(self):
        self.object_relationship['linkageDirectionality'] = 'INVALID'
        self._assert_single_error(
            "enum property linkageDirectionality with value 'INVALID' should have one of the allowed values: [DIRECTED, BIDIRECTED, UNDIRECTED]",
            '/objectRelationships[0]/linkageDirectionality')

    def test_linkage_type_missing(self):
        del self.object_relationship['linkageType']
        self._assert_valid()

    def test_linkage_type_too_short(self):
        self.object_relationship['linkageType'] = ''
        self._assert_single_error(
            "property value '' for linkageType property is too short, minimum length 1",
            '/objectRelationships[0]/linkageType')

    def test_linkage_assertion_id_missing(self):
        del self.object_relationship['linkageAssertionId']
        self._assert_valid()

    def test_linkage_assertion_id_too_short(self):
        self.object_relationship['linkageAssertionId'] = ''
        self._assert_single_error(
            "property value '' for linkageAssertionId property is too short, minimum length 1",
            '/objectRelationships[0]/linkageAssertionId')

    def test_other_information_missing(self):
        del self.object_relationship['otherInformation']
        self._assert_valid()

    def test_other_information_empty_list(self):
        self.object_relationship['otherInformation'] = []
        self._assert_valid()

    def test_other_information_duplicate_items(self):
        entry = {'key': 'a', 'value': 'a'}
        self.object_relationship['otherInformation'] = [entry, entry]
        self._assert_single_error(
            'array property otherInformation has duplicate items at index [0, 1]',
            '/objectRelationships[0]/otherInformation')
class OtherInformationSchemaTests(TestCase):
    """Schema validation tests for otherInformation key/value entries."""

    def setUp(self):
        """Attach one otherInformation entry to a relationship in a minimal document."""
        self.object_item = {
            'objectId': 'a',
            'sha256Hash': 'a' * 64,
            'md5Hash': 'a' * 32,
            'authorityInformation': {
                'securityTag': '',
            },
        }
        self.other_information = {
            'key': 'a',
            'value': 'a',
        }
        self.object_relationship = {
            'linkageMemberIds': ['a', 'b'],
            'linkageDirectionality': 'DIRECTED',
            'otherInformation': [self.other_information],
        }
        self.ties = {
            'version': '0.9',
            'securityTag': 'a',
            'objectItems': [self.object_item],
            'objectRelationships': [self.object_relationship],
        }

    def _validate(self):
        """Serialize the current document and return all schema errors."""
        return TiesSchemaValidator().all_errors(json.dumps(self.ties))

    def _assert_valid(self):
        """Assert that the current document produces no schema errors."""
        self.assertEqual(len(self._validate()), 0)

    def _assert_single_error(self, message, location):
        """Assert exactly one error with the given message and location."""
        errs = self._validate()
        self.assertEqual(len(errs), 1)
        self.assertEqual(errs[0].message, message)
        self.assertEqual(errs[0].location, location)

    def test_all_fields(self):
        self._assert_valid()

    def test_all_required_fields_missing(self):
        del self.other_information['key']
        del self.other_information['value']
        self._assert_single_error(
            'required properties [key, value] are missing',
            '/objectRelationships[0]/otherInformation[0]')

    def test_additional_field(self):
        self.other_information['foo'] = 'a'
        self._assert_single_error(
            'additional property foo is not allowed',
            '/objectRelationships[0]/otherInformation[0]')

    def test_additional_fields(self):
        self.other_information['foo'] = 'a'
        self.other_information['bar'] = 'a'
        self._assert_single_error(
            'additional properties [bar, foo] are not allowed',
            '/objectRelationships[0]/otherInformation[0]')

    def test_key_missing(self):
        del self.other_information['key']
        self._assert_single_error(
            'required property key is missing',
            '/objectRelationships[0]/otherInformation[0]')

    def test_key_too_short(self):
        self.other_information['key'] = ''
        self._assert_single_error(
            "property value '' for key property is too short, minimum length 1",
            '/objectRelationships[0]/otherInformation[0]/key')

    def test_value_missing(self):
        del self.other_information['value']
        self._assert_single_error(
            'required property value is missing',
            '/objectRelationships[0]/otherInformation[0]')

    # Scalar JSON types are all accepted for the value property.
    def test_value_boolean(self):
        self.other_information['value'] = True
        self._assert_valid()

    def test_value_integer(self):
        self.other_information['value'] = 1
        self._assert_valid()

    def test_value_number(self):
        self.other_information['value'] = 1.1
        self._assert_valid()

    def test_value_string(self):
        self.other_information['value'] = ''
        self._assert_valid()

    # Container JSON types are rejected for the value property.
    def test_value_array(self):
        self.other_information['value'] = []
        self._assert_single_error(
            'property type array for property value is not one of the allowed types: [string, boolean, integer, number]',
            '/objectRelationships[0]/otherInformation[0]/value')

    def test_value_object(self):
        self.other_information['value'] = {}
        self._assert_single_error(
            'property type object for property value is not one of the allowed types: [string, boolean, integer, number]',
            '/objectRelationships[0]/otherInformation[0]/value')
class SupplementalDescriptionDataFileSchemaTests(TestCase):
def setUp(self):
    """Build a minimal valid TIES document whose single object item carries
    one data-file style supplemental description."""
    self.supplemental_description = {
        'assertionId': 'a',
        'assertionReferenceId': 'a',
        'assertionReferenceIdLabel': 'a',
        'system': 'a',
        'informationType': 'a',
        'sha256DataHash': 'a' * 64,
        'dataSize': 0,
        'dataRelativeUri': 'a',
        'securityTag': '',
    }
    # The supplemental description is nested under the item's objectAssertions.
    assertions = {'supplementalDescriptions': [self.supplemental_description]}
    self.object_item = {
        'objectId': 'a',
        'sha256Hash': 'a' * 64,
        'md5Hash': 'a' * 32,
        'authorityInformation': {'securityTag': ''},
        'objectAssertions': assertions,
    }
    self.ties = {
        'version': '0.9',
        'securityTag': '',
        'objectItems': [self.object_item],
    }
def test_all_fields(self):
    """The fully-populated fixture validates cleanly."""
    errs = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(len(errs), 0)
def test_only_required_fields(self):
    """All optional fields can be removed without producing errors."""
    for field in ('assertionReferenceId', 'assertionReferenceIdLabel', 'system', 'dataRelativeUri'):
        del self.supplemental_description[field]
    errs = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(len(errs), 0)
def test_all_required_fields_missing(self):
    """Removing every required field yields one oneOf mismatch with three causes."""
    for field in ('assertionId', 'informationType', 'sha256DataHash', 'dataSize', 'securityTag'):
        del self.supplemental_description[field]
    errs = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(len(errs), 1)
    err = errs[0]
    location = '/objectItems[0]/objectAssertions/supplementalDescriptions[0]'
    self.assertEqual(err.message, 'content for array property at index 0 in supplementalDescriptions does not match any of the possible schema definitions')
    self.assertEqual(err.location, location)
    self.assertEqual(len(err.causes), 3)
    expected_messages = [
        'additional property dataRelativeUri is not allowed',
        'required properties [assertionId, dataSize, informationType, securityTag, sha256DataHash] are missing',
        'required properties [assertionId, dataObject, informationType, securityTag] are missing',
    ]
    for cause, message in zip(err.causes, expected_messages):
        self.assertEqual(cause.message, message)
        self.assertEqual(cause.location, location)
        self.assertEqual(len(cause.causes), 0)
def test_additional_field(self):
    """An unknown property makes the entry match neither oneOf alternative."""
    self.supplemental_description['foo'] = 'a'
    errs = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(len(errs), 1)
    err = errs[0]
    location = '/objectItems[0]/objectAssertions/supplementalDescriptions[0]'
    self.assertEqual(err.message, 'content for array property at index 0 in supplementalDescriptions does not match any of the possible schema definitions')
    self.assertEqual(err.location, location)
    self.assertEqual(len(err.causes), 3)
    expected_messages = [
        'additional property foo is not allowed',
        'additional properties [dataRelativeUri, dataSize, foo, sha256DataHash] are not allowed',
        'required property dataObject is missing',
    ]
    for cause, message in zip(err.causes, expected_messages):
        self.assertEqual(cause.message, message)
        self.assertEqual(cause.location, location)
        self.assertEqual(len(cause.causes), 0)
def test_additional_fields(self):
    """Multiple unknown properties make the entry match neither oneOf alternative."""
    self.supplemental_description['foo'] = 'a'
    self.supplemental_description['bar'] = 'a'
    errs = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(len(errs), 1)
    err = errs[0]
    location = '/objectItems[0]/objectAssertions/supplementalDescriptions[0]'
    self.assertEqual(err.message, 'content for array property at index 0 in supplementalDescriptions does not match any of the possible schema definitions')
    self.assertEqual(err.location, location)
    self.assertEqual(len(err.causes), 3)
    expected_messages = [
        'additional properties [bar, foo] are not allowed',
        'additional properties [bar, dataRelativeUri, dataSize, foo, sha256DataHash] are not allowed',
        'required property dataObject is missing',
    ]
    for cause, message in zip(err.causes, expected_messages):
        self.assertEqual(cause.message, message)
        self.assertEqual(cause.location, location)
        self.assertEqual(len(cause.causes), 0)
def test_assertion_id_missing(self):
    """Dropping the required assertionId fails both oneOf alternatives."""
    del self.supplemental_description['assertionId']
    errs = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(len(errs), 1)
    err = errs[0]
    location = '/objectItems[0]/objectAssertions/supplementalDescriptions[0]'
    self.assertEqual(err.message, 'content for array property at index 0 in supplementalDescriptions does not match any of the possible schema definitions')
    self.assertEqual(err.location, location)
    self.assertEqual(len(err.causes), 3)
    expected_messages = [
        'additional properties [dataRelativeUri, dataSize, sha256DataHash] are not allowed',
        'required property assertionId is missing',
        'required properties [assertionId, dataObject] are missing',
    ]
    for cause, message in zip(err.causes, expected_messages):
        self.assertEqual(cause.message, message)
        self.assertEqual(cause.location, location)
        self.assertEqual(len(cause.causes), 0)
def test_assertion_id_null(self):
    """A null assertionId fails both oneOf alternatives with a type-mismatch cause."""
    self.supplemental_description['assertionId'] = None
    errs = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(len(errs), 1)
    err = errs[0]
    location = '/objectItems[0]/objectAssertions/supplementalDescriptions[0]'
    self.assertEqual(err.message, 'content for array property at index 0 in supplementalDescriptions does not match any of the possible schema definitions')
    self.assertEqual(err.location, location)
    self.assertEqual(len(err.causes), 3)
    expected = [
        ('additional properties [dataRelativeUri, dataSize, sha256DataHash] are not allowed', location),
        ('required property dataObject is missing', location),
        ('property assertionId with null value should be of type string', location + '/assertionId'),
    ]
    for cause, (message, cause_location) in zip(err.causes, expected):
        self.assertEqual(cause.message, message)
        self.assertEqual(cause.location, cause_location)
        self.assertEqual(len(cause.causes), 0)
def test_assertion_id_too_short(self):
    """An empty assertionId fails both oneOf alternatives with a minLength cause."""
    self.supplemental_description['assertionId'] = ''
    errs = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(len(errs), 1)
    err = errs[0]
    location = '/objectItems[0]/objectAssertions/supplementalDescriptions[0]'
    self.assertEqual(err.message, 'content for array property at index 0 in supplementalDescriptions does not match any of the possible schema definitions')
    self.assertEqual(err.location, location)
    self.assertEqual(len(err.causes), 3)
    expected = [
        ('additional properties [dataRelativeUri, dataSize, sha256DataHash] are not allowed', location),
        ('required property dataObject is missing', location),
        ("property value '' for assertionId property is too short, minimum length 1", location + '/assertionId'),
    ]
    for cause, (message, cause_location) in zip(err.causes, expected):
        self.assertEqual(cause.message, message)
        self.assertEqual(cause.location, cause_location)
        self.assertEqual(len(cause.causes), 0)
def test_assertion_id_too_long(self):
    """An over-long assertionId fails both oneOf alternatives with a maxLength cause."""
    value = 'a' * 257
    self.supplemental_description['assertionId'] = value
    errs = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(len(errs), 1)
    err = errs[0]
    location = '/objectItems[0]/objectAssertions/supplementalDescriptions[0]'
    self.assertEqual(err.message, 'content for array property at index 0 in supplementalDescriptions does not match any of the possible schema definitions')
    self.assertEqual(err.location, location)
    self.assertEqual(len(err.causes), 3)
    expected = [
        ('additional properties [dataRelativeUri, dataSize, sha256DataHash] are not allowed', location),
        ('required property dataObject is missing', location),
        ("property value '{}' for assertionId property is too long, maximum length 256".format(value), location + '/assertionId'),
    ]
    for cause, (message, cause_location) in zip(err.causes, expected):
        self.assertEqual(cause.message, message)
        self.assertEqual(cause.location, cause_location)
        self.assertEqual(len(cause.causes), 0)
def test_assertion_reference_id_missing(self):
    """assertionReferenceId is optional; removing it keeps the document valid."""
    del self.supplemental_description['assertionReferenceId']
    validation_errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(len(validation_errors), 0)
def test_assertion_reference_id_null(self):
    """A null assertionReferenceId is rejected as a type mismatch (string expected)."""
    self.supplemental_description['assertionReferenceId'] = None
    errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(len(errors), 1)
    sd_location = '/objectItems[0]/objectAssertions/supplementalDescriptions[0]'
    top_error = errors[0]
    self.assertEqual(top_error.message, 'content for array property at index 0 in supplementalDescriptions does not match any of the possible schema definitions')
    self.assertEqual(top_error.location, sd_location)
    expected_causes = [
        ('additional properties [dataRelativeUri, dataSize, sha256DataHash] are not allowed', sd_location),
        ('required property dataObject is missing', sd_location),
        ('property assertionReferenceId with null value should be of type string', sd_location + '/assertionReferenceId'),
    ]
    self.assertEqual(len(top_error.causes), len(expected_causes))
    for cause, (expected_message, expected_location) in zip(top_error.causes, expected_causes):
        self.assertEqual(cause.message, expected_message)
        self.assertEqual(cause.location, expected_location)
        self.assertEqual(len(cause.causes), 0)
def test_assertion_reference_id_too_short(self):
    """An empty assertionReferenceId violates the minimum length of 1."""
    self.supplemental_description['assertionReferenceId'] = ''
    errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(len(errors), 1)
    sd_location = '/objectItems[0]/objectAssertions/supplementalDescriptions[0]'
    top_error = errors[0]
    self.assertEqual(top_error.message, 'content for array property at index 0 in supplementalDescriptions does not match any of the possible schema definitions')
    self.assertEqual(top_error.location, sd_location)
    expected_causes = [
        ('additional properties [dataRelativeUri, dataSize, sha256DataHash] are not allowed', sd_location),
        ('required property dataObject is missing', sd_location),
        ("property value '' for assertionReferenceId property is too short, minimum length 1", sd_location + '/assertionReferenceId'),
    ]
    self.assertEqual(len(top_error.causes), len(expected_causes))
    for cause, (expected_message, expected_location) in zip(top_error.causes, expected_causes):
        self.assertEqual(cause.message, expected_message)
        self.assertEqual(cause.location, expected_location)
        self.assertEqual(len(cause.causes), 0)
def test_assertion_reference_id_label_missing(self):
    """assertionReferenceIdLabel is optional; removing it keeps the document valid."""
    del self.supplemental_description['assertionReferenceIdLabel']
    validation_errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(len(validation_errors), 0)
def test_assertion_reference_id_label_null(self):
    """A null assertionReferenceIdLabel is rejected as a type mismatch (string expected)."""
    self.supplemental_description['assertionReferenceIdLabel'] = None
    errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(len(errors), 1)
    sd_location = '/objectItems[0]/objectAssertions/supplementalDescriptions[0]'
    top_error = errors[0]
    self.assertEqual(top_error.message, 'content for array property at index 0 in supplementalDescriptions does not match any of the possible schema definitions')
    self.assertEqual(top_error.location, sd_location)
    expected_causes = [
        ('additional properties [dataRelativeUri, dataSize, sha256DataHash] are not allowed', sd_location),
        ('required property dataObject is missing', sd_location),
        ('property assertionReferenceIdLabel with null value should be of type string', sd_location + '/assertionReferenceIdLabel'),
    ]
    self.assertEqual(len(top_error.causes), len(expected_causes))
    for cause, (expected_message, expected_location) in zip(top_error.causes, expected_causes):
        self.assertEqual(cause.message, expected_message)
        self.assertEqual(cause.location, expected_location)
        self.assertEqual(len(cause.causes), 0)
def test_assertion_reference_id_label_too_short(self):
    """An empty assertionReferenceIdLabel violates the minimum length of 1."""
    self.supplemental_description['assertionReferenceIdLabel'] = ''
    errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(len(errors), 1)
    sd_location = '/objectItems[0]/objectAssertions/supplementalDescriptions[0]'
    top_error = errors[0]
    self.assertEqual(top_error.message, 'content for array property at index 0 in supplementalDescriptions does not match any of the possible schema definitions')
    self.assertEqual(top_error.location, sd_location)
    expected_causes = [
        ('additional properties [dataRelativeUri, dataSize, sha256DataHash] are not allowed', sd_location),
        ('required property dataObject is missing', sd_location),
        ("property value '' for assertionReferenceIdLabel property is too short, minimum length 1", sd_location + '/assertionReferenceIdLabel'),
    ]
    self.assertEqual(len(top_error.causes), len(expected_causes))
    for cause, (expected_message, expected_location) in zip(top_error.causes, expected_causes):
        self.assertEqual(cause.message, expected_message)
        self.assertEqual(cause.location, expected_location)
        self.assertEqual(len(cause.causes), 0)
def test_system_missing(self):
    """system is optional; removing it keeps the document valid."""
    del self.supplemental_description['system']
    validation_errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(len(validation_errors), 0)
def test_system_null(self):
    """A null system value is rejected as a type mismatch (string expected)."""
    self.supplemental_description['system'] = None
    errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(len(errors), 1)
    sd_location = '/objectItems[0]/objectAssertions/supplementalDescriptions[0]'
    top_error = errors[0]
    self.assertEqual(top_error.message, 'content for array property at index 0 in supplementalDescriptions does not match any of the possible schema definitions')
    self.assertEqual(top_error.location, sd_location)
    expected_causes = [
        ('additional properties [dataRelativeUri, dataSize, sha256DataHash] are not allowed', sd_location),
        ('required property dataObject is missing', sd_location),
        ('property system with null value should be of type string', sd_location + '/system'),
    ]
    self.assertEqual(len(top_error.causes), len(expected_causes))
    for cause, (expected_message, expected_location) in zip(top_error.causes, expected_causes):
        self.assertEqual(cause.message, expected_message)
        self.assertEqual(cause.location, expected_location)
        self.assertEqual(len(cause.causes), 0)
def test_system_too_short(self):
    """An empty system value violates the minimum length of 1."""
    self.supplemental_description['system'] = ''
    errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(len(errors), 1)
    sd_location = '/objectItems[0]/objectAssertions/supplementalDescriptions[0]'
    top_error = errors[0]
    self.assertEqual(top_error.message, 'content for array property at index 0 in supplementalDescriptions does not match any of the possible schema definitions')
    self.assertEqual(top_error.location, sd_location)
    expected_causes = [
        ('additional properties [dataRelativeUri, dataSize, sha256DataHash] are not allowed', sd_location),
        ('required property dataObject is missing', sd_location),
        ("property value '' for system property is too short, minimum length 1", sd_location + '/system'),
    ]
    self.assertEqual(len(top_error.causes), len(expected_causes))
    for cause, (expected_message, expected_location) in zip(top_error.causes, expected_causes):
        self.assertEqual(cause.message, expected_message)
        self.assertEqual(cause.location, expected_location)
        self.assertEqual(len(cause.causes), 0)
def test_information_type_missing(self):
    """informationType is required by every candidate schema, so removing it fails all of them."""
    del self.supplemental_description['informationType']
    errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(len(errors), 1)
    sd_location = '/objectItems[0]/objectAssertions/supplementalDescriptions[0]'
    top_error = errors[0]
    self.assertEqual(top_error.message, 'content for array property at index 0 in supplementalDescriptions does not match any of the possible schema definitions')
    self.assertEqual(top_error.location, sd_location)
    # Every cause is reported against the supplemental description itself.
    expected_causes = [
        ('additional properties [dataRelativeUri, dataSize, sha256DataHash] are not allowed', sd_location),
        ('required property informationType is missing', sd_location),
        ('required properties [dataObject, informationType] are missing', sd_location),
    ]
    self.assertEqual(len(top_error.causes), len(expected_causes))
    for cause, (expected_message, expected_location) in zip(top_error.causes, expected_causes):
        self.assertEqual(cause.message, expected_message)
        self.assertEqual(cause.location, expected_location)
        self.assertEqual(len(cause.causes), 0)
def test_information_type_null(self):
    """A null informationType is rejected as a type mismatch (string expected)."""
    self.supplemental_description['informationType'] = None
    errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(len(errors), 1)
    sd_location = '/objectItems[0]/objectAssertions/supplementalDescriptions[0]'
    top_error = errors[0]
    self.assertEqual(top_error.message, 'content for array property at index 0 in supplementalDescriptions does not match any of the possible schema definitions')
    self.assertEqual(top_error.location, sd_location)
    expected_causes = [
        ('additional properties [dataRelativeUri, dataSize, sha256DataHash] are not allowed', sd_location),
        ('required property dataObject is missing', sd_location),
        ('property informationType with null value should be of type string', sd_location + '/informationType'),
    ]
    self.assertEqual(len(top_error.causes), len(expected_causes))
    for cause, (expected_message, expected_location) in zip(top_error.causes, expected_causes):
        self.assertEqual(cause.message, expected_message)
        self.assertEqual(cause.location, expected_location)
        self.assertEqual(len(cause.causes), 0)
def test_information_type_too_short(self):
    """An empty informationType violates the minimum length of 1."""
    self.supplemental_description['informationType'] = ''
    errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(len(errors), 1)
    sd_location = '/objectItems[0]/objectAssertions/supplementalDescriptions[0]'
    top_error = errors[0]
    self.assertEqual(top_error.message, 'content for array property at index 0 in supplementalDescriptions does not match any of the possible schema definitions')
    self.assertEqual(top_error.location, sd_location)
    expected_causes = [
        ('additional properties [dataRelativeUri, dataSize, sha256DataHash] are not allowed', sd_location),
        ('required property dataObject is missing', sd_location),
        ("property value '' for informationType property is too short, minimum length 1", sd_location + '/informationType'),
    ]
    self.assertEqual(len(top_error.causes), len(expected_causes))
    for cause, (expected_message, expected_location) in zip(top_error.causes, expected_causes):
        self.assertEqual(cause.message, expected_message)
        self.assertEqual(cause.location, expected_location)
        self.assertEqual(len(cause.causes), 0)
def test_sha256_data_hash_missing(self):
    """Removing sha256DataHash fails both candidate schemas (it is required by one, extra for the other)."""
    del self.supplemental_description['sha256DataHash']
    errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(len(errors), 1)
    sd_location = '/objectItems[0]/objectAssertions/supplementalDescriptions[0]'
    top_error = errors[0]
    self.assertEqual(top_error.message, 'content for array property at index 0 in supplementalDescriptions does not match any of the possible schema definitions')
    self.assertEqual(top_error.location, sd_location)
    # sha256DataHash no longer appears in the additional-properties list because it was deleted.
    expected_causes = [
        ('additional properties [dataRelativeUri, dataSize] are not allowed', sd_location),
        ('required property sha256DataHash is missing', sd_location),
        ('required property dataObject is missing', sd_location),
    ]
    self.assertEqual(len(top_error.causes), len(expected_causes))
    for cause, (expected_message, expected_location) in zip(top_error.causes, expected_causes):
        self.assertEqual(cause.message, expected_message)
        self.assertEqual(cause.location, expected_location)
        self.assertEqual(len(cause.causes), 0)
def test_sha256_data_hash_null(self):
    """A null sha256DataHash is rejected as a type mismatch (string expected)."""
    self.supplemental_description['sha256DataHash'] = None
    errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(len(errors), 1)
    sd_location = '/objectItems[0]/objectAssertions/supplementalDescriptions[0]'
    top_error = errors[0]
    self.assertEqual(top_error.message, 'content for array property at index 0 in supplementalDescriptions does not match any of the possible schema definitions')
    self.assertEqual(top_error.location, sd_location)
    expected_causes = [
        ('additional properties [dataRelativeUri, dataSize, sha256DataHash] are not allowed', sd_location),
        ('required property dataObject is missing', sd_location),
        ('property sha256DataHash with null value should be of type string', sd_location + '/sha256DataHash'),
    ]
    self.assertEqual(len(top_error.causes), len(expected_causes))
    for cause, (expected_message, expected_location) in zip(top_error.causes, expected_causes):
        self.assertEqual(cause.message, expected_message)
        self.assertEqual(cause.location, expected_location)
        self.assertEqual(len(cause.causes), 0)
def test_sha256_data_hash_too_short(self):
    """A 63-character hash triggers both the minimum-length and pattern checks."""
    bad_hash = 'a' * 63
    self.supplemental_description['sha256DataHash'] = bad_hash
    errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(len(errors), 1)
    sd_location = '/objectItems[0]/objectAssertions/supplementalDescriptions[0]'
    top_error = errors[0]
    self.assertEqual(top_error.message, 'content for array property at index 0 in supplementalDescriptions does not match any of the possible schema definitions')
    self.assertEqual(top_error.location, sd_location)
    expected_causes = [
        ('additional properties [dataRelativeUri, dataSize, sha256DataHash] are not allowed', sd_location),
        ('required property dataObject is missing', sd_location),
        ("property value '" + bad_hash + "' for sha256DataHash property is too short, minimum length 64", sd_location + '/sha256DataHash'),
        ("property value '" + bad_hash + "' for sha256DataHash property does not match the pattern '^[a-fA-F0-9]{64}$'", sd_location + '/sha256DataHash'),
    ]
    self.assertEqual(len(top_error.causes), len(expected_causes))
    for cause, (expected_message, expected_location) in zip(top_error.causes, expected_causes):
        self.assertEqual(cause.message, expected_message)
        self.assertEqual(cause.location, expected_location)
        self.assertEqual(len(cause.causes), 0)
def test_sha256_data_hash_too_long(self):
    """A 65-character hash triggers both the maximum-length and pattern checks."""
    bad_hash = 'a' * 65
    self.supplemental_description['sha256DataHash'] = bad_hash
    errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(len(errors), 1)
    sd_location = '/objectItems[0]/objectAssertions/supplementalDescriptions[0]'
    top_error = errors[0]
    self.assertEqual(top_error.message, 'content for array property at index 0 in supplementalDescriptions does not match any of the possible schema definitions')
    self.assertEqual(top_error.location, sd_location)
    expected_causes = [
        ('additional properties [dataRelativeUri, dataSize, sha256DataHash] are not allowed', sd_location),
        ('required property dataObject is missing', sd_location),
        ("property value '" + bad_hash + "' for sha256DataHash property is too long, maximum length 64", sd_location + '/sha256DataHash'),
        ("property value '" + bad_hash + "' for sha256DataHash property does not match the pattern '^[a-fA-F0-9]{64}$'", sd_location + '/sha256DataHash'),
    ]
    self.assertEqual(len(top_error.causes), len(expected_causes))
    for cause, (expected_message, expected_location) in zip(top_error.causes, expected_causes):
        self.assertEqual(cause.message, expected_message)
        self.assertEqual(cause.location, expected_location)
        self.assertEqual(len(cause.causes), 0)
def test_sha256_hash_bad_format(self):
    """A correctly sized hash made of non-hex characters fails only the pattern check."""
    bad_hash = 'z' * 64
    self.supplemental_description['sha256DataHash'] = bad_hash
    errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(len(errors), 1)
    sd_location = '/objectItems[0]/objectAssertions/supplementalDescriptions[0]'
    top_error = errors[0]
    self.assertEqual(top_error.message, 'content for array property at index 0 in supplementalDescriptions does not match any of the possible schema definitions')
    self.assertEqual(top_error.location, sd_location)
    expected_causes = [
        ('additional properties [dataRelativeUri, dataSize, sha256DataHash] are not allowed', sd_location),
        ('required property dataObject is missing', sd_location),
        ("property value '" + bad_hash + "' for sha256DataHash property does not match the pattern '^[a-fA-F0-9]{64}$'", sd_location + '/sha256DataHash'),
    ]
    self.assertEqual(len(top_error.causes), len(expected_causes))
    for cause, (expected_message, expected_location) in zip(top_error.causes, expected_causes):
        self.assertEqual(cause.message, expected_message)
        self.assertEqual(cause.location, expected_location)
        self.assertEqual(len(cause.causes), 0)
def test_data_size_missing(self):
    """Removing dataSize fails both candidate schemas (required by one, extra for the other)."""
    del self.supplemental_description['dataSize']
    errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(len(errors), 1)
    sd_location = '/objectItems[0]/objectAssertions/supplementalDescriptions[0]'
    top_error = errors[0]
    self.assertEqual(top_error.message, 'content for array property at index 0 in supplementalDescriptions does not match any of the possible schema definitions')
    self.assertEqual(top_error.location, sd_location)
    # dataSize no longer appears in the additional-properties list because it was deleted.
    expected_causes = [
        ('additional properties [dataRelativeUri, sha256DataHash] are not allowed', sd_location),
        ('required property dataSize is missing', sd_location),
        ('required property dataObject is missing', sd_location),
    ]
    self.assertEqual(len(top_error.causes), len(expected_causes))
    for cause, (expected_message, expected_location) in zip(top_error.causes, expected_causes):
        self.assertEqual(cause.message, expected_message)
        self.assertEqual(cause.location, expected_location)
        self.assertEqual(len(cause.causes), 0)
def test_data_size_null(self):
    """A null dataSize is rejected as a type mismatch (integer expected)."""
    self.supplemental_description['dataSize'] = None
    errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(len(errors), 1)
    sd_location = '/objectItems[0]/objectAssertions/supplementalDescriptions[0]'
    top_error = errors[0]
    self.assertEqual(top_error.message, 'content for array property at index 0 in supplementalDescriptions does not match any of the possible schema definitions')
    self.assertEqual(top_error.location, sd_location)
    expected_causes = [
        ('additional properties [dataRelativeUri, dataSize, sha256DataHash] are not allowed', sd_location),
        ('required property dataObject is missing', sd_location),
        ('property dataSize with null value should be of type integer', sd_location + '/dataSize'),
    ]
    self.assertEqual(len(top_error.causes), len(expected_causes))
    for cause, (expected_message, expected_location) in zip(top_error.causes, expected_causes):
        self.assertEqual(cause.message, expected_message)
        self.assertEqual(cause.location, expected_location)
        self.assertEqual(len(cause.causes), 0)
def test_data_size_too_small(self):
    """A negative dataSize violates the minimum value of 0."""
    self.supplemental_description['dataSize'] = -1
    errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(len(errors), 1)
    sd_location = '/objectItems[0]/objectAssertions/supplementalDescriptions[0]'
    top_error = errors[0]
    self.assertEqual(top_error.message, 'content for array property at index 0 in supplementalDescriptions does not match any of the possible schema definitions')
    self.assertEqual(top_error.location, sd_location)
    expected_causes = [
        ('additional properties [dataRelativeUri, dataSize, sha256DataHash] are not allowed', sd_location),
        ('required property dataObject is missing', sd_location),
        ('property value -1 for dataSize property is less than the minimum value of 0', sd_location + '/dataSize'),
    ]
    self.assertEqual(len(top_error.causes), len(expected_causes))
    for cause, (expected_message, expected_location) in zip(top_error.causes, expected_causes):
        self.assertEqual(cause.message, expected_message)
        self.assertEqual(cause.location, expected_location)
        self.assertEqual(len(cause.causes), 0)
def test_data_relative_uri_missing(self):
    """dataRelativeUri is optional; removing it keeps the document valid."""
    del self.supplemental_description['dataRelativeUri']
    validation_errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(len(validation_errors), 0)
def test_data_relative_uri_null(self):
    """A null dataRelativeUri is rejected as a type mismatch (string expected)."""
    self.supplemental_description['dataRelativeUri'] = None
    errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(len(errors), 1)
    sd_location = '/objectItems[0]/objectAssertions/supplementalDescriptions[0]'
    top_error = errors[0]
    self.assertEqual(top_error.message, 'content for array property at index 0 in supplementalDescriptions does not match any of the possible schema definitions')
    self.assertEqual(top_error.location, sd_location)
    expected_causes = [
        ('additional properties [dataRelativeUri, dataSize, sha256DataHash] are not allowed', sd_location),
        ('required property dataObject is missing', sd_location),
        ('property dataRelativeUri with null value should be of type string', sd_location + '/dataRelativeUri'),
    ]
    self.assertEqual(len(top_error.causes), len(expected_causes))
    for cause, (expected_message, expected_location) in zip(top_error.causes, expected_causes):
        self.assertEqual(cause.message, expected_message)
        self.assertEqual(cause.location, expected_location)
        self.assertEqual(len(cause.causes), 0)
def test_data_relative_uri_too_short(self):
    """An empty dataRelativeUri violates the minimum length of 1."""
    self.supplemental_description['dataRelativeUri'] = ''
    errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(len(errors), 1)
    sd_location = '/objectItems[0]/objectAssertions/supplementalDescriptions[0]'
    top_error = errors[0]
    self.assertEqual(top_error.message, 'content for array property at index 0 in supplementalDescriptions does not match any of the possible schema definitions')
    self.assertEqual(top_error.location, sd_location)
    expected_causes = [
        ('additional properties [dataRelativeUri, dataSize, sha256DataHash] are not allowed', sd_location),
        ('required property dataObject is missing', sd_location),
        ("property value '' for dataRelativeUri property is too short, minimum length 1", sd_location + '/dataRelativeUri'),
    ]
    self.assertEqual(len(top_error.causes), len(expected_causes))
    for cause, (expected_message, expected_location) in zip(top_error.causes, expected_causes):
        self.assertEqual(cause.message, expected_message)
        self.assertEqual(cause.location, expected_location)
        self.assertEqual(len(cause.causes), 0)
def test_security_tag_missing(self):
    """securityTag is required by every candidate schema, so removing it fails all of them."""
    del self.supplemental_description['securityTag']
    errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(len(errors), 1)
    sd_location = '/objectItems[0]/objectAssertions/supplementalDescriptions[0]'
    top_error = errors[0]
    self.assertEqual(top_error.message, 'content for array property at index 0 in supplementalDescriptions does not match any of the possible schema definitions')
    self.assertEqual(top_error.location, sd_location)
    expected_causes = [
        ('additional properties [dataRelativeUri, dataSize, sha256DataHash] are not allowed', sd_location),
        ('required property securityTag is missing', sd_location),
        ('required properties [dataObject, securityTag] are missing', sd_location),
    ]
    self.assertEqual(len(top_error.causes), len(expected_causes))
    for cause, (expected_message, expected_location) in zip(top_error.causes, expected_causes):
        self.assertEqual(cause.message, expected_message)
        self.assertEqual(cause.location, expected_location)
        self.assertEqual(len(cause.causes), 0)
def test_security_tag_null(self):
    """A null securityTag is rejected as a type mismatch (string expected)."""
    self.supplemental_description['securityTag'] = None
    errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(len(errors), 1)
    sd_location = '/objectItems[0]/objectAssertions/supplementalDescriptions[0]'
    top_error = errors[0]
    self.assertEqual(top_error.message, 'content for array property at index 0 in supplementalDescriptions does not match any of the possible schema definitions')
    self.assertEqual(top_error.location, sd_location)
    expected_causes = [
        ('additional properties [dataRelativeUri, dataSize, sha256DataHash] are not allowed', sd_location),
        ('required property dataObject is missing', sd_location),
        ('property securityTag with null value should be of type string', sd_location + '/securityTag'),
    ]
    self.assertEqual(len(top_error.causes), len(expected_causes))
    for cause, (expected_message, expected_location) in zip(top_error.causes, expected_causes):
        self.assertEqual(cause.message, expected_message)
        self.assertEqual(cause.location, expected_location)
        self.assertEqual(len(cause.causes), 0)
def test_security_tag_empty_string(self):
    """An empty securityTag is explicitly permitted by the schema."""
    self.supplemental_description['securityTag'] = ''
    validation_errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(len(validation_errors), 0)
class SupplementalDescriptionDataObjectSchemaTests(TestCase):
def setUp(self):
    """Build a minimal valid TIES document with one object item whose single
    supplemental description carries an embedded dataObject.

    Key insertion order matches the serialized form the tests were written
    against; individual tests mutate these dicts in place before validating.
    """
    supplemental_description = {
        'assertionId': 'a',
        'assertionReferenceId': 'a',
        'assertionReferenceIdLabel': 'a',
        'system': 'a',
        'informationType': 'a',
        'dataObject': {},
        'securityTag': '',
    }
    object_item = {
        'objectId': 'a',
        'sha256Hash': 'a' * 64,
        'md5Hash': 'a' * 32,
        'authorityInformation': {'securityTag': ''},
        'objectAssertions': {'supplementalDescriptions': [supplemental_description]},
    }
    # Expose the nested pieces so tests can reach them without re-traversal.
    self.supplemental_description = supplemental_description
    self.object_item = object_item
    self.ties = {
        'version': '0.9',
        'securityTag': '',
        'objectItems': [object_item],
    }
def test_all_fields(self):
    """The fully populated fixture validates with no errors."""
    ties_json = json.dumps(self.ties)
    self.assertEqual(len(TiesSchemaValidator().all_errors(ties_json)), 0)
def test_only_required_fields(self):
    """Stripping every optional property still yields a valid document."""
    for optional_property in ('assertionReferenceId', 'assertionReferenceIdLabel', 'system'):
        del self.supplemental_description[optional_property]
    validation_errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(len(validation_errors), 0)
def test_all_required_fields_missing(self):
    """Deleting every required property reports the missing set for each candidate schema."""
    for required_property in ('assertionId', 'informationType', 'dataObject', 'securityTag'):
        del self.supplemental_description[required_property]
    errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(len(errors), 1)
    sd_location = '/objectItems[0]/objectAssertions/supplementalDescriptions[0]'
    top_error = errors[0]
    self.assertEqual(top_error.message, 'content for array property at index 0 in supplementalDescriptions does not match any of the possible schema definitions')
    self.assertEqual(top_error.location, sd_location)
    # One cause per candidate schema, each listing that schema's missing properties.
    expected_causes = [
        ('required properties [assertionId, dataSize, informationType, securityTag, sha256DataHash] are missing', sd_location),
        ('required properties [assertionId, dataObject, informationType, securityTag] are missing', sd_location),
    ]
    self.assertEqual(len(top_error.causes), len(expected_causes))
    for cause, (expected_message, expected_location) in zip(top_error.causes, expected_causes):
        self.assertEqual(cause.message, expected_message)
        self.assertEqual(cause.location, expected_location)
        self.assertEqual(len(cause.causes), 0)
def test_additional_field(self):
self.supplemental_description['foo'] = 'a'
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].message, 'content for array property at index 0 in supplementalDescriptions does not match any of the possible schema definitions')
self.assertEqual(errors[0].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes), 3)
self.assertEqual(errors[0].causes[0].message, 'additional properties [dataObject, foo] are not allowed')
self.assertEqual(errors[0].causes[0].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes[0].causes), 0)
self.assertEqual(errors[0].causes[1].message, 'additional property foo is not allowed')
self.assertEqual(errors[0].causes[1].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes[1].causes), 0)
self.assertEqual(errors[0].causes[2].message, 'required properties [dataSize, sha256DataHash] are missing')
self.assertEqual(errors[0].causes[2].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes[2].causes), 0)
def test_additional_fields(self):
self.supplemental_description['foo'] = 'a'
self.supplemental_description['bar'] = 'a'
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].message, 'content for array property at index 0 in supplementalDescriptions does not match any of the possible schema definitions')
self.assertEqual(errors[0].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes), 3)
self.assertEqual(errors[0].causes[0].message, 'additional properties [bar, dataObject, foo] are not allowed')
self.assertEqual(errors[0].causes[0].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes[0].causes), 0)
self.assertEqual(errors[0].causes[1].message, 'additional properties [bar, foo] are not allowed')
self.assertEqual(errors[0].causes[1].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes[1].causes), 0)
self.assertEqual(errors[0].causes[2].message, 'required properties [dataSize, sha256DataHash] are missing')
self.assertEqual(errors[0].causes[2].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes[2].causes), 0)
def test_assertion_id_missing(self):
del self.supplemental_description['assertionId']
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].message, 'content for array property at index 0 in supplementalDescriptions does not match any of the possible schema definitions')
self.assertEqual(errors[0].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes), 3)
self.assertEqual(errors[0].causes[0].message, 'additional property dataObject is not allowed')
self.assertEqual(errors[0].causes[0].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes[0].causes), 0)
self.assertEqual(errors[0].causes[1].message, 'required properties [assertionId, dataSize, sha256DataHash] are missing')
self.assertEqual(errors[0].causes[1].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes[1].causes), 0)
self.assertEqual(errors[0].causes[2].message, 'required property assertionId is missing')
self.assertEqual(errors[0].causes[2].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes[2].causes), 0)
def test_assertion_id_null(self):
self.supplemental_description['assertionId'] = None
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].message, 'content for array property at index 0 in supplementalDescriptions does not match any of the possible schema definitions')
self.assertEqual(errors[0].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes), 3)
self.assertEqual(errors[0].causes[0].message, 'additional property dataObject is not allowed')
self.assertEqual(errors[0].causes[0].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes[0].causes), 0)
self.assertEqual(errors[0].causes[1].message, 'required properties [dataSize, sha256DataHash] are missing')
self.assertEqual(errors[0].causes[1].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes[1].causes), 0)
self.assertEqual(errors[0].causes[2].message, 'property assertionId with null value should be of type string')
self.assertEqual(errors[0].causes[2].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]/assertionId')
self.assertEqual(len(errors[0].causes[2].causes), 0)
def test_assertion_id_too_short(self):
self.supplemental_description['assertionId'] = ''
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].message, 'content for array property at index 0 in supplementalDescriptions does not match any of the possible schema definitions')
self.assertEqual(errors[0].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes), 3)
self.assertEqual(errors[0].causes[0].message, 'additional property dataObject is not allowed')
self.assertEqual(errors[0].causes[0].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes[0].causes), 0)
self.assertEqual(errors[0].causes[1].message, 'required properties [dataSize, sha256DataHash] are missing')
self.assertEqual(errors[0].causes[1].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes[1].causes), 0)
self.assertEqual(errors[0].causes[2].message, "property value '' for assertionId property is too short, minimum length 1")
self.assertEqual(errors[0].causes[2].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]/assertionId')
self.assertEqual(len(errors[0].causes[2].causes), 0)
def test_assertion_id_too_long(self):
self.supplemental_description['assertionId'] = 'a' * 257
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].message, 'content for array property at index 0 in supplementalDescriptions does not match any of the possible schema definitions')
self.assertEqual(errors[0].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes), 3)
self.assertEqual(errors[0].causes[0].message, 'additional property dataObject is not allowed')
self.assertEqual(errors[0].causes[0].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes[0].causes), 0)
self.assertEqual(errors[0].causes[1].message, 'required properties [dataSize, sha256DataHash] are missing')
self.assertEqual(errors[0].causes[1].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes[1].causes), 0)
self.assertEqual(errors[0].causes[2].message, "property value '{}' for assertionId property is too long, maximum length 256".format('a' * 257))
self.assertEqual(errors[0].causes[2].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]/assertionId')
self.assertEqual(len(errors[0].causes[2].causes), 0)
def test_assertion_reference_id_missing(self):
del self.supplemental_description['assertionReferenceId']
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 0)
def test_assertion_reference_id_null(self):
self.supplemental_description['assertionReferenceId'] = None
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].message, 'content for array property at index 0 in supplementalDescriptions does not match any of the possible schema definitions')
self.assertEqual(errors[0].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes), 3)
self.assertEqual(errors[0].causes[0].message, 'additional property dataObject is not allowed')
self.assertEqual(errors[0].causes[0].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes[0].causes), 0)
self.assertEqual(errors[0].causes[1].message, 'required properties [dataSize, sha256DataHash] are missing')
self.assertEqual(errors[0].causes[1].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes[1].causes), 0)
self.assertEqual(errors[0].causes[2].message, 'property assertionReferenceId with null value should be of type string')
self.assertEqual(errors[0].causes[2].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]/assertionReferenceId')
self.assertEqual(len(errors[0].causes[2].causes), 0)
def test_assertion_reference_id_too_short(self):
self.supplemental_description['assertionReferenceId'] = ''
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].message, 'content for array property at index 0 in supplementalDescriptions does not match any of the possible schema definitions')
self.assertEqual(errors[0].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes), 3)
self.assertEqual(errors[0].causes[0].message, 'additional property dataObject is not allowed')
self.assertEqual(errors[0].causes[0].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes[0].causes), 0)
self.assertEqual(errors[0].causes[1].message, 'required properties [dataSize, sha256DataHash] are missing')
self.assertEqual(errors[0].causes[1].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes[1].causes), 0)
self.assertEqual(errors[0].causes[2].message, "property value '' for assertionReferenceId property is too short, minimum length 1")
self.assertEqual(errors[0].causes[2].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]/assertionReferenceId')
self.assertEqual(len(errors[0].causes[2].causes), 0)
def test_assertion_reference_id_label_missing(self):
del self.supplemental_description['assertionReferenceIdLabel']
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 0)
def test_assertion_reference_id_label_null(self):
self.supplemental_description['assertionReferenceIdLabel'] = None
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].message, 'content for array property at index 0 in supplementalDescriptions does not match any of the possible schema definitions')
self.assertEqual(errors[0].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes), 3)
self.assertEqual(errors[0].causes[0].message, 'additional property dataObject is not allowed')
self.assertEqual(errors[0].causes[0].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes[0].causes), 0)
self.assertEqual(errors[0].causes[1].message, 'required properties [dataSize, sha256DataHash] are missing')
self.assertEqual(errors[0].causes[1].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes[1].causes), 0)
self.assertEqual(errors[0].causes[2].message, 'property assertionReferenceIdLabel with null value should be of type string')
self.assertEqual(errors[0].causes[2].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]/assertionReferenceIdLabel')
self.assertEqual(len(errors[0].causes[2].causes), 0)
def test_assertion_reference_id_label_too_short(self):
self.supplemental_description['assertionReferenceIdLabel'] = ''
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].message, 'content for array property at index 0 in supplementalDescriptions does not match any of the possible schema definitions')
self.assertEqual(errors[0].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes), 3)
self.assertEqual(errors[0].causes[0].message, 'additional property dataObject is not allowed')
self.assertEqual(errors[0].causes[0].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes[0].causes), 0)
self.assertEqual(errors[0].causes[1].message, 'required properties [dataSize, sha256DataHash] are missing')
self.assertEqual(errors[0].causes[1].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes[1].causes), 0)
self.assertEqual(errors[0].causes[2].message, "property value '' for assertionReferenceIdLabel property is too short, minimum length 1")
self.assertEqual(errors[0].causes[2].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]/assertionReferenceIdLabel')
self.assertEqual(len(errors[0].causes[2].causes), 0)
def test_system_missing(self):
del self.supplemental_description['system']
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 0)
def test_system_null(self):
self.supplemental_description['system'] = None
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].message, 'content for array property at index 0 in supplementalDescriptions does not match any of the possible schema definitions')
self.assertEqual(errors[0].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes), 3)
self.assertEqual(errors[0].causes[0].message, 'additional property dataObject is not allowed')
self.assertEqual(errors[0].causes[0].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes[0].causes), 0)
self.assertEqual(errors[0].causes[1].message, 'required properties [dataSize, sha256DataHash] are missing')
self.assertEqual(errors[0].causes[1].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes[1].causes), 0)
self.assertEqual(errors[0].causes[2].message, 'property system with null value should be of type string')
self.assertEqual(errors[0].causes[2].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]/system')
self.assertEqual(len(errors[0].causes[2].causes), 0)
def test_system_too_short(self):
self.supplemental_description['system'] = ''
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].message, 'content for array property at index 0 in supplementalDescriptions does not match any of the possible schema definitions')
self.assertEqual(errors[0].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes), 3)
self.assertEqual(errors[0].causes[0].message, 'additional property dataObject is not allowed')
self.assertEqual(errors[0].causes[0].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes[0].causes), 0)
self.assertEqual(errors[0].causes[1].message, 'required properties [dataSize, sha256DataHash] are missing')
self.assertEqual(errors[0].causes[1].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes[1].causes), 0)
self.assertEqual(errors[0].causes[2].message, "property value '' for system property is too short, minimum length 1")
self.assertEqual(errors[0].causes[2].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]/system')
self.assertEqual(len(errors[0].causes[2].causes), 0)
def test_information_type_missing(self):
del self.supplemental_description['informationType']
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].message, 'content for array property at index 0 in supplementalDescriptions does not match any of the possible schema definitions')
self.assertEqual(errors[0].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes), 3)
self.assertEqual(errors[0].causes[0].message, 'additional property dataObject is not allowed')
self.assertEqual(errors[0].causes[0].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes[0].causes), 0)
self.assertEqual(errors[0].causes[1].message, 'required properties [dataSize, informationType, sha256DataHash] are missing')
self.assertEqual(errors[0].causes[1].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes[1].causes), 0)
self.assertEqual(errors[0].causes[2].message, 'required property informationType is missing')
self.assertEqual(errors[0].causes[2].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes[2].causes), 0)
def test_information_type_null(self):
self.supplemental_description['informationType'] = None
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].message, 'content for array property at index 0 in supplementalDescriptions does not match any of the possible schema definitions')
self.assertEqual(errors[0].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes), 3)
self.assertEqual(errors[0].causes[0].message, 'additional property dataObject is not allowed')
self.assertEqual(errors[0].causes[0].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes[0].causes), 0)
self.assertEqual(errors[0].causes[1].message, 'required properties [dataSize, sha256DataHash] are missing')
self.assertEqual(errors[0].causes[1].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes[1].causes), 0)
self.assertEqual(errors[0].causes[2].message, 'property informationType with null value should be of type string')
self.assertEqual(errors[0].causes[2].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]/informationType')
self.assertEqual(len(errors[0].causes[2].causes), 0)
def test_information_type_too_short(self):
self.supplemental_description['informationType'] = ''
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].message, 'content for array property at index 0 in supplementalDescriptions does not match any of the possible schema definitions')
self.assertEqual(errors[0].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes), 3)
self.assertEqual(errors[0].causes[0].message, 'additional property dataObject is not allowed')
self.assertEqual(errors[0].causes[0].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes[0].causes), 0)
self.assertEqual(errors[0].causes[1].message, 'required properties [dataSize, sha256DataHash] are missing')
self.assertEqual(errors[0].causes[1].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes[1].causes), 0)
self.assertEqual(errors[0].causes[2].message, "property value '' for informationType property is too short, minimum length 1")
self.assertEqual(errors[0].causes[2].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]/informationType')
self.assertEqual(len(errors[0].causes[2].causes), 0)
def test_data_object_missing(self):
del self.supplemental_description['dataObject']
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].message, 'content for array property at index 0 in supplementalDescriptions does not match any of the possible schema definitions')
self.assertEqual(errors[0].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes), 2)
self.assertEqual(errors[0].causes[0].message, 'required properties [dataSize, sha256DataHash] are missing')
self.assertEqual(errors[0].causes[0].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes[0].causes), 0)
self.assertEqual(errors[0].causes[1].message, 'required property dataObject is missing')
self.assertEqual(errors[0].causes[1].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes[1].causes), 0)
def test_data_object_null(self):
self.supplemental_description['dataObject'] = None
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].message, 'content for array property at index 0 in supplementalDescriptions does not match any of the possible schema definitions')
self.assertEqual(errors[0].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes), 3)
self.assertEqual(errors[0].causes[0].message, 'additional property dataObject is not allowed')
self.assertEqual(errors[0].causes[0].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes[0].causes), 0)
self.assertEqual(errors[0].causes[1].message, 'required properties [dataSize, sha256DataHash] are missing')
self.assertEqual(errors[0].causes[1].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes[1].causes), 0)
self.assertEqual(errors[0].causes[2].message, 'property dataObject with null value should be of type object')
self.assertEqual(errors[0].causes[2].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]/dataObject')
self.assertEqual(len(errors[0].causes[2].causes), 0)
def test_data_object(self):
self.supplemental_description['dataObject'] = {
'string': '',
'int': 1,
'number': 1.1,
'object': {},
'array': [],
'boolean': True,
'null': None,
}
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 0)
def test_security_tag_missing(self):
del self.supplemental_description['securityTag']
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].message, 'content for array property at index 0 in supplementalDescriptions does not match any of the possible schema definitions')
self.assertEqual(errors[0].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes), 3)
self.assertEqual(errors[0].causes[0].message, 'additional property dataObject is not allowed')
self.assertEqual(errors[0].causes[0].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes[0].causes), 0)
self.assertEqual(errors[0].causes[1].message, 'required properties [dataSize, securityTag, sha256DataHash] are missing')
self.assertEqual(errors[0].causes[1].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes[1].causes), 0)
self.assertEqual(errors[0].causes[2].message, 'required property securityTag is missing')
self.assertEqual(errors[0].causes[2].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes[2].causes), 0)
def test_security_tag_null(self):
self.supplemental_description['securityTag'] = None
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].message, 'content for array property at index 0 in supplementalDescriptions does not match any of the possible schema definitions')
self.assertEqual(errors[0].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes), 3)
self.assertEqual(errors[0].causes[0].message, 'additional property dataObject is not allowed')
self.assertEqual(errors[0].causes[0].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes[0].causes), 0)
self.assertEqual(errors[0].causes[1].message, 'required properties [dataSize, sha256DataHash] are missing')
self.assertEqual(errors[0].causes[1].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes[1].causes), 0)
self.assertEqual(errors[0].causes[2].message, 'property securityTag with null value should be of type string')
self.assertEqual(errors[0].causes[2].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]/securityTag')
self.assertEqual(len(errors[0].causes[2].causes), 0)
def test_security_tag_empty_string(self):
self.supplemental_description['securityTag'] = ''
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 0)
class TiesSchemaTests(TestCase):
def setUp(self):
self.ties = {
'version': '0.9',
'id': 'a',
'system': 'a',
'organization': 'a',
'time': 'a',
'description': 'a',
'type': 'a',
'securityTag': '',
'objectItems': [
{
'objectId': 'a',
'sha256Hash': 'a' * 64,
'md5Hash': 'a' * 32,
'authorityInformation': {
'securityTag': '',
},
}
],
'objectRelationships': [],
'otherInformation': [],
}
def test_all_fields(self):
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 0)
def test_only_required_fields(self):
del self.ties['id']
del self.ties['system']
del self.ties['organization']
del self.ties['time']
del self.ties['description']
del self.ties['type']
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 0)
def test_all_required_fields_missing(self):
del self.ties['version']
del self.ties['securityTag']
del self.ties['objectItems']
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].message, 'required properties [objectItems, securityTag, version] are missing')
self.assertEqual(errors[0].location, '/')
def test_additional_field(self):
self.ties['foo'] = 'a'
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].message, 'additional property foo is not allowed')
self.assertEqual(errors[0].location, '/')
def test_additional_fields(self):
self.ties['foo'] = 'a'
self.ties['bar'] = 'a'
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].message, 'additional properties [bar, foo] are not allowed')
self.assertEqual(errors[0].location, '/')
def test_version_missing(self):
del self.ties['version']
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].message, 'required property version is missing')
self.assertEqual(errors[0].location, '/')
def test_version_null(self):
self.ties['version'] = None
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].message, "enum property version with value null should have one of the allowed values: [0.9]")
self.assertEqual(errors[0].location, '/version')
def test_version_empty_string(self):
self.ties['version'] = ''
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].message, "enum property version with value '' should have one of the allowed values: [0.9]")
self.assertEqual(errors[0].location, '/version')
def test_version_invalid_value(self):
self.ties['version'] = '0.1'
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].message, "enum property version with value '0.1' should have one of the allowed values: [0.9]")
self.assertEqual(errors[0].location, '/version')
def test_id_missing(self):
del self.ties['id']
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 0)
def test_id_null(self):
self.ties['id'] = None
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].message, 'property id with null value should be of type string')
self.assertEqual(errors[0].location, '/id')
def test_id_too_short(self):
self.ties['id'] = ''
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].message, "property value '' for id property is too short, minimum length 1")
self.assertEqual(errors[0].location, '/id')
def test_id_too_long(self):
self.ties['id'] = 'a' * 257
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].message, "property value '{}' for id property is too long, maximum length 256".format('a' * 257))
self.assertEqual(errors[0].location, '/id')
def test_system_missing(self):
del self.ties['system']
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 0)
def test_system_null(self):
self.ties['system'] = None
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].message, 'property system with null value should be of type string')
self.assertEqual(errors[0].location, '/system')
def test_system_too_short(self):
self.ties['system'] = ''
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].message, "property value '' for system property is too short, minimum length 1")
self.assertEqual(errors[0].location, '/system')
def test_organization_missing(self):
del self.ties['organization']
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 0)
def test_organization_null(self):
self.ties['organization'] = None
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].message, 'property organization with null value should be of type string')
self.assertEqual(errors[0].location, '/organization')
def test_organization_too_short(self):
self.ties['organization'] = ''
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].message, "property value '' for organization property is too short, minimum length 1")
self.assertEqual(errors[0].location, '/organization')
def test_time_missing(self):
del self.ties['time']
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 0)
def test_time_null(self):
self.ties['time'] = None
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].message, 'property time with null value should be of type string')
self.assertEqual(errors[0].location, '/time')
def test_description_missing(self):
del self.ties['description']
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 0)
def test_description_null(self):
    """A null description is rejected: the schema requires a string."""
    self.ties['description'] = None
    report = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(1, len(report))
    error = report[0]
    self.assertEqual(error.message, 'property description with null value should be of type string')
    self.assertEqual(error.location, '/description')
def test_description_too_short(self):
    """An empty description string violates the minimum-length-1 constraint."""
    self.ties['description'] = ''
    report = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(1, len(report))
    error = report[0]
    self.assertEqual(error.message, "property value '' for description property is too short, minimum length 1")
    self.assertEqual(error.location, '/description')
def test_type_missing(self):
    """type is optional; omitting it produces no schema errors."""
    self.ties.pop('type')
    report = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(0, len(report))
def test_type_null(self):
    """A null type is rejected: the schema requires a string."""
    self.ties['type'] = None
    report = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(1, len(report))
    error = report[0]
    self.assertEqual(error.message, 'property type with null value should be of type string')
    self.assertEqual(error.location, '/type')
def test_type_too_short(self):
    """An empty type string violates the minimum-length-1 constraint."""
    self.ties['type'] = ''
    report = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(1, len(report))
    error = report[0]
    self.assertEqual(error.message, "property value '' for type property is too short, minimum length 1")
    self.assertEqual(error.location, '/type')
def test_security_tag_missing(self):
    """securityTag is required; its absence is reported at the document root."""
    self.ties.pop('securityTag')
    report = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(1, len(report))
    error = report[0]
    self.assertEqual(error.message, 'required property securityTag is missing')
    self.assertEqual(error.location, '/')
def test_security_tag_null(self):
    """A null securityTag is rejected: the schema requires a string."""
    self.ties['securityTag'] = None
    report = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(1, len(report))
    error = report[0]
    self.assertEqual(error.message, 'property securityTag with null value should be of type string')
    self.assertEqual(error.location, '/securityTag')
def test_security_tag_empty_string(self):
    """securityTag has no minimum length: an empty string is valid."""
    self.ties['securityTag'] = ''
    report = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(0, len(report))
def test_object_items_missing(self):
    """objectItems is required; its absence is reported at the document root."""
    self.ties.pop('objectItems')
    report = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(1, len(report))
    error = report[0]
    self.assertEqual(error.message, 'required property objectItems is missing')
    self.assertEqual(error.location, '/')
def test_object_items_null(self):
    """A null objectItems is rejected: the schema requires an array."""
    self.ties['objectItems'] = None
    report = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(1, len(report))
    error = report[0]
    self.assertEqual(error.message, 'property objectItems with null value should be of type array')
    self.assertEqual(error.location, '/objectItems')
def test_object_items_too_small(self):
    """An empty objectItems array violates the minimum-size-1 constraint."""
    self.ties['objectItems'] = []
    report = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(1, len(report))
    error = report[0]
    self.assertEqual(error.message, 'array property objectItems with 0 items is too small, minimum size 1')
    self.assertEqual(error.location, '/objectItems')
def test_object_items_duplicate_items(self):
    """Identical entries in objectItems are flagged as duplicates by index."""
    duplicate = {
        'objectId': 'a',
        'sha256Hash': 'a' * 64,
        'md5Hash': 'a' * 32,
        'authorityInformation': {'securityTag': ''},
    }
    self.ties['objectItems'] = [duplicate, duplicate]
    report = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(1, len(report))
    error = report[0]
    self.assertEqual(error.message, 'array property objectItems has duplicate items at index [0, 1]')
    self.assertEqual(error.location, '/objectItems')
def test_object_relationships_missing(self):
    """objectRelationships is optional; omitting it produces no schema errors."""
    self.ties.pop('objectRelationships')
    report = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(0, len(report))
def test_object_relationships_empty_list(self):
    """objectRelationships has no minimum size: an empty array is valid."""
    self.ties['objectRelationships'] = []
    report = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(0, len(report))
def test_object_relationships_duplicate_items(self):
    """Identical entries in objectRelationships are flagged as duplicates by index."""
    relationship = {
        'linkageMemberIds': ['a', 'b'],
        'linkageDirectionality': 'DIRECTED',
    }
    self.ties['objectRelationships'] = [relationship, relationship]
    report = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(1, len(report))
    error = report[0]
    self.assertEqual(error.message, 'array property objectRelationships has duplicate items at index [0, 1]')
    self.assertEqual(error.location, '/objectRelationships')
def test_other_information_missing(self):
    """otherInformation is optional; omitting it produces no schema errors."""
    self.ties.pop('otherInformation')
    report = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(0, len(report))
def test_other_information_empty_list(self):
    """otherInformation has no minimum size: an empty array is valid."""
    self.ties['otherInformation'] = []
    report = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(0, len(report))
def test_other_information_duplicate_items(self):
    """Identical entries in otherInformation are flagged as duplicates by index."""
    info = {'key': 'a', 'value': 'a'}
    self.ties['otherInformation'] = [info, info]
    report = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(1, len(report))
    error = report[0]
    self.assertEqual(error.message, 'array property otherInformation has duplicate items at index [0, 1]')
    self.assertEqual(error.location, '/otherInformation')
# Allow running this test module directly with `python <file>` in addition
# to discovery via a test runner.
if __name__ == '__main__':
    unittest.main()
| 60.3989
| 226
| 0.697576
| 16,100
| 142,783
| 6.103106
| 0.01913
| 0.155557
| 0.128018
| 0.131651
| 0.945369
| 0.923763
| 0.909577
| 0.890861
| 0.877305
| 0.870721
| 0
| 0.023735
| 0.17112
| 142,783
| 2,363
| 227
| 60.42446
| 0.806515
| 0.007298
| 0
| 0.74988
| 0
| 0.006265
| 0.307065
| 0.12875
| 0
| 0
| 0
| 0
| 0.540723
| 1
| 0.119036
| false
| 0.000482
| 0.003373
| 0
| 0.127229
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
5dfcb4944726585326ef2fa76153144014fd29bb
| 11,446
|
py
|
Python
|
tests/st/ops/cpu/test_bitwise_op.py
|
httpsgithu/mindspore
|
c29d6bb764e233b427319cb89ba79e420f1e2c64
|
[
"Apache-2.0"
] | 1
|
2022-02-23T09:13:43.000Z
|
2022-02-23T09:13:43.000Z
|
tests/st/ops/cpu/test_bitwise_op.py
|
949144093/mindspore
|
c29d6bb764e233b427319cb89ba79e420f1e2c64
|
[
"Apache-2.0"
] | null | null | null |
tests/st/ops/cpu/test_bitwise_op.py
|
949144093/mindspore
|
c29d6bb764e233b427319cb89ba79e420f1e2c64
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore.ops import functional as F
from mindspore.ops.functional import vmap
class OpNetWrapper(nn.Cell):
    """Minimal Cell that forwards every input to a wrapped primitive op."""

    def __init__(self, op):
        """Keep a reference to the primitive executed by ``construct``."""
        super(OpNetWrapper, self).__init__()
        self.op = op

    def construct(self, *inputs):
        """Run the wrapped primitive on the given inputs and return its result."""
        return self.op(*inputs)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
@pytest.mark.parametrize('shape', [(2,), (4, 5), (3, 4, 5, 6), (3, 4, 5, 6, 2)])
@pytest.mark.parametrize('dtype', [np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64])
def test_bitwise_and(shape, dtype):
    """
    Feature: BitwiseAnd cpu kernel.
    Description: test the rightness of BitwiseAnd cpu kernel.
    Expectation: Success.
    """
    context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
    net = OpNetWrapper(P.BitwiseAnd())
    # Randomly flip the sign of the scale so both positive and negative
    # integer ranges are exercised after the dtype cast.
    scale = 100 if np.random.random() > 0.5 else -100
    lhs = (np.random.randn(*shape) * scale).astype(dtype)
    rhs = (np.random.randn(*shape) * scale).astype(dtype)
    expect = np.bitwise_and(lhs, rhs)
    # Check both the primitive-op path and the functional API against numpy.
    out_primitive = net(Tensor(lhs), Tensor(rhs))
    out_functional = F.bitwise_and(Tensor(lhs), Tensor(rhs))
    assert np.allclose(out_primitive.asnumpy(), expect)
    assert np.allclose(out_functional.asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
@pytest.mark.parametrize('shape', [(2,), (4, 5), (3, 4, 5, 6), (3, 4, 5, 6, 2)])
@pytest.mark.parametrize('dtype', [np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64])
def test_bitwise_or(shape, dtype):
    """
    Feature: BitwiseOr cpu kernel.
    Description: test the rightness of BitwiseOr cpu kernel.
    Expectation: Success.
    """
    context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
    net = OpNetWrapper(P.BitwiseOr())
    # Randomly flip the sign of the scale so both positive and negative
    # integer ranges are exercised after the dtype cast.
    scale = 100 if np.random.random() > 0.5 else -100
    lhs = (np.random.randn(*shape) * scale).astype(dtype)
    rhs = (np.random.randn(*shape) * scale).astype(dtype)
    expect = np.bitwise_or(lhs, rhs)
    # Check both the primitive-op path and the functional API against numpy.
    out_primitive = net(Tensor(lhs), Tensor(rhs))
    out_functional = F.bitwise_or(Tensor(lhs), Tensor(rhs))
    assert np.allclose(out_primitive.asnumpy(), expect)
    assert np.allclose(out_functional.asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
@pytest.mark.parametrize('shape', [(2,), (4, 5), (3, 4, 5, 6), (3, 4, 5, 6, 2)])
@pytest.mark.parametrize('dtype', [np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64])
def test_bitwise_xor(shape, dtype):
    """
    Feature: BitwiseXor cpu kernel.
    Description: test the rightness of BitwiseXor cpu kernel.
    Expectation: Success.
    """
    context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
    net = OpNetWrapper(P.BitwiseXor())
    # Randomly flip the sign of the scale so both positive and negative
    # integer ranges are exercised after the dtype cast.
    scale = 100 if np.random.random() > 0.5 else -100
    lhs = (np.random.randn(*shape) * scale).astype(dtype)
    rhs = (np.random.randn(*shape) * scale).astype(dtype)
    expect = np.bitwise_xor(lhs, rhs)
    # Check both the primitive-op path and the functional API against numpy.
    out_primitive = net(Tensor(lhs), Tensor(rhs))
    out_functional = F.bitwise_xor(Tensor(lhs), Tensor(rhs))
    assert np.allclose(out_primitive.asnumpy(), expect)
    assert np.allclose(out_functional.asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
@pytest.mark.parametrize('op', [P.BitwiseAnd(), P.BitwiseOr(), P.BitwiseXor()])
@pytest.mark.parametrize('dtype', [np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64])
def test_bitwise_vmap(op, dtype):
    """
    Feature: Bitwise cpu kernel.
    Description: test the rightness of Bitwise vmap feature.
    Expectation: Success.
    """
    context.set_context(mode=context.GRAPH_MODE, device_target='CPU')

    def apply_op(a, b):
        # Binary bitwise op under test, closed over the parametrized primitive.
        return op(a, b)

    lhs = np.array([[1, 2], [3, 4], [5, 6]]).astype(dtype)
    rhs = np.array([[-3, -2, -1], [3, 2, 1]]).astype(dtype)
    # vmap maps axis 0 of lhs against axis 1 of rhs.
    outputs = vmap(apply_op, in_axes=(0, 1), out_axes=0)(Tensor(lhs), Tensor(rhs))
    # Reference result: batch manually with rhs pre-transposed to match
    # in_axes=(0, 1), applying the op row by row.
    rhs_batched = np.array([[-3, 3], [-2, 2], [-1, 1]]).astype(dtype)
    expect = np.stack(
        [apply_op(Tensor(lhs[i]), Tensor(rhs_batched[i])).asnumpy() for i in range(lhs.shape[0])]
    )
    assert np.allclose(outputs.asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
@pytest.mark.parametrize('dtype', [np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64])
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
@pytest.mark.parametrize('shape', [(2,), (4, 5), (3, 4, 5, 6), (3, 4, 5, 6, 2)])
def test_bitwise_and_tensor_interface(dtype, mode, shape):
    """
    Feature: BitwiseAnd cpu kernel.
    Description: test the rightness of BitwiseAnd tensor interface.
    Expectation: Success.
    """
    context.set_context(mode=mode, device_target='CPU')
    # Random sign flip exercises both positive and negative integer ranges.
    scale = 100 if np.random.random() > 0.5 else -100
    lhs = (np.random.randn(*shape) * scale).astype(dtype)
    rhs = (np.random.randn(*shape) * scale).astype(dtype)
    expect = np.bitwise_and(lhs, rhs)
    result = Tensor(lhs).bitwise_and(Tensor(rhs))
    assert np.allclose(result.asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
@pytest.mark.parametrize('dtype', [np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64])
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
@pytest.mark.parametrize('shape', [(2,), (4, 5), (3, 4, 5, 6), (3, 4, 5, 6, 2)])
def test_bitwise_or_tensor_interface(dtype, mode, shape):
    """
    Feature: BitwiseOr cpu kernel.
    Description: test the rightness of BitwiseOr tensor interface.
    Expectation: Success.
    """
    context.set_context(mode=mode, device_target='CPU')
    # Random sign flip exercises both positive and negative integer ranges.
    scale = 100 if np.random.random() > 0.5 else -100
    lhs = (np.random.randn(*shape) * scale).astype(dtype)
    rhs = (np.random.randn(*shape) * scale).astype(dtype)
    expect = np.bitwise_or(lhs, rhs)
    result = Tensor(lhs).bitwise_or(Tensor(rhs))
    assert np.allclose(result.asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
@pytest.mark.parametrize('dtype', [np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64])
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
@pytest.mark.parametrize('shape', [(2,), (4, 5), (3, 4, 5, 6), (3, 4, 5, 6, 2)])
def test_bitwise_xor_tensor_interface(dtype, mode, shape):
    """
    Feature: BitwiseXor cpu kernel.
    Description: test the rightness of BitwiseXor tensor interface.
    Expectation: Success.
    """
    context.set_context(mode=mode, device_target='CPU')
    # Random sign flip exercises both positive and negative integer ranges.
    scale = 100 if np.random.random() > 0.5 else -100
    lhs = (np.random.randn(*shape) * scale).astype(dtype)
    rhs = (np.random.randn(*shape) * scale).astype(dtype)
    expect = np.bitwise_xor(lhs, rhs)
    result = Tensor(lhs).bitwise_xor(Tensor(rhs))
    assert np.allclose(result.asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
@pytest.mark.parametrize('xshape', [(2, 3)])
@pytest.mark.parametrize('yshape', [(1, 1), (1, 3), (2, 1)])
@pytest.mark.parametrize('dtype', [np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64])
def test_bitwise_and_broadcast(xshape, yshape, dtype):
    """
    Feature: BitwiseAnd cpu kernel.
    Description: test the rightness of BitwiseAnd cpu kernel broadcast.
    Expectation: Success.
    """
    context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
    net = OpNetWrapper(P.BitwiseAnd())
    # Random sign flip exercises both positive and negative integer ranges;
    # yshape broadcasts against xshape per numpy rules.
    scale = 100 if np.random.random() > 0.5 else -100
    lhs = (np.random.randn(*xshape) * scale).astype(dtype)
    rhs = (np.random.randn(*yshape) * scale).astype(dtype)
    expect = np.bitwise_and(lhs, rhs)
    out_primitive = net(Tensor(lhs), Tensor(rhs))
    out_functional = F.bitwise_and(Tensor(lhs), Tensor(rhs))
    assert np.allclose(out_primitive.asnumpy(), expect)
    assert np.allclose(out_functional.asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
@pytest.mark.parametrize('xshape', [(2, 3)])
@pytest.mark.parametrize('yshape', [(1, 1), (1, 3), (2, 1)])
@pytest.mark.parametrize('dtype', [np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64])
def test_bitwise_or_broadcast(xshape, yshape, dtype):
    """
    Feature: BitwiseOr cpu kernel.
    Description: test the rightness of BitwiseOr cpu kernel broadcast.
    Expectation: Success.
    """
    context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
    net = OpNetWrapper(P.BitwiseOr())
    # Random sign flip exercises both positive and negative integer ranges;
    # yshape broadcasts against xshape per numpy rules.
    scale = 100 if np.random.random() > 0.5 else -100
    lhs = (np.random.randn(*xshape) * scale).astype(dtype)
    rhs = (np.random.randn(*yshape) * scale).astype(dtype)
    expect = np.bitwise_or(lhs, rhs)
    out_primitive = net(Tensor(lhs), Tensor(rhs))
    out_functional = F.bitwise_or(Tensor(lhs), Tensor(rhs))
    assert np.allclose(out_primitive.asnumpy(), expect)
    assert np.allclose(out_functional.asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
@pytest.mark.parametrize('xshape', [(2, 3)])
@pytest.mark.parametrize('yshape', [(1, 1), (1, 3), (2, 1)])
@pytest.mark.parametrize('dtype', [np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64])
def test_bitwise_xor_broadcast(xshape, yshape, dtype):
    """
    Feature: BitwiseXor cpu kernel.
    Description: test the rightness of BitwiseXor cpu kernel broadcast.
    Expectation: Success.
    """
    context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
    net = OpNetWrapper(P.BitwiseXor())
    # Random sign flip exercises both positive and negative integer ranges;
    # yshape broadcasts against xshape per numpy rules.
    scale = 100 if np.random.random() > 0.5 else -100
    lhs = (np.random.randn(*xshape) * scale).astype(dtype)
    rhs = (np.random.randn(*yshape) * scale).astype(dtype)
    expect = np.bitwise_xor(lhs, rhs)
    out_primitive = net(Tensor(lhs), Tensor(rhs))
    out_functional = F.bitwise_xor(Tensor(lhs), Tensor(rhs))
    assert np.allclose(out_primitive.asnumpy(), expect)
    assert np.allclose(out_functional.asnumpy(), expect)
| 38.668919
| 117
| 0.684519
| 1,695
| 11,446
| 4.490855
| 0.097935
| 0.073568
| 0.071729
| 0.03547
| 0.844325
| 0.836048
| 0.824619
| 0.819627
| 0.819627
| 0.819627
| 0
| 0.038993
| 0.153067
| 11,446
| 295
| 118
| 38.8
| 0.746235
| 0.160231
| 0
| 0.774725
| 0
| 0
| 0.017107
| 0
| 0
| 0
| 0
| 0
| 0.087912
| 1
| 0.076923
| false
| 0
| 0.043956
| 0.005495
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f8e37c7f431db7fa96e17fbf81cff57810e12cb0
| 4,911
|
py
|
Python
|
tests/test_app1/securities/test_sec_register.py
|
loitd/myflask
|
276db36c698c440dc4f1af42ca34961294234ab3
|
[
"Apache-2.0"
] | null | null | null |
tests/test_app1/securities/test_sec_register.py
|
loitd/myflask
|
276db36c698c440dc4f1af42ca34961294234ab3
|
[
"Apache-2.0"
] | 4
|
2021-02-08T20:55:21.000Z
|
2022-01-06T22:51:00.000Z
|
tests/test_app1/securities/test_sec_register.py
|
loitd/myflask
|
276db36c698c440dc4f1af42ca34961294234ab3
|
[
"Apache-2.0"
] | null | null | null |
# https://viblo.asia/p/tim-hieu-ve-sql-injection-testing-RQqKLv90l7z
import pytest
from bs4 import BeautifulSoup as bs
from flask import url_for
def _assert_registration_rejected(client, endpoint, data):
    """POST *data* to the registration endpoint and assert it was rejected.

    Rejection means: HTTP 200 (form re-rendered with feedback), no redirect
    Location header, the validation-failure message present in the body, and
    no redirect to the login page.
    """
    reg = client.post(endpoint, data=data, follow_redirects=True)
    body = reg.data.decode('utf-8')
    assert reg.status_code == 200  # has feedback
    assert reg.headers.get("Location") is None  # no redirect issued
    assert "Validation failed! Contact admin@myflask.com for help" in body
    assert '<script src="/static/js/login.js"></script>' not in body  # not redirected to login


def test_register_sqlinjection(app, client):
    """Registration must reject SQL-injection payloads and invalid name lengths.

    Each payload below previously had its own copy-pasted POST + four
    assertions; they are now driven through _assert_registration_rejected.
    """
    theurlstr = 'register_blp.reg'
    with app.test_request_context():
        endpoint = url_for(theurlstr)
        # Test render without errors
        assert client.get(endpoint).status_code == 200
        rejected_payloads = [
            # Injection in both name and email
            dict(inputName='2018 OR ""="', inputEmail='test@myflask.com OR ""="', inputPassword="123"),
            # Injection in name only
            dict(inputName='2018 OR ""="', inputEmail='test@myflask.com', inputPassword="123"),
            # Injection in email only
            dict(inputName='A test', inputEmail='test@myflask.com OR ""="', inputPassword="123"),
            dict(inputName='A test', inputEmail="' OR 1=1", inputPassword="123"),
            dict(inputName='A test', inputEmail='test@myflask.com; DROP TABLE tbl_users; --', inputPassword="123"),
            # Test length of inputName (must be from 6-20): too long
            dict(inputName='A test i am the one who pass it alllllllllllllllllllllllllllll',
                 inputEmail='test@myflask.com', inputPassword="123"),
            # Too short
            dict(inputName='A', inputEmail='test@myflask.com', inputPassword="123"),
            # Injection attempt via name with statement terminator
            dict(inputName="'; select true; --", inputEmail='test@myflask.com', inputPassword="123"),
        ]
        for data in rejected_payloads:
            _assert_registration_rejected(client, endpoint, data)
| 49.606061
| 117
| 0.618815
| 629
| 4,911
| 4.779014
| 0.157393
| 0.047904
| 0.047904
| 0.07984
| 0.869927
| 0.862608
| 0.862608
| 0.862608
| 0.862608
| 0.852961
| 0
| 0.023104
| 0.250865
| 4,911
| 99
| 118
| 49.606061
| 0.793966
| 0.089595
| 0
| 0.770115
| 0
| 0
| 0.277915
| 0.069647
| 0
| 0
| 0
| 0
| 0.37931
| 1
| 0.011494
| false
| 0.103448
| 0.034483
| 0
| 0.045977
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
5d18273e13675a5fe94f7e9eb409516927ce0940
| 41,436
|
py
|
Python
|
messagegenerator.py
|
Smiddie31/aws-health-aware
|
3a548794e9e03acce24ea954a354f13396f221d8
|
[
"MIT-0"
] | 142
|
2021-03-09T18:10:37.000Z
|
2022-03-21T18:04:07.000Z
|
messagegenerator.py
|
Smiddie31/aws-health-aware
|
3a548794e9e03acce24ea954a354f13396f221d8
|
[
"MIT-0"
] | 21
|
2021-03-09T18:15:53.000Z
|
2022-03-06T03:59:51.000Z
|
messagegenerator.py
|
Smiddie31/aws-health-aware
|
3a548794e9e03acce24ea954a354f13396f221d8
|
[
"MIT-0"
] | 54
|
2021-03-09T04:51:30.000Z
|
2022-03-31T07:57:58.000Z
|
import json
import boto3
from datetime import datetime, timedelta
from botocore.exceptions import ClientError
import os
import time
def get_message_for_slack(event_details, event_type, affected_accounts, affected_entities, slack_webhook):
    """Build the Slack message for an AWS Health event.

    Args:
        event_details: DescribeEventDetails-style response; only
            event_details['successfulSet'][0]['event'] is read.
        event_type: "create" for a new event, "resolve" for a closed one;
            any other value leaves message as the empty string.
        affected_accounts: list of account id strings; empty means all
            accounts in the region.
        affected_entities: list of resource id strings; empty or "UNKNOWN"
            means all resources in the region.
        slack_webhook: "webhook" selects the attachment-style payload;
            anything else selects a flat dict (presumably for AWS Chatbot —
            confirm against the caller).

    Returns:
        dict payload for Slack, or "" when event_type is unrecognized.
    """
    message = ""
    summary = ""
    if slack_webhook == "webhook":
        # Collapse the entity/account lists into newline-joined strings with
        # region-wide fallbacks for empty or UNKNOWN input.
        if len(affected_entities) >= 1:
            affected_entities = "\n".join(affected_entities)
            if affected_entities == "UNKNOWN":
                affected_entities = "All resources\nin region"
        else:
            affected_entities = "All resources\nin region"
        if len(affected_accounts) >= 1:
            affected_accounts = "\n".join(affected_accounts)
        else:
            affected_accounts = "All accounts\nin region"
        if event_type == "create":
            summary += (
                f":rotating_light:*[NEW] AWS Health reported an issue with the {event_details['successfulSet'][0]['event']['service'].upper()} service in "
                f"the {event_details['successfulSet'][0]['event']['region'].upper()} region.*"
            )
            # Red ("danger") attachment for a newly opened event.
            message = {
                "text": summary,
                "attachments": [
                    {
                        "color": "danger",
                        "fields": [
                            { "title": "Account(s)", "value": affected_accounts, "short": True },
                            { "title": "Resource(s)", "value": affected_entities, "short": True },
                            { "title": "Service", "value": event_details['successfulSet'][0]['event']['service'], "short": True },
                            { "title": "Region", "value": event_details['successfulSet'][0]['event']['region'], "short": True },
                            { "title": "Start Time (UTC)", "value": cleanup_time(event_details['successfulSet'][0]['event']['startTime']), "short": True },
                            { "title": "Status", "value": event_details['successfulSet'][0]['event']['statusCode'], "short": True },
                            { "title": "Event ARN", "value": event_details['successfulSet'][0]['event']['arn'], "short": False },
                            { "title": "Updates", "value": get_last_aws_update(event_details), "short": False }
                        ],
                    }
                ]
            }
        elif event_type == "resolve":
            summary += (
                f":heavy_check_mark:*[RESOLVED] The AWS Health issue with the {event_details['successfulSet'][0]['event']['service'].upper()} service in "
                f"the {event_details['successfulSet'][0]['event']['region'].upper()} region is now resolved.*"
            )
            # Green attachment for a resolved event; adds the End Time field.
            message = {
                "text": summary,
                "attachments": [
                    {
                        "color": "00ff00",
                        "fields": [
                            { "title": "Account(s)", "value": affected_accounts, "short": True },
                            { "title": "Resource(s)", "value": affected_entities, "short": True },
                            { "title": "Service", "value": event_details['successfulSet'][0]['event']['service'], "short": True },
                            { "title": "Region", "value": event_details['successfulSet'][0]['event']['region'], "short": True },
                            { "title": "Start Time (UTC)", "value": cleanup_time(event_details['successfulSet'][0]['event']['startTime']), "short": True },
                            { "title": "End Time (UTC)", "value": cleanup_time(event_details['successfulSet'][0]['event']['endTime']), "short": True },
                            { "title": "Status", "value": event_details['successfulSet'][0]['event']['statusCode'], "short": True },
                            { "title": "Event ARN", "value": event_details['successfulSet'][0]['event']['arn'], "short": False },
                            { "title": "Updates", "value": get_last_aws_update(event_details), "short": False }
                        ],
                    }
                ]
            }
    else:
        # Non-webhook path: flat key/value payload. NOTE(review): fallbacks
        # here use single-line strings ("All resources in region") unlike the
        # webhook branch — confirm this difference is intentional.
        if len(affected_entities) >= 1:
            affected_entities = "\n".join(affected_entities)
            if affected_entities == "UNKNOWN":
                affected_entities = "All resources\nin region"
        else:
            affected_entities = "All resources in region"
        if len(affected_accounts) >= 1:
            affected_accounts = "\n".join(affected_accounts)
        else:
            affected_accounts = "All accounts in region"
        if event_type == "create":
            summary += (
                f":rotating_light:*[NEW] AWS Health reported an issue with the {event_details['successfulSet'][0]['event']['service'].upper()} service in "
                f"the {event_details['successfulSet'][0]['event']['region'].upper()} region.*"
            )
            message = {
                "text": summary,
                "accounts": affected_accounts,
                "resources": affected_entities,
                "service": event_details['successfulSet'][0]['event']['service'],
                "region": event_details['successfulSet'][0]['event']['region'],
                "start_time": cleanup_time(event_details['successfulSet'][0]['event']['startTime']),
                "status": event_details['successfulSet'][0]['event']['statusCode'],
                "event_arn": event_details['successfulSet'][0]['event']['arn'],
                "updates": get_last_aws_update(event_details)
            }
        elif event_type == "resolve":
            summary += (
                f":heavy_check_mark:*[RESOLVED] The AWS Health issue with the {event_details['successfulSet'][0]['event']['service'].upper()} service in "
                f"the {event_details['successfulSet'][0]['event']['region'].upper()} region is now resolved.*"
            )
            # NOTE(review): unlike the webhook branch, the resolve payload
            # here carries no end_time field — confirm whether that is wanted.
            message = {
                "text": summary,
                "accounts": affected_accounts,
                "resources": affected_entities,
                "service": event_details['successfulSet'][0]['event']['service'],
                "region": event_details['successfulSet'][0]['event']['region'],
                "start_time": cleanup_time(event_details['successfulSet'][0]['event']['startTime']),
                "status": event_details['successfulSet'][0]['event']['statusCode'],
                "event_arn": event_details['successfulSet'][0]['event']['arn'],
                "updates": get_last_aws_update(event_details)
            }
    print("Message sent to Slack: ", message)
    return message
def get_message_for_eventbridge(event_details, event_type, affected_accounts, affected_entities):
    """Build the EventBridge payload for a single-account AWS Health event.

    Args:
        event_details: DescribeEventDetails-style response; only
            event_details['successfulSet'][0]['event'] is read.
        event_type: "create" or "resolve"; any other value leaves message
            as the empty string.
        affected_accounts: list of account id strings; empty means all
            accounts in the region.
        affected_entities: list of resource id strings; empty or "UNKNOWN"
            means all resources in the region.

    Returns:
        Attachment-shaped dict, or "" when event_type is unrecognized.
    """
    message = ""
    # Collapse the entity/account lists into newline-joined strings with
    # region-wide fallbacks for empty or UNKNOWN input.
    if len(affected_entities) >= 1:
        affected_entities = "\n".join(affected_entities)
        if affected_entities == "UNKNOWN":
            affected_entities = "All resources\nin region"
    else:
        affected_entities = "All resources\nin region"
    if len(affected_accounts) >= 1:
        affected_accounts = "\n".join(affected_accounts)
    else:
        affected_accounts = "All accounts\nin region"
    if event_type == "create":
        message = {
            "attachments": [
                {
                    "fields": [
                        { "title": "Account(s)", "value": affected_accounts, "short": True },
                        { "title": "Resource(s)", "value": affected_entities, "short": True },
                        { "title": "Service", "value": event_details['successfulSet'][0]['event']['service'], "short": True },
                        { "title": "Region", "value": event_details['successfulSet'][0]['event']['region'], "short": True },
                        { "title": "Start Time (UTC)", "value": cleanup_time(event_details['successfulSet'][0]['event']['startTime']), "short": True },
                        { "title": "Status", "value": event_details['successfulSet'][0]['event']['statusCode'], "short": True },
                        { "title": "Event ARN", "value": event_details['successfulSet'][0]['event']['arn'], "short": False },
                        { "title": "Updates", "value": get_last_aws_update(event_details), "short": False }
                    ],
                }
            ]
        }
    elif event_type == "resolve":
        # Same fields as "create" plus the End Time of the resolved event.
        message = {
            "attachments": [
                {
                    "fields": [
                        { "title": "Account(s)", "value": affected_accounts, "short": True },
                        { "title": "Resource(s)", "value": affected_entities, "short": True },
                        { "title": "Service", "value": event_details['successfulSet'][0]['event']['service'], "short": True },
                        { "title": "Region", "value": event_details['successfulSet'][0]['event']['region'], "short": True },
                        { "title": "Start Time (UTC)", "value": cleanup_time(event_details['successfulSet'][0]['event']['startTime']), "short": True },
                        { "title": "End Time (UTC)", "value": cleanup_time(event_details['successfulSet'][0]['event']['endTime']), "short": True },
                        { "title": "Status", "value": event_details['successfulSet'][0]['event']['statusCode'], "short": True },
                        { "title": "Event ARN", "value": event_details['successfulSet'][0]['event']['arn'], "short": False },
                        { "title": "Updates", "value": get_last_aws_update(event_details), "short": False }
                    ],
                }
            ]
        }
    print("SHD Message generated for EventBridge : ", message)
    return message
def get_org_message_for_eventbridge(event_details, event_type, affected_org_accounts, affected_org_entities):
    """Build the EventBridge payload for an organization AWS Health event.

    Args:
        event_details: DescribeEventDetails-style response; only
            event_details['successfulSet'][0]['event'] is read.
        event_type: "create" or "resolve"; any other value leaves the
            message as the empty string.
        affected_org_accounts: list of account id strings; empty means all
            accounts in the region.
        affected_org_entities: list of resource id strings; empty means all
            resources in the region.

    Returns:
        Attachment-shaped dict, or "" when event_type is unrecognized.
    """
    message = ""
    # Collapse the lists into newline-joined strings with region-wide fallbacks.
    if len(affected_org_entities) >= 1:
        affected_org_entities = "\n".join(affected_org_entities)
    else:
        affected_org_entities = "All resources\nin region"
    if len(affected_org_accounts) >= 1:
        affected_org_accounts = "\n".join(affected_org_accounts)
    else:
        affected_org_accounts = "All accounts\nin region"
    if event_type in ("create", "resolve"):
        event = event_details['successfulSet'][0]['event']
        # "create" and "resolve" share every field; "resolve" additionally
        # carries the End Time of the event (previously two near-identical
        # copies of this literal).
        fields = [
            { "title": "Account(s)", "value": affected_org_accounts, "short": True },
            { "title": "Resource(s)", "value": affected_org_entities, "short": True },
            { "title": "Service", "value": event['service'], "short": True },
            { "title": "Region", "value": event['region'], "short": True },
            { "title": "Start Time (UTC)", "value": cleanup_time(event['startTime']), "short": True },
        ]
        if event_type == "resolve":
            fields.append({ "title": "End Time (UTC)", "value": cleanup_time(event['endTime']), "short": True })
        fields.extend([
            { "title": "Status", "value": event['statusCode'], "short": True },
            { "title": "Event ARN", "value": event['arn'], "short": False },
            { "title": "Updates", "value": get_last_aws_update(event_details), "short": False },
        ])
        message = {
            "attachments": [
                {
                    "fields": fields,
                }
            ]
        }
    # Removed a dead `json.dumps(message)` call whose result was discarded;
    # serialization, if needed, is the caller's responsibility.
    print("PHD/SHD Message generated for Event Bridge: ", message)
    return message
def get_org_message_for_slack(event_details, event_type, affected_org_accounts, affected_org_entities, slack_webhook):
    """Build the organization-level AWS Health payload for Slack.

    :param event_details: describe_event_details_for_organization response; the
        event is read from event_details['successfulSet'][0]['event']
    :param event_type: "create" for a new event, "resolve" for a closed one;
        any other value yields an empty message
    :param affected_org_accounts: list of affected account ids (may be empty)
    :param affected_org_entities: list of affected resources (may be empty)
    :param slack_webhook: "webhook" selects the Slack attachment format;
        anything else produces a flat key/value dict for other consumers
    :return: dict payload ready for JSON encoding, or "" for unknown event types
    """
    message = ""
    if slack_webhook == "webhook":
        # Slack attachment fields render multi-line values, so the fallback
        # wording wraps onto two lines.
        affected_org_entities = "\n".join(affected_org_entities) if len(affected_org_entities) >= 1 else "All resources\nin region"
        affected_org_accounts = "\n".join(affected_org_accounts) if len(affected_org_accounts) >= 1 else "All accounts\nin region"
        if event_type in ("create", "resolve"):
            event = event_details['successfulSet'][0]['event']
            if event_type == "create":
                color = "danger"
                summary = (
                    f":rotating_light:*[NEW] AWS Health reported an issue with the {event['service'].upper()} service in "
                    f"the {event['region'].upper()} region.*"
                )
            else:
                color = "00ff00"
                summary = (
                    f":heavy_check_mark:*[RESOLVED] The AWS Health issue with the {event['service'].upper()} service in "
                    f"the {event['region'].upper()} region is now resolved.*"
                )
            fields = [
                { "title": "Account(s)", "value": affected_org_accounts, "short": True },
                { "title": "Resource(s)", "value": affected_org_entities, "short": True },
                { "title": "Service", "value": event['service'], "short": True },
                { "title": "Region", "value": event['region'], "short": True },
                { "title": "Start Time (UTC)", "value": cleanup_time(event['startTime']), "short": True },
            ]
            if event_type == "resolve":
                # Resolved events additionally carry an end time.
                fields.append({ "title": "End Time (UTC)", "value": cleanup_time(event['endTime']), "short": True })
            fields += [
                { "title": "Status", "value": event['statusCode'], "short": True },
                { "title": "Event ARN", "value": event['arn'], "short": False },
                { "title": "Updates", "value": get_last_aws_update(event_details), "short": False },
            ]
            message = {
                "text": summary,
                "attachments": [
                    {
                        "color": color,
                        "fields": fields,
                    }
                ]
            }
    else:
        # Non-Slack consumers get a flat dict with single-line fallbacks.
        affected_org_entities = "\n".join(affected_org_entities) if len(affected_org_entities) >= 1 else "All resources in region"
        affected_org_accounts = "\n".join(affected_org_accounts) if len(affected_org_accounts) >= 1 else "All accounts in region"
        if event_type in ("create", "resolve"):
            event = event_details['successfulSet'][0]['event']
            if event_type == "create":
                summary = (
                    f":rotating_light:*[NEW] AWS Health reported an issue with the {event['service'].upper()} service in "
                    f"the {event['region'].upper()} region.*"
                )
            else:
                summary = (
                    f":heavy_check_mark:*[RESOLVED] The AWS Health issue with the {event['service'].upper()} service in "
                    f"the {event['region'].upper()} region is now resolved.*"
                )
            message = {
                "text": summary,
                "accounts": affected_org_accounts,
                "resources": affected_org_entities,
                "service": event['service'],
                "region": event['region'],
                "start_time": cleanup_time(event['startTime']),
                "status": event['statusCode'],
                "event_arn": event['arn'],
                "updates": get_last_aws_update(event_details)
            }
    # The original called json.dumps(message) and discarded the result;
    # that dead call has been removed.
    print("Message sent to Slack: ", message)
    return message
def get_message_for_chime(event_details, event_type, affected_accounts, affected_entities):
    """Build the Chime markdown message for an AWS Health event.

    :param event_details: describe_event_details response; the event is read
        from event_details['successfulSet'][0]['event']
    :param event_type: "create" for a new event, "resolve" for a closed one;
        any other value yields an empty message
    :param affected_accounts: list of affected account ids (may be empty)
    :param affected_entities: list of affected resources (may be empty)
    :return: "/md"-prefixed markdown string, or "" for unknown event types
    """
    message = ""
    if len(affected_entities) >= 1:
        affected_entities = "\n".join(affected_entities)
        if affected_entities == "UNKNOWN":
            affected_entities = "All resources\nin region"
    else:
        affected_entities = "All resources\nin region"
    if len(affected_accounts) >= 1:
        affected_accounts = "\n".join(affected_accounts)
    else:
        affected_accounts = "All accounts\nin region"
    if event_type in ("create", "resolve"):
        event = event_details['successfulSet'][0]['event']
        # "\\[" emits a literal backslash so Chime does not treat the brackets
        # as markdown.  The original used the invalid escape "\[" (a
        # SyntaxWarning on modern Python) that produced the same bytes.
        if event_type == "create":
            header = (
                f"**:rotating_light:\\[NEW\\] AWS Health reported an issue with the "
                f"{event['service'].upper()} service in {event['region'].upper()} region.**"
            )
            end_time_line = ""
        else:
            header = (
                f"**:heavy_check_mark:\\[RESOLVED\\] The AWS Health issue with the "
                f"{event['service'].upper()} service in {event['region'].upper()} region is now resolved.**"
            )
            end_time_line = f"**End Time (UTC)**: {cleanup_time(event['endTime'])}\n"
        message = (
            "/md\n"
            f"{header}\n"
            "---\n"
            f"**Account(s)**: {affected_accounts}\n"
            f"**Resource(s)**: {affected_entities}\n"
            f"**Service**: {event['service']}\n"
            f"**Region**: {event['region']}\n"
            f"**Start Time (UTC)**: {cleanup_time(event['startTime'])}\n"
            f"{end_time_line}"
            f"**Status**: {event['statusCode']}\n"
            f"**Event ARN**: {event['arn']}\n"
            "**Updates:**\n"
            f"{get_last_aws_update(event_details)}"
        )
    # The original called json.dumps(message) and discarded the result, and
    # kept an unused `summary` variable; both removed as dead code.
    print("Message sent to Chime: ", message)
    return message
def get_org_message_for_chime(event_details, event_type, affected_org_accounts, affected_org_entities):
    """Build the organization-level Chime markdown message for an AWS Health event.

    :param event_details: describe_event_details_for_organization response
    :param event_type: "create" for a new event, "resolve" for a closed one;
        any other value yields an empty message
    :param affected_org_accounts: list of affected account ids (may be empty)
    :param affected_org_entities: list of affected resources (may be empty)
    :return: "/md"-prefixed markdown string, or "" for unknown event types
    """
    message = ""
    if len(affected_org_entities) >= 1:
        affected_org_entities = "\n".join(affected_org_entities)
    else:
        affected_org_entities = "All resources in region"
    if len(affected_org_accounts) >= 1:
        affected_org_accounts = "\n".join(affected_org_accounts)
    else:
        affected_org_accounts = "All accounts in region"
    if event_type in ("create", "resolve"):
        event = event_details['successfulSet'][0]['event']
        # "\\[" emits a literal backslash so Chime does not treat the brackets
        # as markdown; the original's "\[" was an invalid escape (SyntaxWarning
        # on modern Python) producing the same bytes.  The original also
        # wrapped parts of the expression in confusing, misplaced str() calls;
        # the rendered output is unchanged here (note: the "create" header has
        # no trailing period, matching the original).
        if event_type == "create":
            header = (
                f"**:rotating_light:\\[NEW\\] AWS Health reported an issue with the "
                f"{event['service'].upper()} service in {event['region'].upper()} region**"
            )
            end_time_line = ""
        else:
            header = (
                f"**:heavy_check_mark:\\[RESOLVED\\] The AWS Health issue with the "
                f"{event['service'].upper()} service in {event['region'].upper()} region is now resolved.**"
            )
            end_time_line = f"**End Time (UTC)**: {cleanup_time(event['endTime'])}\n"
        message = (
            "/md\n"
            f"{header}\n"
            "---\n"
            f"**Account(s)**: {affected_org_accounts}\n"
            f"**Resource(s)**: {affected_org_entities}\n"
            f"**Service**: {event['service']}\n"
            f"**Region**: {event['region']}\n"
            f"**Start Time (UTC)**: {cleanup_time(event['startTime'])}\n"
            f"{end_time_line}"
            f"**Status**: {event['statusCode']}\n"
            f"**Event ARN**: {event['arn']}\n"
            "**Updates:**\n"
            f"{get_last_aws_update(event_details)}"
        )
    print("Message sent to Chime: ", message)
    return message
def get_message_for_teams(event_details, event_type, affected_accounts, affected_entities):
    """Build the Microsoft Teams MessageCard for an AWS Health event.

    :param event_details: describe_event_details response; the event is read
        from event_details['successfulSet'][0]['event']
    :param event_type: "create" for a new event, "resolve" for a closed one;
        any other value yields an empty message
    :param affected_accounts: list of affected account ids (may be empty)
    :param affected_entities: list of affected resources (may be empty)
    :return: MessageCard dict, or "" for unknown event types
    """
    message = ""
    if len(affected_entities) >= 1:
        affected_entities = "\n".join(affected_entities)
        if affected_entities == "UNKNOWN":
            affected_entities = "All resources\nin region"
    else:
        affected_entities = "All resources\nin region"
    if len(affected_accounts) >= 1:
        affected_accounts = "\n".join(affected_accounts)
    else:
        affected_accounts = "All accounts\nin region"
    if event_type in ("create", "resolve"):
        event = event_details['successfulSet'][0]['event']
        if event_type == "create":
            theme_color = "FF0000"
            title = (
                "🚨 [NEW] AWS Health reported an issue with the "
                + event['service'].upper() + " service in the "
                + event['region'].upper() + " region."
            )
        else:
            theme_color = "00ff00"
            title = (
                "✅ [RESOLVED] The AWS Health issue with the "
                + event['service'].upper() + " service in the "
                + event['region'].upper() + " region is now resolved."
            )
        facts = [
            {"name": "Account(s)", "value": affected_accounts},
            {"name": "Resource(s)", "value": affected_entities},
            {"name": "Service", "value": event['service']},
            {"name": "Region", "value": event['region']},
            {"name": "Start Time (UTC)", "value": cleanup_time(event['startTime'])},
        ]
        if event_type == "resolve":
            # Resolved events additionally carry an end time.
            facts.append({"name": "End Time (UTC)", "value": cleanup_time(event['endTime'])})
        facts += [
            {"name": "Status", "value": event['statusCode']},
            {"name": "Event ARN", "value": event['arn']},
            {"name": "Updates", "value": get_last_aws_update(event_details)},
        ]
        message = {
            "@type": "MessageCard",
            "@context": "http://schema.org/extensions",
            "themeColor": theme_color,
            "summary": "AWS Health Aware Alert",
            "sections": [
                {
                    "activityTitle": title,
                    "markdown": False,
                    "facts": facts,
                }
            ]
        }
    # Removed: unused `summary` variable and redundant str() around `title`.
    print("Message sent to Teams: ", message)
    return message
def get_org_message_for_teams(event_details, event_type, affected_org_accounts, affected_org_entities):
    """Build the organization-level Microsoft Teams MessageCard for an AWS Health event.

    :param event_details: describe_event_details_for_organization response
    :param event_type: "create" for a new event, "resolve" for a closed one;
        any other value yields an empty message
    :param affected_org_accounts: list of affected account ids (may be empty)
    :param affected_org_entities: list of affected resources (may be empty)
    :return: MessageCard dict, or "" for unknown event types
    """
    message = ""
    if len(affected_org_entities) >= 1:
        affected_org_entities = "\n".join(affected_org_entities)
    else:
        affected_org_entities = "All resources in region"
    if len(affected_org_accounts) >= 1:
        affected_org_accounts = "\n".join(affected_org_accounts)
    else:
        affected_org_accounts = "All accounts in region"
    if event_type in ("create", "resolve"):
        event = event_details['successfulSet'][0]['event']
        if event_type == "create":
            theme_color = "FF0000"
            title = (
                "🚨 [NEW] AWS Health reported an issue with the "
                + event['service'].upper() + " service in the "
                + event['region'].upper() + " region."
            )
        else:
            theme_color = "00ff00"
            title = (
                "✅ [RESOLVED] The AWS Health issue with the "
                + event['service'].upper() + " service in the "
                + event['region'].upper() + " region is now resolved."
            )
        facts = [
            {"name": "Account(s)", "value": affected_org_accounts},
            {"name": "Resource(s)", "value": affected_org_entities},
            {"name": "Service", "value": event['service']},
            {"name": "Region", "value": event['region']},
            {"name": "Start Time (UTC)", "value": cleanup_time(event['startTime'])},
        ]
        if event_type == "resolve":
            # Resolved events additionally carry an end time.
            facts.append({"name": "End Time (UTC)", "value": cleanup_time(event['endTime'])})
        facts += [
            {"name": "Status", "value": event['statusCode']},
            {"name": "Event ARN", "value": event['arn']},
            # Same value the original read directly from
            # eventDescription['latestDescription']; routed through the shared
            # helper for consistency with the other notifiers.
            {"name": "Updates", "value": get_last_aws_update(event_details)},
        ]
        message = {
            "@type": "MessageCard",
            "@context": "http://schema.org/extensions",
            "themeColor": theme_color,
            "summary": "AWS Health Aware Alert",
            "sections": [
                {
                    "activityTitle": title,
                    "markdown": False,
                    "facts": facts,
                }
            ]
        }
    # Bug fix: the original placed this print AFTER the return statement, so
    # it never executed; log before returning like the other notifiers do.
    print("Message sent to Teams: ", message)
    return message
def get_message_for_email(event_details, event_type, affected_accounts, affected_entities):
    """Render the HTML e-mail body for an AWS Health event.

    event_type "create" produces the incident notification; any other value
    produces the resolution notification.  Account/entity lists are joined
    with newlines, falling back to region-wide wording when empty or UNKNOWN.
    """
    if affected_entities:
        affected_entities = "\n".join(affected_entities)
        if affected_entities == "UNKNOWN":
            affected_entities = "All resources\nin region"
    else:
        affected_entities = "All resources\nin region"
    if affected_accounts:
        affected_accounts = "\n".join(affected_accounts)
    else:
        affected_accounts = "All accounts\nin region"
    event = event_details['successfulSet'][0]['event']
    latest_update = event_details['successfulSet'][0]['eventDescription']['latestDescription']
    if event_type == "create":
        body_html = f"""
        <html>
        <body>
        <h>Greetings from AWS Health Aware,</h><br>
        <p>There is an AWS incident that is in effect which may likely impact your resources. Here are the details:<br><br>
        <b>Account(s):</b> {affected_accounts}<br>
        <b>Resource(s):</b> {affected_entities}<br>
        <b>Service:</b> {event['service']}<br>
        <b>Region:</b> {event['region']}<br>
        <b>Start Time (UTC):</b> {cleanup_time(event['startTime'])}<br>
        <b>Status:</b> {event['statusCode']}<br>
        <b>Event ARN:</b> {event['arn']}<br>
        <b>Updates:</b> {latest_update}<br><br>
        For updates, please visit the <a href=https://status.aws.amazon.com>AWS Service Health Dashboard</a><br>
        If you are experiencing issues related to this event, please open an <a href=https://console.aws.amazon.com/support/home>AWS Support</a> case within your account.<br><br>
        Thanks, <br><br>AHA: AWS Health Aware
        </p>
        </body>
        </html>
        """
    else:
        body_html = f"""
        <html>
        <body>
        <h>Greetings again from AWS Health Aware,</h><br>
        <p>Good news! The AWS Health incident from earlier has now been marked as resolved.<br><br>
        <b>Account(s):</b> {affected_accounts}<br>
        <b>Resource(s):</b> {affected_entities}<br>
        <b>Service:</b> {event['service']}<br>
        <b>Region:</b> {event['region']}<br>
        <b>Start Time (UTC):</b> {cleanup_time(event['startTime'])}<br>
        <b>End Time (UTC):</b> {cleanup_time(event['endTime'])}<br>
        <b>Status:</b> {event['statusCode']}<br>
        <b>Event ARN:</b> {event['arn']}<br>
        <b>Updates:</b> {latest_update}<br><br>
        If you are still experiencing issues related to this event, please open an <a href=https://console.aws.amazon.com/support/home>AWS Support</a> case within your account.<br><br>
        <br><br>
        Thanks, <br><br>AHA: AWS Health Aware
        </p>
        </body>
        </html>
        """
    print("Message sent to Email: ", body_html)
    return body_html
def get_org_message_for_email(event_details, event_type, affected_org_accounts, affected_org_entities):
    """Render the organization-level HTML e-mail body for an AWS Health event.

    event_type "create" produces the incident notification; any other value
    produces the resolution notification.  Account/entity lists are joined
    with newlines, falling back to region-wide wording when empty.
    """
    if affected_org_entities:
        affected_org_entities = "\n".join(affected_org_entities)
    else:
        affected_org_entities = "All services related resources in region"
    if affected_org_accounts:
        affected_org_accounts = "\n".join(affected_org_accounts)
    else:
        affected_org_accounts = "All accounts in region"
    event = event_details['successfulSet'][0]['event']
    latest_update = event_details['successfulSet'][0]['eventDescription']['latestDescription']
    if event_type == "create":
        body_html = f"""
        <html>
        <body>
        <h>Greetings from AWS Health Aware,</h><br>
        <p>There is an AWS incident that is in effect which may likely impact your resources. Here are the details:<br><br>
        <b>Account(s):</b> {affected_org_accounts}<br>
        <b>Resource(s):</b> {affected_org_entities}<br>
        <b>Service:</b> {event['service']}<br>
        <b>Region:</b> {event['region']}<br>
        <b>Start Time (UTC):</b> {cleanup_time(event['startTime'])}<br>
        <b>Status:</b> {event['statusCode']}<br>
        <b>Event ARN:</b> {event['arn']}<br>
        <b>Updates:</b> {latest_update}<br><br>
        For updates, please visit the <a href=https://status.aws.amazon.com>AWS Service Health Dashboard</a><br>
        If you are experiencing issues related to this event, please open an <a href=https://console.aws.amazon.com/support/home>AWS Support</a> case within your account.<br><br>
        Thanks, <br><br>AHA: AWS Health Aware
        </p>
        </body>
        </html>
        """
    else:
        body_html = f"""
        <html>
        <body>
        <h>Greetings again from AWS Health Aware,</h><br>
        <p>Good news! The AWS Health incident from earlier has now been marked as resolved.<br><br>
        <b>Account(s):</b> {affected_org_accounts}<br>
        <b>Resource(s):</b> {affected_org_entities}<br>
        <b>Service:</b> {event['service']}<br>
        <b>Region:</b> {event['region']}<br>
        <b>Start Time (UTC):</b> {cleanup_time(event['startTime'])}<br>
        <b>End Time (UTC):</b> {cleanup_time(event['endTime'])}<br>
        <b>Status:</b> {event['statusCode']}<br>
        <b>Event ARN:</b> {event['arn']}<br>
        <b>Updates:</b> {latest_update}<br><br>
        If you are still experiencing issues related to this event, please open an <a href=https://console.aws.amazon.com/support/home>AWS Support</a> case within your account.<br><br>
        Thanks, <br><br>AHA: AWS Health Aware
        </p>
        </body>
        </html>
        """
    print("Message sent to Email: ", body_html)
    return body_html
def cleanup_time(event_time):
    """Normalize an AWS Health timestamp string for display.

    Parses the first 16 characters ("YYYY-MM-DD HH:MM") of the timestamp and
    re-renders it as "YYYY-MM-DD HH:MM:SS"; the seconds are always "00"
    because they are truncated on parse.  No timezone conversion is performed;
    the value is displayed as received.  (The previous docstring claimed an
    EST conversion and 12-hour formatting; the implementation never did
    either.)

    :param event_time: datetime string, e.g. "2023-05-01 12:34:56.000Z"
    :type event_time: str
    :return: formatted "YYYY-MM-DD HH:MM:00" string
    :rtype: str
    """
    parsed = datetime.strptime(event_time[:16], '%Y-%m-%d %H:%M')
    return parsed.strftime("%Y-%m-%d %H:%M:%S")
def get_last_aws_update(event_details):
    """Extract the most recent AWS status update from an event-details payload.

    :param event_details: detailed information about a specific AWS Health event
    :type event_details: dict
    :return: the latest description text published by AWS for the event
    :rtype: str
    """
    return event_details['successfulSet'][0]['eventDescription']['latestDescription']
def format_date(event_time):
    """Format an AWS Health timestamp for human-readable display.

    Parses the first 16 characters ("YYYY-MM-DD HH:MM") of the timestamp and
    renders them as e.g. "May 01, 2023 at 12:34 PM".  No timezone conversion
    is performed; the value is displayed as received.  (The previous docstring
    incorrectly claimed a conversion to EST.)

    :param event_time: datetime string
    :type event_time: str
    :return: formatted "Month DD, YYYY at HH:MM AM/PM" string
    :rtype: str
    """
    parsed = datetime.strptime(event_time[:16], '%Y-%m-%d %H:%M')
    return parsed.strftime('%B %d, %Y at %I:%M %p')
| 58.360563
| 290
| 0.530457
| 4,073
| 41,436
| 5.231279
| 0.052787
| 0.113202
| 0.198292
| 0.206223
| 0.976862
| 0.975736
| 0.972779
| 0.969447
| 0.953865
| 0.953865
| 0
| 0.008264
| 0.29916
| 41,436
| 709
| 291
| 58.442877
| 0.725448
| 0.024399
| 0
| 0.849145
| 0
| 0.049767
| 0.410857
| 0.080252
| 0
| 0
| 0
| 0
| 0
| 1
| 0.020218
| false
| 0
| 0.009331
| 0
| 0.049767
| 0.015552
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
5d22984dfa000c0d0693da7c3cb11ce310cdf10f
| 154
|
py
|
Python
|
qdserver/messaging/inapp.py
|
sipsop/qdserver
|
5b372242241eb765cfe2ee57203449717bb81ed2
|
[
"BSD-3-Clause"
] | null | null | null |
qdserver/messaging/inapp.py
|
sipsop/qdserver
|
5b372242241eb765cfe2ee57203449717bb81ed2
|
[
"BSD-3-Clause"
] | null | null | null |
qdserver/messaging/inapp.py
|
sipsop/qdserver
|
5b372242241eb765cfe2ee57203449717bb81ed2
|
[
"BSD-3-Clause"
] | null | null | null |
from qdserver import model
from .types import Message
def send_in_app_message(user_profile : model.UserProfile, message : Message):
    """Deliver *message* to *user_profile* through the in-app channel.

    Currently an unimplemented placeholder (no-op).
    """
    # TODO:
    pass
| 22
| 77
| 0.75974
| 21
| 154
| 5.380952
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.175325
| 154
| 6
| 78
| 25.666667
| 0.889764
| 0.032468
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 0
| 1
| 0.25
| false
| 0.25
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
5d27edb9c561e31ec2f0fd41a02aee8b7d26b9a2
| 198
|
py
|
Python
|
examples/dagster_examples/bay_bikes/gbfs/repository.py
|
rparrapy/dagster
|
453ebedb326eae48b5f8fa2a4a3811d94629dc6e
|
[
"Apache-2.0"
] | 1
|
2022-02-07T18:07:36.000Z
|
2022-02-07T18:07:36.000Z
|
examples/dagster_examples/bay_bikes/gbfs/repository.py
|
david-alexander-white/dagster
|
1c341500bb2380e14873b59b7e25503270188bda
|
[
"Apache-2.0"
] | null | null | null |
examples/dagster_examples/bay_bikes/gbfs/repository.py
|
david-alexander-white/dagster
|
1c341500bb2380e14873b59b7e25503270188bda
|
[
"Apache-2.0"
] | null | null | null |
from dagster import RepositoryDefinition
from .pipelines import download_gbfs_files
def gbfs_repository():
    """Return the Dagster RepositoryDefinition bundling the GBFS pipelines."""
    pipelines = [download_gbfs_files]
    return RepositoryDefinition('gbfs_repository', pipeline_defs=pipelines)
| 24.75
| 87
| 0.838384
| 22
| 198
| 7.227273
| 0.590909
| 0.150943
| 0.213836
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.10101
| 198
| 7
| 88
| 28.285714
| 0.893258
| 0
| 0
| 0
| 0
| 0
| 0.075758
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 8
|
5d3c9f4512950fc6ad2e3a6cf54c70fb90b548b6
| 4,540
|
py
|
Python
|
pyEX/premium/fraudfactors/__init__.py
|
jmailloux/pyEX
|
2101e8c53a9080ea8b00b28a758be441095d5048
|
[
"Apache-2.0"
] | null | null | null |
pyEX/premium/fraudfactors/__init__.py
|
jmailloux/pyEX
|
2101e8c53a9080ea8b00b28a758be441095d5048
|
[
"Apache-2.0"
] | null | null | null |
pyEX/premium/fraudfactors/__init__.py
|
jmailloux/pyEX
|
2101e8c53a9080ea8b00b28a758be441095d5048
|
[
"Apache-2.0"
] | null | null | null |
# *****************************************************************************
#
# Copyright (c) 2020, the pyEX authors.
#
# This file is part of the pyEX library, distributed under the terms of
# the Apache License 2.0. The full license can be found in the LICENSE file.
#
from functools import wraps
from ...common import _UTC, _expire
from ...stocks import timeSeries, timeSeriesDF
@_expire(hour=8, tz=_UTC)
def _base(id, symbol="", **kwargs):
    """internal: forward a premium time-series query with the given id/key."""
    key = symbol or kwargs.pop("key", "")
    kwargs.update(id=id, key=key)
    return timeSeries(**kwargs)
@_expire(hour=8, tz=_UTC)
def _baseDF(id, symbol="", **kwargs):
    """internal: forward a premium time-series query, returning a DataFrame."""
    key = symbol or kwargs.pop("key", "")
    kwargs.update(id=id, key=key)
    return timeSeriesDF(**kwargs)
@wraps(timeSeries)
def similarityIndex(symbol="", **kwargs):
    """Similarity Index time series.

    Quantifies the textual differences between a company's successive annual
    or quarterly filings on an "as disclosed" basis (e.g. the 2017 10-K vs the
    2016 10-K).  Firms breaking from routine phrasing in mandatory disclosures
    give clues about future performance; low similarity scores flag such
    significant changes, and research links a long-high/short-low portfolio to
    uncorrelated returns over 12-18 months.

    Data available from 2001 with coverage of about 23,000 equities.

    https://iexcloud.io/docs/api/#similiarity-index

    Args:
        symbol (str): symbol to use
    """
    return _base(symbol=symbol, id="PREMIUM_FRAUD_FACTORS_SIMILARITY_INDEX", **kwargs)
@wraps(timeSeries)
def similarityIndexDF(symbol="", **kwargs):
    """Similarity Index time series, returned as a DataFrame.

    Quantifies the textual differences between a company's successive annual
    or quarterly filings on an "as disclosed" basis (e.g. the 2017 10-K vs the
    2016 10-K).  Firms breaking from routine phrasing in mandatory disclosures
    give clues about future performance; low similarity scores flag such
    significant changes, and research links a long-high/short-low portfolio to
    uncorrelated returns over 12-18 months.

    Data available from 2001 with coverage of about 23,000 equities.

    https://iexcloud.io/docs/api/#similiarity-index

    Args:
        symbol (str): symbol to use
    """
    return _baseDF(symbol=symbol, id="PREMIUM_FRAUD_FACTORS_SIMILARITY_INDEX", **kwargs)
@wraps(timeSeries)
def nonTimelyFilings(symbol="", **kwargs):
    """Non-Timely (NT) filings time series.

    Records the date a firm files a Non-Timely notification with the SEC,
    which is required when it cannot deliver its annual or quarterly
    disclosures on time -- usually a red flag and thus a valuable signal for
    both algorithmic strategies and fundamental investing.

    Data available from 1994 with coverage of about 18,000 equities.

    https://iexcloud.io/docs/api/#non-timely-filings

    Args:
        symbol (str): symbol to use
    """
    return _base(symbol=symbol, id="PREMIUM_FRAUD_FACTORS_NON_TIMELY_FILINGS", **kwargs)
@wraps(timeSeries)
def nonTimelyFilingsDF(symbol="", **kwargs):
    """Non-Timely (NT) filings time series, returned as a DataFrame.

    Records the date a firm files a Non-Timely notification with the SEC,
    which is required when it cannot deliver its annual or quarterly
    disclosures on time -- usually a red flag and thus a valuable signal for
    both algorithmic strategies and fundamental investing.

    Data available from 1994 with coverage of about 18,000 equities.

    https://iexcloud.io/docs/api/#non-timely-filings

    Args:
        symbol (str): symbol to use
    """
    return _baseDF(symbol=symbol, id="PREMIUM_FRAUD_FACTORS_NON_TIMELY_FILINGS", **kwargs)
| 52.790698
| 332
| 0.735463
| 666
| 4,540
| 4.968468
| 0.273273
| 0.036265
| 0.025385
| 0.029012
| 0.892112
| 0.892112
| 0.875189
| 0.875189
| 0.875189
| 0.875189
| 0
| 0.027726
| 0.173789
| 4,540
| 85
| 333
| 53.411765
| 0.854439
| 0.721145
| 0
| 0.37037
| 0
| 0
| 0.153434
| 0.139161
| 0
| 0
| 0
| 0
| 0
| 1
| 0.222222
| false
| 0
| 0.111111
| 0
| 0.555556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 8
|
53a65669b13440b04d81827f1a5b8b9276af548b
| 96
|
py
|
Python
|
oil/utils.py
|
kbougy/oil
|
d0eb2836f1bc551a50a6dbdc9128bee43d305e39
|
[
"MIT"
] | 3
|
2017-11-05T00:02:03.000Z
|
2017-11-12T00:20:47.000Z
|
oil/utils.py
|
kbougy/oil
|
d0eb2836f1bc551a50a6dbdc9128bee43d305e39
|
[
"MIT"
] | 43
|
2017-10-29T21:06:00.000Z
|
2018-02-04T22:40:36.000Z
|
oil/utils.py
|
kbougy/oil
|
d0eb2836f1bc551a50a6dbdc9128bee43d305e39
|
[
"MIT"
] | 1
|
2020-01-07T06:51:57.000Z
|
2020-01-07T06:51:57.000Z
|
import arrow
def days_ago(dt_string):
    """Return the whole number of days elapsed between *dt_string* and now (UTC)."""
    elapsed = arrow.utcnow() - arrow.get(dt_string)
    return elapsed.days
| 16
| 55
| 0.71875
| 15
| 96
| 4.4
| 0.666667
| 0.242424
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.145833
| 96
| 5
| 56
| 19.2
| 0.804878
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
54fe2bc2379709f3df2f739c667a41f22d580d6e
| 5,788
|
py
|
Python
|
views.py
|
michael-arroyo/lammy-bot
|
d4c364d40da7743817b867d040281a770cb84a9e
|
[
"Apache-2.0"
] | 3
|
2021-10-14T18:20:26.000Z
|
2021-10-16T10:02:57.000Z
|
views.py
|
michael-arroyo/lammy-bot
|
d4c364d40da7743817b867d040281a770cb84a9e
|
[
"Apache-2.0"
] | 8
|
2021-10-19T22:47:58.000Z
|
2022-03-21T19:47:29.000Z
|
views.py
|
michael-arroyo/lammy-bot
|
d4c364d40da7743817b867d040281a770cb84a9e
|
[
"Apache-2.0"
] | 2
|
2021-03-30T22:54:12.000Z
|
2021-10-17T20:39:42.000Z
|
from nextcord import ButtonStyle, Interaction, SelectOption
from nextcord.ui import button, Button, Select, View
from datetime import datetime
from crud import session_scope
from embed_helper import NightmareHelper, SkillHelper, WeaponHelper
from models import Card, CardEvolution, DiscordMessage, Skill
class WeaponView(View):
    """Discord view with Devolve/Evolve buttons for paging through a weapon card's evolution chain."""

    def __init__(self):
        super().__init__()
        # A freshly posted card starts at its base form, so there is nothing to devolve to yet.
        self.devolve.disabled = True

    @button(label="Devolve", style=ButtonStyle.gray)
    async def devolve(self, button: Button, interaction: Interaction):
        """Step back to the previous evolution stage and re-render the embed."""
        with session_scope() as s:
            message_id = interaction.message.id
            message_meta_data = s.query(DiscordMessage).filter(DiscordMessage.message_id==message_id).first()
            # Renamed from `prev` only for symmetry with `next_id` below.
            prev_id = message_meta_data.prev
            weapon = s.query(Card).filter(Card.cardMstId==prev_id).first()
            helper = WeaponHelper(weapon)
            embed = helper.create_embed()
            # Look one step further back to decide whether devolving remains possible.
            devolution = s.query(CardEvolution).filter(CardEvolution.evolvedCardMstId==prev_id).first()
            message_meta_data.last_updated = datetime.now()
            # Shift the prev/curr/next window one step toward the base form.
            message_meta_data.next = message_meta_data.curr
            message_meta_data.curr = message_meta_data.prev
            message_meta_data.prev = devolution.cardMstId if devolution else None
            self.evolve.disabled = False
            # Disable the button when no earlier form exists (was an if/else on the same flag).
            self.devolve.disabled = devolution is None
            await interaction.response.edit_message(embed=embed, view=self)

    @button(label="Evolve", style=ButtonStyle.gray)
    async def evolve(self, button: Button, interaction: Interaction):
        """Advance to the next evolution stage and re-render the embed."""
        with session_scope() as s:
            message_id = interaction.message.id
            message_meta_data = s.query(DiscordMessage).filter(DiscordMessage.message_id==message_id).first()
            # Renamed from `next` to avoid shadowing the builtin.
            next_id = message_meta_data.next
            weapon = s.query(Card).filter(Card.cardMstId==next_id).first()
            helper = WeaponHelper(weapon)
            embed = helper.create_embed()
            # Look one step further forward to decide whether evolving remains possible.
            evolution = s.query(CardEvolution).filter(CardEvolution.cardMstId==next_id).first()
            message_meta_data.last_updated = datetime.now()
            # Shift the prev/curr/next window one step toward the final form.
            message_meta_data.prev = message_meta_data.curr
            message_meta_data.curr = message_meta_data.next
            message_meta_data.next = evolution.evolvedCardMstId if evolution else None
            self.devolve.disabled = False
            # Disable the button when no further form exists.
            self.evolve.disabled = evolution is None
            await interaction.response.edit_message(embed=embed, view=self)
class NightmareView(View):
    """Discord view with Devolve/Evolve buttons for paging through a nightmare card's evolution chain."""

    def __init__(self):
        super().__init__()
        # A freshly posted card starts at its base form, so there is nothing to devolve to yet.
        self.devolve.disabled = True

    @button(label="Devolve", style=ButtonStyle.gray)
    async def devolve(self, button: Button, interaction: Interaction):
        """Step back to the previous evolution stage and re-render the embed."""
        with session_scope() as s:
            message_id = interaction.message.id
            message_meta_data = s.query(DiscordMessage).filter(DiscordMessage.message_id==message_id).first()
            # Renamed from `prev` only for symmetry with `next_id` below.
            prev_id = message_meta_data.prev
            nightmare = s.query(Card).filter(Card.cardMstId==prev_id).first()
            helper = NightmareHelper(nightmare)
            embed = helper.create_embed()
            # Look one step further back to decide whether devolving remains possible.
            devolution = s.query(CardEvolution).filter(CardEvolution.evolvedCardMstId==prev_id).first()
            message_meta_data.last_updated = datetime.now()
            # Shift the prev/curr/next window one step toward the base form.
            message_meta_data.next = message_meta_data.curr
            message_meta_data.curr = message_meta_data.prev
            message_meta_data.prev = devolution.cardMstId if devolution else None
            self.evolve.disabled = False
            # Disable the button when no earlier form exists (was an if/else on the same flag).
            self.devolve.disabled = devolution is None
            await interaction.response.edit_message(embed=embed, view=self)

    @button(label="Evolve", style=ButtonStyle.gray)
    async def evolve(self, button: Button, interaction: Interaction):
        """Advance to the next evolution stage and re-render the embed."""
        with session_scope() as s:
            message_id = interaction.message.id
            message_meta_data = s.query(DiscordMessage).filter(DiscordMessage.message_id==message_id).first()
            # Renamed from `next` to avoid shadowing the builtin.
            next_id = message_meta_data.next
            nightmare = s.query(Card).filter(Card.cardMstId==next_id).first()
            helper = NightmareHelper(nightmare)
            embed = helper.create_embed()
            # Look one step further forward to decide whether evolving remains possible.
            evolution = s.query(CardEvolution).filter(CardEvolution.cardMstId==next_id).first()
            message_meta_data.last_updated = datetime.now()
            # Shift the prev/curr/next window one step toward the final form.
            message_meta_data.prev = message_meta_data.curr
            message_meta_data.curr = message_meta_data.next
            message_meta_data.next = evolution.evolvedCardMstId if evolution else None
            self.devolve.disabled = False
            # Disable the button when no further form exists.
            self.evolve.disabled = evolution is None
            await interaction.response.edit_message(embed=embed, view=self)
class SkillDropdown(Select):
    """Dropdown listing alternative SP-cost variants of a skill."""

    def __init__(self, options: list[SelectOption]):
        super().__init__(placeholder="Other SP Costs", min_values=1, max_values=1, options=options)

    async def callback(self, interaction: Interaction):
        """Render and display the embed for the skill picked in the dropdown."""
        with session_scope() as s:
            # NOTE(review): assumes the selected value's third whitespace-separated
            # token carries the skill id with exactly one trailing character to
            # strip — TODO confirm against the code that builds the options.
            uid = self.values[0].split()[2][:-1]
            # Category 4 is excluded; presumably a non-displayable skill type — verify.
            skill = s.query(Skill).filter(Skill.skillMstId==uid, Skill.category!=4).first()
            helper = SkillHelper(skill)
            embed = helper.create_embed()
            await interaction.response.edit_message(embed=embed)
class SkillView(View):
    """View whose only component is a caller-supplied skill dropdown."""

    def __init__(self, dropdown: Select):
        super().__init__()
        # Attach the dropdown as this view's single interactive item.
        self.add_item(dropdown)
| 37.830065
| 109
| 0.656012
| 638
| 5,788
| 5.744514
| 0.144201
| 0.096044
| 0.130969
| 0.041473
| 0.831105
| 0.831105
| 0.831105
| 0.799454
| 0.776535
| 0.728513
| 0
| 0.001384
| 0.251209
| 5,788
| 152
| 110
| 38.078947
| 0.844255
| 0
| 0
| 0.787037
| 0
| 0
| 0.006912
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037037
| false
| 0
| 0.055556
| 0
| 0.12963
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0702516c15e425849cdfe85e4eabb8e3239083e8
| 141
|
py
|
Python
|
shreyas_API/common_account/models/__init__.py
|
veerpratik/shreyas-app-api
|
81efeeabd2ff594efbad1e1fdf7d501fd0aad8f6
|
[
"MIT"
] | null | null | null |
shreyas_API/common_account/models/__init__.py
|
veerpratik/shreyas-app-api
|
81efeeabd2ff594efbad1e1fdf7d501fd0aad8f6
|
[
"MIT"
] | null | null | null |
shreyas_API/common_account/models/__init__.py
|
veerpratik/shreyas-app-api
|
81efeeabd2ff594efbad1e1fdf7d501fd0aad8f6
|
[
"MIT"
] | null | null | null |
from common_account.models.user import User
from common_account.models.vendor import Vendor
from common_account.models.project import Project
| 47
| 49
| 0.879433
| 21
| 141
| 5.761905
| 0.380952
| 0.247934
| 0.421488
| 0.570248
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078014
| 141
| 3
| 49
| 47
| 0.930769
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
070f02843cf1f454ac63ecf6ddd9b30a4e187573
| 1,362
|
py
|
Python
|
src/otp_messagebird/migrations/0005_throttling.py
|
arjan-s/django-otp-messagebird
|
ce39d53ca10cb62493fc3dd2d894d356ffafc35e
|
[
"BSD-2-Clause"
] | null | null | null |
src/otp_messagebird/migrations/0005_throttling.py
|
arjan-s/django-otp-messagebird
|
ce39d53ca10cb62493fc3dd2d894d356ffafc35e
|
[
"BSD-2-Clause"
] | null | null | null |
src/otp_messagebird/migrations/0005_throttling.py
|
arjan-s/django-otp-messagebird
|
ce39d53ca10cb62493fc3dd2d894d356ffafc35e
|
[
"BSD-2-Clause"
] | null | null | null |
# Generated by Django 3.0.7 on 2020-08-05 07:15
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add throttling bookkeeping fields to both MessageBird device models."""

    dependencies = [
        ('otp_messagebird', '0004_messagebirdvoicedevice_language'),
    ]

    operations = [
        # SMS device: successive-failure counter and timestamp of the last failure.
        migrations.AddField(
            model_name='messagebirdsmsdevice',
            name='throttling_failure_count',
            field=models.PositiveIntegerField(default=0, help_text='Number of successive failed attempts.'),
        ),
        migrations.AddField(
            model_name='messagebirdsmsdevice',
            name='throttling_failure_timestamp',
            field=models.DateTimeField(blank=True, default=None, help_text='A timestamp of the last failed verification attempt. Null if last attempt succeeded.', null=True),
        ),
        # Voice device: the same pair of fields.
        migrations.AddField(
            model_name='messagebirdvoicedevice',
            name='throttling_failure_count',
            field=models.PositiveIntegerField(default=0, help_text='Number of successive failed attempts.'),
        ),
        migrations.AddField(
            model_name='messagebirdvoicedevice',
            name='throttling_failure_timestamp',
            field=models.DateTimeField(blank=True, default=None, help_text='A timestamp of the last failed verification attempt. Null if last attempt succeeded.', null=True),
        ),
    ]
| 40.058824
| 174
| 0.669604
| 137
| 1,362
| 6.518248
| 0.408759
| 0.080627
| 0.103024
| 0.120941
| 0.808511
| 0.808511
| 0.808511
| 0.808511
| 0.653975
| 0.653975
| 0
| 0.020192
| 0.236417
| 1,362
| 33
| 175
| 41.272727
| 0.838462
| 0.03304
| 0
| 0.740741
| 1
| 0
| 0.365779
| 0.139924
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.037037
| 0
| 0.148148
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
073b4bd4ec1e4617bdebcc9aab2370488f3937e1
| 88
|
py
|
Python
|
bookorbooks/school/tests/__init__.py
|
talhakoylu/SummerInternshipBackend
|
4ecedf5c97f73e3d32d5a534769e86aac3e4b6d3
|
[
"MIT"
] | 1
|
2021-08-10T22:24:17.000Z
|
2021-08-10T22:24:17.000Z
|
bookorbooks/school/tests/__init__.py
|
talhakoylu/SummerInternshipBackend
|
4ecedf5c97f73e3d32d5a534769e86aac3e4b6d3
|
[
"MIT"
] | null | null | null |
bookorbooks/school/tests/__init__.py
|
talhakoylu/SummerInternshipBackend
|
4ecedf5c97f73e3d32d5a534769e86aac3e4b6d3
|
[
"MIT"
] | null | null | null |
from .school_tests import *
from .class_tests import *
from .student_list_tests import *
| 29.333333
| 33
| 0.806818
| 13
| 88
| 5.153846
| 0.538462
| 0.492537
| 0.447761
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 88
| 3
| 33
| 29.333333
| 0.87013
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
db46ebf71103988de1f2915d38e920f288650569
| 8,280
|
py
|
Python
|
tests/test_recursion.py
|
sobolevn/icontract
|
8dd3c236fd75f74fef722311e2de65d036b9c6a7
|
[
"MIT"
] | null | null | null |
tests/test_recursion.py
|
sobolevn/icontract
|
8dd3c236fd75f74fef722311e2de65d036b9c6a7
|
[
"MIT"
] | null | null | null |
tests/test_recursion.py
|
sobolevn/icontract
|
8dd3c236fd75f74fef722311e2de65d036b9c6a7
|
[
"MIT"
] | null | null | null |
# pylint: disable=missing-docstring
# pylint: disable=no-self-use
import unittest
from typing import List
import icontract
class TestPrecondition(unittest.TestCase):
    """Verify that preconditions calling mutually contracted functions do not recurse infinitely."""

    def test_ok(self) -> None:
        # Records the order in which the contracted function bodies are entered.
        order = []  # type: List[str]

        @icontract.require(lambda: another_func())  # pylint: disable=unnecessary-lambda
        @icontract.require(lambda: yet_another_func())  # pylint: disable=unnecessary-lambda
        def some_func() -> bool:
            order.append(some_func.__name__)
            return True

        @icontract.require(lambda: some_func())  # pylint: disable=unnecessary-lambda
        @icontract.require(lambda: yet_yet_another_func())  # pylint: disable=unnecessary-lambda
        def another_func() -> bool:
            order.append(another_func.__name__)
            return True

        def yet_another_func() -> bool:
            order.append(yet_another_func.__name__)
            return True

        def yet_yet_another_func() -> bool:
            order.append(yet_yet_another_func.__name__)
            return True

        some_func()
        # The exact order pins down how the contract machinery breaks the cycle.
        self.assertListEqual(['yet_another_func', 'yet_yet_another_func', 'some_func', 'another_func', 'some_func'],
                             order)

    def test_recover_after_exception(self) -> None:
        order = []  # type: List[str]
        some_func_should_raise = True

        class CustomError(Exception):
            pass

        @icontract.require(lambda: another_func())  # pylint: disable=unnecessary-lambda
        @icontract.require(lambda: yet_another_func())  # pylint: disable=unnecessary-lambda
        def some_func() -> bool:
            order.append(some_func.__name__)
            if some_func_should_raise:
                raise CustomError('some_func_should_raise')
            return True

        @icontract.require(lambda: some_func())  # pylint: disable=unnecessary-lambda
        @icontract.require(lambda: yet_yet_another_func())  # pylint: disable=unnecessary-lambda
        def another_func() -> bool:
            order.append(another_func.__name__)
            return True

        def yet_another_func() -> bool:
            order.append(yet_another_func.__name__)
            return True

        def yet_yet_another_func() -> bool:
            order.append(yet_yet_another_func.__name__)
            return True

        # First call raises from inside some_func; the recursion guard must be cleared.
        try:
            some_func()
        except CustomError:
            pass

        self.assertListEqual(['yet_another_func', 'yet_yet_another_func', 'some_func'], order)

        # Reset for the next experiment
        order = []
        some_func_should_raise = False
        some_func()
        # After the exception, a successful call must produce the same order as test_ok.
        self.assertListEqual(['yet_another_func', 'yet_yet_another_func', 'some_func', 'another_func', 'some_func'],
                             order)
class TestPostcondition(unittest.TestCase):
    """Verify that postconditions calling mutually contracted functions do not recurse infinitely."""

    def test_ok(self) -> None:
        # Records the order in which the contracted function bodies are entered.
        order = []  # type: List[str]
        another_func_should_raise = True

        class CustomError(Exception):
            pass

        @icontract.ensure(lambda: another_func())  # pylint: disable=unnecessary-lambda
        @icontract.ensure(lambda: yet_another_func())  # pylint: disable=unnecessary-lambda
        def some_func() -> bool:
            order.append(some_func.__name__)
            return True

        @icontract.ensure(lambda: some_func())  # pylint: disable=unnecessary-lambda
        @icontract.ensure(lambda: yet_yet_another_func())  # pylint: disable=unnecessary-lambda
        def another_func() -> bool:
            order.append(another_func.__name__)
            if another_func_should_raise:
                raise CustomError('some_func_should_raise')
            return True

        def yet_another_func() -> bool:
            order.append(yet_another_func.__name__)
            return True

        def yet_yet_another_func() -> bool:
            order.append(yet_yet_another_func.__name__)
            return True

        # First call raises from inside another_func (invoked via the postcondition).
        try:
            some_func()
        except CustomError:
            pass

        self.assertListEqual(['some_func', 'yet_another_func', 'another_func'], order)

        # Reset for the next experiments
        order = []
        another_func_should_raise = False
        some_func()
        # A successful call runs all postconditions exactly once each.
        self.assertListEqual(['some_func', 'yet_another_func', 'another_func', 'yet_yet_another_func', 'some_func'],
                             order)

    def test_recover_after_exception(self) -> None:
        order = []  # type: List[str]

        @icontract.ensure(lambda: another_func())  # pylint: disable=unnecessary-lambda
        @icontract.ensure(lambda: yet_another_func())  # pylint: disable=unnecessary-lambda
        def some_func() -> bool:
            order.append(some_func.__name__)
            return True

        @icontract.ensure(lambda: some_func())  # pylint: disable=unnecessary-lambda
        @icontract.ensure(lambda: yet_yet_another_func())  # pylint: disable=unnecessary-lambda
        def another_func() -> bool:
            order.append(another_func.__name__)
            return True

        def yet_another_func() -> bool:
            order.append(yet_another_func.__name__)
            return True

        def yet_yet_another_func() -> bool:
            order.append(yet_yet_another_func.__name__)
            return True

        some_func()
        self.assertListEqual(['some_func', 'yet_another_func', 'another_func', 'yet_yet_another_func', 'some_func'],
                             order)
class TestInvariant(unittest.TestCase):
    """Verify that class invariants calling member functions do not recurse infinitely."""

    def test_ok(self) -> None:
        # Records the order in which __init__ and the member functions are entered.
        order = []  # type: List[str]

        @icontract.invariant(lambda self: self.some_func())  # pylint: disable=no-member
        class SomeClass(icontract.DBC):
            def __init__(self) -> None:
                order.append('__init__')

            def some_func(self) -> bool:
                order.append('some_func')
                return True

            def another_func(self) -> bool:
                order.append('another_func')
                return True

        some_instance = SomeClass()
        # The invariant runs once after construction.
        self.assertListEqual(['__init__', 'some_func'], order)

        # Reset for the next experiment
        order = []
        some_instance.another_func()
        # The invariant runs before and after a public method call.
        self.assertListEqual(['some_func', 'another_func', 'some_func'], order)

    def test_recover_after_exception(self) -> None:
        order = []  # type: List[str]
        some_func_should_raise = False

        class CustomError(Exception):
            pass

        @icontract.invariant(lambda self: self.some_func())  # pylint: disable=no-member
        class SomeClass(icontract.DBC):
            def __init__(self) -> None:
                order.append('__init__')

            def some_func(self) -> bool:
                order.append('some_func')
                if some_func_should_raise:
                    raise CustomError('some_func_should_raise')
                return True

            def another_func(self) -> bool:
                order.append('another_func')
                return True

        some_instance = SomeClass()
        self.assertListEqual(['__init__', 'some_func'], order)

        # Reset for the next experiment
        order = []
        some_func_should_raise = True
        # The pre-call invariant check raises; the recursion guard must be cleared.
        try:
            some_instance.another_func()
        except CustomError:
            pass

        self.assertListEqual(['some_func'], order)

        # Reset for the next experiment
        order = []
        some_func_should_raise = False
        some_instance.another_func()
        self.assertListEqual(['some_func', 'another_func', 'some_func'], order)

    def test_member_function_call_in_constructor(self) -> None:
        order = []  # type: List[str]

        @icontract.invariant(lambda self: self.some_attribute > 0)  # pylint: disable=no-member
        class SomeClass(icontract.DBC):
            def __init__(self) -> None:
                order.append('__init__ enters')
                # Calling a member function from within __init__ must not
                # trigger the invariant before construction completes.
                self.some_attribute = self.some_func()
                order.append('__init__ exits')

            def some_func(self) -> int:
                order.append('some_func')
                return 3

        _ = SomeClass()
        self.assertListEqual(['__init__ enters', 'some_func', '__init__ exits'], order)
# Allow running this test module directly, outside of a test runner.
if __name__ == '__main__':
    unittest.main()
| 33.12
| 116
| 0.60471
| 879
| 8,280
| 5.311718
| 0.083049
| 0.150782
| 0.104948
| 0.061898
| 0.912401
| 0.898908
| 0.892482
| 0.892482
| 0.878775
| 0.854359
| 0
| 0.000342
| 0.294203
| 8,280
| 249
| 117
| 33.253012
| 0.798597
| 0.116184
| 0
| 0.869318
| 0
| 0
| 0.090198
| 0.009061
| 0
| 0
| 0
| 0
| 0.068182
| 1
| 0.176136
| false
| 0.034091
| 0.017045
| 0
| 0.363636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
db88654748eb293dd5bbb0f289114066a483dcb0
| 8,275
|
py
|
Python
|
tests/orbit/models/test_ktrlite.py
|
vishalbelsare/orbit
|
ff628e16d2a53d1fbc094b1a5b3fa5ac049b5172
|
[
"Apache-2.0"
] | 1
|
2021-06-29T14:19:56.000Z
|
2021-06-29T14:19:56.000Z
|
tests/orbit/models/test_ktrlite.py
|
vishalbelsare/orbit
|
ff628e16d2a53d1fbc094b1a5b3fa5ac049b5172
|
[
"Apache-2.0"
] | null | null | null |
tests/orbit/models/test_ktrlite.py
|
vishalbelsare/orbit
|
ff628e16d2a53d1fbc094b1a5b3fa5ac049b5172
|
[
"Apache-2.0"
] | null | null | null |
import pytest
import numpy as np
import pandas as pd
from orbit.estimators.stan_estimator import StanEstimatorMAP
from orbit.models.ktrlite import KTRLiteMAP
from orbit.diagnostics.metrics import smape
SMAPE_TOLERANCE = 0.5
@pytest.mark.parametrize(
    "seasonality_fs_order", [None, [5]],
    ids=['default_order', 'manual_order']
)
def test_ktrlite_single_seas(make_daily_data, seasonality_fs_order):
    """Fit KTRLiteMAP with a single yearly seasonality and sanity-check the prediction frame."""
    # The fixture's regression coefficients are not needed here; discard them.
    train_df, test_df, _ = make_daily_data
    ktrlite = KTRLiteMAP(
        response_col='response',
        date_col='date',
        seasonality=[365.25],
        seasonality_fs_order=seasonality_fs_order,
        estimator_type=StanEstimatorMAP
    )
    ktrlite.fit(train_df)
    predict_df = ktrlite.predict(test_df)
    expected_columns = ['date', 'prediction_5', 'prediction', 'prediction_95']
    expected_shape = (364, len(expected_columns))
    expected_num_parameters = 6
    assert predict_df.shape == expected_shape
    assert predict_df.columns.tolist() == expected_columns
    assert len(ktrlite._posterior_samples) == expected_num_parameters
    # Predictions must track the held-out response within the SMAPE budget.
    assert smape(test_df['response'].values, predict_df['prediction'].values) <= SMAPE_TOLERANCE
@pytest.mark.parametrize(
    "seasonality_fs_order", [None, [2, 5]],
    ids=['default_order', 'manual_order']
)
def test_ktrlite_dual_seas(make_daily_data, seasonality_fs_order):
    """Fit KTRLiteMAP with weekly + yearly seasonality and sanity-check the prediction frame."""
    # The fixture's regression coefficients are not needed here; discard them.
    train_df, test_df, _ = make_daily_data
    ktrlite = KTRLiteMAP(
        response_col='response',
        date_col='date',
        seasonality=[7, 365.25],
        seasonality_fs_order=seasonality_fs_order,
        estimator_type=StanEstimatorMAP
    )
    ktrlite.fit(train_df)
    predict_df = ktrlite.predict(test_df)
    expected_columns = ['date', 'prediction_5', 'prediction', 'prediction_95']
    expected_shape = (364, len(expected_columns))
    expected_num_parameters = 6
    assert predict_df.shape == expected_shape
    assert predict_df.columns.tolist() == expected_columns
    assert len(ktrlite._posterior_samples) == expected_num_parameters
    # Predictions must track the held-out response within the SMAPE budget.
    assert smape(test_df['response'].values, predict_df['prediction'].values) <= SMAPE_TOLERANCE
@pytest.mark.parametrize("span_level", [.05, .1, .5])
def test_ktrlite_span_level(make_daily_data, span_level):
    """Check that span_level controls the number of level knots as expected."""
    # The fixture's regression coefficients are not needed here; discard them.
    train_df, test_df, _ = make_daily_data
    ktrlite = KTRLiteMAP(
        response_col='response',
        date_col='date',
        seasonality=[7, 365.25],
        seasonality_fs_order=[2, 5],
        span_level=span_level,
        estimator_type=StanEstimatorMAP
    )
    ktrlite.fit(train_df)
    predict_df = ktrlite.predict(test_df)
    expected_columns = ['date', 'prediction_5', 'prediction', 'prediction_95']
    expected_shape = (364, len(expected_columns))
    expected_num_parameters = 6
    assert predict_df.shape == expected_shape
    assert predict_df.columns.tolist() == expected_columns
    assert len(ktrlite._posterior_samples) == expected_num_parameters
    assert smape(test_df['response'].values, predict_df['prediction'].values) <= SMAPE_TOLERANCE
    knots_df = ktrlite.get_level_knots()
    levels_df = ktrlite.get_levels()
    # A span of s yields round(1/s) knots; the levels cover every observation.
    assert knots_df.shape[0] == round(1/span_level)
    assert levels_df.shape[0] == ktrlite.num_of_observations
@pytest.mark.parametrize("level_knot_dates", [pd.date_range(start='2016-03-01', end='2019-01-01', freq='3M'),
                                              pd.date_range(start='2016-03-01', end='2019-01-01', freq='6M')])
def test_ktrlite_level_knot_dates(make_daily_data, level_knot_dates):
    """Check that user-supplied level knot dates are honored by the fitted model."""
    # The fixture's regression coefficients are not needed here; discard them.
    train_df, test_df, _ = make_daily_data
    ktrlite = KTRLiteMAP(
        response_col='response',
        date_col='date',
        seasonality=[7, 365.25],
        seasonality_fs_order=[2, 5],
        level_knot_dates=level_knot_dates,
        estimator_type=StanEstimatorMAP
    )
    ktrlite.fit(train_df)
    predict_df = ktrlite.predict(test_df)
    expected_columns = ['date', 'prediction_5', 'prediction', 'prediction_95']
    expected_shape = (364, len(expected_columns))
    expected_num_parameters = 6
    assert predict_df.shape == expected_shape
    assert predict_df.columns.tolist() == expected_columns
    assert len(ktrlite._posterior_samples) == expected_num_parameters
    assert smape(test_df['response'].values, predict_df['prediction'].values) <= SMAPE_TOLERANCE
    # The model must keep exactly the supplied knot dates, no more, no fewer.
    assert np.all(np.isin(ktrlite.level_knot_dates, level_knot_dates))
    assert len(ktrlite.level_knot_dates) == len(level_knot_dates)
@pytest.mark.parametrize("level_knot_length", [90, 120])
def test_ktrlite_level_knot_distance(make_daily_data, level_knot_length):
    """Smoke-test fitting/predicting with an explicit level knot spacing."""
    # The fixture's regression coefficients are not needed here; discard them.
    train_df, test_df, _ = make_daily_data
    ktrlite = KTRLiteMAP(
        response_col='response',
        date_col='date',
        seasonality=[7, 365.25],
        seasonality_fs_order=[2, 5],
        level_knot_length=level_knot_length,
        estimator_type=StanEstimatorMAP
    )
    ktrlite.fit(train_df)
    predict_df = ktrlite.predict(test_df)
    expected_columns = ['date', 'prediction_5', 'prediction', 'prediction_95']
    expected_shape = (364, len(expected_columns))
    expected_num_parameters = 6
    assert predict_df.shape == expected_shape
    assert predict_df.columns.tolist() == expected_columns
    assert len(ktrlite._posterior_samples) == expected_num_parameters
    assert smape(test_df['response'].values, predict_df['prediction'].values) <= SMAPE_TOLERANCE
@pytest.mark.parametrize("coefficients_knot_length", [90, 120])
def test_ktrlite_coef_knot_distance(make_daily_data, coefficients_knot_length):
    """Smoke-test fitting/predicting with an explicit coefficient knot spacing."""
    # The fixture's regression coefficients are not needed here; discard them.
    train_df, test_df, _ = make_daily_data
    ktrlite = KTRLiteMAP(
        response_col='response',
        date_col='date',
        seasonality=[7, 365.25],
        seasonality_fs_order=[2, 5],
        coefficients_knot_length=coefficients_knot_length,
        estimator_type=StanEstimatorMAP
    )
    ktrlite.fit(train_df)
    predict_df = ktrlite.predict(test_df)
    expected_columns = ['date', 'prediction_5', 'prediction', 'prediction_95']
    expected_shape = (364, len(expected_columns))
    expected_num_parameters = 6
    assert predict_df.shape == expected_shape
    assert predict_df.columns.tolist() == expected_columns
    assert len(ktrlite._posterior_samples) == expected_num_parameters
    assert smape(test_df['response'].values, predict_df['prediction'].values) <= SMAPE_TOLERANCE
def test_ktrlite_predict_decompose(make_daily_data):
    """Check the decomposed prediction frame: trend and per-seasonality columns with intervals."""
    # The fixture's regression coefficients are not needed here; discard them.
    train_df, test_df, _ = make_daily_data
    ktrlite = KTRLiteMAP(
        response_col='response',
        date_col='date',
        seasonality=[7, 365.25],
        seasonality_fs_order=[2, 5],
        estimator_type=StanEstimatorMAP
    )
    ktrlite.fit(train_df)
    predict_df = ktrlite.predict(test_df, decompose=True)
    # Each component (prediction, trend, each seasonality) carries 5/50/95 percentiles.
    expected_columns = ['date', 'prediction_5', 'prediction', 'prediction_95',
                        'trend_5', 'trend', 'trend_95',
                        'seasonality_7_5', 'seasonality_7', 'seasonality_7_95',
                        'seasonality_365.25_5', 'seasonality_365.25', 'seasonality_365.25_95']
    expected_shape = (364, len(expected_columns))
    expected_num_parameters = 6
    assert predict_df.shape == expected_shape
    assert predict_df.columns.tolist() == expected_columns
    assert len(ktrlite._posterior_samples) == expected_num_parameters
    assert smape(test_df['response'].values, predict_df['prediction'].values) <= SMAPE_TOLERANCE
def test_ktrlite_predict_decompose_point_estimate(make_daily_data):
    """Check that disabling bootstrap draws yields point estimates without percentile columns."""
    # The fixture's regression coefficients are not needed here; discard them.
    train_df, test_df, _ = make_daily_data
    ktrlite = KTRLiteMAP(
        response_col='response',
        date_col='date',
        seasonality=[7, 365.25],
        seasonality_fs_order=[2, 5],
        estimator_type=StanEstimatorMAP,
        # -1 disables bootstrap draws, so only point estimates are produced.
        n_bootstrap_draws=-1,
    )
    ktrlite.fit(train_df)
    predict_df = ktrlite.predict(test_df, decompose=True)
    expected_columns = ['date', 'prediction', 'trend', 'seasonality_7', 'seasonality_365.25']
    expected_shape = (364, len(expected_columns))
    expected_num_parameters = 6
    assert predict_df.shape == expected_shape
    assert predict_df.columns.tolist() == expected_columns
    assert len(ktrlite._posterior_samples) == expected_num_parameters
    assert smape(test_df['response'].values, predict_df['prediction'].values) <= SMAPE_TOLERANCE
| 36.135371
| 110
| 0.709728
| 1,022
| 8,275
| 5.389432
| 0.101761
| 0.052288
| 0.037763
| 0.018882
| 0.866376
| 0.837146
| 0.828613
| 0.806282
| 0.802106
| 0.787219
| 0
| 0.029776
| 0.180181
| 8,275
| 228
| 111
| 36.29386
| 0.782134
| 0
| 0
| 0.719101
| 0
| 0
| 0.10719
| 0.005438
| 0
| 0
| 0
| 0
| 0.202247
| 1
| 0.044944
| false
| 0
| 0.033708
| 0
| 0.078652
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
dbd0a0b3d058ea5d7b8a2a9c4fc3f2470ad13edb
| 43,373
|
py
|
Python
|
model_mtl.py
|
Ani0203/source_sep_Ani
|
40c461e6ba9ce01160368a515d6b9b6783f45be6
|
[
"MIT"
] | null | null | null |
model_mtl.py
|
Ani0203/source_sep_Ani
|
40c461e6ba9ce01160368a515d6b9b6783f45be6
|
[
"MIT"
] | null | null | null |
model_mtl.py
|
Ani0203/source_sep_Ani
|
40c461e6ba9ce01160368a515d6b9b6783f45be6
|
[
"MIT"
] | null | null | null |
from torch.nn import LSTM, Linear, BatchNorm1d, Parameter
import torch
import torch.nn as nn
import torch.nn.functional as F
class NoOp(nn.Module):
    """Identity module: returns its input unchanged (used to skip the transform stage)."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        # Pass-through; no parameters, no computation.
        return x
class STFT(nn.Module):
    """Multichannel STFT front-end.

    Input:  (nb_samples, nb_channels, nb_timesteps)
    Output: (nb_samples, nb_channels, nb_bins, nb_frames, 2)
    where nb_bins = n_fft // 2 + 1 and the trailing dimension holds (real, imag).
    """
    def __init__(
        self,
        n_fft=4096,
        n_hop=1024,
        center=False
    ):
        super(STFT, self).__init__()
        # Non-trainable parameter so the window follows .to(device)/.state_dict().
        self.window = nn.Parameter(
            torch.hann_window(n_fft),
            requires_grad=False
        )
        self.n_fft = n_fft
        self.n_hop = n_hop
        self.center = center

    def forward(self, x):
        """
        Input: (nb_samples, nb_channels, nb_timesteps)
        Output:(nb_samples, nb_channels, nb_bins, nb_frames, 2)
        """
        nb_samples, nb_channels, nb_timesteps = x.size()
        # merge nb_samples and nb_channels for multichannel stft
        x = x.reshape(nb_samples*nb_channels, -1)
        # compute stft with parameters as close as possible to scipy settings.
        # Modern torch requires return_complex=True; convert back to the legacy
        # (..., 2) real/imag layout so downstream code is unchanged.
        stft_f = torch.stft(
            x,
            n_fft=self.n_fft, hop_length=self.n_hop,
            window=self.window, center=self.center,
            normalized=False, onesided=True,
            pad_mode='reflect',
            return_complex=True
        )
        stft_f = torch.view_as_real(stft_f)
        # reshape back to channel dimension
        stft_f = stft_f.contiguous().view(
            nb_samples, nb_channels, self.n_fft // 2 + 1, -1, 2
        )
        return stft_f
class Spectrogram(nn.Module):
    """Turn a complex STFT tensor into a power/magnitude spectrogram.

    Input:  complex STFT as (nb_samples, nb_channels, nb_bins, nb_frames, 2)
    Output: (nb_frames, nb_samples, nb_channels, nb_bins)
    """
    def __init__(
        self,
        power=1,
        mono=False
    ):
        super(Spectrogram, self).__init__()
        self.power = power
        self.mono = mono

    def forward(self, stft_f):
        """Compute |z|**power per bin, optionally downmix to mono, and put frames first."""
        # swap the bins and frames axes: (samples, channels, frames, bins, 2)
        spec = stft_f.transpose(2, 3)
        # magnitude from (real, imag), raised to the configured power
        spec = spec.pow(2).sum(-1).pow(self.power / 2.0)
        # optional downmix to a single channel, done in the magnitude domain
        if self.mono:
            spec = torch.mean(spec, 1, keepdim=True)
        # frames-first layout for LSTM consumption
        return spec.permute(2, 0, 1, 3)
class OpenUnmix_mtl(nn.Module):
    """Multi-task OpenUnmix variant: a separation mask head plus a branched
    onset-detection head sharing the same LSTM trunk.

    forward() returns a tuple (masked spectrogram, per-frame onset sigmoid).
    """
    def __init__(
        self,
        n_fft=4096,
        n_hop=1024,
        input_is_spectrogram=False,
        hidden_size=512,
        nb_channels=2, #changed from stereo to mono
        sample_rate=44100, #changed sampling rate
        nb_layers=3,
        input_mean=None,
        input_scale=None,
        max_bin=None,
        unidirectional=False,
        power=1,
    ):
        """
        Input: (nb_samples, nb_channels, nb_timesteps)
            or (nb_frames, nb_samples, nb_channels, nb_bins)
        Output: Power/Mag Spectrogram
                (nb_frames, nb_samples, nb_channels, nb_bins)
        """
        super(OpenUnmix_mtl, self).__init__()
        # Full frequency resolution of the STFT; max_bin optionally crops the input.
        self.nb_output_bins = n_fft // 2 + 1
        if max_bin:
            self.nb_bins = max_bin
        else:
            self.nb_bins = self.nb_output_bins
        self.hidden_size = hidden_size
        self.stft = STFT(n_fft=n_fft, n_hop=n_hop)
        self.spec = Spectrogram(power=power, mono=(nb_channels == 1))
        # Buffer (not a parameter): saved with the model but never trained.
        self.register_buffer('sample_rate', torch.tensor(sample_rate))
        # If the caller already feeds spectrograms, skip the STFT+Spectrogram stage.
        if input_is_spectrogram:
            self.transform = NoOp()
        else:
            self.transform = nn.Sequential(self.stft, self.spec)
        # Encoder: project (channels * bins) down to hidden_size.
        self.fc1 = Linear(
            self.nb_bins*nb_channels, hidden_size,
            bias=False
        )
        self.bn1 = BatchNorm1d(hidden_size)
        # Bidirectional LSTM halves the per-direction size so the concatenated
        # output stays at hidden_size.
        if unidirectional:
            lstm_hidden_size = hidden_size
        else:
            lstm_hidden_size = hidden_size // 2
        self.lstm = LSTM(
            input_size=hidden_size,
            hidden_size=lstm_hidden_size,
            num_layers=nb_layers,
            bidirectional=not unidirectional,
            batch_first=False,
            dropout=0.4,
        )
        # Separation head: fc2/bn2 -> fc3/bn3 decode back to full bins.
        # in_features is hidden_size*2 because of the LSTM skip connection.
        self.fc2 = Linear(
            in_features=hidden_size*2,
            out_features=hidden_size,
            bias=False
        )
        self.bn2 = BatchNorm1d(hidden_size)
        self.fc3 = Linear(
            in_features=hidden_size,
            out_features=self.nb_output_bins*nb_channels,
            bias=False
        )
        self.bn3 = BatchNorm1d(self.nb_output_bins*nb_channels)
        # Onset-detection head: fc4/bn4 -> fc5/bn5 reduce to one score per frame.
        self.fc4 = Linear(
            in_features=hidden_size*2,
            out_features=hidden_size//2,
            bias=False
        )
        self.bn4 = BatchNorm1d(hidden_size//2)
        self.fc5 = Linear(
            in_features=hidden_size//2,
            out_features=1,
            bias=False
        )
        self.bn5 = BatchNorm1d(1)
        # Input normalization statistics (negated mean / reciprocal scale so the
        # forward pass can use += and *=); identity when not provided.
        if input_mean is not None:
            input_mean = torch.from_numpy(
                -input_mean[:self.nb_bins]
            ).float()
        else:
            input_mean = torch.zeros(self.nb_bins)
        if input_scale is not None:
            input_scale = torch.from_numpy(
                1.0/input_scale[:self.nb_bins]
            ).float()
        else:
            input_scale = torch.ones(self.nb_bins)
        self.input_mean = Parameter(input_mean)
        self.input_scale = Parameter(input_scale)
        # Learnable output de-normalization.
        self.output_scale = Parameter(
            torch.ones(self.nb_output_bins).float()
        )
        self.output_mean = Parameter(
            torch.ones(self.nb_output_bins).float()
        )

    def forward(self, x):
        """Return (masked spectrogram, onset scores) for waveform or spectrogram input."""
        # check for waveform or spectrogram
        # transform to spectrogram if (nb_samples, nb_channels, nb_timesteps)
        # and reduce feature dimensions, therefore we reshape
        x = self.transform(x)
        nb_frames, nb_samples, nb_channels, nb_bins = x.data.shape
        # Keep the unnormalized mixture to multiply the mask back onto.
        mix = x.detach().clone()
        # crop
        x = x[..., :self.nb_bins]
        # shift and scale input to mean=0 std=1 (across all bins)
        x += self.input_mean
        x *= self.input_scale
        # to (nb_frames*nb_samples, nb_channels*nb_bins)
        # and encode to (nb_frames*nb_samples, hidden_size)
        x = self.fc1(x.reshape(-1, nb_channels*self.nb_bins))
        #x = self.fc1(x.reshape(-1, self.nb_bins))
        # normalize every instance in a batch
        x = self.bn1(x)
        x = x.reshape(nb_frames, nb_samples, self.hidden_size)
        # squash range to [-1, 1]
        x = torch.tanh(x)
        # apply 3-layers of stacked LSTM
        lstm_out = self.lstm(x)
        # lstm skip connection: concatenate input and LSTM output along features
        x = torch.cat([x, lstm_out[0]], -1)
        ####### Branched onset detection layers #############
        # first dense layer + batch norm
        y = self.fc4(x.reshape(-1, x.shape[-1]))
        y = self.bn4(y)
        y = F.relu(y)
        # second dense layer + batch norm
        y = self.fc5(y)
        y = self.bn5(y)
        # per-frame onset probability-like score in (0, 1)
        y = torch.sigmoid(y)
        # Re-shape back to dims corresponding to num_frames and batch_size
        y = y.reshape(nb_samples, nb_frames, 1)
        ###################################################
        # first dense stage + batch norm
        x = self.fc2(x.reshape(-1, x.shape[-1]))
        x = self.bn2(x)
        x = F.relu(x)
        # second dense stage + layer norm
        x = self.fc3(x)
        x = self.bn3(x)
        # reshape back to original dim
        x = x.reshape(nb_frames, nb_samples, nb_channels, self.nb_output_bins)
        # apply output scaling
        x *= self.output_scale
        x += self.output_mean
        # since our output is non-negative, we can apply RELU
        x = F.relu(x) * mix
        return x, y
class OpenUnmix_mtl_short(nn.Module):
    """Open-Unmix separator with a *short* multi-task onset branch.

    The onset head is a single linear layer (fc4 -> bn4 -> sigmoid) hung
    off the LSTM skip connection, instead of the two-layer head used by
    the other MTL variants in this file.
    """

    def __init__(
        self,
        n_fft=4096,
        n_hop=1024,
        input_is_spectrogram=False,
        hidden_size=512,
        nb_channels=2,       # 2 = stereo, 1 = mono
        sample_rate=44100,
        nb_layers=3,
        input_mean=None,     # per-bin mean (numpy) for input standardisation
        input_scale=None,    # per-bin std (numpy) for input standardisation
        max_bin=None,        # optional bandwidth cap on modelled bins
        unidirectional=False,
        power=1,             # 1 = magnitude, 2 = power spectrogram
    ):
        """
        Input: (nb_samples, nb_channels, nb_timesteps)
            or (nb_frames, nb_samples, nb_channels, nb_bins)
        Output: Power/Mag Spectrogram
                (nb_frames, nb_samples, nb_channels, nb_bins)
        """
        super(OpenUnmix_mtl_short, self).__init__()
        # Full number of frequency bins produced by the STFT.
        self.nb_output_bins = n_fft // 2 + 1
        # Model only the lowest max_bin bins when a bandwidth cap is given.
        if max_bin:
            self.nb_bins = max_bin
        else:
            self.nb_bins = self.nb_output_bins
        self.hidden_size = hidden_size
        self.stft = STFT(n_fft=n_fft, n_hop=n_hop)
        self.spec = Spectrogram(power=power, mono=(nb_channels == 1))
        # Buffer (not a Parameter) so the rate is serialised with the model.
        self.register_buffer('sample_rate', torch.tensor(sample_rate))
        if input_is_spectrogram:
            self.transform = NoOp()
        else:
            self.transform = nn.Sequential(self.stft, self.spec)
        # Encoder: flattened (channels x bins) -> hidden_size.
        self.fc1 = Linear(
            self.nb_bins*nb_channels, hidden_size,
            bias=False
        )
        self.bn1 = BatchNorm1d(hidden_size)
        # Bidirectional LSTM halves the per-direction hidden size so the
        # concatenated output stays hidden_size wide.
        if unidirectional:
            lstm_hidden_size = hidden_size
        else:
            lstm_hidden_size = hidden_size // 2
        self.lstm = LSTM(
            input_size=hidden_size,
            hidden_size=lstm_hidden_size,
            num_layers=nb_layers,
            bidirectional=not unidirectional,
            batch_first=False,
            dropout=0.4,
        )
        # Separation decoder: skip-connected features -> output bins.
        self.fc2 = Linear(
            in_features=hidden_size*2,
            out_features=hidden_size,
            bias=False
        )
        self.bn2 = BatchNorm1d(hidden_size)
        self.fc3 = Linear(
            in_features=hidden_size,
            out_features=self.nb_output_bins*nb_channels,
            bias=False
        )
        self.bn3 = BatchNorm1d(self.nb_output_bins*nb_channels)
        # Short onset head: one linear layer straight to a scalar logit.
        self.fc4 = Linear(
            in_features=hidden_size*2,
            out_features=1,
            bias=False
        )
        # self.bn4 = BatchNorm1d(hidden_size//2)
        # self.fc5 = Linear(
        #     in_features=hidden_size//2,
        #     out_features=1,
        #     bias=False
        # )
        self.bn4 = BatchNorm1d(1)
        # Input statistics are stored negated/inverted so forward() can
        # standardise with in-place += / *=.
        if input_mean is not None:
            input_mean = torch.from_numpy(
                -input_mean[:self.nb_bins]
            ).float()
        else:
            input_mean = torch.zeros(self.nb_bins)
        if input_scale is not None:
            input_scale = torch.from_numpy(
                1.0/input_scale[:self.nb_bins]
            ).float()
        else:
            input_scale = torch.ones(self.nb_bins)
        self.input_mean = Parameter(input_mean)
        self.input_scale = Parameter(input_scale)
        # Learnable output affine; initialised to ones for both scale and
        # mean (inherited from upstream open-unmix — presumably intentional).
        self.output_scale = Parameter(
            torch.ones(self.nb_output_bins).float()
        )
        self.output_mean = Parameter(
            torch.ones(self.nb_output_bins).float()
        )

    def forward(self, x):
        """Return ``(mask * mix, onset_activation)``.

        Args:
            x: waveform (nb_samples, nb_channels, nb_timesteps) or
               spectrogram (nb_frames, nb_samples, nb_channels, nb_bins).
        """
        # Waveform -> spectrogram (NoOp if input already is one).
        x = self.transform(x)
        nb_frames, nb_samples, nb_channels, nb_bins = x.data.shape
        # Keep the raw mixture; the network output masks it at the end.
        mix = x.detach().clone()
        # Crop to the modelled frequency range.
        x = x[..., :self.nb_bins]
        # Standardise: input_mean is negated, input_scale is reciprocal.
        x += self.input_mean
        x *= self.input_scale
        # Flatten to (nb_frames*nb_samples, nb_channels*nb_bins), encode.
        x = self.fc1(x.reshape(-1, nb_channels*self.nb_bins))
        #x = self.fc1(x.reshape(-1, self.nb_bins))
        # Normalize every instance in a batch.
        x = self.bn1(x)
        x = x.reshape(nb_frames, nb_samples, self.hidden_size)
        # Squash range to [-1, 1].
        x = torch.tanh(x)
        # Stacked (bi)LSTM plus skip connection.
        lstm_out = self.lstm(x)
        x = torch.cat([x, lstm_out[0]], -1)
        ####### Branched onset detection layers #############
        # Single dense layer + batch norm, squashed to a probability.
        y = self.fc4(x.reshape(-1, x.shape[-1]))
        y = self.bn4(y)
        # y = F.relu(y)
        # #second dense layer + batch norm
        # y = self.fc5(y)
        # y = self.bn5(y)
        y = torch.sigmoid(y)
        # NOTE(review): rows are frame-major (nb_frames*nb_samples); this
        # reshape labels them (nb_samples, nb_frames, 1) without a
        # transpose — confirm onset targets use the same layout.
        y = y.reshape(nb_samples, nb_frames, 1)
        ###################################################
        # Separation decoder: two dense stages with batch norm.
        x = self.fc2(x.reshape(-1, x.shape[-1]))
        x = self.bn2(x)
        x = F.relu(x)
        x = self.fc3(x)
        x = self.bn3(x)
        # Back to (nb_frames, nb_samples, nb_channels, nb_output_bins).
        x = x.reshape(nb_frames, nb_samples, nb_channels, self.nb_output_bins)
        # Learned output scaling/offset.
        x *= self.output_scale
        x += self.output_mean
        # Non-negative mask applied to the mixture.
        x = F.relu(x) * mix
        return x, y
class OpenUnmix_mtl_conv(nn.Module):
    """Open-Unmix separator whose onset branch is convolutional.

    The onset head applies a temporal 1x7 convolution over the LSTM
    skip-connection features, average-pools across the feature axis, and
    maps the 32 conv channels to a per-frame onset probability.
    """

    def __init__(
        self,
        n_fft=4096,
        n_hop=1024,
        input_is_spectrogram=False,
        hidden_size=512,
        nb_channels=2,       # 2 = stereo, 1 = mono
        sample_rate=44100,
        nb_layers=3,
        input_mean=None,     # per-bin mean (numpy) for standardisation
        input_scale=None,    # per-bin std (numpy) for standardisation
        max_bin=None,        # optional bandwidth cap on modelled bins
        unidirectional=False,
        power=1,             # 1 = magnitude, 2 = power spectrogram
    ):
        """
        Input: (nb_samples, nb_channels, nb_timesteps)
            or (nb_frames, nb_samples, nb_channels, nb_bins)
        Output: Power/Mag Spectrogram
                (nb_frames, nb_samples, nb_channels, nb_bins)
        """
        super(OpenUnmix_mtl_conv, self).__init__()
        # Full number of STFT frequency bins.
        self.nb_output_bins = n_fft // 2 + 1
        if max_bin:
            self.nb_bins = max_bin
        else:
            self.nb_bins = self.nb_output_bins
        self.hidden_size = hidden_size
        self.stft = STFT(n_fft=n_fft, n_hop=n_hop)
        self.spec = Spectrogram(power=power, mono=(nb_channels == 1))
        # Buffer so the rate is serialised with the model.
        self.register_buffer('sample_rate', torch.tensor(sample_rate))
        if input_is_spectrogram:
            self.transform = NoOp()
        else:
            self.transform = nn.Sequential(self.stft, self.spec)
        # Encoder: flattened (channels x bins) -> hidden_size.
        self.fc1 = Linear(
            self.nb_bins*nb_channels, hidden_size,
            bias=False
        )
        self.bn1 = BatchNorm1d(hidden_size)
        # Bidirectional LSTM halves per-direction width.
        if unidirectional:
            lstm_hidden_size = hidden_size
        else:
            lstm_hidden_size = hidden_size // 2
        self.lstm = LSTM(
            input_size=hidden_size,
            hidden_size=lstm_hidden_size,
            num_layers=nb_layers,
            bidirectional=not unidirectional,
            batch_first=False,
            dropout=0.4,
        )
        # Separation decoder.
        self.fc2 = Linear(
            in_features=hidden_size*2,
            out_features=hidden_size,
            bias=False
        )
        self.bn2 = BatchNorm1d(hidden_size)
        self.fc3 = Linear(
            in_features=hidden_size,
            out_features=self.nb_output_bins*nb_channels,
            bias=False
        )
        self.bn3 = BatchNorm1d(self.nb_output_bins*nb_channels)
        ## Convolutional onset head ##
        # 1x7 temporal conv (padding keeps the frame count), then average
        # over the hidden_size*2 feature axis, then 32 -> 1.
        self.conv = torch.nn.Conv2d(1, 32, kernel_size=(1,7), stride=1, padding=(0,3))
        self.pool = torch.nn.AvgPool2d(kernel_size=(hidden_size*2,1))
        self.fc4 = Linear(in_features=32, out_features=1, bias=False)
        self.bn4 = BatchNorm1d(1)
        ###################
        # self.fc4 = Linear(
        #     in_features=hidden_size*2,
        #     out_features=hidden_size//2,
        #     bias=False
        # )
        # self.bn4 = BatchNorm1d(hidden_size//2)
        # self.fc5 = Linear(
        #     in_features=hidden_size//2,
        #     out_features=1,
        #     bias=False
        # )
        # self.bn5 = BatchNorm1d(1)
        # Input statistics stored negated/inverted for in-place += / *=.
        if input_mean is not None:
            input_mean = torch.from_numpy(
                -input_mean[:self.nb_bins]
            ).float()
        else:
            input_mean = torch.zeros(self.nb_bins)
        if input_scale is not None:
            input_scale = torch.from_numpy(
                1.0/input_scale[:self.nb_bins]
            ).float()
        else:
            input_scale = torch.ones(self.nb_bins)
        self.input_mean = Parameter(input_mean)
        self.input_scale = Parameter(input_scale)
        # Learnable output affine; both initialised to ones (inherited
        # from upstream open-unmix).
        self.output_scale = Parameter(
            torch.ones(self.nb_output_bins).float()
        )
        self.output_mean = Parameter(
            torch.ones(self.nb_output_bins).float()
        )

    def forward(self, x):
        """Return ``(mask * mix, onset_activation)``."""
        # Waveform -> spectrogram (NoOp if input already is one).
        x = self.transform(x)
        nb_frames, nb_samples, nb_channels, nb_bins = x.data.shape
        # Raw mixture kept for final masking.
        mix = x.detach().clone()
        # Crop to modelled frequency range.
        x = x[..., :self.nb_bins]
        # Standardise (input_mean is negated, input_scale reciprocal).
        x += self.input_mean
        x *= self.input_scale
        # Flatten and encode to (nb_frames*nb_samples, hidden_size).
        x = self.fc1(x.reshape(-1, nb_channels*self.nb_bins))
        #x = self.fc1(x.reshape(-1, self.nb_bins))
        x = self.bn1(x)
        x = x.reshape(nb_frames, nb_samples, self.hidden_size)
        # Squash range to [-1, 1].
        x = torch.tanh(x)
        # Stacked (bi)LSTM plus skip connection.
        lstm_out = self.lstm(x)
        x = torch.cat([x, lstm_out[0]], -1)
        ####### Branched onset detection layers #############
        # #first dense layer + batch norm
        # y = self.fc4(x.reshape(-1, x.shape[-1]))
        # y = self.bn4(y)
        # y = F.relu(y)
        # #second dense layer + batch norm
        # y = self.fc5(y)
        # y = self.bn5(y)
        # y = torch.sigmoid(y)
        # Conv onset head.
        # (nb_frames, nb_samples, F) -> (nb_samples, F, nb_frames).
        y = x.permute(1,2,0)
        # Add a singleton conv-channel axis: (nb_samples, 1, F, nb_frames).
        y = y[:,None,:,:]
        y = self.conv(y)
        # Average over the feature axis -> (nb_samples, 32, 1, nb_frames).
        y = self.pool(y)
        # (nb_frames, nb_samples, 32, 1), flatten frame-major to (-1, 32).
        y = y.permute(3,0,1,2)
        y = y.reshape(-1, y.shape[-2])
        y = self.fc4(y)
        y = self.bn4(y)
        y = torch.sigmoid(y)
        # NOTE(review): rows are frame-major; reshaping to
        # (nb_samples, nb_frames, 1) relabels axes without a transpose —
        # confirm onset targets use the same layout.
        y = y.reshape(nb_samples, nb_frames, 1)
        ###################################################
        # Separation decoder (kept in `z` so the onset branch above can
        # use the untouched skip features in `x`).
        z = self.fc2(x.reshape(-1, x.shape[-1]))
        z = self.bn2(z)
        z = F.relu(z)
        z = self.fc3(z)
        z = self.bn3(z)
        # Back to (nb_frames, nb_samples, nb_channels, nb_output_bins).
        z = z.reshape(nb_frames, nb_samples, nb_channels, self.nb_output_bins)
        # Learned output scaling/offset.
        z *= self.output_scale
        z += self.output_mean
        # Non-negative mask applied to the mixture.
        z = F.relu(z) * mix
        return z, y
class OpenUnmix_mtl_conv_late(nn.Module):
    """Open-Unmix separator with a convolutional onset branch taken *late*.

    Unlike ``OpenUnmix_mtl_conv``, the onset head branches off after the
    first separation dense stage (fc2/bn2/relu), so it pools over
    ``hidden_size`` features instead of ``hidden_size*2``.
    """

    def __init__(
        self,
        n_fft=4096,
        n_hop=1024,
        input_is_spectrogram=False,
        hidden_size=512,
        nb_channels=2,       # 2 = stereo, 1 = mono
        sample_rate=44100,
        nb_layers=3,
        input_mean=None,     # per-bin mean (numpy) for standardisation
        input_scale=None,    # per-bin std (numpy) for standardisation
        max_bin=None,        # optional bandwidth cap on modelled bins
        unidirectional=False,
        power=1,             # 1 = magnitude, 2 = power spectrogram
    ):
        """
        Input: (nb_samples, nb_channels, nb_timesteps)
            or (nb_frames, nb_samples, nb_channels, nb_bins)
        Output: Power/Mag Spectrogram
                (nb_frames, nb_samples, nb_channels, nb_bins)
        """
        super(OpenUnmix_mtl_conv_late, self).__init__()
        # Full number of STFT frequency bins.
        self.nb_output_bins = n_fft // 2 + 1
        if max_bin:
            self.nb_bins = max_bin
        else:
            self.nb_bins = self.nb_output_bins
        self.hidden_size = hidden_size
        self.stft = STFT(n_fft=n_fft, n_hop=n_hop)
        self.spec = Spectrogram(power=power, mono=(nb_channels == 1))
        # Buffer so the rate is serialised with the model.
        self.register_buffer('sample_rate', torch.tensor(sample_rate))
        if input_is_spectrogram:
            self.transform = NoOp()
        else:
            self.transform = nn.Sequential(self.stft, self.spec)
        # Encoder: flattened (channels x bins) -> hidden_size.
        self.fc1 = Linear(
            self.nb_bins*nb_channels, hidden_size,
            bias=False
        )
        self.bn1 = BatchNorm1d(hidden_size)
        # Bidirectional LSTM halves per-direction width.
        if unidirectional:
            lstm_hidden_size = hidden_size
        else:
            lstm_hidden_size = hidden_size // 2
        self.lstm = LSTM(
            input_size=hidden_size,
            hidden_size=lstm_hidden_size,
            num_layers=nb_layers,
            bidirectional=not unidirectional,
            batch_first=False,
            dropout=0.4,
        )
        # Separation decoder.
        self.fc2 = Linear(
            in_features=hidden_size*2,
            out_features=hidden_size,
            bias=False
        )
        self.bn2 = BatchNorm1d(hidden_size)
        self.fc3 = Linear(
            in_features=hidden_size,
            out_features=self.nb_output_bins*nb_channels,
            bias=False
        )
        self.bn3 = BatchNorm1d(self.nb_output_bins*nb_channels)
        ## Convolutional onset head ##
        # Pools over hidden_size features (the fc2 output width), not
        # hidden_size*2 as in OpenUnmix_mtl_conv.
        self.conv = torch.nn.Conv2d(1, 32, kernel_size=(1,7), stride=1, padding=(0,3))
        self.pool = torch.nn.AvgPool2d(kernel_size=(hidden_size,1))
        self.fc4 = Linear(in_features=32, out_features=1, bias=False)
        self.bn4 = BatchNorm1d(1)
        ###################
        # self.fc4 = Linear(
        #     in_features=hidden_size*2,
        #     out_features=hidden_size//2,
        #     bias=False
        # )
        # self.bn4 = BatchNorm1d(hidden_size//2)
        # self.fc5 = Linear(
        #     in_features=hidden_size//2,
        #     out_features=1,
        #     bias=False
        # )
        # self.bn5 = BatchNorm1d(1)
        # Input statistics stored negated/inverted for in-place += / *=.
        if input_mean is not None:
            input_mean = torch.from_numpy(
                -input_mean[:self.nb_bins]
            ).float()
        else:
            input_mean = torch.zeros(self.nb_bins)
        if input_scale is not None:
            input_scale = torch.from_numpy(
                1.0/input_scale[:self.nb_bins]
            ).float()
        else:
            input_scale = torch.ones(self.nb_bins)
        self.input_mean = Parameter(input_mean)
        self.input_scale = Parameter(input_scale)
        # Learnable output affine; both initialised to ones (inherited
        # from upstream open-unmix).
        self.output_scale = Parameter(
            torch.ones(self.nb_output_bins).float()
        )
        self.output_mean = Parameter(
            torch.ones(self.nb_output_bins).float()
        )

    def forward(self, x):
        """Return ``(mask * mix, onset_activation)``."""
        # Waveform -> spectrogram (NoOp if input already is one).
        x = self.transform(x)
        nb_frames, nb_samples, nb_channels, nb_bins = x.data.shape
        # Raw mixture kept for final masking.
        mix = x.detach().clone()
        # Crop to modelled frequency range.
        x = x[..., :self.nb_bins]
        # Standardise (input_mean is negated, input_scale reciprocal).
        x += self.input_mean
        x *= self.input_scale
        # Flatten and encode to (nb_frames*nb_samples, hidden_size).
        x = self.fc1(x.reshape(-1, nb_channels*self.nb_bins))
        #x = self.fc1(x.reshape(-1, self.nb_bins))
        x = self.bn1(x)
        x = x.reshape(nb_frames, nb_samples, self.hidden_size)
        # Squash range to [-1, 1].
        x = torch.tanh(x)
        # Stacked (bi)LSTM plus skip connection.
        lstm_out = self.lstm(x)
        x = torch.cat([x, lstm_out[0]], -1)
        # First separation dense stage + batch norm; the onset branch
        # taps the result ("late" branching).
        x = self.fc2(x.reshape(-1, x.shape[-1]))
        x = self.bn2(x)
        x = F.relu(x)
        ####### Branched onset detection layers #############
        # #first dense layer + batch norm
        # y = self.fc4(x.reshape(-1, x.shape[-1]))
        # y = self.bn4(y)
        # y = F.relu(y)
        # #second dense layer + batch norm
        # y = self.fc5(y)
        # y = self.bn5(y)
        # y = torch.sigmoid(y)
        # Conv onset head on the fc2 features.
        # Rows were frame-major, so this restores (frames, samples, F).
        y = x.reshape(nb_frames, nb_samples, x.shape[-1])
        # (nb_samples, F, nb_frames), then add conv-channel axis.
        y = y.permute(1,2,0)
        y = y[:,None,:,:]
        y = self.conv(y)
        # Average over the feature axis -> (nb_samples, 32, 1, nb_frames).
        y = self.pool(y)
        # (nb_frames, nb_samples, 32, 1), flatten frame-major to (-1, 32).
        y = y.permute(3,0,1,2)
        y = y.reshape(-1, y.shape[-2])
        y = self.fc4(y)
        y = self.bn4(y)
        y = torch.sigmoid(y)
        # NOTE(review): rows are frame-major; reshaping to
        # (nb_samples, nb_frames, 1) relabels axes without a transpose —
        # confirm onset targets use the same layout.
        y = y.reshape(nb_samples, nb_frames, 1)
        ###################################################
        # Second separation dense stage + batch norm.
        x = self.fc3(x)
        x = self.bn3(x)
        # Back to (nb_frames, nb_samples, nb_channels, nb_output_bins).
        x = x.reshape(nb_frames, nb_samples, nb_channels, self.nb_output_bins)
        # Learned output scaling/offset.
        x *= self.output_scale
        x += self.output_mean
        # Non-negative mask applied to the mixture.
        x = F.relu(x) * mix
        return x, y
class OpenUnmix_mtl_conv_latest(nn.Module):
    """Open-Unmix separator with the convolutional onset branch at the end.

    The onset head branches off after the full separation decoder
    (fc2/bn2 and fc3/bn3), so it pools over the decoded
    ``nb_output_bins*nb_channels`` features — the latest branch point of
    the conv MTL variants in this file.
    """

    def __init__(
        self,
        n_fft=4096,
        n_hop=1024,
        input_is_spectrogram=False,
        hidden_size=512,
        nb_channels=2,       # 2 = stereo, 1 = mono
        sample_rate=44100,
        nb_layers=3,
        input_mean=None,     # per-bin mean (numpy) for standardisation
        input_scale=None,    # per-bin std (numpy) for standardisation
        max_bin=None,        # optional bandwidth cap on modelled bins
        unidirectional=False,
        power=1,             # 1 = magnitude, 2 = power spectrogram
    ):
        """
        Input: (nb_samples, nb_channels, nb_timesteps)
            or (nb_frames, nb_samples, nb_channels, nb_bins)
        Output: Power/Mag Spectrogram
                (nb_frames, nb_samples, nb_channels, nb_bins)
        """
        super(OpenUnmix_mtl_conv_latest, self).__init__()
        # Full number of STFT frequency bins.
        self.nb_output_bins = n_fft // 2 + 1
        if max_bin:
            self.nb_bins = max_bin
        else:
            self.nb_bins = self.nb_output_bins
        self.hidden_size = hidden_size
        self.stft = STFT(n_fft=n_fft, n_hop=n_hop)
        self.spec = Spectrogram(power=power, mono=(nb_channels == 1))
        # Buffer so the rate is serialised with the model.
        self.register_buffer('sample_rate', torch.tensor(sample_rate))
        if input_is_spectrogram:
            self.transform = NoOp()
        else:
            self.transform = nn.Sequential(self.stft, self.spec)
        # Encoder: flattened (channels x bins) -> hidden_size.
        self.fc1 = Linear(
            self.nb_bins*nb_channels, hidden_size,
            bias=False
        )
        self.bn1 = BatchNorm1d(hidden_size)
        # Bidirectional LSTM halves per-direction width.
        if unidirectional:
            lstm_hidden_size = hidden_size
        else:
            lstm_hidden_size = hidden_size // 2
        self.lstm = LSTM(
            input_size=hidden_size,
            hidden_size=lstm_hidden_size,
            num_layers=nb_layers,
            bidirectional=not unidirectional,
            batch_first=False,
            dropout=0.4,
        )
        # Separation decoder.
        self.fc2 = Linear(
            in_features=hidden_size*2,
            out_features=hidden_size,
            bias=False
        )
        self.bn2 = BatchNorm1d(hidden_size)
        self.fc3 = Linear(
            in_features=hidden_size,
            out_features=self.nb_output_bins*nb_channels,
            bias=False
        )
        self.bn3 = BatchNorm1d(self.nb_output_bins*nb_channels)
        ## Convolutional onset head ##
        # Pools over the decoder output width (nb_output_bins*nb_channels).
        self.conv = torch.nn.Conv2d(1, 32, kernel_size=(1,7), stride=1, padding=(0,3))
        self.pool = torch.nn.AvgPool2d(kernel_size=(self.nb_output_bins*nb_channels,1))
        self.fc4 = Linear(in_features=32, out_features=1, bias=False)
        self.bn4 = BatchNorm1d(1)
        ###################
        # self.fc4 = Linear(
        #     in_features=hidden_size*2,
        #     out_features=hidden_size//2,
        #     bias=False
        # )
        # self.bn4 = BatchNorm1d(hidden_size//2)
        # self.fc5 = Linear(
        #     in_features=hidden_size//2,
        #     out_features=1,
        #     bias=False
        # )
        # self.bn5 = BatchNorm1d(1)
        # Input statistics stored negated/inverted for in-place += / *=.
        if input_mean is not None:
            input_mean = torch.from_numpy(
                -input_mean[:self.nb_bins]
            ).float()
        else:
            input_mean = torch.zeros(self.nb_bins)
        if input_scale is not None:
            input_scale = torch.from_numpy(
                1.0/input_scale[:self.nb_bins]
            ).float()
        else:
            input_scale = torch.ones(self.nb_bins)
        self.input_mean = Parameter(input_mean)
        self.input_scale = Parameter(input_scale)
        # Learnable output affine; both initialised to ones (inherited
        # from upstream open-unmix).
        self.output_scale = Parameter(
            torch.ones(self.nb_output_bins).float()
        )
        self.output_mean = Parameter(
            torch.ones(self.nb_output_bins).float()
        )

    def forward(self, x):
        """Return ``(mask * mix, onset_activation)``."""
        # Waveform -> spectrogram (NoOp if input already is one).
        x = self.transform(x)
        nb_frames, nb_samples, nb_channels, nb_bins = x.data.shape
        # Raw mixture kept for final masking.
        mix = x.detach().clone()
        # Crop to modelled frequency range.
        x = x[..., :self.nb_bins]
        # Standardise (input_mean is negated, input_scale reciprocal).
        x += self.input_mean
        x *= self.input_scale
        # Flatten and encode to (nb_frames*nb_samples, hidden_size).
        x = self.fc1(x.reshape(-1, nb_channels*self.nb_bins))
        #x = self.fc1(x.reshape(-1, self.nb_bins))
        x = self.bn1(x)
        x = x.reshape(nb_frames, nb_samples, self.hidden_size)
        # Squash range to [-1, 1].
        x = torch.tanh(x)
        # Stacked (bi)LSTM plus skip connection.
        lstm_out = self.lstm(x)
        x = torch.cat([x, lstm_out[0]], -1)
        # Full separation decoder before branching ("latest" variant).
        x = self.fc2(x.reshape(-1, x.shape[-1]))
        x = self.bn2(x)
        x = F.relu(x)
        x = self.fc3(x)
        x = self.bn3(x)
        ####### Branched onset detection layers #############
        # #first dense layer + batch norm
        # y = self.fc4(x.reshape(-1, x.shape[-1]))
        # y = self.bn4(y)
        # y = F.relu(y)
        # #second dense layer + batch norm
        # y = self.fc5(y)
        # y = self.bn5(y)
        # y = torch.sigmoid(y)
        # Conv onset head on the decoded spectrogram features.
        # Rows were frame-major, so this restores (frames, samples, F).
        y = x.reshape(nb_frames, nb_samples, x.shape[-1])
        # (nb_samples, F, nb_frames), then add conv-channel axis.
        y = y.permute(1,2,0)
        y = y[:,None,:,:]
        y = self.conv(y)
        # Average over the feature axis -> (nb_samples, 32, 1, nb_frames).
        y = self.pool(y)
        # (nb_frames, nb_samples, 32, 1), flatten frame-major to (-1, 32).
        y = y.permute(3,0,1,2)
        y = y.reshape(-1, y.shape[-2])
        y = self.fc4(y)
        y = self.bn4(y)
        y = torch.sigmoid(y)
        # NOTE(review): rows are frame-major; reshaping to
        # (nb_samples, nb_frames, 1) relabels axes without a transpose —
        # confirm onset targets use the same layout.
        y = y.reshape(nb_samples, nb_frames, 1)
        ###################################################
        # Back to (nb_frames, nb_samples, nb_channels, nb_output_bins).
        x = x.reshape(nb_frames, nb_samples, nb_channels, self.nb_output_bins)
        # Learned output scaling/offset.
        x *= self.output_scale
        x += self.output_mean
        # Non-negative mask applied to the mixture.
        x = F.relu(x) * mix
        return x, y
class OpenUnmix_mtl_cross(nn.Module):
    """Open-Unmix separator with a cross-task onset gate.

    A two-layer onset head (fc4/bn4 -> fc5/bn5 -> sigmoid) predicts a
    per-frame onset probability, and that probability multiplicatively
    gates the separation output (``x = x*y``) before the final reshape.
    """

    def __init__(
        self,
        n_fft=4096,
        n_hop=1024,
        input_is_spectrogram=False,
        hidden_size=512,
        nb_channels=2,       # 2 = stereo, 1 = mono
        sample_rate=44100,
        nb_layers=3,
        input_mean=None,     # per-bin mean (numpy) for standardisation
        input_scale=None,    # per-bin std (numpy) for standardisation
        max_bin=None,        # optional bandwidth cap on modelled bins
        unidirectional=False,
        power=1,             # 1 = magnitude, 2 = power spectrogram
    ):
        """
        Input: (nb_samples, nb_channels, nb_timesteps)
            or (nb_frames, nb_samples, nb_channels, nb_bins)
        Output: Power/Mag Spectrogram
                (nb_frames, nb_samples, nb_channels, nb_bins)
        """
        super(OpenUnmix_mtl_cross, self).__init__()
        # Full number of STFT frequency bins.
        self.nb_output_bins = n_fft // 2 + 1
        if max_bin:
            self.nb_bins = max_bin
        else:
            self.nb_bins = self.nb_output_bins
        self.hidden_size = hidden_size
        self.stft = STFT(n_fft=n_fft, n_hop=n_hop)
        self.spec = Spectrogram(power=power, mono=(nb_channels == 1))
        # Buffer so the rate is serialised with the model.
        self.register_buffer('sample_rate', torch.tensor(sample_rate))
        if input_is_spectrogram:
            self.transform = NoOp()
        else:
            self.transform = nn.Sequential(self.stft, self.spec)
        # Encoder: flattened (channels x bins) -> hidden_size.
        self.fc1 = Linear(
            self.nb_bins*nb_channels, hidden_size,
            bias=False
        )
        self.bn1 = BatchNorm1d(hidden_size)
        # Bidirectional LSTM halves per-direction width.
        if unidirectional:
            lstm_hidden_size = hidden_size
        else:
            lstm_hidden_size = hidden_size // 2
        self.lstm = LSTM(
            input_size=hidden_size,
            hidden_size=lstm_hidden_size,
            num_layers=nb_layers,
            bidirectional=not unidirectional,
            batch_first=False,
            dropout=0.4,
        )
        # Separation decoder.
        self.fc2 = Linear(
            in_features=hidden_size*2,
            out_features=hidden_size,
            bias=False
        )
        self.bn2 = BatchNorm1d(hidden_size)
        self.fc3 = Linear(
            in_features=hidden_size,
            out_features=self.nb_output_bins*nb_channels,
            bias=False
        )
        self.bn3 = BatchNorm1d(self.nb_output_bins*nb_channels)
        # Two-layer onset head: hidden_size*2 -> hidden_size//2 -> 1.
        self.fc4 = Linear(
            in_features=hidden_size*2,
            out_features=hidden_size//2,
            bias=False
        )
        self.bn4 = BatchNorm1d(hidden_size//2)
        self.fc5 = Linear(
            in_features=hidden_size//2,
            out_features=1,
            bias=False
        )
        self.bn5 = BatchNorm1d(1)
        # Input statistics stored negated/inverted for in-place += / *=.
        if input_mean is not None:
            input_mean = torch.from_numpy(
                -input_mean[:self.nb_bins]
            ).float()
        else:
            input_mean = torch.zeros(self.nb_bins)
        if input_scale is not None:
            input_scale = torch.from_numpy(
                1.0/input_scale[:self.nb_bins]
            ).float()
        else:
            input_scale = torch.ones(self.nb_bins)
        self.input_mean = Parameter(input_mean)
        self.input_scale = Parameter(input_scale)
        # Learnable output affine; both initialised to ones (inherited
        # from upstream open-unmix).
        self.output_scale = Parameter(
            torch.ones(self.nb_output_bins).float()
        )
        self.output_mean = Parameter(
            torch.ones(self.nb_output_bins).float()
        )

    def forward(self, x):
        """Return ``(onset-gated mask * mix, onset_activation)``."""
        # Waveform -> spectrogram (NoOp if input already is one).
        x = self.transform(x)
        nb_frames, nb_samples, nb_channels, nb_bins = x.data.shape
        # Raw mixture kept for final masking.
        mix = x.detach().clone()
        # Crop to modelled frequency range.
        x = x[..., :self.nb_bins]
        # Standardise (input_mean is negated, input_scale reciprocal).
        x += self.input_mean
        x *= self.input_scale
        # Flatten and encode to (nb_frames*nb_samples, hidden_size).
        x = self.fc1(x.reshape(-1, nb_channels*self.nb_bins))
        #x = self.fc1(x.reshape(-1, self.nb_bins))
        x = self.bn1(x)
        x = x.reshape(nb_frames, nb_samples, self.hidden_size)
        # Squash range to [-1, 1].
        x = torch.tanh(x)
        # Stacked (bi)LSTM plus skip connection.
        lstm_out = self.lstm(x)
        x = torch.cat([x, lstm_out[0]], -1)
        ####### Branched onset detection layers #############
        # First dense layer + batch norm.
        y = self.fc4(x.reshape(-1, x.shape[-1]))
        y = self.bn4(y)
        y = F.relu(y)
        # Second dense layer + batch norm, squashed to a probability.
        y = self.fc5(y)
        y = self.bn5(y)
        y = torch.sigmoid(y)
        # Separation decoder: two dense stages with batch norm.
        x = self.fc2(x.reshape(-1, x.shape[-1]))
        x = self.bn2(x)
        x = F.relu(x)
        x = self.fc3(x)
        x = self.bn3(x)
        #x = torch.cat((x, y), dim=1)
        # Cross-task gating: broadcast the (N, 1) onset probability across
        # all output bins of the same frame.
        x = x*y
        # NOTE(review): rows are frame-major; reshaping to
        # (nb_samples, nb_frames, 1) relabels axes without a transpose —
        # confirm onset targets use the same layout.
        y = y.reshape(nb_samples, nb_frames, 1)
        ###################################################
        # Back to (nb_frames, nb_samples, nb_channels, nb_output_bins).
        x = x.reshape(nb_frames, nb_samples, nb_channels, self.nb_output_bins)
        # Learned output scaling/offset.
        x *= self.output_scale
        x += self.output_mean
        # Non-negative mask applied to the mixture.
        x = F.relu(x) * mix
        return x, y
class OpenUnmix_mtl_mul(nn.Module):
    """Open-Unmix separator gated by an *externally supplied* onset signal.

    Unlike the other MTL variants in this file, this model does not
    predict onsets itself: ``forward(x, y)`` takes the onset activations
    as a second input and multiplies them into the separation output.
    """

    def __init__(
        self,
        n_fft=4096,
        n_hop=1024,
        input_is_spectrogram=False,
        hidden_size=512,
        nb_channels=2,       # 2 = stereo, 1 = mono
        sample_rate=44100,
        nb_layers=3,
        input_mean=None,     # per-bin mean (numpy) for standardisation
        input_scale=None,    # per-bin std (numpy) for standardisation
        max_bin=None,        # optional bandwidth cap on modelled bins
        unidirectional=False,
        power=1,             # 1 = magnitude, 2 = power spectrogram
    ):
        """
        Input: (nb_samples, nb_channels, nb_timesteps)
            or (nb_frames, nb_samples, nb_channels, nb_bins)
        Output: Power/Mag Spectrogram
                (nb_frames, nb_samples, nb_channels, nb_bins)
        """
        super(OpenUnmix_mtl_mul, self).__init__()
        # Full number of STFT frequency bins.
        self.nb_output_bins = n_fft // 2 + 1
        if max_bin:
            self.nb_bins = max_bin
        else:
            self.nb_bins = self.nb_output_bins
        self.hidden_size = hidden_size
        self.stft = STFT(n_fft=n_fft, n_hop=n_hop)
        self.spec = Spectrogram(power=power, mono=(nb_channels == 1))
        # Buffer so the rate is serialised with the model.
        self.register_buffer('sample_rate', torch.tensor(sample_rate))
        if input_is_spectrogram:
            self.transform = NoOp()
        else:
            self.transform = nn.Sequential(self.stft, self.spec)
        # Encoder: flattened (channels x bins) -> hidden_size.
        self.fc1 = Linear(
            self.nb_bins*nb_channels, hidden_size,
            bias=False
        )
        self.bn1 = BatchNorm1d(hidden_size)
        # Bidirectional LSTM halves per-direction width.
        if unidirectional:
            lstm_hidden_size = hidden_size
        else:
            lstm_hidden_size = hidden_size // 2
        self.lstm = LSTM(
            input_size=hidden_size,
            hidden_size=lstm_hidden_size,
            num_layers=nb_layers,
            bidirectional=not unidirectional,
            batch_first=False,
            dropout=0.4,
        )
        # Separation decoder.
        self.fc2 = Linear(
            in_features=hidden_size*2,
            out_features=hidden_size,
            bias=False
        )
        self.bn2 = BatchNorm1d(hidden_size)
        self.fc3 = Linear(
            in_features=hidden_size,
            out_features=self.nb_output_bins*nb_channels,
            bias=False
        )
        self.bn3 = BatchNorm1d(self.nb_output_bins*nb_channels)
        # No onset head here: the onset signal is passed in to forward().
        # self.fc4 = Linear(
        #     in_features=hidden_size*2,
        #     out_features=hidden_size//2,
        #     bias=False
        # )
        # self.bn4 = BatchNorm1d(hidden_size//2)
        # self.fc5 = Linear(
        #     in_features=hidden_size//2,
        #     out_features=1,
        #     bias=False
        # )
        # self.bn5 = BatchNorm1d(1)
        # Input statistics stored negated/inverted for in-place += / *=.
        if input_mean is not None:
            input_mean = torch.from_numpy(
                -input_mean[:self.nb_bins]
            ).float()
        else:
            input_mean = torch.zeros(self.nb_bins)
        if input_scale is not None:
            input_scale = torch.from_numpy(
                1.0/input_scale[:self.nb_bins]
            ).float()
        else:
            input_scale = torch.ones(self.nb_bins)
        self.input_mean = Parameter(input_mean)
        self.input_scale = Parameter(input_scale)
        # Learnable output affine; both initialised to ones (inherited
        # from upstream open-unmix).
        self.output_scale = Parameter(
            torch.ones(self.nb_output_bins).float()
        )
        self.output_mean = Parameter(
            torch.ones(self.nb_output_bins).float()
        )

    def forward(self, x, y):
        """Separate ``x``, gating the output with the onset signal ``y``.

        Args:
            x: waveform or spectrogram (as in the other variants).
            y: onset activations; assumed to flatten to
               (nb_frames*nb_samples, 1) in frame-major order so it can
               broadcast against the decoder output — TODO confirm with
               the caller.

        Returns:
            The masked magnitude spectrogram only (no onset output).
        """
        # print(x.shape)
        # print(y.shape)
        # Waveform -> spectrogram (NoOp if input already is one).
        x = self.transform(x)
        nb_frames, nb_samples, nb_channels, nb_bins = x.data.shape
        # Raw mixture kept for final masking.
        mix = x.detach().clone()
        # Crop to modelled frequency range.
        x = x[..., :self.nb_bins]
        # Standardise (input_mean is negated, input_scale reciprocal).
        x += self.input_mean
        x *= self.input_scale
        # Flatten and encode to (nb_frames*nb_samples, hidden_size).
        x = self.fc1(x.reshape(-1, nb_channels*self.nb_bins))
        #x = self.fc1(x.reshape(-1, self.nb_bins))
        x = self.bn1(x)
        x = x.reshape(nb_frames, nb_samples, self.hidden_size)
        # Squash range to [-1, 1].
        x = torch.tanh(x)
        # Stacked (bi)LSTM plus skip connection.
        lstm_out = self.lstm(x)
        x = torch.cat([x, lstm_out[0]], -1)
        ####### (onset branch removed — y is an input) #############
        # y = self.fc4(x.reshape(-1, x.shape[-1]))
        # y = self.bn4(y)
        # y = F.relu(y)
        # #second dense layer + batch norm
        # y = self.fc5(y)
        # y = self.bn5(y)
        # y = torch.sigmoid(y)
        # Separation decoder: two dense stages with batch norm.
        x = self.fc2(x.reshape(-1, x.shape[-1]))
        x = self.bn2(x)
        x = F.relu(x)
        x = self.fc3(x)
        x = self.bn3(x)
        # Flatten the external onset signal for broadcasting.
        y = y.reshape(-1, y.shape[-1])
        #x = torch.cat((x, y), dim=1)
        # Gate every output bin of a frame by its onset activation.
        x = x*y
        # y = y.reshape(nb_samples, nb_frames, 1)
        ###################################################
        # Back to (nb_frames, nb_samples, nb_channels, nb_output_bins).
        x = x.reshape(nb_frames, nb_samples, nb_channels, self.nb_output_bins)
        # Learned output scaling/offset.
        x *= self.output_scale
        x += self.output_mean
        # Non-negative mask applied to the mixture.
        x = F.relu(x) * mix
        return x
class onsetCNN(nn.Module):
    """Small conv net mapping a 2-D input patch to one onset probability.

    Two tanh conv stages (each followed by (3, 1) max pooling), then two
    fully connected layers with dropout; output is a sigmoid score of
    shape (batch, 1). The flattened feature size (20 channels over a
    7x8 grid) fixes the expected input spatial dimensions.
    """

    # 20 conv2 channels over a 7x8 spatial grid after both pool stages.
    _FLAT_FEATURES = 20 * 7 * 8

    def __init__(self):
        super(onsetCNN, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, (3, 7))
        self.pool1 = nn.MaxPool2d((3, 1))
        self.conv2 = nn.Conv2d(10, 20, 3)
        self.pool2 = nn.MaxPool2d((3, 1))
        self.fc1 = nn.Linear(self._FLAT_FEATURES, 256)
        self.fc2 = nn.Linear(256, 1)
        self.dout = nn.Dropout(p=0.5)

    def forward(self, x):
        """Return a sigmoid onset score of shape (batch, 1)."""
        out = self.pool1(torch.tanh(self.conv1(x)))
        out = self.pool2(torch.tanh(self.conv2(out)))
        # Flatten, with dropout both before and after the hidden layer.
        out = self.dout(out.view(-1, self._FLAT_FEATURES))
        out = self.dout(torch.sigmoid(self.fc1(out)))
        return torch.sigmoid(self.fc2(out))
| 27.520939
| 87
| 0.549835
| 5,559
| 43,373
| 4.076093
| 0.040115
| 0.068405
| 0.030893
| 0.046119
| 0.940377
| 0.93155
| 0.925151
| 0.923607
| 0.922591
| 0.922591
| 0
| 0.02391
| 0.336592
| 43,373
| 1,575
| 88
| 27.538413
| 0.763571
| 0.219584
| 0
| 0.8479
| 0
| 0
| 0.002575
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.024972
| false
| 0
| 0.00454
| 0.001135
| 0.054484
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
91a58446eb41c1423605a2a201bf8d4c549f4cb9
| 5,687
|
py
|
Python
|
demos/aspect_polarity_classification/run_fast_lsa_deberta.py
|
froth-synthesio/PyABSA
|
61406e7a49f93f6c986dfd7e583d730b69c2861c
|
[
"MIT"
] | 199
|
2021-06-07T15:07:28.000Z
|
2022-03-31T11:53:28.000Z
|
demos/aspect_polarity_classification/run_fast_lsa_deberta.py
|
froth-synthesio/PyABSA
|
61406e7a49f93f6c986dfd7e583d730b69c2861c
|
[
"MIT"
] | 98
|
2021-06-06T06:01:02.000Z
|
2022-03-31T15:48:28.000Z
|
demos/aspect_polarity_classification/run_fast_lsa_deberta.py
|
froth-synthesio/PyABSA
|
61406e7a49f93f6c986dfd7e583d730b69c2861c
|
[
"MIT"
] | 55
|
2021-06-10T08:52:17.000Z
|
2022-03-31T11:08:58.000Z
|
# -*- coding: utf-8 -*-
# file: run_fast_lsa_deberta.py
# time: 2021/5/26 0026
# author: yangheng <yangheng@m.scnu.edu.cn>
# github: https://github.com/yangheng95
# Copyright (C) 2021. All Rights Reserved.
########################################################################################################################
# This code is for paper: #
# "Back to Reality: Leveraging Pattern-driven Modeling to Enable Affordable Sentiment Dependency Learning" #
# but there are some changes in this paper, and it is under submission #
########################################################################################################################
import random
from pyabsa.functional import Trainer
from pyabsa.functional import APCConfigManager
from pyabsa.functional import ABSADatasetList
from pyabsa.functional import APCModelList
import warnings
warnings.filterwarnings('ignore')
# FAST_LCF_BERT + DeBERTa-v3: train/evaluate on the five English ABSA benchmarks.
# Two random seeds -> every Trainer call repeats the run once per seed.
seeds = [random.randint(0, 10000) for _ in range(2)]

apc_config_english = APCConfigManager.get_apc_config_english()
apc_config_english.model = APCModelList.FAST_LCF_BERT
apc_config_english.pretrained_bert = 'microsoft/deberta-v3-base'
apc_config_english.lcf = 'cdw'
apc_config_english.SRD = 3
apc_config_english.similarity_threshold = 1
apc_config_english.max_seq_len = 80
apc_config_english.hidden_dim = 768
apc_config_english.embed_dim = 768
apc_config_english.dropout = 0
apc_config_english.optimizer = 'adam'
apc_config_english.learning_rate = 1e-5
apc_config_english.l2reg = 1e-8
apc_config_english.batch_size = 16
apc_config_english.num_epoch = 30
apc_config_english.log_step = 10
apc_config_english.evaluate_begin = 3
apc_config_english.patience = 10
apc_config_english.cache_dataset = False
apc_config_english.seed = seeds
apc_config_english.cross_validate_fold = -1  # disable cross-validation

# (dataset, checkpoint_save_mode) pairs: mode 0 discards the trained model,
# mode 1 saves a checkpoint -- only the MAMS run keeps its model here.
for _dataset, _save_mode in [
    (ABSADatasetList.Laptop14, 0),
    (ABSADatasetList.Restaurant14, 0),
    (ABSADatasetList.Restaurant15, 0),
    (ABSADatasetList.Restaurant16, 0),
    (ABSADatasetList.MAMS, 1),
]:
    Trainer(config=apc_config_english,
            dataset=_dataset,               # train/test sets are auto-detected
            checkpoint_save_mode=_save_mode,
            auto_device=True)               # choose CUDA automatically if available
# Second experiment: FAST_LCFS_BERT with the same DeBERTa-v3 backbone.
# Reuses the `seeds` list generated above so both models see identical seeds.
apc_config_english = APCConfigManager.get_apc_config_english()
apc_config_english.model = APCModelList.FAST_LCFS_BERT
apc_config_english.pretrained_bert = 'microsoft/deberta-v3-base'
apc_config_english.lcf = 'cdw'
apc_config_english.SRD = 3
apc_config_english.similarity_threshold = 1
apc_config_english.max_seq_len = 80
apc_config_english.hidden_dim = 768
apc_config_english.embed_dim = 768
apc_config_english.dropout = 0
apc_config_english.optimizer = 'adam'
apc_config_english.learning_rate = 1e-5
apc_config_english.l2reg = 1e-8
apc_config_english.batch_size = 16
apc_config_english.num_epoch = 30
apc_config_english.log_step = 10
apc_config_english.evaluate_begin = 2
apc_config_english.patience = 10
apc_config_english.cache_dataset = False
apc_config_english.seed = seeds
apc_config_english.cross_validate_fold = -1  # disable cross-validation

# checkpoint_save_mode=0 for every benchmark: no trained model is saved.
for _dataset in [
    ABSADatasetList.Laptop14,
    ABSADatasetList.Restaurant14,
    ABSADatasetList.Restaurant15,
    ABSADatasetList.Restaurant16,
    ABSADatasetList.MAMS,
]:
    Trainer(config=apc_config_english,
            dataset=_dataset,          # train/test sets are auto-detected
            checkpoint_save_mode=0,    # do not save the trained model
            auto_device=True)          # choose CUDA automatically if available
| 40.333333
| 120
| 0.714788
| 725
| 5,687
| 5.361379
| 0.208276
| 0.125032
| 0.222279
| 0.056599
| 0.856959
| 0.856959
| 0.856959
| 0.856959
| 0.85027
| 0.85027
| 0
| 0.029539
| 0.184456
| 5,687
| 140
| 121
| 40.621429
| 0.808538
| 0.295938
| 0
| 0.781818
| 0
| 0
| 0.018822
| 0.013444
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.054545
| 0
| 0.054545
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
5311c3c21a92398445ff2d2daa1bd189b94c8e72
| 100
|
py
|
Python
|
src/utilities.py
|
Open-Speech-EkStep/text-to-speech-open-api
|
6a441996600829ed5c5b456acb339e14d1f59ac5
|
[
"MIT"
] | null | null | null |
src/utilities.py
|
Open-Speech-EkStep/text-to-speech-open-api
|
6a441996600829ed5c5b456acb339e14d1f59ac5
|
[
"MIT"
] | null | null | null |
src/utilities.py
|
Open-Speech-EkStep/text-to-speech-open-api
|
6a441996600829ed5c5b456acb339e14d1f59ac5
|
[
"MIT"
] | null | null | null |
import os
def get_env_var(var_name: str, default: str = '') -> str:
    """Return the value of environment variable *var_name*, or *default* if unset.

    Bug fix: the original signature was ``def get_env_var(var_name=str, ...)``,
    which made the *type object* ``str`` the default argument -- almost certainly
    a typo for a type annotation.  Looking the type object up in ``os.environ``
    would raise ``TypeError``, so no working caller relied on that default;
    making ``var_name`` required is backward-compatible for all valid callers.
    """
    return os.environ.get(var_name, default)
| 16.666667
| 44
| 0.73
| 17
| 100
| 4.058824
| 0.647059
| 0.202899
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.14
| 100
| 5
| 45
| 20
| 0.802326
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
5314c6ef5579ca76148571053b3ee2b9d165c037
| 24,702
|
py
|
Python
|
jamf/api/patch_policies_preview_api.py
|
jensenbox/python-jamf
|
85213085b1064a00375a7aa7df5e33c19f5178eb
|
[
"RSA-MD"
] | 1
|
2021-04-20T15:28:57.000Z
|
2021-04-20T15:28:57.000Z
|
jamf/api/patch_policies_preview_api.py
|
jensenbox/python-jamf
|
85213085b1064a00375a7aa7df5e33c19f5178eb
|
[
"RSA-MD"
] | null | null | null |
jamf/api/patch_policies_preview_api.py
|
jensenbox/python-jamf
|
85213085b1064a00375a7aa7df5e33c19f5178eb
|
[
"RSA-MD"
] | null | null | null |
# coding: utf-8
"""
Jamf Pro API
## Overview This is a sample Jamf Pro server which allows for usage without any authentication. The Jamf Pro environment which supports the Try it Out functionality does not run the current beta version of Jamf Pro, thus any newly added endpoints will result in an error and should be used soley for documentation purposes. # noqa: E501
The version of the OpenAPI document: 10.25.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from jamf.api_client import ApiClient
from jamf.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class PatchPoliciesPreviewApi(object):
    """NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """
    # Each endpoint is exposed as a pair of methods: a thin public wrapper that
    # returns only the response body, and a *_with_http_info variant that also
    # exposes status code and headers.  The wrappers mutate locals() to collect
    # keyword arguments -- a generator idiom; do not refactor by hand.

    def __init__(self, api_client=None):
        # Fall back to a default ApiClient when the caller does not supply one.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def patch_patch_policies_get(self, **kwargs):  # noqa: E501
        """Return a list of patch policies  # noqa: E501

        Returns a list of patch policies.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.patch_patch_policies_get(async_req=True)
        >>> result = thread.get()

        :param on_dashboard: Filters whether or not the patch policies are on the dashboard.
        :type on_dashboard: bool
        :param enabled: Filters whether or not the patch policies are enabled.
        :type enabled: bool
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :type _preload_content: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: list[PatchPolicySummary]
        """
        # Delegate to the *_with_http_info variant, keeping only the body.
        kwargs['_return_http_data_only'] = True
        return self.patch_patch_policies_get_with_http_info(**kwargs)  # noqa: E501

    def patch_patch_policies_get_with_http_info(self, **kwargs):  # noqa: E501
        """Return a list of patch policies  # noqa: E501

        Returns a list of patch policies.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.patch_patch_policies_get_with_http_info(async_req=True)
        >>> result = thread.get()

        :param on_dashboard: Filters whether or not the patch policies are on the dashboard.
        :type on_dashboard: bool
        :param enabled: Filters whether or not the patch policies are enabled.
        :type enabled: bool
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _return_http_data_only: response data without head status code
                                       and headers
        :type _return_http_data_only: bool, optional
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :type _preload_content: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(list[PatchPolicySummary], status_code(int), headers(HTTPHeaderDict))
        """

        local_var_params = locals()

        all_params = [
            'on_dashboard',
            'enabled'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth'
            ]
        )

        # Reject keyword arguments this endpoint does not recognise.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method patch_patch_policies_get" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']

        collection_formats = {}

        path_params = {}

        # Only include query parameters the caller actually supplied.
        query_params = []
        if 'on_dashboard' in local_var_params and local_var_params['on_dashboard'] is not None:  # noqa: E501
            query_params.append(('onDashboard', local_var_params['on_dashboard']))  # noqa: E501
        if 'enabled' in local_var_params and local_var_params['enabled'] is not None:  # noqa: E501
            query_params.append(('enabled', local_var_params['enabled']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = []  # noqa: E501

        response_types_map = {
            200: "list[PatchPolicySummary]",
        }

        return self.api_client.call_api(
            '/patch/patch-policies', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_types_map=response_types_map,
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats,
            _request_auth=local_var_params.get('_request_auth'))

    def patch_patch_policies_id_dashboard_delete(self, id, **kwargs):  # noqa: E501
        """Remove a patch policy from the dashboard  # noqa: E501

        Removes a patch policy from the dashboard.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.patch_patch_policies_id_dashboard_delete(id, async_req=True)
        >>> result = thread.get()

        :param id: patch id (required)
        :type id: int
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :type _preload_content: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: None
        """
        # Delegate to the *_with_http_info variant, keeping only the body.
        kwargs['_return_http_data_only'] = True
        return self.patch_patch_policies_id_dashboard_delete_with_http_info(id, **kwargs)  # noqa: E501

    def patch_patch_policies_id_dashboard_delete_with_http_info(self, id, **kwargs):  # noqa: E501
        """Remove a patch policy from the dashboard  # noqa: E501

        Removes a patch policy from the dashboard.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.patch_patch_policies_id_dashboard_delete_with_http_info(id, async_req=True)
        >>> result = thread.get()

        :param id: patch id (required)
        :type id: int
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _return_http_data_only: response data without head status code
                                       and headers
        :type _return_http_data_only: bool, optional
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :type _preload_content: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: None
        """

        local_var_params = locals()

        all_params = [
            'id'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth'
            ]
        )

        # Reject keyword arguments this endpoint does not recognise.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method patch_patch_policies_id_dashboard_delete" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'id' is set
        if self.api_client.client_side_validation and ('id' not in local_var_params or  # noqa: E501
                                                       local_var_params['id'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `id` when calling `patch_patch_policies_id_dashboard_delete`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'id' in local_var_params:
            path_params['id'] = local_var_params['id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # Authentication setting
        auth_settings = []  # noqa: E501

        # DELETE returns no body, so the response-type map is empty.
        response_types_map = {}

        return self.api_client.call_api(
            '/patch/patch-policies/{id}/dashboard', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_types_map=response_types_map,
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats,
            _request_auth=local_var_params.get('_request_auth'))

    def patch_patch_policies_id_dashboard_get(self, id, **kwargs):  # noqa: E501
        """Return whether or not the requested patch policy is on the dashboard  # noqa: E501

        Returns whether or not the requested patch policy is on the dashboard  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.patch_patch_policies_id_dashboard_get(id, async_req=True)
        >>> result = thread.get()

        :param id: patch policy id (required)
        :type id: int
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :type _preload_content: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: PatchPolicyOnDashboard
        """
        # Delegate to the *_with_http_info variant, keeping only the body.
        kwargs['_return_http_data_only'] = True
        return self.patch_patch_policies_id_dashboard_get_with_http_info(id, **kwargs)  # noqa: E501

    def patch_patch_policies_id_dashboard_get_with_http_info(self, id, **kwargs):  # noqa: E501
        """Return whether or not the requested patch policy is on the dashboard  # noqa: E501

        Returns whether or not the requested patch policy is on the dashboard  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.patch_patch_policies_id_dashboard_get_with_http_info(id, async_req=True)
        >>> result = thread.get()

        :param id: patch policy id (required)
        :type id: int
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _return_http_data_only: response data without head status code
                                       and headers
        :type _return_http_data_only: bool, optional
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :type _preload_content: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(PatchPolicyOnDashboard, status_code(int), headers(HTTPHeaderDict))
        """

        local_var_params = locals()

        all_params = [
            'id'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth'
            ]
        )

        # Reject keyword arguments this endpoint does not recognise.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method patch_patch_policies_id_dashboard_get" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'id' is set
        if self.api_client.client_side_validation and ('id' not in local_var_params or  # noqa: E501
                                                       local_var_params['id'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `id` when calling `patch_patch_policies_id_dashboard_get`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'id' in local_var_params:
            path_params['id'] = local_var_params['id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = []  # noqa: E501

        response_types_map = {
            200: "PatchPolicyOnDashboard",
            404: "ApiError",
        }

        return self.api_client.call_api(
            '/patch/patch-policies/{id}/dashboard', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_types_map=response_types_map,
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats,
            _request_auth=local_var_params.get('_request_auth'))

    def patch_patch_policies_id_dashboard_post(self, id, **kwargs):  # noqa: E501
        """Add a patch policy to the dashboard  # noqa: E501

        Adds a patch policy to the dashboard.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.patch_patch_policies_id_dashboard_post(id, async_req=True)
        >>> result = thread.get()

        :param id: patch policy id (required)
        :type id: int
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :type _preload_content: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: None
        """
        # Delegate to the *_with_http_info variant, keeping only the body.
        kwargs['_return_http_data_only'] = True
        return self.patch_patch_policies_id_dashboard_post_with_http_info(id, **kwargs)  # noqa: E501

    def patch_patch_policies_id_dashboard_post_with_http_info(self, id, **kwargs):  # noqa: E501
        """Add a patch policy to the dashboard  # noqa: E501

        Adds a patch policy to the dashboard.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.patch_patch_policies_id_dashboard_post_with_http_info(id, async_req=True)
        >>> result = thread.get()

        :param id: patch policy id (required)
        :type id: int
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _return_http_data_only: response data without head status code
                                       and headers
        :type _return_http_data_only: bool, optional
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :type _preload_content: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: None
        """

        local_var_params = locals()

        all_params = [
            'id'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth'
            ]
        )

        # Reject keyword arguments this endpoint does not recognise.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method patch_patch_policies_id_dashboard_post" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'id' is set
        if self.api_client.client_side_validation and ('id' not in local_var_params or  # noqa: E501
                                                       local_var_params['id'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `id` when calling `patch_patch_policies_id_dashboard_post`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'id' in local_var_params:
            path_params['id'] = local_var_params['id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # Authentication setting
        auth_settings = []  # noqa: E501

        # POST returns no body, so the response-type map is empty.
        response_types_map = {}

        return self.api_client.call_api(
            '/patch/patch-policies/{id}/dashboard', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_types_map=response_types_map,
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats,
            _request_auth=local_var_params.get('_request_auth'))
| 43.336842
| 342
| 0.597684
| 2,798
| 24,702
| 5.018585
| 0.080415
| 0.035323
| 0.053838
| 0.030765
| 0.925865
| 0.921592
| 0.919954
| 0.912833
| 0.90329
| 0.896667
| 0
| 0.011946
| 0.335803
| 24,702
| 569
| 343
| 43.413005
| 0.843908
| 0.498705
| 0
| 0.723141
| 0
| 0
| 0.162964
| 0.066074
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03719
| false
| 0
| 0.020661
| 0
| 0.095041
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
533bfccb98e32d56a78d5782e7d99586175c180b
| 11,432
|
py
|
Python
|
pyutilib/misc/tests/test_import.py
|
PyUtilib/PyUtilib
|
d99406f2af1fb62268c34453a2fbe6bd4a7348f0
|
[
"BSD-3-Clause"
] | 24
|
2016-04-02T10:00:02.000Z
|
2021-03-02T16:40:18.000Z
|
pyutilib/misc/tests/test_import.py
|
PyUtilib/PyUtilib
|
d99406f2af1fb62268c34453a2fbe6bd4a7348f0
|
[
"BSD-3-Clause"
] | 105
|
2015-10-29T03:29:58.000Z
|
2021-12-30T22:00:45.000Z
|
pyutilib/misc/tests/test_import.py
|
PyUtilib/PyUtilib
|
d99406f2af1fb62268c34453a2fbe6bd4a7348f0
|
[
"BSD-3-Clause"
] | 22
|
2016-01-21T15:35:25.000Z
|
2021-05-15T20:17:44.000Z
|
#
# Unit Tests for util/misc/import_file
#
#
import os
import sys
from os.path import abspath, dirname
# Directory containing this test module (with a trailing separator), and the
# package root two levels up.  Built so the resulting strings are identical
# to plain string concatenation with os.sep.
currdir = dirname(abspath(__file__)) + os.sep
pkgdir = currdir + ".." + os.sep + ".."
import pyutilib.misc
import pyutilib.th as unittest
import pyutilib.misc.comparison
try:
import runpy
_runpy = True
except:
_runpy = False
class TestRunFile(unittest.TestCase):
    """Tests for pyutilib.misc.run_file: executing a script file and
    capturing its output into a logfile.  Fixture scripts (import1.py,
    import2.py, ...) and expected-output .txt files live in `currdir`.
    """

    def test_run_file1(self):
        # Run a script addressed by absolute path; stdout goes to the logfile.
        pyutilib.misc.run_file(
            currdir + "import1.py", logfile=currdir + "import1.log")
        if not os.path.exists(currdir + "import1.log"):
            self.fail("test_run_file - failed to create logfile")
        # compare_file returns a tuple whose first element is truthy when the
        # files differ (inferred from this assertFalse usage -- see pyutilib).
        self.assertFalse(
            pyutilib.misc.comparison.compare_file(currdir + "import1.log",
                                                  currdir + "import1.txt")[0])
        os.remove(currdir + "import1.log")

    def test_run_file2(self):
        # Same as test_run_file1 but with a relative path resolved via execdir.
        pyutilib.misc.run_file(
            "import1.py", logfile=currdir + "import1.log", execdir=currdir)
        if not os.path.exists(currdir + "import1.log"):
            self.fail("test_run_file - failed to create logfile")
        self.assertFalse(
            pyutilib.misc.comparison.compare_file(currdir + "import1.log",
                                                  currdir + "import1.txt")[0])
        os.remove(currdir + "import1.log")

    def test_run_file3(self):
        # import2.py is expected to raise TypeError; output written before the
        # failure must still match the reference file.
        try:
            pyutilib.misc.run_file(
                "import2.py", logfile=currdir + "import2.log", execdir=currdir)
            self.fail("test_run_file - expected type error in import2.py")
        except TypeError:
            pass
        self.assertFalse(
            pyutilib.misc.comparison.compare_file(currdir + "import2.log",
                                                  currdir + "import2.txt")[0])
        os.remove(currdir + "import2.log")

    def test_run_file_exception(self):
        # An exception raised by the script's __main__ must propagate as a
        # RuntimeError, and sys.path must be restored afterwards.
        orig_path = list(sys.path)
        with self.assertRaisesRegexp(RuntimeError, "raised from __main__"):
            pyutilib.misc.run_file(
                "import_main_exception.py",
                logfile=currdir + "import_main_exception.log", execdir=currdir)
        self.assertFalse(
            pyutilib.misc.comparison.compare_file(
                currdir + "import_main_exception.log",
                currdir + "import_main_exception.txt")[0])
        os.remove(currdir + "import_main_exception.log")
        # sys.path is a different list object but with equal contents.
        self.assertIsNot(orig_path, sys.path)
        self.assertEqual(orig_path, sys.path)
class TestImportFile(unittest.TestCase):
def setUp(self):
self._mods = list(sys.modules.keys())
def tearDown(self):
to_del = [m for m in sys.modules.keys() if m not in self._mods]
for mod in to_del:
del sys.modules[mod]
def test_import_file_context1(self):
pyutilib.misc.import_file(currdir + "import1.py")
if "import1" in globals():
self.fail(
"test_import_file - globals() should not be affected by import")
def test_import_file_context2(self):
import1 = pyutilib.misc.import_file(currdir + "import1.py")
try:
c = import1.a
except:
self.fail("test_import_file - could not access data in import.py")
def test_import_file_context3(self):
pyutilib.misc.import_file(currdir + "import1.py", context=globals())
if not "import1" in globals():
self.fail("test_import_file - failed to import the import1.py file")
def test_import_exception(self):
orig_path = list(sys.path)
with self.assertRaisesRegexp(RuntimeError, "raised during import"):
pyutilib.misc.import_file(currdir + "import_exception.py")
self.assertIsNot(orig_path, sys.path)
self.assertEqual(orig_path, sys.path)
def test1(self):
try:
pyutilib.misc.import_file('tfile.py')
except IOError:
pass
else:
self.fail('File does not exist. Expected IOError.')
def test2(self):
try:
pyutilib.misc.import_file('tfile')
except ImportError:
pass
else:
self.fail('Module does not exist. Expected ImportError.')
def test3(self):
dirname = currdir + 'import_data' + os.sep + 'a'
sys.path.insert(0, dirname)
context = {}
m = pyutilib.misc.import_file('tfile', context=context)
self.assertEqual(id(m), id(context['tfile']))
self.assertEqual(m.f(), 'a')
sys.path.remove(dirname)
def test4(self):
dirname = currdir + 'import_data' + os.sep + 'a'
sys.path.insert(0, dirname)
context = {}
m = pyutilib.misc.import_file('tfile', context=context)
self.assertEqual(id(m), id(context['tfile']))
self.assertEqual(m.f(), 'a')
sys.path.remove(dirname)
dirname = currdir + 'import_data' + os.sep + 'b'
sys.path.insert(0, dirname)
m = pyutilib.misc.import_file('tfile', context=context, name='junk')
self.assertEqual(id(m), id(context['junk']))
self.assertEqual(m.f(), 'b')
sys.path.remove(dirname)
def test4a(self):
dirname = currdir + 'import_data' + os.sep + 'a'
sys.path.insert(0, dirname)
context = {}
m = pyutilib.misc.import_file('tfile', context=context)
self.assertEqual(id(m), id(context['tfile']))
self.assertEqual(m.f(), 'a')
sys.path.remove(dirname)
dirname = currdir + 'import_data' + os.sep + 'b'
sys.path.insert(0, dirname)
m = pyutilib.misc.import_file('tfile', context=context)
self.assertEqual(id(m), id(context['tfile']))
self.assertEqual(m.f(), 'a')
sys.path.remove(dirname)
def test5(self):
dirname = currdir + 'import_data' + os.sep + 'a' + os.sep
context = {}
m = pyutilib.misc.import_file(dirname + 'tfile', context=context)
self.assertEqual(id(m), id(context['tfile']))
self.assertEqual(m.f(), 'a')
def test6(self):
dirname = currdir + 'import_data' + os.sep + 'a' + os.sep
context = {}
m = pyutilib.misc.import_file(dirname + 'tfile', context=context)
self.assertEqual(id(m), id(context['tfile']))
self.assertEqual(m.f(), 'a')
dirname = currdir + 'import_data' + os.sep + 'b' + os.sep
m = pyutilib.misc.import_file(
dirname + 'tfile', context=context, name='junk')
self.assertEqual(id(m), id(context['junk']))
self.assertEqual(m.f(), 'b')
def test6a(self):
dirname = currdir + 'import_data' + os.sep + 'a' + os.sep
context = {}
m = pyutilib.misc.import_file(dirname + 'tfile', context=context)
self.assertEqual(id(m), id(context['tfile']))
self.assertEqual(m.f(), 'a')
dirname = currdir + 'import_data' + os.sep + 'b' + os.sep
m = pyutilib.misc.import_file(dirname + 'tfile', context=context)
self.assertEqual(id(m), id(context['tfile']))
self.assertEqual(m.f(), 'a')
def test7(self):
dirname = currdir + 'import_data' + os.sep + 'a' + os.sep
context = {}
m = pyutilib.misc.import_file(dirname + 'tfile.py', context=context)
self.assertEqual(id(m), id(context['tfile']))
self.assertEqual(m.f(), 'a')
def test8(self):
    """A '.py'-path import under an explicit name loads the second file."""
    src_dir = currdir + 'import_data' + os.sep + 'a' + os.sep
    ctx = {}
    mod = pyutilib.misc.import_file(src_dir + 'tfile.py', context=ctx)
    self.assertEqual(id(mod), id(ctx['tfile']))
    self.assertEqual(mod.f(), 'a')
    src_dir = currdir + 'import_data' + os.sep + 'b' + os.sep
    mod = pyutilib.misc.import_file(
        src_dir + 'tfile.py', context=ctx, name='junk')
    self.assertEqual(id(mod), id(ctx['junk']))
    self.assertEqual(mod.f(), 'b')
def test8a(self):
    """Re-importing 'tfile.py' from a different dir still returns the cache."""
    src_dir = currdir + 'import_data' + os.sep + 'a' + os.sep
    ctx = {}
    mod = pyutilib.misc.import_file(src_dir + 'tfile.py', context=ctx)
    self.assertEqual(id(mod), id(ctx['tfile']))
    self.assertEqual(mod.f(), 'a')
    src_dir = currdir + 'import_data' + os.sep + 'b' + os.sep
    mod = pyutilib.misc.import_file(src_dir + 'tfile.py', context=ctx)
    self.assertEqual(id(mod), id(ctx['tfile']))
    # The cached module from 'a' is returned, not the file in 'b'.
    self.assertEqual(mod.f(), 'a')
def test9(self):
    """import_file on a nonexistent slash path must raise ImportError."""
    # assertRaises replaces the manual try/except/else/self.fail idiom.
    self.assertRaises(ImportError, pyutilib.misc.import_file, 'foo/bar')
def test9a(self):
    """A nonexistent slash path raises ImportError even with name= given."""
    # assertRaises replaces the manual try/except/else/self.fail idiom.
    self.assertRaises(
        ImportError, pyutilib.misc.import_file, 'foo/bar', name='junk')
def test10(self):
    """import_file on a nonexistent dotted name must raise ImportError."""
    # assertRaises replaces the manual try/except/else/self.fail idiom.
    self.assertRaises(ImportError, pyutilib.misc.import_file, 'foo.bar')
def test10a(self):
    """A nonexistent dotted name raises ImportError even with name= given."""
    # assertRaises replaces the manual try/except/else/self.fail idiom.
    self.assertRaises(
        ImportError, pyutilib.misc.import_file, 'foo.bar', name='junk')
def test11(self):
    """A nonexistent path mixing slash and dot must raise ImportError."""
    # assertRaises replaces the manual try/except/else/self.fail idiom.
    self.assertRaises(ImportError, pyutilib.misc.import_file, 'baz/foo.bar')
def test11a(self):
    """A nonexistent mixed path raises ImportError even with name= given."""
    # assertRaises replaces the manual try/except/else/self.fail idiom.
    self.assertRaises(
        ImportError, pyutilib.misc.import_file, 'baz/foo.bar', name='junk')
def test12(self):
    """Import a module whose filename contains a dot ('tfile1.0')."""
    src_dir = currdir + 'import_data'
    sys.path.insert(0, src_dir)
    ctx = {}
    mod = pyutilib.misc.import_file('tfile1.0', context=ctx)
    self.assertEqual(id(mod), id(ctx['tfile1.0']))
    self.assertEqual(mod.f(), 'tfile1.0')
    sys.path.remove(src_dir)
def test12a(self):
    """A dotted filename imports under an explicit alternate name."""
    src_dir = currdir + 'import_data'
    sys.path.insert(0, src_dir)
    ctx = {}
    mod = pyutilib.misc.import_file('tfile1.0', context=ctx, name='junk')
    self.assertEqual(id(mod), id(ctx['junk']))
    self.assertEqual(mod.f(), 'tfile1.0')
    sys.path.remove(src_dir)
def test13(self):
    """A dotted filename imports via an explicit directory prefix."""
    src_dir = currdir + 'import_data' + os.sep
    ctx = {}
    mod = pyutilib.misc.import_file(src_dir + 'tfile1.0', context=ctx)
    self.assertEqual(id(mod), id(ctx['tfile1.0']))
    self.assertEqual(mod.f(), 'tfile1.0')
def test13a(self):
    """A dotted filename with a directory prefix imports under name='junk'."""
    src_dir = currdir + 'import_data' + os.sep
    ctx = {}
    mod = pyutilib.misc.import_file(
        src_dir + 'tfile1.0', context=ctx, name='junk')
    self.assertEqual(id(mod), id(ctx['junk']))
    self.assertEqual(mod.f(), 'tfile1.0')
def test14(self):
    """Import via a fully qualified dotted package path."""
    ctx = {}
    mod = pyutilib.misc.import_file(
        'pyutilib.misc.tests.import_data.a.tfile', context=ctx)
    # The context entry is keyed by the full dotted path.
    self.assertEqual(
        id(mod), id(ctx['pyutilib.misc.tests.import_data.a.tfile']))
    self.assertEqual(mod.f(), 'a')
# Apply decorator explicitly: the whole TestRunFile class is skipped when
# the 'runpy' module could not be imported (_runpy is falsy).
TestRunFile = unittest.skipIf(not _runpy, "Cannot import 'runpy'")(TestRunFile)
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
| 36.063091
| 80
| 0.5838
| 1,364
| 11,432
| 4.791789
| 0.101906
| 0.080783
| 0.070685
| 0.107711
| 0.805539
| 0.760251
| 0.742809
| 0.71634
| 0.675031
| 0.658201
| 0
| 0.011376
| 0.277204
| 11,432
| 316
| 81
| 36.177215
| 0.77962
| 0.005511
| 0
| 0.63197
| 0
| 0
| 0.154097
| 0.017777
| 0
| 0
| 0
| 0
| 0.185874
| 1
| 0.118959
| false
| 0.033457
| 0.39777
| 0
| 0.524164
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
5348589f8d691210630c1ac5a74f6df6075fca76
| 384
|
py
|
Python
|
ch_1/test_flat.py
|
2Clutch/magic
|
21bb6dfe725a57ad3fff62931630ef8c54d5e09f
|
[
"MIT"
] | null | null | null |
ch_1/test_flat.py
|
2Clutch/magic
|
21bb6dfe725a57ad3fff62931630ef8c54d5e09f
|
[
"MIT"
] | null | null | null |
ch_1/test_flat.py
|
2Clutch/magic
|
21bb6dfe725a57ad3fff62931630ef8c54d5e09f
|
[
"MIT"
] | null | null | null |
from ch_1.flat import Flat
def test_flat():
    """Flat.flat flattens one-or-two-level nested lists into a flat list."""
    flattener = Flat()
    # Call the method on an instance instead of the unbound-call form
    # Flat.flat(Flat(), ...); both invoke the same bound behavior.
    assert flattener.flat(sample_list=[1, 2, [3]]) == [1, 2, 3]
    assert flattener.flat(sample_list=[1, 2, [3], []]) == [1, 2, 3]
    assert flattener.flat(sample_list=[1, 2, [3], [3, 4, 5]]) == [1, 2, 3, 3, 4, 5]
    assert flattener.flat(sample_list=[1, 2, [3], [7, [9, 2, 5], 4, 3, 2]]) == [1, 2, 3, 7, 9, 2, 5, 4, 3, 2]
| 42.666667
| 112
| 0.505208
| 77
| 384
| 2.441558
| 0.207792
| 0.340426
| 0.12766
| 0.382979
| 0.829787
| 0.829787
| 0.781915
| 0.781915
| 0.781915
| 0.632979
| 0
| 0.148515
| 0.210938
| 384
| 8
| 113
| 48
| 0.471947
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.666667
| 1
| 0.166667
| true
| 0
| 0.166667
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 12
|
726a3b6d56f51e66e1ddd0df8ea70bc4b2fd452e
| 25,220
|
py
|
Python
|
tests/unit/contrib/compilation_infra.py
|
Xilinx/pyxir
|
bef661d6d77adcdbd2cf4163f2cf3a1d31d40406
|
[
"Apache-2.0"
] | 25
|
2020-06-17T22:41:13.000Z
|
2022-03-22T16:28:22.000Z
|
tests/unit/contrib/compilation_infra.py
|
Xilinx/pyxir
|
bef661d6d77adcdbd2cf4163f2cf3a1d31d40406
|
[
"Apache-2.0"
] | 25
|
2021-03-16T06:26:44.000Z
|
2022-03-18T11:28:33.000Z
|
tests/unit/contrib/compilation_infra.py
|
Xilinx/pyxir
|
bef661d6d77adcdbd2cf4163f2cf3a1d31d40406
|
[
"Apache-2.0"
] | 19
|
2020-07-30T10:03:02.000Z
|
2021-06-29T01:18:16.000Z
|
# Copyright 2021 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for testing DPUCZDX8G compilation"""
import os
import xir
import shutil
import numpy as np
import pyxir as px
from pyxir.target_registry import TargetRegistry
from pyxir.graph import XGraph
from pyxir.graph.xgraph_factory import XGraphFactory
# Shared factory used by every graph-construction helper below.
XGRAPH_FACTORY = XGraphFactory()
# Registry used to look up per-target quantizer functions.
TARGET_REGISTRY = TargetRegistry()
# Directory containing this file; 'work' and 'build' dirs are created under it.
FILE_PATH = os.path.dirname(os.path.realpath(__file__))
def remove_all_files_with_suffix(dir_path, suffix):
    """Delete every file in ``dir_path`` whose name ends with ``suffix``.

    Args:
        dir_path: Directory to scan; matching files are removed from here.
        suffix: Filename suffix to match (e.g. ``".xmodel"``).
    """
    # Bug fix: the original joined matches with the module-level FILE_PATH
    # instead of dir_path, so files in any other directory were never
    # removed (and wrong files under FILE_PATH could be).  Also use a plain
    # loop rather than a list comprehension executed for its side effects.
    for fname in os.listdir(dir_path):
        if fname.endswith(suffix):
            os.remove(os.path.join(dir_path, fname))
def get_child_subgraphs(graph: "Graph"):
    """Return the topologically sorted child subgraphs of an XIR graph.

    Args:
        graph: A deserialized ``xir.Graph`` instance.

    Returns:
        The root subgraph's children in topological order, or an empty list
        when the root subgraph is a leaf (i.e. the graph was not partitioned).
    """
    # Cleanup: removed a leftover "import pdb" debug comment and a
    # commented-out DPU-device filter that was never enabled.
    assert graph is not None, "'graph' should not be None."
    root_subgraph = graph.get_root_subgraph()
    assert (
        root_subgraph is not None
    ), "Failed to get root subgraph of input Graph object."
    if root_subgraph.is_leaf:
        return []
    child_subgraphs = root_subgraph.toposort_child_subgraph()
    assert child_subgraphs is not None and len(child_subgraphs) > 0
    return child_subgraphs
def _create_conv2d_pool2d_nhwc_oihw(
    in_shape,
    w_shape,
    conv_padding,
    conv_strides,
    conv_dilation,
    pool_type,
    pool_size,
    pool_padding=[0, 0],
    pool_strides=[1, 1],
    conv_groups=1,
    conv_invalid=False,
    kernel_layout="OIHW",
    target="DPUCZDX8G-zcu104",
    conv_name="conv1",
    pool_name="pool1",
) -> XGraph:
    """Build and partition an input -> conv2d -> pool2d XGraph (NHWC data).

    Random integer weights are generated for the convolution.  The graph is
    partitioned for ``target`` before being returned.

    NOTE(review): ``pool_strides``, ``conv_invalid`` and ``kernel_layout``
    are accepted but never used in this body (kernel H/W are read from fixed
    OIHW positions w_shape[2]/w_shape[3]) — confirm against callers.
    """
    kernel_w, kernel_h = w_shape[2], w_shape[3]
    W = np.random.randint(-10, 10, size=w_shape).astype(np.float32)
    # B = np.array([1., -1.], dtype=np.float32)
    x1 = px.ops.input("in1", shape=list(in_shape))
    w1 = px.ops.constant("weight", W)
    conv1 = px.ops.conv2d(
        op_name=conv_name,
        input_layer=x1,
        weights_layer=w1,
        kernel_size=[kernel_w, kernel_h],
        strides=list(conv_strides),
        padding_hw=list(conv_padding),
        dilation=list(conv_dilation),
        groups=conv_groups,
        data_layout="NHWC",
    )
    pool1 = px.ops.pool2d(
        op_name=pool_name,
        input_layer=conv1,
        pool_type=pool_type,
        pool_size=list(pool_size),
        padding=list(pool_padding),
        layout="NHWC",
    )
    net = [x1, conv1, pool1]
    xgraph = XGRAPH_FACTORY.build_from_xlayer(net)
    # Partition the graph so supported layers are assigned to the target.
    xgraph = px.partition(xgraph, [target])
    return xgraph
def conv2d_pool2d_nhwc_oihw_test(
    in_shape,
    w_shape,
    conv_padding,
    conv_strides,
    conv_dilation,
    pool_type,
    pool_size,
    pool_padding=[0, 0],
    pool_strides=[1, 1],
    conv_groups=1,
    conv_invalid=False,
    kernel_layout="OIHW",
    targets=["DPUCZDX8G-zcu104"],
) -> None:
    """Quantize, optimize and compile a conv2d->pool2d graph per target.

    For each target: builds the graph, runs the target's quantizer with an
    all-ones input feed, compiles it, and asserts the compiler output maps
    (one "xp0" partition, one code file).  Work/build dirs are removed after
    each target iteration.
    """
    for target in targets:
        xgraph = _create_conv2d_pool2d_nhwc_oihw(
            in_shape,
            w_shape,
            conv_padding,
            conv_strides,
            conv_dilation,
            pool_type,
            pool_size,
            pool_padding,
            pool_strides,
            conv_groups,
            conv_invalid,
            kernel_layout,
            target,
        )

        # Calibration feed for the quantizer: a constant all-ones tensor.
        def inputs_func(iter):
            inputs = np.ones(in_shape, dtype=np.float32)
            return {"in1": inputs}

        work_dir = os.path.join(FILE_PATH, "work")
        build_dir = os.path.join(FILE_PATH, "build")
        quantize_func = TARGET_REGISTRY.get_target_quantizer(target)
        q_xgraph = quantize_func(xgraph, inputs_func, work_dir=work_dir)
        opt_xgraph = px.optimize(q_xgraph, target)
        c_xgraph = px.compile(
            opt_xgraph, target, work_dir=work_dir, build_dir=build_dir
        )
        c_output = c_xgraph.get_compiler_output()
        assert list(c_output.keys()) == ["xp0"]
        assert c_output.get_in_map("xp0") == {"xinput0": "xinput0:0"}
        assert c_output.get_out_map("xp0") == {"pool1": "pool1:0"}
        assert len(c_output.get_code_files("xp0")) == 1
        # Clean up compiler artifacts before the next target iteration.
        shutil.rmtree(work_dir)
        shutil.rmtree(build_dir)
def xcompiler_conv2d_pool2d_nhwc_oihw_test(
    in_shape,
    w_shape,
    conv_padding,
    conv_strides,
    conv_dilation,
    pool_type,
    pool_size,
    pool_padding=[0, 0],
    pool_strides=[1, 1],
    conv_groups=1,
    conv_invalid=False,
    kernel_layout="OIHW",
    targets=["DPUCAHX8H-u50"],
    expected_nb_subgraphs=3,
    expected_name={"pool1": "pool1"},
):
    """Compile a conv2d->pool2d graph via xcompiler and check the xmodel.

    Like conv2d_pool2d_nhwc_oihw_test, but additionally deserializes the
    produced ``xp0.xmodel`` with xir and checks the number of child
    subgraphs against ``expected_nb_subgraphs``.
    """
    for target in targets:
        xgraph = _create_conv2d_pool2d_nhwc_oihw(
            in_shape,
            w_shape,
            conv_padding,
            conv_strides,
            conv_dilation,
            pool_type,
            pool_size,
            pool_padding,
            pool_strides,
            conv_groups,
            conv_invalid,
            kernel_layout,
            target,
        )

        # Calibration feed for the quantizer: a constant all-ones tensor.
        def inputs_func(iter):
            inputs = np.ones(in_shape, dtype=np.float32)
            return {"in1": inputs}

        work_dir = os.path.join(FILE_PATH, "work")
        build_dir = os.path.join(FILE_PATH, "build")
        quantize_func = TARGET_REGISTRY.get_target_quantizer(target)
        q_xgraph = quantize_func(xgraph, inputs_func, work_dir=work_dir)
        opt_xgraph = px.optimize(q_xgraph, target)
        c_xgraph = px.compile(
            opt_xgraph, target, work_dir=work_dir, build_dir=build_dir
        )
        c_output = c_xgraph.get_compiler_output()
        # print(get_child_subgraphs(xir.Graph.deserialize(os.path.join(build_dir, "xp0.xmodel")))[-1])
        assert list(c_output.keys()) == ["xp0"]
        assert c_output.get_in_map("xp0") == {"xinput0": "xinput0"}
        assert c_output.get_out_map("xp0") == expected_name, "out_map: {}".format(
            c_output.get_out_map("xp0")
        )
        assert len(c_output.get_code_files("xp0")) == 1
        # Inspect the serialized xmodel produced by the compiler.
        g = xir.Graph.deserialize(os.path.join(build_dir, "xp0.xmodel"))
        # TODO subgraphs[1].get_attr("device") -> *** RuntimeError: bad any_cast
        subgraphs = get_child_subgraphs(g)
        assert len(subgraphs) == expected_nb_subgraphs
        dpu_subgraph = subgraphs[1]
        # import pdb; pdb.set_trace()
        # assert len(dpu_subgraph.get_children()) == 3
        # Clean up compiler artifacts before the next target iteration.
        shutil.rmtree(work_dir)
        shutil.rmtree(build_dir)
def conv2d_pool2d_naming_test(conv_names, pool_names) -> None:
    """Compile small conv2d->pool2d graphs with custom layer names.

    Args:
        conv_names: Iterable of conv layer names, zipped with pool_names.
        pool_names: Iterable of pool layer names.

    For each (conv_name, pool_name) pair a fixed 1x4x4x1 graph is built,
    quantized, compiled for DPUCZDX8G-zcu104, and basic compiler-output
    invariants are asserted.
    """
    in_shape = (1, 4, 4, 1)
    target = "DPUCZDX8G-zcu104"
    for conv_name, pool_name in zip(conv_names, pool_names):
        xgraph = _create_conv2d_pool2d_nhwc_oihw(
            in_shape,
            (2, 1, 2, 2),
            [0, 0],
            [1, 1],
            [1, 1],
            "Max",
            [2, 2],
            [0, 0],
            [1, 1],
            1,
            False,
            "OIHW",
            target,
            conv_name,
            pool_name,
        )

        # Calibration feed for the quantizer: a constant all-ones tensor.
        def inputs_func(iter):
            inputs = np.ones(in_shape, dtype=np.float32)
            return {"in1": inputs}

        work_dir = os.path.join(FILE_PATH, "work")
        build_dir = os.path.join(FILE_PATH, "build")
        quantize_func = TARGET_REGISTRY.get_target_quantizer(target)
        q_xgraph = quantize_func(xgraph, inputs_func, work_dir=work_dir)
        opt_xgraph = px.optimize(q_xgraph, target)
        c_xgraph = px.compile(
            opt_xgraph, target, work_dir=work_dir, build_dir=build_dir
        )
        c_output = c_xgraph.get_compiler_output()
        assert list(c_output.keys()) == ["xp0"]
        assert c_output.get_in_map("xp0") == {"xinput0": "xinput0:0"}
        # assert c_output.get_out_map("xp0") == {pool_name: "pool1:0"}
        assert len(c_output.get_code_files("xp0")) == 1
        # Clean up compiler artifacts before the next name pair.
        shutil.rmtree(work_dir)
        shutil.rmtree(build_dir)
def _create_scale_conv2d_nhwc_oihw(
    in_shape,
    w_shape,
    conv_padding,
    conv_strides,
    conv_dilation,
    conv_groups=1,
    kernel_layout="OIHW",
    target="DPUCZDX8G-zcu104",
) -> XGraph:
    """Build a conv2d -> max-pool -> scale -> relu -> 1x1-conv XGraph.

    Random integer weights and random 0/1 gamma/beta scale parameters are
    generated.  The graph is partitioned for ``target`` before returning.

    NOTE(review): ``kernel_layout`` is accepted but unused; kernel H/W and
    channel count are read from fixed OIHW positions of w_shape.
    """
    kernel_w, kernel_h = w_shape[2], w_shape[3]
    in_ch = w_shape[0]
    # Per-channel scale (gamma) and shift (beta) parameters in {0, 1}.
    Gamma = np.random.randint(0, 2, size=(in_ch,))
    Beta = np.random.randint(0, 2, size=(in_ch,))
    W = np.random.randint(-10, 10, size=w_shape).astype(np.float32)
    # B = np.array([1., -1.], dtype=np.float32)
    x1 = px.ops.input("in1", shape=list(in_shape))
    w1 = px.ops.constant("weight", W)
    conv1 = px.ops.conv2d(
        op_name="conv1",
        input_layer=x1,
        weights_layer=w1,
        kernel_size=[kernel_w, kernel_h],
        strides=list(conv_strides),
        padding_hw=list(conv_padding),
        dilation=list(conv_dilation),
        groups=conv_groups,
        data_layout="NHWC",
    )
    pool1 = px.ops.pool2d(
        op_name="pool1",
        input_layer=conv1,
        pool_type="Max",
        pool_size=[3, 3],
        padding=[0, 0],
        layout="NHWC",
    )
    g1 = px.ops.constant("gamma", Gamma)
    b1 = px.ops.constant("beta", Beta)
    # Scale along the channel axis (axis=3 in NHWC).
    scale = px.ops.scale("scale1", pool1, g1, b1, axis=3)
    r1 = px.ops.relu("r1", [scale])
    # Trailing 1x1 convolution keeping the channel count unchanged.
    W2 = np.random.randint(-10, 10, size=(in_ch, in_ch, 1, 1)).astype(np.float32)
    w2 = px.ops.constant("weight2", W2)
    conv2 = px.ops.conv2d(
        op_name="conv2",
        input_layer=r1,
        weights_layer=w2,
        kernel_size=[1, 1],
        strides=[1, 1],
        padding_hw=[0, 0],
        dilation=[1, 1],
        groups=1,
        data_layout="NHWC",
    )
    net = [x1, conv1, pool1, scale, r1, conv2]
    xgraph = XGRAPH_FACTORY.build_from_xlayer(net)
    xgraph = px.partition(xgraph, [target])
    return xgraph
def xcompiler_scale_conv2d_nhwc_oihw_test(
    in_shape,
    w_shape,
    conv_padding,
    conv_strides,
    conv_dilation,
    conv_groups=1,
    kernel_layout="OIHW",
    target="DPUCAHX8H-u50",
    expected_nb_subgraphs=3,
):
    """Compile a scale-conv2d graph and verify the xmodel subgraph count.

    Builds the graph via _create_scale_conv2d_nhwc_oihw, quantizes with an
    all-ones feed, compiles, deserializes the produced ``xp0.xmodel`` and
    asserts it contains ``expected_nb_subgraphs`` child subgraphs.  Work and
    build dirs are removed afterwards.
    """
    xgraph = _create_scale_conv2d_nhwc_oihw(
        in_shape,
        w_shape,
        conv_padding,
        conv_strides,
        conv_dilation,
        conv_groups,
        kernel_layout,
        target,
    )

    # Calibration feed for the quantizer: a constant all-ones tensor.
    def inputs_func(iter):
        inputs = np.ones(in_shape, dtype=np.float32)
        return {"in1": inputs}

    work_dir = os.path.join(FILE_PATH, "work")
    build_dir = os.path.join(FILE_PATH, "build")
    quantize_func = TARGET_REGISTRY.get_target_quantizer(target)
    q_xgraph = quantize_func(xgraph, inputs_func, work_dir=work_dir)
    opt_xgraph = px.optimize(q_xgraph, target)
    c_xgraph = px.compile(opt_xgraph, target, work_dir=work_dir, build_dir=build_dir)
    c_output = c_xgraph.get_compiler_output()
    g = xir.Graph.deserialize(os.path.join(build_dir, "xp0.xmodel"))
    # TODO subgraphs[1].get_attr("device") -> *** RuntimeError: bad any_cast
    subgraphs = get_child_subgraphs(g)
    assert (
        len(subgraphs) == expected_nb_subgraphs
    ), "Expected {0} subgraphs but got: {1}".format(
        expected_nb_subgraphs, len(subgraphs)
    )
    shutil.rmtree(work_dir)
    shutil.rmtree(build_dir)
def _create_resnetv1_block(
    in_shape,
    pool_size,
    pool_strides,
    w1_shape,
    w2_shape,
    w3_shape,
    w4_shape,
    c1_padding=[0, 0, 0, 0],
    c2_padding=[0, 0, 0, 0],
    c3_padding=[0, 0, 0, 0],
    c4_padding=[0, 0, 0, 0],
    c1_strides=[1, 1],
    c2_strides=[1, 1],
    c3_strides=[1, 1],
    c4_strides=[1, 1],
    c1_dilation=[1, 1],
    c2_dilation=[1, 1],
    c3_dilation=[1, 1],
    c4_dilation=[1, 1],
    kernel_layout="OIHW",
    target="DPUCZDX8G-zcu104",
) -> XGraph:
    """Build a ResNet-v1-style residual block as a partitioned XGraph.

    Topology: input -> max-pool, then two branches from the pool output —
    a shortcut conv (conv1) and a conv2 -> conv3 -> conv4 chain — joined by
    an element-wise add.  All convolutions use random integer weights and
    NHWC data layout; kernel H/W are read from OIHW weight-shape positions.

    NOTE(review): ``kernel_layout`` is accepted but unused in this body.
    """
    x1 = px.ops.input("in1", shape=list(in_shape))
    pool1 = px.ops.pool2d(
        op_name="pool1",
        input_layer=x1,
        pool_type="Max",
        pool_size=pool_size,
        padding=[0, 0],
        strides=pool_strides,
        layout="NHWC",
    )
    # Shortcut branch: single convolution from the pool output.
    W1 = np.random.randint(-10, 10, size=w1_shape).astype(np.float32)
    w1 = px.ops.constant("w1", W1)
    conv1 = px.ops.conv2d(
        op_name="conv1",
        input_layer=pool1,
        weights_layer=w1,
        kernel_size=[w1_shape[2], w1_shape[3]],
        strides=list(c1_strides),
        padding_hw=list(c1_padding),
        dilation=list(c1_dilation),
        groups=1,
        data_layout="NHWC",
    )
    # Main branch: conv2 -> conv3 -> conv4 from the same pool output.
    W2 = np.random.randint(-10, 10, size=w2_shape).astype(np.float32)
    w2 = px.ops.constant("w2", W2)
    conv2 = px.ops.conv2d(
        op_name="conv2",
        input_layer=pool1,
        weights_layer=w2,
        kernel_size=[w2_shape[2], w2_shape[3]],
        strides=list(c2_strides),
        padding_hw=list(c2_padding),
        dilation=list(c2_dilation),
        groups=1,
        data_layout="NHWC",
    )
    W3 = np.random.randint(-10, 10, size=w3_shape).astype(np.float32)
    w3 = px.ops.constant("w3", W3)
    conv3 = px.ops.conv2d(
        op_name="conv3",
        input_layer=conv2,
        weights_layer=w3,
        kernel_size=[w3_shape[2], w3_shape[3]],
        strides=list(c3_strides),
        padding_hw=list(c3_padding),
        dilation=list(c3_dilation),
        groups=1,
        data_layout="NHWC",
    )
    W4 = np.random.randint(-10, 10, size=w4_shape).astype(np.float32)
    w4 = px.ops.constant("w4", W4)
    conv4 = px.ops.conv2d(
        op_name="conv4",
        input_layer=conv3,
        weights_layer=w4,
        kernel_size=[w4_shape[2], w4_shape[3]],
        strides=list(c4_strides),
        padding_hw=list(c4_padding),
        dilation=list(c4_dilation),
        groups=1,
        data_layout="NHWC",
    )
    # Residual join of shortcut and main branch.
    add = px.ops.eltwise("add", conv1, conv4)
    net = [x1, pool1, conv1, conv2, conv3, conv4, add]
    xgraph = XGRAPH_FACTORY.build_from_xlayer(net)
    xgraph = px.partition(xgraph, [target])
    return xgraph
def xcompiler_resnetv1_block_test(
    in_shape,
    pool_size,
    pool_strides,
    w1_shape,
    w2_shape,
    w3_shape,
    w4_shape,
    c1_padding=[0, 0, 0, 0],
    c2_padding=[0, 0, 0, 0],
    c3_padding=[0, 0, 0, 0],
    c4_padding=[0, 0, 0, 0],
    c1_strides=[1, 1],
    c2_strides=[1, 1],
    c3_strides=[1, 1],
    c4_strides=[1, 1],
    c1_dilation=[1, 1],
    c2_dilation=[1, 1],
    c3_dilation=[1, 1],
    c4_dilation=[1, 1],
    kernel_layout="OIHW",
    target="DPUCAHX8H-u50",
    expected_nb_subgraphs=3,
):
    """Compile a ResNet-v1 block and verify the xmodel subgraph count.

    Builds the block via _create_resnetv1_block, quantizes with an all-ones
    feed, compiles for ``target``, deserializes the produced ``xp0.xmodel``
    and asserts the number of child subgraphs.  Work and build directories
    are removed afterwards.
    """
    xgraph = _create_resnetv1_block(
        in_shape,
        pool_size,
        pool_strides,
        w1_shape,
        w2_shape,
        w3_shape,
        w4_shape,
        c1_padding,
        c2_padding,
        c3_padding,
        c4_padding,
        c1_strides,
        c2_strides,
        c3_strides,
        c4_strides,
        c1_dilation,
        c2_dilation,
        c3_dilation,
        c4_dilation,
        kernel_layout,
        target,
    )

    # Calibration feed for the quantizer: a constant all-ones tensor.
    def inputs_func(iter):
        inputs = np.ones(in_shape, dtype=np.float32)
        return {"in1": inputs}

    work_dir = os.path.join(FILE_PATH, "work")
    build_dir = os.path.join(FILE_PATH, "build")
    quantize_func = TARGET_REGISTRY.get_target_quantizer(target)
    q_xgraph = quantize_func(xgraph, inputs_func, work_dir=work_dir)
    opt_xgraph = px.optimize(q_xgraph, target)
    c_xgraph = px.compile(opt_xgraph, target, work_dir=work_dir, build_dir=build_dir)
    c_output = c_xgraph.get_compiler_output()
    g = xir.Graph.deserialize(os.path.join(build_dir, "xp0.xmodel"))
    # TODO subgraphs[1].get_attr("device") -> *** RuntimeError: bad any_cast
    subgraphs = get_child_subgraphs(g)
    assert (
        len(subgraphs) == expected_nb_subgraphs
    ), "Expected {0} subgraphs but got: {1}".format(
        expected_nb_subgraphs, len(subgraphs)
    )
    shutil.rmtree(work_dir)
    shutil.rmtree(build_dir)
def _create_conv2d_leaky_relu_nhwc_oihw(
    in_shape,
    w_shape,
    conv_padding,
    conv_strides,
    conv_dilation,
    kernel_layout="OIHW",
    target="DPUCZDX8G-zcu104",
) -> XGraph:
    """Build and partition an input -> conv2d -> leaky_relu XGraph (NHWC).

    Random integer weights are generated for the convolution; the leaky
    ReLU uses alpha=0.1.  The graph is partitioned for ``target``.

    NOTE(review): ``kernel_layout`` is accepted but unused; kernel H/W are
    read from fixed OIHW positions w_shape[2]/w_shape[3].
    """
    kernel_w, kernel_h = w_shape[2], w_shape[3]
    W = np.random.randint(-10, 10, size=w_shape).astype(np.float32)
    # B = np.array([1., -1.], dtype=np.float32)
    x1 = px.ops.input("in1", shape=list(in_shape))
    w1 = px.ops.constant("weight", W)
    conv1 = px.ops.conv2d(
        op_name="conv1",
        input_layer=x1,
        weights_layer=w1,
        kernel_size=[kernel_w, kernel_h],
        strides=list(conv_strides),
        padding_hw=list(conv_padding),
        dilation=list(conv_dilation),
        data_layout="NHWC",
    )
    lr1 = px.ops.leaky_relu("lr1", [conv1], alpha=0.1)
    net = [x1, conv1, lr1]
    xgraph = XGRAPH_FACTORY.build_from_xlayer(net)
    xgraph = px.partition(xgraph, [target])
    return xgraph
def conv2d_leaky_relu_nhwc_oihw_test(
    in_shape,
    w_shape,
    conv_padding,
    conv_strides,
    conv_dilation,
    kernel_layout="OIHW",
    targets=["DPUCZDX8G-zcu104"],
) -> None:
    """Quantize, optimize and compile a conv2d->leaky_relu graph per target.

    Asserts the compiler output has a single "xp0" partition with the
    expected in/out maps and one code file, then removes the work/build
    directories before the next target iteration.
    """
    for target in targets:
        xgraph = _create_conv2d_leaky_relu_nhwc_oihw(
            in_shape,
            w_shape,
            conv_padding,
            conv_strides,
            conv_dilation,
            kernel_layout,
            target,
        )

        # Calibration feed for the quantizer: a constant all-ones tensor.
        def inputs_func(iter):
            inputs = np.ones(in_shape, dtype=np.float32)
            return {"in1": inputs}

        work_dir = os.path.join(FILE_PATH, "work")
        build_dir = os.path.join(FILE_PATH, "build")
        quantize_func = TARGET_REGISTRY.get_target_quantizer(target)
        q_xgraph = quantize_func(xgraph, inputs_func, work_dir=work_dir)
        opt_xgraph = px.optimize(q_xgraph, target)
        c_xgraph = px.compile(
            opt_xgraph, target, work_dir=work_dir, build_dir=build_dir
        )
        c_output = c_xgraph.get_compiler_output()
        assert list(c_output.keys()) == ["xp0"]
        assert c_output.get_in_map("xp0") == {"xinput0": "xinput0:0"}
        assert c_output.get_out_map("xp0") == {"lr1": "lr1:0"}
        assert len(c_output.get_code_files("xp0")) == 1
        shutil.rmtree(work_dir)
        shutil.rmtree(build_dir)
def xcompiler_conv2d_leaky_relu_nhwc_oihw_test(
    in_shape,
    w_shape,
    conv_padding,
    conv_strides,
    conv_dilation,
    kernel_layout="OIHW",
    targets=["DPUCZDX8G-zcu104"],
    expected_nb_subgraphs=3,
) -> None:
    """Compile a conv2d->leaky_relu graph and verify the xmodel subgraphs.

    For each target: builds the graph, quantizes with an all-ones feed,
    compiles, deserializes the produced ``xp0.xmodel`` and asserts the
    number of child subgraphs.
    """
    for target in targets:
        xgraph = _create_conv2d_leaky_relu_nhwc_oihw(
            in_shape,
            w_shape,
            conv_padding,
            conv_strides,
            conv_dilation,
            kernel_layout,
            target,
        )

        # Calibration feed for the quantizer: a constant all-ones tensor.
        def inputs_func(iter):
            inputs = np.ones(in_shape, dtype=np.float32)
            return {"in1": inputs}

        work_dir = os.path.join(FILE_PATH, "work")
        build_dir = os.path.join(FILE_PATH, "build")
        quantize_func = TARGET_REGISTRY.get_target_quantizer(target)
        q_xgraph = quantize_func(xgraph, inputs_func, work_dir=work_dir)
        opt_xgraph = px.optimize(q_xgraph, target)
        c_xgraph = px.compile(
            opt_xgraph, target, work_dir=work_dir, build_dir=build_dir
        )
        g = xir.Graph.deserialize(os.path.join(build_dir, "xp0.xmodel"))
        # TODO subgraphs[1].get_attr("device") -> *** RuntimeError: bad any_cast
        subgraphs = get_child_subgraphs(g)
        assert (
            len(subgraphs) == expected_nb_subgraphs
        ), "Expected {0} subgraphs but got: {1}".format(
            expected_nb_subgraphs, len(subgraphs)
        )
        # Fix: clean up compiler artifacts like every sibling test does;
        # the original leaked work/build dirs between target iterations.
        shutil.rmtree(work_dir)
        shutil.rmtree(build_dir)
def _create_conv2d_bias_add_relu_nhwc_oihw(
    in_shape,
    w_shape,
    conv_padding,
    conv_strides,
    conv_dilation,
    kernel_layout="OIHW",
    target="DPUCZDX8G-zcu104",
) -> XGraph:
    """Build and partition an input -> conv2d -> bias_add -> relu XGraph.

    Random integer weights and per-output-channel biases are generated; the
    output channel count is read from ``w_shape`` via ``kernel_layout``.
    The graph is partitioned for ``target`` before being returned.
    """
    kernel_w, kernel_h = w_shape[2], w_shape[3]
    # Number of output channels, located via the 'O' axis of kernel_layout.
    channels = w_shape[kernel_layout.index("O")]
    W = np.random.randint(-10, 10, size=w_shape).astype(np.float32)
    B = np.random.randint(-10, 10, size=(channels,)).astype(np.float32)
    x1 = px.ops.input("in1", shape=list(in_shape))
    w1 = px.ops.constant("weight", W)
    conv1 = px.ops.conv2d(
        op_name="conv1",
        input_layer=x1,
        weights_layer=w1,
        kernel_size=[kernel_w, kernel_h],
        strides=list(conv_strides),
        padding_hw=list(conv_padding),
        dilation=list(conv_dilation),
        data_layout="NHWC",
    )
    b = px.ops.constant("bias", B)
    # Bias along the channel axis (axis=3 in NHWC).
    bias_add = px.ops.bias_add("bias_add", conv1, b, axis=3)
    r1 = px.ops.relu("r1", [bias_add])
    net = [x1, conv1, bias_add, r1]
    xgraph = XGRAPH_FACTORY.build_from_xlayer(net)
    xgraph = px.partition(xgraph, [target])
    return xgraph
def xcompiler_conv2d_bias_add_relu_nhwc_oihw_test(
    in_shape,
    w_shape,
    conv_padding,
    conv_strides,
    conv_dilation,
    kernel_layout="OIHW",
    targets=["DPUCZDX8G-zcu104"],
    expected_nb_subgraphs=3,
) -> None:
    """Compile a conv2d->bias_add->relu graph and verify xmodel subgraphs.

    For each target: builds the graph, quantizes with an all-ones feed,
    compiles, deserializes the produced ``xp0.xmodel`` and asserts the
    number of child subgraphs.
    """
    for target in targets:
        xgraph = _create_conv2d_bias_add_relu_nhwc_oihw(
            in_shape,
            w_shape,
            conv_padding,
            conv_strides,
            conv_dilation,
            kernel_layout,
            target,
        )

        # Calibration feed for the quantizer: a constant all-ones tensor.
        def inputs_func(iter):
            inputs = np.ones(in_shape, dtype=np.float32)
            return {"in1": inputs}

        work_dir = os.path.join(FILE_PATH, "work")
        build_dir = os.path.join(FILE_PATH, "build")
        quantize_func = TARGET_REGISTRY.get_target_quantizer(target)
        q_xgraph = quantize_func(xgraph, inputs_func, work_dir=work_dir)
        opt_xgraph = px.optimize(q_xgraph, target)
        c_xgraph = px.compile(
            opt_xgraph, target, work_dir=work_dir, build_dir=build_dir
        )
        g = xir.Graph.deserialize(os.path.join(build_dir, "xp0.xmodel"))
        # TODO subgraphs[1].get_attr("device") -> *** RuntimeError: bad any_cast
        subgraphs = get_child_subgraphs(g)
        assert (
            len(subgraphs) == expected_nb_subgraphs
        ), "Expected {0} subgraphs but got: {1}".format(
            expected_nb_subgraphs, len(subgraphs)
        )
        # Fix: clean up compiler artifacts like every sibling test does;
        # the original leaked work/build dirs between target iterations.
        shutil.rmtree(work_dir)
        shutil.rmtree(build_dir)
def _create_multi_output_conv2d_nhwc_oihw(
    in_shape,
    w_shape,
    conv_padding,
    conv_strides,
    conv_dilation,
    kernel_layout="OIHW",
    target="DPUCZDX8G-zcu104",
    out_names=["out1", "out2"],
) -> XGraph:
    """Build a partitioned XGraph with two outputs branching from one relu.

    Topology: input -> conv1 -> relu, then two 1x1-conv branches; the first
    branch's conv is named ``out_names[0]`` and the second branch ends in a
    relu named ``out_names[1]``.

    NOTE(review): ``kernel_layout`` is only used to locate the 'O' axis of
    ``w_shape``; kernel H/W are read from fixed OIHW positions.
    """
    kernel_w, kernel_h = w_shape[2], w_shape[3]
    W = np.random.randint(-100, 100, size=w_shape).astype(np.float32)
    # B = np.array([1., -1.], dtype=np.float32)
    out_ch = w_shape[kernel_layout.index("O")]
    x1 = px.ops.input("in1", shape=list(in_shape))
    w1 = px.ops.constant("weight", W)
    conv1 = px.ops.conv2d(
        op_name="conv1",
        input_layer=x1,
        weights_layer=w1,
        kernel_size=[kernel_w, kernel_h],
        strides=list(conv_strides),
        padding_hw=list(conv_padding),
        dilation=list(conv_dilation),
        data_layout="NHWC",
    )
    r1 = px.ops.relu("r1", [conv1])
    # First output branch: 1x1 conv named after out_names[0].
    W2 = np.random.randint(-10, 10, size=(10, out_ch, 1, 1)).astype(np.float32)
    w2 = px.ops.constant("weight2", W2)
    conv2 = px.ops.conv2d(
        op_name=out_names[0],
        input_layer=r1,
        weights_layer=w2,
        kernel_size=[1, 1],
        strides=[1, 1],
        padding_hw=[0, 0],
        dilation=[1, 1],
        data_layout="NHWC",
    )
    # Second output branch: 1x1 conv followed by relu named out_names[1].
    W3 = np.random.randint(-10, 10, size=(10, out_ch, 1, 1)).astype(np.float32)
    # Fix: the original reused the name "weight2" for this constant,
    # colliding with the first branch's weights; renamed to "weight3".
    w3 = px.ops.constant("weight3", W3)
    conv3 = px.ops.conv2d(
        op_name="conv3",
        input_layer=r1,
        weights_layer=w3,
        kernel_size=[1, 1],
        strides=[1, 1],
        padding_hw=[0, 0],
        dilation=[1, 1],
        data_layout="NHWC",
    )
    r3 = px.ops.relu(out_names[1], [conv3])
    net = [x1, conv1, r1, conv2, conv3, r3]
    xgraph = XGRAPH_FACTORY.build_from_xlayer(net)
    xgraph = px.partition(xgraph, [target])
    return xgraph
def multi_output_conv2d_naming_test(out_names) -> None:
    """Compile a two-output conv graph with custom output layer names.

    Args:
        out_names: Two-element sequence of output layer names passed to
            _create_multi_output_conv2d_nhwc_oihw.

    Quantizes with an all-ones feed, compiles for DPUCZDX8G-zcu104, asserts
    basic compiler-output invariants, then removes the work/build dirs.
    """
    in_shape = (1, 20, 20, 10)
    target = "DPUCZDX8G-zcu104"
    xgraph = _create_multi_output_conv2d_nhwc_oihw(
        in_shape, (10, 10, 2, 2), [0, 0], [1, 1], [1, 1], "OIHW", target, out_names,
    )

    # Calibration feed for the quantizer: a constant all-ones tensor.
    def inputs_func(iter):
        inputs = np.ones(in_shape, dtype=np.float32)
        return {"in1": inputs}

    work_dir = os.path.join(FILE_PATH, "work")
    build_dir = os.path.join(FILE_PATH, "build")
    quantize_func = TARGET_REGISTRY.get_target_quantizer(target)
    q_xgraph = quantize_func(xgraph, inputs_func, work_dir=work_dir)
    opt_xgraph = px.optimize(q_xgraph, target)
    c_xgraph = px.compile(opt_xgraph, target, work_dir=work_dir, build_dir=build_dir)
    c_output = c_xgraph.get_compiler_output()
    assert list(c_output.keys()) == ["xp0"]
    assert c_output.get_in_map("xp0") == {"xinput0": "xinput0:0"}
    # assert c_output.get_out_map("xp0") == {pool_name: "pool1:0"}
    assert len(c_output.get_code_files("xp0")) == 1
    shutil.rmtree(work_dir)
    shutil.rmtree(build_dir)
| 29.428238
| 102
| 0.616217
| 3,396
| 25,220
| 4.295936
| 0.075088
| 0.02495
| 0.017136
| 0.018233
| 0.810542
| 0.790184
| 0.766125
| 0.752759
| 0.740764
| 0.727466
| 0
| 0.03917
| 0.259001
| 25,220
| 856
| 103
| 29.462617
| 0.741492
| 0.061538
| 0
| 0.748252
| 0
| 0
| 0.047059
| 0
| 0
| 0
| 0
| 0.001168
| 0.036364
| 1
| 0.036364
| false
| 0
| 0.011189
| 0
| 0.071329
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
726d849511be626b36a6aa6702b7bbd43a929127
| 1,955
|
py
|
Python
|
CyberHealth/assessment/migrations/0009_auto_20210513_1301.py
|
communitiesuk/cyber-health
|
1854edf35d7c1f9aa0e5553e72ac7decda69d7c1
|
[
"MIT"
] | null | null | null |
CyberHealth/assessment/migrations/0009_auto_20210513_1301.py
|
communitiesuk/cyber-health
|
1854edf35d7c1f9aa0e5553e72ac7decda69d7c1
|
[
"MIT"
] | 16
|
2021-04-09T11:29:36.000Z
|
2021-05-17T16:45:08.000Z
|
CyberHealth/assessment/migrations/0009_auto_20210513_1301.py
|
communitiesuk/cyber-health
|
1854edf35d7c1f9aa0e5553e72ac7decda69d7c1
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.3 on 2021-05-13 13:01
from django.db import migrations, models
class Migration(migrations.Migration):
    """Switch every model's auto-created ``id`` field to BigAutoField."""

    dependencies = [
        ('assessment', '0007_pathway_pathwaygroup'),
    ]

    # Each model receives an identical primary-key alteration, so the
    # operation list is generated instead of being spelled out eight times.
    operations = [
        migrations.AlterField(
            model_name=model,
            name='id',
            field=models.BigAutoField(
                auto_created=True,
                primary_key=True,
                serialize=False,
                verbose_name='ID',
            ),
        )
        for model in (
            'answer',
            'choice',
            'organisation',
            'organisationregion',
            'organisationtype',
            'pathway',
            'pathwaygroup',
            'question',
        )
    ]
| 36.203704
| 111
| 0.607161
| 196
| 1,955
| 5.882653
| 0.219388
| 0.083261
| 0.173461
| 0.201214
| 0.765828
| 0.765828
| 0.765828
| 0.765828
| 0.765828
| 0.765828
| 0
| 0.013371
| 0.273146
| 1,955
| 53
| 112
| 36.886792
| 0.79803
| 0.023018
| 0
| 0.680851
| 1
| 0
| 0.079665
| 0.013103
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.021277
| 0
| 0.085106
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7270727a9b4944d62920d9b2f79d851c0e493b12
| 4,786
|
py
|
Python
|
python/tests/test_publish_async.py
|
parasyte/pubnub-python
|
4a44c563ea8af7211197d166596be41ede05c179
|
[
"MIT"
] | null | null | null |
python/tests/test_publish_async.py
|
parasyte/pubnub-python
|
4a44c563ea8af7211197d166596be41ede05c179
|
[
"MIT"
] | null | null | null |
python/tests/test_publish_async.py
|
parasyte/pubnub-python
|
4a44c563ea8af7211197d166596be41ede05c179
|
[
"MIT"
] | null | null | null |
from Pubnub import Pubnub
import time
# Client using the public PubNub "demo" keyset (no encryption).
pubnub = Pubnub("demo","demo")
# Client with AES message encryption enabled via cipher key "enigma".
pubnub_enc = Pubnub("demo", "demo", cipher_key="enigma")
# Client with a PAM-enabled keyset: publish, subscribe and secret keys.
pubnub_pam = Pubnub("pub-c-c077418d-f83c-4860-b213-2f6c77bde29a",
    "sub-c-e8839098-f568-11e2-a11a-02ee2ddab7fe", "sec-c-OGU3Y2Q4ZWUtNDQwMC00NTI1LThjNWYtNWJmY2M4OGIwNjEy")
# Publish and receive string
def test_1():
    """Round-trip a plain string through publish/subscribe."""
    channel = "test_1-" + str(time.time())
    message = "I am a string"

    def on_message(resp, ch=None):
        # Payload received: verify it and stop listening.
        assert resp == message
        pubnub.unsubscribe(channel)

    def on_connect(resp):
        # Subscription is live: publish the test payload.
        pubnub.publish(channel, message)

    def on_error(resp):
        assert False

    pubnub.subscribe(channel, callback=on_message, connect=on_connect, error=on_error)
# Publish and receive array
def test_2():
    """Round-trip a two-element list through publish/subscribe."""
    channel = "test_2-" + str(time.time())
    message = [1,2]
    def _cb(resp, ch=None):
        # Payload received: verify it and stop listening.
        assert resp == message
        pubnub.unsubscribe(channel)
    def _connect(resp):
        # Subscription is live: publish the test payload.
        pubnub.publish(channel,message)
    def _error(resp):
        assert False
    pubnub.subscribe(channel, callback=_cb, connect=_connect, error=_error)
# Publish and receive json object
def test_3():
    """Round-trip a JSON object (dict) through publish/subscribe."""
    # Fix: the channel prefix was copy-pasted as "test_2-", colliding with
    # test_2's channel namespace; each test should use its own prefix.
    channel = "test_3-" + str(time.time())
    message = { "a" : "b" }
    def _cb(resp, ch=None):
        assert resp == message
        pubnub.unsubscribe(channel)
    def _connect(resp):
        pubnub.publish(channel,message)
    def _error(resp):
        assert False
    pubnub.subscribe(channel, callback=_cb, connect=_connect, error=_error)
# Publish and receive number
def test_4():
    """Round-trip an integer through publish/subscribe."""
    # Fix: the channel prefix was copy-pasted as "test_2-", colliding with
    # test_2's channel namespace; each test should use its own prefix.
    channel = "test_4-" + str(time.time())
    message = 100
    def _cb(resp, ch=None):
        assert resp == message
        pubnub.unsubscribe(channel)
    def _connect(resp):
        pubnub.publish(channel,message)
    def _error(resp):
        assert False
    pubnub.subscribe(channel, callback=_cb, connect=_connect, error=_error)
# Publish and receive number string
def test_5():
    """Round-trip a numeric string (stays a string) through pub/sub."""
    channel = "test_5-" + str(time.time())
    message = "100"
    def _cb(resp, ch=None):
        # Payload received: verify it and stop listening.
        assert resp == message
        pubnub.unsubscribe(channel)
    def _connect(resp):
        # Subscription is live: publish the test payload.
        pubnub.publish(channel,message)
    def _error(resp):
        assert False
    pubnub.subscribe(channel, callback=_cb, connect=_connect, error=_error)
# Publish and receive string (Encryption enabled)
def test_6():
    """Round-trip a string through the cipher-key-enabled client."""
    channel = "test_6-" + str(time.time())
    message = "I am a string"
    def _cb(resp, ch=None):
        # Payload received (decrypted): verify it and stop listening.
        assert resp == message
        pubnub_enc.unsubscribe(channel)
    def _connect(resp):
        # Subscription is live: publish via the encrypting client.
        pubnub_enc.publish(channel,message)
    def _error(resp):
        assert False
    pubnub_enc.subscribe(channel, callback=_cb, connect=_connect, error=_error)
# Publish and receive array (Encryption enabled)
def test_7():
    """Round-trip a list with the encryption-enabled client.

    Bug fix: this "(Encryption enabled)" test unsubscribed the encrypting
    client (``pubnub_enc``) but published and subscribed via the plain
    ``pubnub`` client, so the encrypted path was never exercised and the
    unsubscribe targeted a client that had no subscription. All three
    operations now use ``pubnub_enc``, consistent with test_6/test_8..12.
    """
    channel = "test_7-" + str(time.time())
    message = [1, 2]

    def _cb(resp, ch=None):
        # Payload received must equal the payload published.
        assert resp == message
        pubnub_enc.unsubscribe(channel)

    def _connect(resp):
        # Publish only after the subscription is live.
        pubnub_enc.publish(channel, message)

    def _error(resp):
        assert False

    pubnub_enc.subscribe(channel, callback=_cb, connect=_connect, error=_error)
# Publish and receive json object (Encryption enabled)
def test_8():
    """Round-trip a JSON object (dict) with the encryption-enabled client."""
    channel = f"test_8-{time.time()}"
    message = {"a": "b"}

    def _on_message(resp, ch=None):
        # Payload received must equal the payload published.
        assert resp == message
        pubnub_enc.unsubscribe(channel)

    def _on_connect(resp):
        # Publish only after the subscription is live.
        pubnub_enc.publish(channel, message)

    def _on_error(resp):
        assert False

    pubnub_enc.subscribe(channel, callback=_on_message, connect=_on_connect, error=_on_error)
# Publish and receive number (Encryption enabled)
def test_9():
    """Round-trip an integer with the encryption-enabled client."""
    channel = f"test_9-{time.time()}"
    message = 100

    def _on_message(resp, ch=None):
        # Payload received must equal the payload published.
        assert resp == message
        pubnub_enc.unsubscribe(channel)

    def _on_connect(resp):
        # Publish only after the subscription is live.
        pubnub_enc.publish(channel, message)

    def _on_error(resp):
        assert False

    pubnub_enc.subscribe(channel, callback=_on_message, connect=_on_connect, error=_on_error)
# Publish and receive number string (Encryption enabled)
def test_10():
    """Round-trip a numeric string with the encryption-enabled client."""
    channel = f"test_10-{time.time()}"
    message = "100"

    def _on_message(resp, ch=None):
        # Payload received must equal the payload published.
        assert resp == message
        pubnub_enc.unsubscribe(channel)

    def _on_connect(resp):
        # Publish only after the subscription is live.
        pubnub_enc.publish(channel, message)

    def _on_error(resp):
        assert False

    pubnub_enc.subscribe(channel, callback=_on_message, connect=_on_connect, error=_on_error)
# Publish and receive object string (Encryption enabled)
def test_11():
    """Round-trip a JSON-object-shaped *string* with the encryption-enabled client.

    The payload is a str, not a dict, so it must arrive as the same str.
    """
    channel = f"test_11-{time.time()}"
    message = '{"a" : "b"}'

    def _on_message(resp, ch=None):
        # Payload received must equal the payload published.
        assert resp == message
        pubnub_enc.unsubscribe(channel)

    def _on_connect(resp):
        # Publish only after the subscription is live.
        pubnub_enc.publish(channel, message)

    def _on_error(resp):
        assert False

    pubnub_enc.subscribe(channel, callback=_on_message, connect=_on_connect, error=_on_error)
# Publish and receive array string (Encryption enabled)
def test_12():
    """Round-trip an array-shaped *string* with the encryption-enabled client.

    The payload is a str, not a list, so it must arrive as the same str.
    """
    channel = f"test_12-{time.time()}"
    message = '[1,2]'

    def _on_message(resp, ch=None):
        # Payload received must equal the payload published.
        assert resp == message
        pubnub_enc.unsubscribe(channel)

    def _on_connect(resp):
        # Publish only after the subscription is live.
        pubnub_enc.publish(channel, message)

    def _on_error(resp):
        assert False

    pubnub_enc.subscribe(channel, callback=_on_message, connect=_on_connect, error=_on_error)
| 20.899563
| 104
| 0.720017
| 653
| 4,786
| 5.096478
| 0.104135
| 0.054087
| 0.061298
| 0.064904
| 0.852464
| 0.818209
| 0.818209
| 0.807392
| 0.807392
| 0.807392
| 0
| 0.023558
| 0.148558
| 4,786
| 228
| 105
| 20.991228
| 0.793129
| 0.105516
| 0
| 0.789855
| 0
| 0
| 0.070089
| 0.032349
| 0
| 0
| 0
| 0
| 0.173913
| 1
| 0.347826
| false
| 0
| 0.014493
| 0
| 0.362319
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
72b5665d532c2533d769c337f0fec1cd3c816a6b
| 10,932
|
py
|
Python
|
packages/gtmapi/lmsrvlabbook/tests/snapshots/snap_test_labbook_remote_ops.py
|
jjwatts/gigantum-client
|
88ce0475fb6880322bdd06d987c494e29064f278
|
[
"MIT"
] | null | null | null |
packages/gtmapi/lmsrvlabbook/tests/snapshots/snap_test_labbook_remote_ops.py
|
jjwatts/gigantum-client
|
88ce0475fb6880322bdd06d987c494e29064f278
|
[
"MIT"
] | null | null | null |
packages/gtmapi/lmsrvlabbook/tests/snapshots/snap_test_labbook_remote_ops.py
|
jjwatts/gigantum-client
|
88ce0475fb6880322bdd06d987c494e29064f278
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# snapshottest: v1 - https://goo.gl/zC4yUc
from __future__ import unicode_literals
from snapshottest import Snapshot
# Auto-generated snapshot fixtures (snapshottest v1) -- each entry is the
# expected GraphQL response for one assertion in the remote-labbook tests.
# NOTE(review): do not hand-edit values; regenerate via snapshottest's
# update mechanism so fixtures stay in sync with the resolvers.
snapshots = Snapshot()

# test_list_remote_labbooks_az, first snapshot: two edges, hasNextPage True.
snapshots['TestLabBookRemoteOperations.test_list_remote_labbooks_az 1'] = {
    'data': {
        'labbookList': {
            'remoteLabbooks': {
                'edges': [
                    {
                        'cursor': 'eyJwYWdlIjogMSwgIml0ZW0iOiAxfQ==',
                        'node': {
                            'creationDateUtc': '2018-08-30T18:01:33.312Z',
                            'description': 'No Description',
                            'id': 'UmVtb3RlTGFiYm9vazp0ZXN0ZXImdGVzdC1wcm9qLTE=',
                            'isLocal': False,
                            'modifiedDateUtc': '2018-08-30T18:01:33.312Z',
                            'name': 'test-proj-1',
                            'owner': 'tester'
                        }
                    },
                    {
                        'cursor': 'eyJwYWdlIjogMSwgIml0ZW0iOiAzfQ==',
                        'node': {
                            'creationDateUtc': '2018-08-29T18:01:33.312Z',
                            'description': 'No Description',
                            'id': 'UmVtb3RlTGFiYm9vazp0ZXN0ZXImdGVzdC1wcm9qLTI=',
                            'isLocal': False,
                            'modifiedDateUtc': '2018-09-01T18:01:33.312Z',
                            'name': 'test-proj-2',
                            'owner': 'tester'
                        }
                    }
                ],
                'pageInfo': {
                    'hasNextPage': True
                }
            }
        }
    }
}

# test_list_remote_labbooks_az, second snapshot: reversed edge order
# (proj-2 first), no isLocal field, hasNextPage False.
snapshots['TestLabBookRemoteOperations.test_list_remote_labbooks_az 2'] = {
    'data': {
        'labbookList': {
            'remoteLabbooks': {
                'edges': [
                    {
                        'cursor': 'eyJwYWdlIjogMSwgIml0ZW0iOiAzfQ==',
                        'node': {
                            'creationDateUtc': '2018-08-29T18:01:33.312Z',
                            'description': 'No Description',
                            'id': 'UmVtb3RlTGFiYm9vazp0ZXN0ZXImdGVzdC1wcm9qLTI=',
                            'modifiedDateUtc': '2018-09-01T18:01:33.312Z',
                            'name': 'test-proj-2',
                            'owner': 'tester'
                        }
                    },
                    {
                        'cursor': 'eyJwYWdlIjogMSwgIml0ZW0iOiAxfQ==',
                        'node': {
                            'creationDateUtc': '2018-08-30T18:01:33.312Z',
                            'description': 'No Description',
                            'id': 'UmVtb3RlTGFiYm9vazp0ZXN0ZXImdGVzdC1wcm9qLTE=',
                            'modifiedDateUtc': '2018-08-30T18:01:33.312Z',
                            'name': 'test-proj-1',
                            'owner': 'tester'
                        }
                    }
                ],
                'pageInfo': {
                    'hasNextPage': False
                }
            }
        }
    }
}

# test_list_remote_labbooks_modified, first snapshot: proj-2 (most recently
# modified, 2018-09-01) first; hasNextPage True.
snapshots['TestLabBookRemoteOperations.test_list_remote_labbooks_modified 1'] = {
    'data': {
        'labbookList': {
            'remoteLabbooks': {
                'edges': [
                    {
                        'cursor': 'eyJwYWdlIjogMSwgIml0ZW0iOiAzfQ==',
                        'node': {
                            'creationDateUtc': '2018-08-29T18:01:33.312Z',
                            'description': 'No Description',
                            'id': 'UmVtb3RlTGFiYm9vazp0ZXN0ZXImdGVzdC1wcm9qLTI=',
                            'isLocal': False,
                            'modifiedDateUtc': '2018-09-01T18:01:33.312Z',
                            'name': 'test-proj-2',
                            'owner': 'tester'
                        }
                    },
                    {
                        'cursor': 'eyJwYWdlIjogMSwgIml0ZW0iOiAxfQ==',
                        'node': {
                            'creationDateUtc': '2018-08-30T18:01:33.312Z',
                            'description': 'No Description',
                            'id': 'UmVtb3RlTGFiYm9vazp0ZXN0ZXImdGVzdC1wcm9qLTE=',
                            'isLocal': False,
                            'modifiedDateUtc': '2018-08-30T18:01:33.312Z',
                            'name': 'test-proj-1',
                            'owner': 'tester'
                        }
                    }
                ],
                'pageInfo': {
                    'hasNextPage': True
                }
            }
        }
    }
}

# test_list_remote_labbooks_modified, second snapshot: opposite order
# (proj-1 first), no isLocal field, hasNextPage False.
snapshots['TestLabBookRemoteOperations.test_list_remote_labbooks_modified 2'] = {
    'data': {
        'labbookList': {
            'remoteLabbooks': {
                'edges': [
                    {
                        'cursor': 'eyJwYWdlIjogMSwgIml0ZW0iOiAxfQ==',
                        'node': {
                            'creationDateUtc': '2018-08-30T18:01:33.312Z',
                            'description': 'No Description',
                            'id': 'UmVtb3RlTGFiYm9vazp0ZXN0ZXImdGVzdC1wcm9qLTE=',
                            'modifiedDateUtc': '2018-08-30T18:01:33.312Z',
                            'name': 'test-proj-1',
                            'owner': 'tester'
                        }
                    },
                    {
                        'cursor': 'eyJwYWdlIjogMSwgIml0ZW0iOiAzfQ==',
                        'node': {
                            'creationDateUtc': '2018-08-29T18:01:33.312Z',
                            'description': 'No Description',
                            'id': 'UmVtb3RlTGFiYm9vazp0ZXN0ZXImdGVzdC1wcm9qLTI=',
                            'modifiedDateUtc': '2018-09-01T18:01:33.312Z',
                            'name': 'test-proj-2',
                            'owner': 'tester'
                        }
                    }
                ],
                'pageInfo': {
                    'hasNextPage': False
                }
            }
        }
    }
}

# test_list_remote_labbooks_created, first snapshot: proj-1 (created
# 2018-08-30) first; hasNextPage True.
snapshots['TestLabBookRemoteOperations.test_list_remote_labbooks_created 1'] = {
    'data': {
        'labbookList': {
            'remoteLabbooks': {
                'edges': [
                    {
                        'cursor': 'eyJwYWdlIjogMSwgIml0ZW0iOiAxfQ==',
                        'node': {
                            'creationDateUtc': '2018-08-30T18:01:33.312Z',
                            'description': 'No Description',
                            'id': 'UmVtb3RlTGFiYm9vazp0ZXN0ZXImdGVzdC1wcm9qLTE=',
                            'isLocal': False,
                            'modifiedDateUtc': '2018-08-30T18:01:33.312Z',
                            'name': 'test-proj-1',
                            'owner': 'tester'
                        }
                    },
                    {
                        'cursor': 'eyJwYWdlIjogMSwgIml0ZW0iOiAzfQ==',
                        'node': {
                            'creationDateUtc': '2018-08-29T18:01:33.312Z',
                            'description': 'No Description',
                            'id': 'UmVtb3RlTGFiYm9vazp0ZXN0ZXImdGVzdC1wcm9qLTI=',
                            'isLocal': False,
                            'modifiedDateUtc': '2018-09-01T18:01:33.312Z',
                            'name': 'test-proj-2',
                            'owner': 'tester'
                        }
                    }
                ],
                'pageInfo': {
                    'hasNextPage': True
                }
            }
        }
    }
}

# test_list_remote_labbooks_created, second snapshot: opposite order
# (proj-2 first), no isLocal field, hasNextPage False.
snapshots['TestLabBookRemoteOperations.test_list_remote_labbooks_created 2'] = {
    'data': {
        'labbookList': {
            'remoteLabbooks': {
                'edges': [
                    {
                        'cursor': 'eyJwYWdlIjogMSwgIml0ZW0iOiAzfQ==',
                        'node': {
                            'creationDateUtc': '2018-08-29T18:01:33.312Z',
                            'description': 'No Description',
                            'id': 'UmVtb3RlTGFiYm9vazp0ZXN0ZXImdGVzdC1wcm9qLTI=',
                            'modifiedDateUtc': '2018-09-01T18:01:33.312Z',
                            'name': 'test-proj-2',
                            'owner': 'tester'
                        }
                    },
                    {
                        'cursor': 'eyJwYWdlIjogMSwgIml0ZW0iOiAxfQ==',
                        'node': {
                            'creationDateUtc': '2018-08-30T18:01:33.312Z',
                            'description': 'No Description',
                            'id': 'UmVtb3RlTGFiYm9vazp0ZXN0ZXImdGVzdC1wcm9qLTE=',
                            'modifiedDateUtc': '2018-08-30T18:01:33.312Z',
                            'name': 'test-proj-1',
                            'owner': 'tester'
                        }
                    }
                ],
                'pageInfo': {
                    'hasNextPage': False
                }
            }
        }
    }
}

# test_list_remote_labbooks_page, first snapshot: single edge (proj-1),
# hasNextPage True.
snapshots['TestLabBookRemoteOperations.test_list_remote_labbooks_page 1'] = {
    'data': {
        'labbookList': {
            'remoteLabbooks': {
                'edges': [
                    {
                        'cursor': 'eyJwYWdlIjogMSwgIml0ZW0iOiAxfQ==',
                        'node': {
                            'creationDateUtc': '2018-08-30T18:01:33.312Z',
                            'description': 'No Description',
                            'id': 'UmVtb3RlTGFiYm9vazp0ZXN0ZXImdGVzdC1wcm9qLTE=',
                            'isLocal': False,
                            'modifiedDateUtc': '2018-08-30T18:01:33.312Z',
                            'name': 'test-proj-1',
                            'owner': 'tester'
                        }
                    }
                ],
                'pageInfo': {
                    'hasNextPage': True
                }
            }
        }
    }
}

# test_list_remote_labbooks_page, second snapshot: single edge (proj-2, no
# isLocal field), hasNextPage True.
snapshots['TestLabBookRemoteOperations.test_list_remote_labbooks_page 2'] = {
    'data': {
        'labbookList': {
            'remoteLabbooks': {
                'edges': [
                    {
                        'cursor': 'eyJwYWdlIjogMSwgIml0ZW0iOiAzfQ==',
                        'node': {
                            'creationDateUtc': '2018-08-29T18:01:33.312Z',
                            'description': 'No Description',
                            'id': 'UmVtb3RlTGFiYm9vazp0ZXN0ZXImdGVzdC1wcm9qLTI=',
                            'modifiedDateUtc': '2018-09-01T18:01:33.312Z',
                            'name': 'test-proj-2',
                            'owner': 'tester'
                        }
                    }
                ],
                'pageInfo': {
                    'hasNextPage': True
                }
            }
        }
    }
}
| 38.765957
| 81
| 0.368277
| 546
| 10,932
| 7.305861
| 0.10989
| 0.028077
| 0.056154
| 0.087741
| 0.970419
| 0.970419
| 0.969917
| 0.944848
| 0.944848
| 0.944848
| 0
| 0.116584
| 0.519027
| 10,932
| 281
| 82
| 38.903915
| 0.642069
| 0.005671
| 0
| 0.652985
| 0
| 0
| 0.368179
| 0.203368
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.007463
| 0
| 0.007463
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f41043395753a12d20ee2b8b2b8f48f9d9773f59
| 243
|
py
|
Python
|
universe/vncdriver/screen/__init__.py
|
BitJetKit/universe
|
cc9ce6ec241821bfb0f3b85dd455bd36e4ee7a8c
|
[
"MIT"
] | 8,120
|
2016-12-05T06:37:45.000Z
|
2022-03-21T14:45:20.000Z
|
universe/vncdriver/screen/__init__.py
|
BitJetKit/universe
|
cc9ce6ec241821bfb0f3b85dd455bd36e4ee7a8c
|
[
"MIT"
] | 213
|
2016-12-05T09:57:37.000Z
|
2018-04-05T18:55:14.000Z
|
universe/vncdriver/screen/__init__.py
|
BitJetKit/universe
|
cc9ce6ec241821bfb0f3b85dd455bd36e4ee7a8c
|
[
"MIT"
] | 1,140
|
2016-12-05T06:50:43.000Z
|
2022-03-23T08:28:32.000Z
|
from universe.vncdriver.screen.base import Screen
from universe.vncdriver.screen.numpy_screen import NumpyScreen
from universe.vncdriver.screen.pyglet_screen import PygletScreen
from universe.vncdriver.screen.screen_buffer import ScreenBuffer
| 48.6
| 64
| 0.884774
| 31
| 243
| 6.83871
| 0.387097
| 0.226415
| 0.396226
| 0.509434
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.065844
| 243
| 4
| 65
| 60.75
| 0.933921
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
f4711b97b145ccdcb223bda2951f4845760728ff
| 82
|
py
|
Python
|
app/public/__init__.py
|
Anioko/frontcms
|
3a39c57881ae4d97b3aa65d033e30719aca648ac
|
[
"MIT"
] | 1
|
2020-06-26T05:03:48.000Z
|
2020-06-26T05:03:48.000Z
|
app/public/__init__.py
|
Anioko/frontcms
|
3a39c57881ae4d97b3aa65d033e30719aca648ac
|
[
"MIT"
] | 1
|
2021-06-02T02:15:07.000Z
|
2021-06-02T02:15:07.000Z
|
app/public/__init__.py
|
Anioko/frontcms
|
3a39c57881ae4d97b3aa65d033e30719aca648ac
|
[
"MIT"
] | null | null | null |
from app.public import errors # noqa
from app.public.views import public # noqa
| 20.5
| 42
| 0.768293
| 13
| 82
| 4.846154
| 0.538462
| 0.222222
| 0.412698
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.170732
| 82
| 3
| 43
| 27.333333
| 0.926471
| 0.109756
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
f488cc03c51c32404c669eb61637158b228d6b70
| 28,535
|
py
|
Python
|
test/test_analyzer.py
|
ckyycc/hana_os_monitor_script_v2
|
86c16b320da024caec10bb4bc71a39f55b5923fa
|
[
"MIT"
] | 6
|
2020-01-08T18:04:49.000Z
|
2021-04-25T02:11:12.000Z
|
test/test_analyzer.py
|
ckyycc/hana_os_monitor_script_v2
|
86c16b320da024caec10bb4bc71a39f55b5923fa
|
[
"MIT"
] | null | null | null |
test/test_analyzer.py
|
ckyycc/hana_os_monitor_script_v2
|
86c16b320da024caec10bb4bc71a39f55b5923fa
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from unittest.mock import MagicMock, call, patch
from util import MonitorUtility as Mu
from util import MonitorConst as Mc
from util import InfoType
from analyzer import DataAnalyzer
class TestAnalyzer(TestCase):
def setUp(self):
self.server_id = 1
self.check_id = "20191125010101001"
Mu.generate_check_id = MagicMock(return_value=self.check_id)
def test_analyze_disk(self):
disk_total, disk_free = 1234567890, 34567890
msg_list = [{Mc.MSG_TYPE: InfoType.DISK.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_HEADER: True},
{Mc.FIELD_CHECK_ID: self.check_id, Mc.FIELD_DISK_TOTAL: disk_total, Mc.FIELD_DISK_FREE: disk_free,
Mc.MSG_TYPE: InfoType.DISK.value, Mc.FIELD_SERVER_ID: self.server_id},
{Mc.FIELD_USER_NAME: "ck1adm", Mc.FIELD_FOLDER: "folder1", Mc.FIELD_DISK_USAGE_KB: 10000,
Mc.MSG_TYPE: InfoType.DISK.value, Mc.FIELD_SERVER_ID: self.server_id},
{Mc.FIELD_USER_NAME: "ck2adm", Mc.FIELD_FOLDER: "folder2", Mc.FIELD_DISK_USAGE_KB: 20000,
Mc.MSG_TYPE: InfoType.DISK.value, Mc.FIELD_SERVER_ID: self.server_id},
{Mc.FIELD_USER_NAME: "ck3adm", Mc.FIELD_FOLDER: "folder3", Mc.FIELD_DISK_USAGE_KB: 30000,
Mc.MSG_TYPE: InfoType.DISK.value, Mc.FIELD_SERVER_ID: self.server_id},
{Mc.MSG_TYPE: InfoType.DISK.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_ENDING: True}]
mock_producer = self.__mock_analyze(msg_list)
calls = [
call(Mc.TOPIC_FILTERED_INFO,
{Mc.MSG_TYPE: InfoType.DISK.value, Mc.MSG_INFO:
{"folder1": {"ck1adm": 10000}, 'folder2': {'ck2adm': 20000}, 'folder3': {'ck3adm': 30000}},
Mc.FIELD_CHECK_ID: self.check_id, Mc.FIELD_SERVER_ID: self.server_id, Mc.FIELD_DISK_TOTAL: disk_total,
Mc.FIELD_DISK_FREE: disk_free})]
mock_producer.return_value.send.assert_has_calls(calls, any_order=False) # should be sequential
def test_analyze_mem(self):
mem_total, mem_free = 1000000000, 2500000
msg_list = [{Mc.MSG_TYPE: InfoType.MEMORY.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_HEADER: True},
{Mc.FIELD_CHECK_ID: self.check_id, Mc.FIELD_MEM_TOTAL: mem_total, Mc.FIELD_MEM_FREE: mem_free,
Mc.MSG_TYPE: InfoType.MEMORY.value, Mc.FIELD_SERVER_ID: self.server_id},
{Mc.FIELD_USER_NAME: "ck1adm", Mc.FIELD_PROCESS_COMMAND: "c1", Mc.FIELD_PROCESS_ID: 1001,
Mc.FIELD_MEM: 15, Mc.MSG_TYPE: InfoType.MEMORY.value, Mc.FIELD_SERVER_ID: self.server_id},
{Mc.FIELD_USER_NAME: "ck2adm", Mc.FIELD_PROCESS_COMMAND: "c2", Mc.FIELD_PROCESS_ID: 2001,
Mc.FIELD_MEM: 25, Mc.MSG_TYPE: InfoType.MEMORY.value, Mc.FIELD_SERVER_ID: self.server_id},
{Mc.FIELD_USER_NAME: "ck3adm", Mc.FIELD_PROCESS_COMMAND: "c3", Mc.FIELD_PROCESS_ID: 3001,
Mc.FIELD_MEM: 35, Mc.MSG_TYPE: InfoType.MEMORY.value, Mc.FIELD_SERVER_ID: self.server_id},
{Mc.MSG_TYPE: InfoType.MEMORY.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_ENDING: True}]
mock_producer = self.__mock_analyze(msg_list)
calls = [
call(Mc.TOPIC_FILTERED_INFO,
{Mc.MSG_TYPE: InfoType.MEMORY.value, Mc.MSG_INFO: {"ck1adm": 15, 'ck2adm': 25, 'ck3adm': 35},
Mc.FIELD_CHECK_ID: self.check_id, Mc.FIELD_SERVER_ID: self.server_id, Mc.FIELD_MEM_TOTAL: mem_total,
Mc.FIELD_MEM_FREE: mem_free})]
mock_producer.return_value.send.assert_has_calls(calls, any_order=False) # should be sequential
def test_analyze_cpu(self):
num, usage = 512, 78
msg_list = [{Mc.MSG_TYPE: InfoType.CPU.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_HEADER: True},
{Mc.FIELD_CHECK_ID: self.check_id, Mc.FIELD_CPU_NUMBER: num, Mc.FIELD_CPU_UTILIZATION: usage,
Mc.MSG_TYPE: InfoType.CPU.value, Mc.FIELD_SERVER_ID: self.server_id},
{Mc.FIELD_USER_NAME: "ck1adm", Mc.FIELD_PROCESS_COMMAND: "c5", Mc.FIELD_PROCESS_ID: 1002,
Mc.FIELD_CPU: 18, Mc.MSG_TYPE: InfoType.CPU.value, Mc.FIELD_SERVER_ID: self.server_id},
{Mc.FIELD_USER_NAME: "ck2adm", Mc.FIELD_PROCESS_COMMAND: "c6", Mc.FIELD_PROCESS_ID: 2002,
Mc.FIELD_CPU: 28, Mc.MSG_TYPE: InfoType.CPU.value, Mc.FIELD_SERVER_ID: self.server_id},
{Mc.FIELD_USER_NAME: "ck3adm", Mc.FIELD_PROCESS_COMMAND: "c7", Mc.FIELD_PROCESS_ID: 3002,
Mc.FIELD_CPU: 38, Mc.MSG_TYPE: InfoType.CPU.value, Mc.FIELD_SERVER_ID: self.server_id},
{Mc.MSG_TYPE: InfoType.CPU.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_ENDING: True}]
mock_producer = self.__mock_analyze(msg_list)
calls = [
call(Mc.TOPIC_FILTERED_INFO,
{Mc.MSG_TYPE: InfoType.CPU.value, Mc.MSG_INFO: {"ck1adm": 18, 'ck2adm': 28, 'ck3adm': 38},
Mc.FIELD_CHECK_ID: self.check_id, Mc.FIELD_SERVER_ID: self.server_id, Mc.FIELD_CPU_NUMBER: num,
Mc.FIELD_CPU_UTILIZATION: usage})]
mock_producer.return_value.send.assert_has_calls(calls, any_order=False) # should be sequential
def test_analyze_instance(self):
msg_list = [{Mc.MSG_TYPE: InfoType.INSTANCE.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_HEADER: True},
{Mc.FIELD_CHECK_ID: self.check_id, Mc.MSG_TYPE: InfoType.INSTANCE.value,
Mc.FIELD_SERVER_ID: self.server_id},
{Mc.FIELD_SID: "CK1", Mc.FIELD_INSTANCE_NO: "00", Mc.FIELD_HOST: "server_1",
Mc.FIELD_REVISION: '1.00.122.25', Mc.FIELD_EDITION: 'Database',
Mc.MSG_TYPE: InfoType.INSTANCE.value, Mc.FIELD_SERVER_ID: self.server_id},
{Mc.FIELD_SID: "CK2", Mc.FIELD_INSTANCE_NO: "12", Mc.FIELD_HOST: "server_2",
Mc.FIELD_REVISION: '2.00.033.00', Mc.FIELD_EDITION: 'Cockpit',
Mc.MSG_TYPE: InfoType.INSTANCE.value, Mc.FIELD_SERVER_ID: self.server_id},
{Mc.FIELD_SID: "CK3", Mc.FIELD_INSTANCE_NO: "22", Mc.FIELD_HOST: "server_3",
Mc.FIELD_REVISION: '2.00.044.00', Mc.FIELD_EDITION: 'Database',
Mc.MSG_TYPE: InfoType.INSTANCE.value, Mc.FIELD_SERVER_ID: self.server_id},
{Mc.MSG_TYPE: InfoType.INSTANCE.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_ENDING: True}]
mock_producer = self.__mock_analyze(msg_list)
calls = [
call(Mc.TOPIC_FILTERED_INFO,
{Mc.MSG_TYPE: InfoType.INSTANCE.value, Mc.MSG_INFO:
{"CK1": {Mc.FIELD_SID: "CK1", Mc.FIELD_INSTANCE_NO: "00", Mc.FIELD_HOST: "server_1",
Mc.FIELD_REVISION: '1.00.122.25', Mc.FIELD_EDITION: 'Database',
Mc.FIELD_SERVER_ID: self.server_id},
"CK2": {Mc.FIELD_SID: "CK2", Mc.FIELD_INSTANCE_NO: "12", Mc.FIELD_HOST: "server_2",
Mc.FIELD_REVISION: '2.00.033.00', Mc.FIELD_EDITION: 'Cockpit',
Mc.FIELD_SERVER_ID: self.server_id},
"CK3": {Mc.FIELD_SID: "CK3", Mc.FIELD_INSTANCE_NO: "22", Mc.FIELD_HOST: "server_3",
Mc.FIELD_REVISION: '2.00.044.00', Mc.FIELD_EDITION: 'Database',
Mc.FIELD_SERVER_ID: self.server_id}},
Mc.FIELD_CHECK_ID: self.check_id, Mc.FIELD_SERVER_ID: self.server_id})]
mock_producer.return_value.send.assert_has_calls(calls, any_order=False) # should be sequential
def test_analyze_mix(self):
num, usage = 512, 78
mem_total, mem_free = 1000000000, 2500000
disk_total, disk_free = 1234567890, 34567890
msg_list = [{Mc.MSG_TYPE: InfoType.INSTANCE.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_HEADER: True},
{Mc.FIELD_CHECK_ID: self.check_id, Mc.MSG_TYPE: InfoType.INSTANCE.value,
Mc.FIELD_SERVER_ID: self.server_id},
{Mc.FIELD_SID: "CK1", Mc.FIELD_INSTANCE_NO: "00", Mc.FIELD_HOST: "server_1",
Mc.FIELD_REVISION: '1.00.122.25', Mc.FIELD_EDITION: 'Database',
Mc.MSG_TYPE: InfoType.INSTANCE.value, Mc.FIELD_SERVER_ID: self.server_id},
{Mc.MSG_TYPE: InfoType.CPU.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_HEADER: True},
{Mc.FIELD_CHECK_ID: self.check_id, Mc.FIELD_CPU_NUMBER: num, Mc.FIELD_CPU_UTILIZATION: usage,
Mc.MSG_TYPE: InfoType.CPU.value, Mc.FIELD_SERVER_ID: self.server_id},
{Mc.FIELD_USER_NAME: "ck1adm", Mc.FIELD_PROCESS_COMMAND: "c5", Mc.FIELD_PROCESS_ID: 1002,
Mc.FIELD_CPU: 18, Mc.MSG_TYPE: InfoType.CPU.value, Mc.FIELD_SERVER_ID: self.server_id},
{Mc.FIELD_SID: "CK2", Mc.FIELD_INSTANCE_NO: "12", Mc.FIELD_HOST: "server_2",
Mc.FIELD_REVISION: '2.00.033.00', Mc.FIELD_EDITION: 'Cockpit',
Mc.MSG_TYPE: InfoType.INSTANCE.value, Mc.FIELD_SERVER_ID: self.server_id},
{Mc.MSG_TYPE: InfoType.MEMORY.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_HEADER: True},
{Mc.FIELD_CHECK_ID: self.check_id, Mc.FIELD_MEM_TOTAL: mem_total, Mc.FIELD_MEM_FREE: mem_free,
Mc.MSG_TYPE: InfoType.MEMORY.value, Mc.FIELD_SERVER_ID: self.server_id},
{Mc.FIELD_USER_NAME: "ck1adm", Mc.FIELD_PROCESS_COMMAND: "c1", Mc.FIELD_PROCESS_ID: 1001,
Mc.FIELD_MEM: 15, Mc.MSG_TYPE: InfoType.MEMORY.value, Mc.FIELD_SERVER_ID: self.server_id},
{Mc.MSG_TYPE: InfoType.DISK.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_HEADER: True},
{Mc.FIELD_CHECK_ID: self.check_id, Mc.FIELD_DISK_TOTAL: disk_total, Mc.FIELD_DISK_FREE: disk_free,
Mc.MSG_TYPE: InfoType.DISK.value, Mc.FIELD_SERVER_ID: self.server_id},
{Mc.FIELD_USER_NAME: "ck2adm", Mc.FIELD_PROCESS_COMMAND: "c2", Mc.FIELD_PROCESS_ID: 2001,
Mc.FIELD_MEM: 25, Mc.MSG_TYPE: InfoType.MEMORY.value, Mc.FIELD_SERVER_ID: self.server_id},
{Mc.FIELD_USER_NAME: "ck3adm", Mc.FIELD_PROCESS_COMMAND: "c3", Mc.FIELD_PROCESS_ID: 3001,
Mc.FIELD_MEM: 35, Mc.MSG_TYPE: InfoType.MEMORY.value, Mc.FIELD_SERVER_ID: self.server_id},
{Mc.FIELD_USER_NAME: "ck2adm", Mc.FIELD_PROCESS_COMMAND: "c6", Mc.FIELD_PROCESS_ID: 2002,
Mc.FIELD_CPU: 28, Mc.MSG_TYPE: InfoType.CPU.value, Mc.FIELD_SERVER_ID: self.server_id},
{Mc.FIELD_USER_NAME: "ck3adm", Mc.FIELD_PROCESS_COMMAND: "c7", Mc.FIELD_PROCESS_ID: 3002,
Mc.FIELD_CPU: 38, Mc.MSG_TYPE: InfoType.CPU.value, Mc.FIELD_SERVER_ID: self.server_id},
{Mc.MSG_TYPE: InfoType.CPU.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_ENDING: True},
{Mc.FIELD_USER_NAME: "ck1adm", Mc.FIELD_FOLDER: "folder1", Mc.FIELD_DISK_USAGE_KB: 10000,
Mc.MSG_TYPE: InfoType.DISK.value, Mc.FIELD_SERVER_ID: self.server_id},
{Mc.FIELD_USER_NAME: "ck2adm", Mc.FIELD_FOLDER: "folder2", Mc.FIELD_DISK_USAGE_KB: 20000,
Mc.MSG_TYPE: InfoType.DISK.value, Mc.FIELD_SERVER_ID: self.server_id},
{Mc.FIELD_USER_NAME: "ck3adm", Mc.FIELD_FOLDER: "folder3", Mc.FIELD_DISK_USAGE_KB: 30000,
Mc.MSG_TYPE: InfoType.DISK.value, Mc.FIELD_SERVER_ID: self.server_id},
{Mc.MSG_TYPE: InfoType.DISK.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_ENDING: True},
{Mc.FIELD_SID: "CK3", Mc.FIELD_INSTANCE_NO: "22", Mc.FIELD_HOST: "server_3",
Mc.FIELD_REVISION: '2.00.044.00', Mc.FIELD_EDITION: 'Database',
Mc.MSG_TYPE: InfoType.INSTANCE.value, Mc.FIELD_SERVER_ID: self.server_id},
{Mc.MSG_TYPE: InfoType.INSTANCE.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_ENDING: True},
{Mc.MSG_TYPE: InfoType.MEMORY.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_ENDING: True}]
mock_producer = self.__mock_analyze(msg_list)
calls = [
call(Mc.TOPIC_FILTERED_INFO,
{Mc.MSG_TYPE: InfoType.CPU.value, Mc.MSG_INFO: {"ck1adm": 18, 'ck2adm': 28, 'ck3adm': 38},
Mc.FIELD_CHECK_ID: self.check_id, Mc.FIELD_SERVER_ID: self.server_id, Mc.FIELD_CPU_NUMBER: num,
Mc.FIELD_CPU_UTILIZATION: usage}),
call(Mc.TOPIC_FILTERED_INFO, {Mc.MSG_TYPE: InfoType.DISK.value,
Mc.MSG_INFO: {"folder1": {"ck1adm": 10000}, 'folder2': {'ck2adm': 20000},
'folder3': {'ck3adm': 30000}}, Mc.FIELD_CHECK_ID: self.check_id,
Mc.FIELD_SERVER_ID: self.server_id, Mc.FIELD_DISK_TOTAL: disk_total,
Mc.FIELD_DISK_FREE: disk_free}),
call(Mc.TOPIC_FILTERED_INFO, {Mc.MSG_TYPE: InfoType.INSTANCE.value, Mc.MSG_INFO: {
"CK1": {Mc.FIELD_SID: "CK1", Mc.FIELD_INSTANCE_NO: "00", Mc.FIELD_HOST: "server_1",
Mc.FIELD_REVISION: '1.00.122.25', Mc.FIELD_EDITION: 'Database',
Mc.FIELD_SERVER_ID: self.server_id},
"CK2": {Mc.FIELD_SID: "CK2", Mc.FIELD_INSTANCE_NO: "12", Mc.FIELD_HOST: "server_2",
Mc.FIELD_REVISION: '2.00.033.00', Mc.FIELD_EDITION: 'Cockpit',
Mc.FIELD_SERVER_ID: self.server_id},
"CK3": {Mc.FIELD_SID: "CK3", Mc.FIELD_INSTANCE_NO: "22", Mc.FIELD_HOST: "server_3",
Mc.FIELD_REVISION: '2.00.044.00', Mc.FIELD_EDITION: 'Database',
Mc.FIELD_SERVER_ID: self.server_id}}, Mc.FIELD_CHECK_ID: self.check_id,
Mc.FIELD_SERVER_ID: self.server_id}),
call(Mc.TOPIC_FILTERED_INFO,
{Mc.MSG_TYPE: InfoType.MEMORY.value, Mc.MSG_INFO: {"ck1adm": 15, 'ck2adm': 25, 'ck3adm': 35},
Mc.FIELD_CHECK_ID: self.check_id, Mc.FIELD_SERVER_ID: self.server_id, Mc.FIELD_MEM_TOTAL: mem_total,
Mc.FIELD_MEM_FREE: mem_free})
]
mock_producer.return_value.send.assert_has_calls(calls, any_order=False) # should be sequential
def test_analyze_mix_abandon_if_no_ending(self):
num, usage = 512, 78
mem_total, mem_free = 1000000000, 2500000
disk_total, disk_free = 1234567890, 34567890
msg_list = [{Mc.MSG_TYPE: InfoType.INSTANCE.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_HEADER: True},
{Mc.FIELD_CHECK_ID: self.check_id, Mc.MSG_TYPE: InfoType.INSTANCE.value,
Mc.FIELD_SERVER_ID: self.server_id},
{Mc.FIELD_SID: "CK1", Mc.FIELD_INSTANCE_NO: "00", Mc.FIELD_HOST: "server_1",
Mc.FIELD_REVISION: '1.00.122.25', Mc.FIELD_EDITION: 'Database',
Mc.MSG_TYPE: InfoType.INSTANCE.value, Mc.FIELD_SERVER_ID: self.server_id},
{Mc.MSG_TYPE: InfoType.CPU.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_HEADER: True},
{Mc.FIELD_CHECK_ID: self.check_id, Mc.FIELD_CPU_NUMBER: num, Mc.FIELD_CPU_UTILIZATION: usage,
Mc.MSG_TYPE: InfoType.CPU.value, Mc.FIELD_SERVER_ID: self.server_id},
{Mc.FIELD_USER_NAME: "ck1adm", Mc.FIELD_PROCESS_COMMAND: "c5", Mc.FIELD_PROCESS_ID: 1002,
Mc.FIELD_CPU: 18, Mc.MSG_TYPE: InfoType.CPU.value, Mc.FIELD_SERVER_ID: self.server_id},
{Mc.FIELD_SID: "CK2", Mc.FIELD_INSTANCE_NO: "12", Mc.FIELD_HOST: "server_2",
Mc.FIELD_REVISION: '2.00.033.00', Mc.FIELD_EDITION: 'Cockpit',
Mc.MSG_TYPE: InfoType.INSTANCE.value, Mc.FIELD_SERVER_ID: self.server_id},
{Mc.MSG_TYPE: InfoType.MEMORY.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_HEADER: True},
{Mc.FIELD_CHECK_ID: self.check_id, Mc.FIELD_MEM_TOTAL: mem_total, Mc.FIELD_MEM_FREE: mem_free,
Mc.MSG_TYPE: InfoType.MEMORY.value, Mc.FIELD_SERVER_ID: self.server_id},
{Mc.FIELD_USER_NAME: "ck1adm", Mc.FIELD_PROCESS_COMMAND: "c1", Mc.FIELD_PROCESS_ID: 1001,
Mc.FIELD_MEM: 15, Mc.MSG_TYPE: InfoType.MEMORY.value, Mc.FIELD_SERVER_ID: self.server_id},
{Mc.MSG_TYPE: InfoType.DISK.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_HEADER: True},
{Mc.FIELD_CHECK_ID: self.check_id, Mc.FIELD_DISK_TOTAL: disk_total, Mc.FIELD_DISK_FREE: disk_free,
Mc.MSG_TYPE: InfoType.DISK.value, Mc.FIELD_SERVER_ID: self.server_id},
{Mc.FIELD_USER_NAME: "ck2adm", Mc.FIELD_PROCESS_COMMAND: "c2", Mc.FIELD_PROCESS_ID: 2001,
Mc.FIELD_MEM: 25, Mc.MSG_TYPE: InfoType.MEMORY.value, Mc.FIELD_SERVER_ID: self.server_id},
{Mc.FIELD_USER_NAME: "ck3adm", Mc.FIELD_PROCESS_COMMAND: "c3", Mc.FIELD_PROCESS_ID: 3001,
Mc.FIELD_MEM: 35, Mc.MSG_TYPE: InfoType.MEMORY.value, Mc.FIELD_SERVER_ID: self.server_id},
{Mc.FIELD_USER_NAME: "ck2adm", Mc.FIELD_PROCESS_COMMAND: "c6", Mc.FIELD_PROCESS_ID: 2002,
Mc.FIELD_CPU: 28, Mc.MSG_TYPE: InfoType.CPU.value, Mc.FIELD_SERVER_ID: self.server_id},
{Mc.FIELD_USER_NAME: "ck3adm", Mc.FIELD_PROCESS_COMMAND: "c7", Mc.FIELD_PROCESS_ID: 3002,
Mc.FIELD_CPU: 38, Mc.MSG_TYPE: InfoType.CPU.value, Mc.FIELD_SERVER_ID: self.server_id},
{Mc.MSG_TYPE: InfoType.CPU.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_ENDING: True},
{Mc.FIELD_USER_NAME: "ck1adm", Mc.FIELD_FOLDER: "folder1", Mc.FIELD_DISK_USAGE_KB: 10000,
Mc.MSG_TYPE: InfoType.DISK.value, Mc.FIELD_SERVER_ID: self.server_id},
{Mc.FIELD_USER_NAME: "ck2adm", Mc.FIELD_FOLDER: "folder2", Mc.FIELD_DISK_USAGE_KB: 20000,
Mc.MSG_TYPE: InfoType.DISK.value, Mc.FIELD_SERVER_ID: self.server_id},
{Mc.FIELD_USER_NAME: "ck3adm", Mc.FIELD_FOLDER: "folder3", Mc.FIELD_DISK_USAGE_KB: 30000,
Mc.MSG_TYPE: InfoType.DISK.value, Mc.FIELD_SERVER_ID: self.server_id},
{Mc.MSG_TYPE: InfoType.DISK.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_ENDING: True},
{Mc.FIELD_SID: "CK3", Mc.FIELD_INSTANCE_NO: "22", Mc.FIELD_HOST: "server_3",
Mc.FIELD_REVISION: '2.00.044.00', Mc.FIELD_EDITION: 'Database',
Mc.MSG_TYPE: InfoType.INSTANCE.value, Mc.FIELD_SERVER_ID: self.server_id},
# remove ending of instance
# {Mc.MSG_TYPE: InfoType.INSTANCE.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_ENDING: True},
{Mc.MSG_TYPE: InfoType.MEMORY.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_ENDING: True}]
mock_producer = self.__mock_analyze(msg_list)
calls = [
call(Mc.TOPIC_FILTERED_INFO,
{Mc.MSG_TYPE: InfoType.CPU.value, Mc.MSG_INFO: {"ck1adm": 18, 'ck2adm': 28, 'ck3adm': 38},
Mc.FIELD_CHECK_ID: self.check_id, Mc.FIELD_SERVER_ID: self.server_id, Mc.FIELD_CPU_NUMBER: num,
Mc.FIELD_CPU_UTILIZATION: usage}),
call(Mc.TOPIC_FILTERED_INFO, {Mc.MSG_TYPE: InfoType.DISK.value,
Mc.MSG_INFO: {"folder1": {"ck1adm": 10000}, 'folder2': {'ck2adm': 20000},
'folder3': {'ck3adm': 30000}}, Mc.FIELD_CHECK_ID: self.check_id,
Mc.FIELD_SERVER_ID: self.server_id, Mc.FIELD_DISK_TOTAL: disk_total,
Mc.FIELD_DISK_FREE: disk_free}),
call(Mc.TOPIC_FILTERED_INFO,
{Mc.MSG_TYPE: InfoType.MEMORY.value, Mc.MSG_INFO: {"ck1adm": 15, 'ck2adm': 25, 'ck3adm': 35},
Mc.FIELD_CHECK_ID: self.check_id, Mc.FIELD_SERVER_ID: self.server_id, Mc.FIELD_MEM_TOTAL: mem_total,
Mc.FIELD_MEM_FREE: mem_free})
]
mock_producer.return_value.send.assert_has_calls(calls, any_order=False) # should be sequential
    def test_analyze_mix_abandon_first_msg_if_second_comes_before_ending_of_first_one(self):
        """An unfinished message group must be abandoned when a new one starts.

        A first INSTANCE group (header + CK1/CK2/CK3 payloads) is streamed but
        never closed with MSG_ENDING; a second INSTANCE group (CK5) then
        arrives and IS closed.  Only the second group may be published, while
        the interleaved CPU / MEMORY / DISK groups — each properly ended —
        are published normally.
        """
        # expected resource figures the analyzer should echo back
        num, usage = 512, 78
        mem_total, mem_free = 1000000000, 2500000
        disk_total, disk_free = 1234567890, 34567890
        # interleaved stream of CPU / MEMORY / DISK / INSTANCE messages;
        # each group opens with MSG_HEADER and normally closes with MSG_ENDING
        msg_list = [{Mc.MSG_TYPE: InfoType.INSTANCE.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_HEADER: True},
                    {Mc.FIELD_CHECK_ID: self.check_id, Mc.MSG_TYPE: InfoType.INSTANCE.value,
                     Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.FIELD_SID: "CK1", Mc.FIELD_INSTANCE_NO: "00", Mc.FIELD_HOST: "server_1",
                     Mc.FIELD_REVISION: '1.00.122.25', Mc.FIELD_EDITION: 'Database',
                     Mc.MSG_TYPE: InfoType.INSTANCE.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.MSG_TYPE: InfoType.CPU.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_HEADER: True},
                    {Mc.FIELD_CHECK_ID: self.check_id, Mc.FIELD_CPU_NUMBER: num, Mc.FIELD_CPU_UTILIZATION: usage,
                     Mc.MSG_TYPE: InfoType.CPU.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.FIELD_USER_NAME: "ck1adm", Mc.FIELD_PROCESS_COMMAND: "c5", Mc.FIELD_PROCESS_ID: 1002,
                     Mc.FIELD_CPU: 18, Mc.MSG_TYPE: InfoType.CPU.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.FIELD_SID: "CK2", Mc.FIELD_INSTANCE_NO: "12", Mc.FIELD_HOST: "server_2",
                     Mc.FIELD_REVISION: '2.00.033.00', Mc.FIELD_EDITION: 'Cockpit',
                     Mc.MSG_TYPE: InfoType.INSTANCE.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.MSG_TYPE: InfoType.MEMORY.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_HEADER: True},
                    {Mc.FIELD_CHECK_ID: self.check_id, Mc.FIELD_MEM_TOTAL: mem_total, Mc.FIELD_MEM_FREE: mem_free,
                     Mc.MSG_TYPE: InfoType.MEMORY.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.FIELD_USER_NAME: "ck1adm", Mc.FIELD_PROCESS_COMMAND: "c1", Mc.FIELD_PROCESS_ID: 1001,
                     Mc.FIELD_MEM: 15, Mc.MSG_TYPE: InfoType.MEMORY.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.MSG_TYPE: InfoType.DISK.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_HEADER: True},
                    {Mc.FIELD_CHECK_ID: self.check_id, Mc.FIELD_DISK_TOTAL: disk_total, Mc.FIELD_DISK_FREE: disk_free,
                     Mc.MSG_TYPE: InfoType.DISK.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.FIELD_USER_NAME: "ck2adm", Mc.FIELD_PROCESS_COMMAND: "c2", Mc.FIELD_PROCESS_ID: 2001,
                     Mc.FIELD_MEM: 25, Mc.MSG_TYPE: InfoType.MEMORY.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.FIELD_USER_NAME: "ck3adm", Mc.FIELD_PROCESS_COMMAND: "c3", Mc.FIELD_PROCESS_ID: 3001,
                     Mc.FIELD_MEM: 35, Mc.MSG_TYPE: InfoType.MEMORY.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.FIELD_USER_NAME: "ck2adm", Mc.FIELD_PROCESS_COMMAND: "c6", Mc.FIELD_PROCESS_ID: 2002,
                     Mc.FIELD_CPU: 28, Mc.MSG_TYPE: InfoType.CPU.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.FIELD_USER_NAME: "ck3adm", Mc.FIELD_PROCESS_COMMAND: "c7", Mc.FIELD_PROCESS_ID: 3002,
                     Mc.FIELD_CPU: 38, Mc.MSG_TYPE: InfoType.CPU.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.MSG_TYPE: InfoType.CPU.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_ENDING: True},
                    {Mc.FIELD_USER_NAME: "ck1adm", Mc.FIELD_FOLDER: "folder1", Mc.FIELD_DISK_USAGE_KB: 10000,
                     Mc.MSG_TYPE: InfoType.DISK.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.FIELD_USER_NAME: "ck2adm", Mc.FIELD_FOLDER: "folder2", Mc.FIELD_DISK_USAGE_KB: 20000,
                     Mc.MSG_TYPE: InfoType.DISK.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.FIELD_USER_NAME: "ck3adm", Mc.FIELD_FOLDER: "folder3", Mc.FIELD_DISK_USAGE_KB: 30000,
                     Mc.MSG_TYPE: InfoType.DISK.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.MSG_TYPE: InfoType.DISK.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_ENDING: True},
                    {Mc.FIELD_SID: "CK3", Mc.FIELD_INSTANCE_NO: "22", Mc.FIELD_HOST: "server_3",
                     Mc.FIELD_REVISION: '2.00.044.00', Mc.FIELD_EDITION: 'Database',
                     Mc.MSG_TYPE: InfoType.INSTANCE.value, Mc.FIELD_SERVER_ID: self.server_id},
                    # remove ending of instance
                    # {Mc.MSG_TYPE: InfoType.INSTANCE.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_ENDING: True},
                    # second one comes
                    {Mc.MSG_TYPE: InfoType.INSTANCE.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_HEADER: True},
                    {Mc.FIELD_CHECK_ID: self.check_id, Mc.MSG_TYPE: InfoType.INSTANCE.value,
                     Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.FIELD_SID: "CK5", Mc.FIELD_INSTANCE_NO: "05", Mc.FIELD_HOST: "server_1",
                     Mc.FIELD_REVISION: '1.00.122.25', Mc.FIELD_EDITION: 'Database',
                     Mc.MSG_TYPE: InfoType.INSTANCE.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.MSG_TYPE: InfoType.INSTANCE.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_ENDING: True},
                    {Mc.MSG_TYPE: InfoType.MEMORY.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_ENDING: True}]
        mock_producer = self.__mock_analyze(msg_list)
        # expected Kafka sends: CPU, DISK, INSTANCE (only the second, CK5-only
        # group survives — the abandoned CK1/CK2/CK3 group is absent), MEMORY
        calls = [
            call(Mc.TOPIC_FILTERED_INFO,
                 {Mc.MSG_TYPE: InfoType.CPU.value, Mc.MSG_INFO: {"ck1adm": 18, 'ck2adm': 28, 'ck3adm': 38},
                  Mc.FIELD_CHECK_ID: self.check_id, Mc.FIELD_SERVER_ID: self.server_id, Mc.FIELD_CPU_NUMBER: num,
                  Mc.FIELD_CPU_UTILIZATION: usage}),
            call(Mc.TOPIC_FILTERED_INFO, {Mc.MSG_TYPE: InfoType.DISK.value,
                                          Mc.MSG_INFO: {"folder1": {"ck1adm": 10000}, 'folder2': {'ck2adm': 20000},
                                                        'folder3': {'ck3adm': 30000}}, Mc.FIELD_CHECK_ID: self.check_id,
                                          Mc.FIELD_SERVER_ID: self.server_id, Mc.FIELD_DISK_TOTAL: disk_total,
                                          Mc.FIELD_DISK_FREE: disk_free}),
            call(Mc.TOPIC_FILTERED_INFO,
                 {Mc.MSG_TYPE: InfoType.INSTANCE.value, Mc.MSG_INFO:
                     {"CK5": {Mc.FIELD_SID: "CK5", Mc.FIELD_INSTANCE_NO: "05", Mc.FIELD_HOST: "server_1",
                              Mc.FIELD_REVISION: '1.00.122.25', Mc.FIELD_EDITION: 'Database',
                              Mc.FIELD_SERVER_ID: self.server_id}},
                  Mc.FIELD_CHECK_ID: self.check_id, Mc.FIELD_SERVER_ID: self.server_id}),
            call(Mc.TOPIC_FILTERED_INFO,
                 {Mc.MSG_TYPE: InfoType.MEMORY.value, Mc.MSG_INFO: {"ck1adm": 15, 'ck2adm': 25, 'ck3adm': 35},
                  Mc.FIELD_CHECK_ID: self.check_id, Mc.FIELD_SERVER_ID: self.server_id, Mc.FIELD_MEM_TOTAL: mem_total,
                  Mc.FIELD_MEM_FREE: mem_free})
        ]
        mock_producer.return_value.send.assert_has_calls(calls, any_order=False)  # should be sequential
@patch("util.KafkaProducer")
def __mock_analyze(self, msg_list, mock_producer):
infos = TestAnalyzer.__get_mock_msg_list(msg_list)
analyzer = DataAnalyzer()
analyzer._DataAnalyzer__analyze(infos)
return mock_producer
@staticmethod
def __get_mock_msg_list(msg_list):
infos = []
for msg in msg_list:
info = lambda: None
setattr(info, "value", {})
info.value = msg
infos.append(info)
return infos
| 76.706989
| 120
| 0.617032
| 3,934
| 28,535
| 4.141586
| 0.03635
| 0.18646
| 0.090591
| 0.112318
| 0.958939
| 0.957896
| 0.952495
| 0.952495
| 0.952495
| 0.952495
| 0
| 0.040864
| 0.265043
| 28,535
| 371
| 121
| 76.913747
| 0.736029
| 0.014509
| 0
| 0.82263
| 0
| 0
| 0.04778
| 0
| 0
| 0
| 0
| 0
| 0.021407
| 1
| 0.030581
| false
| 0
| 0.018349
| 0
| 0.058104
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f48fadd1f777f7eec871da03e4d80ee051cb51ca
| 5,011
|
py
|
Python
|
cisco-ios-xr/ydk/models/_deviate/_cisco_xr_openconfig_if_ip_deviations.py
|
tkamata-test/ydk-py
|
b637e7853a8edbbd31fbc05afa3aa4110b31c5f9
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
cisco-ios-xr/ydk/models/_deviate/_cisco_xr_openconfig_if_ip_deviations.py
|
tkamata-test/ydk-py
|
b637e7853a8edbbd31fbc05afa3aa4110b31c5f9
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
cisco-ios-xr/ydk/models/_deviate/_cisco_xr_openconfig_if_ip_deviations.py
|
tkamata-test/ydk-py
|
b637e7853a8edbbd31fbc05afa3aa4110b31c5f9
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION
from ydk.providers._importer import _yang_ns
# Deviation table for the Cisco IOS-XR openconfig-if-ip model: every listed
# YANG node is deviated with type 'not_supported'.  Built with a dict
# comprehension since all entries share the identical value.
_deviation_table = {
    path: {'deviation_typ': 'not_supported'}
    for path in (
        'Interfaces.Interface.RoutedVlan.Ipv4.Address',
        'Interfaces.Interface.RoutedVlan.Ipv4.Config.enabled',
        'Interfaces.Interface.RoutedVlan.Ipv4.Config.mtu',
        'Interfaces.Interface.RoutedVlan.Ipv4.Neighbor',
        'Interfaces.Interface.RoutedVlan.Ipv4.State.enabled',
        'Interfaces.Interface.RoutedVlan.Ipv4.State.mtu',
        'Interfaces.Interface.RoutedVlan.Ipv6.Address',
        'Interfaces.Interface.RoutedVlan.Ipv6.Autoconf.Config.create_global_addresses',
        'Interfaces.Interface.RoutedVlan.Ipv6.Autoconf.Config.create_temporary_addresses',
        'Interfaces.Interface.RoutedVlan.Ipv6.Autoconf.Config.temporary_preferred_lifetime',
        'Interfaces.Interface.RoutedVlan.Ipv6.Autoconf.Config.temporary_valid_lifetime',
        'Interfaces.Interface.RoutedVlan.Ipv6.Autoconf.State.create_global_addresses',
        'Interfaces.Interface.RoutedVlan.Ipv6.Autoconf.State.create_temporary_addresses',
        'Interfaces.Interface.RoutedVlan.Ipv6.Autoconf.State.temporary_preferred_lifetime',
        'Interfaces.Interface.RoutedVlan.Ipv6.Autoconf.State.temporary_valid_lifetime',
        'Interfaces.Interface.RoutedVlan.Ipv6.Config.dup_addr_detect_transmits',
        'Interfaces.Interface.RoutedVlan.Ipv6.Config.enabled',
        'Interfaces.Interface.RoutedVlan.Ipv6.Config.mtu',
        'Interfaces.Interface.RoutedVlan.Ipv6.Neighbor',
        'Interfaces.Interface.RoutedVlan.Ipv6.State.dup_addr_detect_transmits',
        'Interfaces.Interface.RoutedVlan.Ipv6.State.enabled',
        'Interfaces.Interface.RoutedVlan.Ipv6.State.mtu',
        'Interfaces.Interface.Subinterfaces.Subinterface.Ipv4.Address',
        'Interfaces.Interface.Subinterfaces.Subinterface.Ipv4.Config.enabled',
        'Interfaces.Interface.Subinterfaces.Subinterface.Ipv4.State.enabled',
        'Interfaces.Interface.Subinterfaces.Subinterface.Ipv6.Address.Vrrp.VrrpGroup.State.virtual_address',
        'Interfaces.Interface.Subinterfaces.Subinterface.Ipv6.Autoconf.Config.create_global_addresses',
        'Interfaces.Interface.Subinterfaces.Subinterface.Ipv6.Autoconf.Config.create_temporary_addresses',
        'Interfaces.Interface.Subinterfaces.Subinterface.Ipv6.Autoconf.Config.temporary_preferred_lifetime',
        'Interfaces.Interface.Subinterfaces.Subinterface.Ipv6.Autoconf.Config.temporary_valid_lifetime',
        'Interfaces.Interface.Subinterfaces.Subinterface.Ipv6.Autoconf.State.create_global_addresses',
        'Interfaces.Interface.Subinterfaces.Subinterface.Ipv6.Autoconf.State.create_temporary_addresses',
        'Interfaces.Interface.Subinterfaces.Subinterface.Ipv6.Autoconf.State.temporary_preferred_lifetime',
        'Interfaces.Interface.Subinterfaces.Subinterface.Ipv6.Autoconf.State.temporary_valid_lifetime',
        'Interfaces.Interface.Subinterfaces.Subinterface.Ipv6.Neighbor.State.is_router',
    )
}
| 43.198276
| 183
| 0.701656
| 474
| 5,011
| 7.132911
| 0.135021
| 0.196687
| 0.15528
| 0.248447
| 0.883466
| 0.883466
| 0.883466
| 0.867495
| 0.855368
| 0.564034
| 0
| 0.00889
| 0.169427
| 5,011
| 115
| 184
| 43.573913
| 0.80346
| 0
| 0
| 0.3125
| 0
| 0
| 0.669062
| 0.487425
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.044643
| 0
| 0.044643
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
beb2cf87091a47c44ecd2fa6343ddfaee14202f4
| 25,109
|
py
|
Python
|
Quartz/CoreVideo/_metadata.py
|
EnjoyLifeFund/py36pkgs
|
0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2
|
[
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
Quartz/CoreVideo/_metadata.py
|
EnjoyLifeFund/py36pkgs
|
0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2
|
[
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
Quartz/CoreVideo/_metadata.py
|
EnjoyLifeFund/py36pkgs
|
0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2
|
[
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
# This file is generated by objective.metadata
#
# Last update: Mon Oct 24 11:14:07 2016
import objc, sys
def sel32or64(a, b):
    """Return *b* on a 64-bit interpreter, *a* on a 32-bit one.

    Used to pick the Objective-C type encoding that matches the current
    pointer width.
    """
    return b if sys.maxsize > 2 ** 32 else a


def littleOrBig(a, b):
    """Return *a* on little-endian hosts, *b* on big-endian hosts."""
    return a if sys.byteorder == 'little' else b
# Accumulator for PyObjC metadata; the generated update() calls below fill it.
misc = {}
# Struct wrappers: each objc.createStructType call registers a CoreVideo C
# struct with PyObjC; sel32or64 chooses the 32-bit vs 64-bit field encoding.
misc.update({'CVTimeStamp': objc.createStructType('CVTimeStamp', sel32or64(b'{_CVTimeStamp=IiqQdq{CVSMPTETime=ssLLLssss}QQ}', b'{_CVTimeStamp=IiqQdq{CVSMPTETime=ssIIIssss}QQ}'), ['version', 'videoTimeScale', 'videoTime', 'hostTime', 'rateScalar', 'videoRefreshPeriod', 'smpteTime', 'flags', 'reserved']), 'CVPlanarPixelBufferInfo_YCbCrBiPlanar': objc.createStructType('CVPlanarPixelBufferInfo_YCbCrBiPlanar', b'{CVPlanarPixelBufferInfo_YCbCrBiPlanar={CVPlanarComponentInfo=iI}{CVPlanarComponentInfo=iI}}', ['componentInfoY', 'componentInfoCbCr']), 'CVPlanarPixelBufferInfo_YCbCrPlanar': objc.createStructType('CVPlanarPixelBufferInfo_YCbCrPlanar', b'{CVPlanarPixelBufferInfo_YCbCrPlanar={CVPlanarComponentInfo=iI}{CVPlanarComponentInfo=iI}{CVPlanarComponentInfo=iI}}', ['componentInfoY', 'componentInfoCb', 'componentInfoCr']), 'CVPlanarComponentInfo': objc.createStructType('CVPlanarComponentInfo', b'{CVPlanarComponentInfo=iI}', ['offset', 'rowBytes']), 'CVTime': objc.createStructType('CVTime', b'{_CVTime=qii}', ['timeValue', 'timeScale', 'flags']), 'CVSMPTETime': objc.createStructType('CVSMPTETime', sel32or64(b'{CVSMPTETime=ssLLLssss}', b'{CVSMPTETime=ssIIIssss}'), ['subframes', 'subframeDivisor', 'counter', 'type', 'flags', 'hours', 'minutes', 'seconds', 'frames']), 'CVPlanarPixelBufferInfo': objc.createStructType('CVPlanarPixelBufferInfo', b'{CVPlanarPixelBufferInfo=[1{CVPlanarComponentInfo=iI}]}', ['componentInfo'])})
# Framework constants, packed as '$Name@typeencoding$Name@typeencoding$...'
# for PyObjC's bulk loader; most are CFStringRef keys ('^{__CFString=}').
constants = '''$kCVBufferMovieTimeKey@^{__CFString=}$kCVBufferNonPropagatedAttachmentsKey@^{__CFString=}$kCVBufferPropagatedAttachmentsKey@^{__CFString=}$kCVBufferTimeScaleKey@^{__CFString=}$kCVBufferTimeValueKey@^{__CFString=}$kCVImageBufferAlphaChannelIsOpaque@^{__CFString=}$kCVImageBufferCGColorSpaceKey@^{__CFString=}$kCVImageBufferChromaLocationBottomFieldKey@^{__CFString=}$kCVImageBufferChromaLocationTopFieldKey@^{__CFString=}$kCVImageBufferChromaLocation_Bottom@^{__CFString=}$kCVImageBufferChromaLocation_BottomLeft@^{__CFString=}$kCVImageBufferChromaLocation_Center@^{__CFString=}$kCVImageBufferChromaLocation_DV420@^{__CFString=}$kCVImageBufferChromaLocation_Left@^{__CFString=}$kCVImageBufferChromaLocation_Top@^{__CFString=}$kCVImageBufferChromaLocation_TopLeft@^{__CFString=}$kCVImageBufferChromaSubsamplingKey@^{__CFString=}$kCVImageBufferChromaSubsampling_411@^{__CFString=}$kCVImageBufferChromaSubsampling_420@^{__CFString=}$kCVImageBufferChromaSubsampling_422@^{__CFString=}$kCVImageBufferCleanApertureHeightKey@^{__CFString=}$kCVImageBufferCleanApertureHorizontalOffsetKey@^{__CFString=}$kCVImageBufferCleanApertureKey@^{__CFString=}$kCVImageBufferCleanApertureVerticalOffsetKey@^{__CFString=}$kCVImageBufferCleanApertureWidthKey@^{__CFString=}$kCVImageBufferColorPrimariesKey@^{__CFString=}$kCVImageBufferColorPrimaries_DCI_P3$kCVImageBufferColorPrimaries_EBU_3213@^{__CFString=}$kCVImageBufferColorPrimaries_ITU_R_2020$kCVImageBufferColorPrimaries_ITU_R_709_2@^{__CFString=}$kCVImageBufferColorPrimaries_P22@^{__CFString=}$kCVImageBufferColorPrimaries_P3_D65$kCVImageBufferColorPrimaries_SMPTE_C@^{__CFString=}$kCVImageBufferDisplayDimensionsKey@^{__CFString=}$kCVImageBufferDisplayHeightKey@^{__CFString=}$kCVImageBufferDisplayWidthKey@^{__CFString=}$kCVImageBufferFieldCountKey@^{__CFString=}$kCVImageBufferFieldDetailKey@^{__CFString=}$kCVImageBufferFieldDetailSpatialFirstLineEarly@^{__CFString=}$kCVImageBufferFieldDetailSpatialFirstLineLate@^{__CFString=}$kCVImageBufferF
ieldDetailTemporalBottomFirst@^{__CFString=}$kCVImageBufferFieldDetailTemporalTopFirst@^{__CFString=}$kCVImageBufferGammaLevelKey@^{__CFString=}$kCVImageBufferICCProfileKey@^{__CFString=}$kCVImageBufferPixelAspectRatioHorizontalSpacingKey@^{__CFString=}$kCVImageBufferPixelAspectRatioKey@^{__CFString=}$kCVImageBufferPixelAspectRatioVerticalSpacingKey@^{__CFString=}$kCVImageBufferPreferredCleanApertureKey@^{__CFString=}$kCVImageBufferTransferFunctionKey@^{__CFString=}$kCVImageBufferTransferFunction_EBU_3213@^{__CFString=}$kCVImageBufferTransferFunction_ITU_R_2020$kCVImageBufferTransferFunction_ITU_R_709_2@^{__CFString=}$kCVImageBufferTransferFunction_SMPTE_240M_1995@^{__CFString=}$kCVImageBufferTransferFunction_SMPTE_C@^{__CFString=}$kCVImageBufferTransferFunction_SMPTE_ST_428_1$kCVImageBufferTransferFunction_UseGamma@^{__CFString=}$kCVImageBufferYCbCrMatrixKey@^{__CFString=}$kCVImageBufferYCbCrMatrix_ITU_R_2020$kCVImageBufferYCbCrMatrix_ITU_R_601_4@^{__CFString=}$kCVImageBufferYCbCrMatrix_ITU_R_709_2@^{__CFString=}$kCVImageBufferYCbCrMatrix_SMPTE_240M_1995@^{__CFString=}$kCVIndefiniteTime@{_CVTime=qii}$kCVMetalTextureCacheMaximumTextureAgeKey$kCVOpenGLBufferHeight@^{__CFString=}$kCVOpenGLBufferInternalFormat@^{__CFString=}$kCVOpenGLBufferMaximumMipmapLevel@^{__CFString=}$kCVOpenGLBufferPoolMaximumBufferAgeKey@^{__CFString=}$kCVOpenGLBufferPoolMinimumBufferCountKey@^{__CFString=}$kCVOpenGLBufferTarget@^{__CFString=}$kCVOpenGLBufferWidth@^{__CFString=}$kCVOpenGLTextureCacheChromaSamplingModeAutomatic@^{__CFString=}$kCVOpenGLTextureCacheChromaSamplingModeBestPerformance@^{__CFString=}$kCVOpenGLTextureCacheChromaSamplingModeHighestQuality@^{__CFString=}$kCVOpenGLTextureCacheChromaSamplingModeKey@^{__CFString=}$kCVPixelBufferBytesPerRowAlignmentKey@^{__CFString=}$kCVPixelBufferCGBitmapContextCompatibilityKey@^{__CFString=}$kCVPixelBufferCGImageCompatibilityKey@^{__CFString=}$kCVPixelBufferExtendedPixelsBottomKey@^{__CFString=}$kCVPixelBufferExtendedPixelsLeftKey@^{__CFStr
ing=}$kCVPixelBufferExtendedPixelsRightKey@^{__CFString=}$kCVPixelBufferExtendedPixelsTopKey@^{__CFString=}$kCVPixelBufferHeightKey@^{__CFString=}$kCVPixelBufferIOSurfaceCoreAnimationCompatibilityKey@^{__CFString=}$kCVPixelBufferIOSurfaceOpenGLESFBOCompatibilityKey@^{__CFString=}$kCVPixelBufferIOSurfaceOpenGLESTextureCompatibilityKey@^{__CFString=}$kCVPixelBufferIOSurfaceOpenGLFBOCompatibilityKey@^{__CFString=}$kCVPixelBufferIOSurfaceOpenGLTextureCompatibilityKey@^{__CFString=}$kCVPixelBufferIOSurfacePropertiesKey@^{__CFString=}$kCVPixelBufferMemoryAllocatorKey@^{__CFString=}$kCVPixelBufferMetalCompatibilityKey@^{__CFString=}$kCVPixelBufferOpenGLCompatibilityKey@^{__CFString=}$kCVPixelBufferOpenGLESCompatibilityKey@^{__CFString=}$kCVPixelBufferOpenGLTextureCacheCompatibilityKey$kCVPixelBufferPixelFormatTypeKey@^{__CFString=}$kCVPixelBufferPlaneAlignmentKey@^{__CFString=}$kCVPixelBufferPoolAllocationThresholdKey@^{__CFString=}$kCVPixelBufferPoolFlushExcessBuffers$kCVPixelBufferPoolFreeBufferNotification@^{__CFString=}$kCVPixelBufferPoolMaximumBufferAgeKey@^{__CFString=}$kCVPixelBufferPoolMinimumBufferCountKey@^{__CFString=}$kCVPixelBufferWidthKey@^{__CFString=}$kCVPixelFormatBitsPerBlock@^{__CFString=}$kCVPixelFormatBlackBlock@^{__CFString=}$kCVPixelFormatBlockHeight@^{__CFString=}$kCVPixelFormatBlockHorizontalAlignment@^{__CFString=}$kCVPixelFormatBlockVerticalAlignment@^{__CFString=}$kCVPixelFormatBlockWidth@^{__CFString=}$kCVPixelFormatCGBitmapContextCompatibility@^{__CFString=}$kCVPixelFormatCGBitmapInfo@^{__CFString=}$kCVPixelFormatCGImageCompatibility@^{__CFString=}$kCVPixelFormatCodecType@^{__CFString=}$kCVPixelFormatComponentRange$kCVPixelFormatComponentRange_FullRange$kCVPixelFormatComponentRange_VideoRange$kCVPixelFormatComponentRange_WideRange$kCVPixelFormatConstant@^{__CFString=}$kCVPixelFormatContainsAlpha@^{__CFString=}$kCVPixelFormatContainsRGB@^{__CFString=}$kCVPixelFormatContainsYCbCr@^{__CFString=}$kCVPixelFormatFillExtendedPixelsCallback@^{__CFStri
ng=}$kCVPixelFormatFourCC@^{__CFString=}$kCVPixelFormatHorizontalSubsampling@^{__CFString=}$kCVPixelFormatName@^{__CFString=}$kCVPixelFormatOpenGLCompatibility@^{__CFString=}$kCVPixelFormatOpenGLESCompatibility@^{__CFString=}$kCVPixelFormatOpenGLFormat@^{__CFString=}$kCVPixelFormatOpenGLInternalFormat@^{__CFString=}$kCVPixelFormatOpenGLType@^{__CFString=}$kCVPixelFormatPlanes@^{__CFString=}$kCVPixelFormatQDCompatibility@^{__CFString=}$kCVPixelFormatVerticalSubsampling@^{__CFString=}$kCVZeroTime@{_CVTime=qii}$'''
# Enum/constant integer values, packed as '$Name@intvalue$Name@intvalue$...'
# (pixel format FourCC codes, CVReturn error codes, SMPTE time flags, etc.).
enums = '''$kCVAttachmentMode_ShouldNotPropagate@0$kCVAttachmentMode_ShouldPropagate@1$kCVPixelBufferLock_ReadOnly@1$kCVPixelBufferPoolFlushExcessBuffers@1$kCVPixelFormatType_128RGBAFloat@1380410945$kCVPixelFormatType_14Bayer_BGGR@1650943796$kCVPixelFormatType_14Bayer_GBRG@1734505012$kCVPixelFormatType_14Bayer_GRBG@1735549492$kCVPixelFormatType_14Bayer_RGGB@1919379252$kCVPixelFormatType_16BE555@16$kCVPixelFormatType_16BE565@1110783541$kCVPixelFormatType_16Gray@1647392359$kCVPixelFormatType_16LE555@1278555445$kCVPixelFormatType_16LE5551@892679473$kCVPixelFormatType_16LE565@1278555701$kCVPixelFormatType_1IndexedGray_WhiteIsZero@33$kCVPixelFormatType_1Monochrome@1$kCVPixelFormatType_24BGR@842285639$kCVPixelFormatType_24RGB@24$kCVPixelFormatType_2Indexed@2$kCVPixelFormatType_2IndexedGray_WhiteIsZero@34$kCVPixelFormatType_30RGB@1378955371$kCVPixelFormatType_30RGBLEPackedWideGamut@1999843442$kCVPixelFormatType_32ABGR@1094862674$kCVPixelFormatType_32ARGB@32$kCVPixelFormatType_32AlphaGray@1647522401$kCVPixelFormatType_32BGRA@1111970369$kCVPixelFormatType_32RGBA@1380401729$kCVPixelFormatType_420YpCbCr8BiPlanarFullRange@875704422$kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange@875704438$kCVPixelFormatType_420YpCbCr8Planar@2033463856$kCVPixelFormatType_420YpCbCr8PlanarFullRange@1714696752$kCVPixelFormatType_422YpCbCr10@1983000880$kCVPixelFormatType_422YpCbCr16@1983000886$kCVPixelFormatType_422YpCbCr8@846624121$kCVPixelFormatType_422YpCbCr8FullRange@2037741158$kCVPixelFormatType_422YpCbCr8_yuvs@2037741171$kCVPixelFormatType_422YpCbCr_4A_8BiPlanar@1630697081$kCVPixelFormatType_4444AYpCbCr16@2033463606$kCVPixelFormatType_4444AYpCbCr8@2033463352$kCVPixelFormatType_4444YpCbCrA8@1983131704$kCVPixelFormatType_4444YpCbCrA8R@1916022840$kCVPixelFormatType_444YpCbCr10@1983131952$kCVPixelFormatType_444YpCbCr8@1983066168$kCVPixelFormatType_48RGB@1647589490$kCVPixelFormatType_4Indexed@4$kCVPixelFormatType_4IndexedGray_WhiteIsZero@36$kCVPixelFormatType_64ARGB@1647719521$kCVPixelFormatType_64
RGBAHalf@1380411457$kCVPixelFormatType_8Indexed@8$kCVPixelFormatType_8IndexedGray_WhiteIsZero@40$kCVPixelFormatType_OneComponent16Half@1278226536$kCVPixelFormatType_OneComponent32Float@1278226534$kCVPixelFormatType_OneComponent8@1278226488$kCVPixelFormatType_TwoComponent16Half@843264104$kCVPixelFormatType_TwoComponent32Float@843264102$kCVPixelFormatType_TwoComponent8@843264056$kCVReturnAllocationFailed@-6662$kCVReturnDisplayLinkAlreadyRunning@-6671$kCVReturnDisplayLinkCallbacksNotSet@-6673$kCVReturnDisplayLinkNotRunning@-6672$kCVReturnError@-6660$kCVReturnFirst@-6660$kCVReturnInvalidArgument@-6661$kCVReturnInvalidDisplay@-6670$kCVReturnInvalidPixelBufferAttributes@-6682$kCVReturnInvalidPixelFormat@-6680$kCVReturnInvalidPoolAttributes@-6691$kCVReturnInvalidSize@-6681$kCVReturnLast@-6699$kCVReturnPixelBufferNotMetalCompatible@-6684$kCVReturnPixelBufferNotOpenGLCompatible@-6683$kCVReturnPoolAllocationFailed@-6690$kCVReturnRetry@-6692$kCVReturnSuccess@0$kCVReturnUnsupported@-6663$kCVReturnWouldExceedAllocationThreshold@-6689$kCVSMPTETimeRunning@2$kCVSMPTETimeType24@0$kCVSMPTETimeType25@1$kCVSMPTETimeType2997@4$kCVSMPTETimeType2997Drop@5$kCVSMPTETimeType30@3$kCVSMPTETimeType30Drop@2$kCVSMPTETimeType5994@7$kCVSMPTETimeType60@6$kCVSMPTETimeValid@1$kCVTimeIsIndefinite@1$kCVTimeStampBottomField@131072$kCVTimeStampHostTimeValid@2$kCVTimeStampIsInterlaced@196608$kCVTimeStampRateScalarValid@16$kCVTimeStampSMPTETimeValid@4$kCVTimeStampTopField@65536$kCVTimeStampVideoHostTimeValid@3$kCVTimeStampVideoRefreshPeriodValid@8$kCVTimeStampVideoTimeValid@1$kReturnRetry@-6692$'''
# Emitted by the generator even when empty, for a uniform file layout.
misc.update({})
# C function metadata: maps function name -> (type-encoding string[, '', extra
# metadata]).  The extra dict flags CoreFoundation ownership transfer
# ('already_cfretained'), in/out pointer arguments ('type_modifier'), and
# callback/block signatures for the display-link and buffer-release callbacks.
functions={'CVImageBufferGetEncodedSize': (sel32or64(b'{CGSize=ff}^{__CVBuffer=}', b'{CGSize=dd}^{__CVBuffer=}'),), 'CVOpenGLTextureRelease': (b'v^{__CVBuffer=}',), 'CVPixelBufferPoolRelease': (b'v^{__CVPixelBufferPool=}',), 'CVPixelBufferPoolGetTypeID': (sel32or64(b'L', b'Q'),), 'CVPixelBufferCreate': (sel32or64(b'i^{__CFAllocator=}LLL^{__CFDictionary=}^^{__CVBuffer=}', b'i^{__CFAllocator=}QQI^{__CFDictionary=}^^{__CVBuffer=}'), '', {'retval': {'already_cfretained': True}}), 'CVOpenGLBufferPoolGetTypeID': (sel32or64(b'L', b'Q'),), 'CVPixelBufferFillExtendedPixels': (b'i^{__CVBuffer=}',), 'CVOpenGLTextureCacheRetain': (b'^{__CVOpenGLTextureCache=}^{__CVOpenGLTextureCache=}',), 'CVOpenGLBufferPoolCreateOpenGLBuffer': (b'i^{__CFAllocator=}^{__CVOpenGLBufferPool=}^^{__CVBuffer=}', '', {'retval': {'already_cfretained': True}, 'arguments': {2: {'already_cfretained': True, 'type_modifier': 'o'}}}), 'CVDisplayLinkSetCurrentCGDisplay': (b'i^{__CVDisplayLink=}I',), 'CVBufferSetAttachment': (b'v^{__CVBuffer=}^{__CFString=}@I',), 'CVGetCurrentHostTime': (b'Q', '', {'variadic': False}), 'CVPixelBufferPoolCreate': (b'i^{__CFAllocator=}^{__CFDictionary=}^{__CFDictionary=}^^{__CVPixelBufferPool=}', '', {'retval': {'already_cfretained': True}, 'arguments': {3: {'already_cfretained': True, 'type_modifier': 'o'}}}), 'CVPixelBufferGetHeightOfPlane': (sel32or64(b'L^{__CVBuffer=}L', b'Q^{__CVBuffer=}Q'),), 'CVBufferRetain': (b'^{__CVBuffer=}^{__CVBuffer=}',), 'CVDisplayLinkTranslateTime': (sel32or64(b'i^{__CVDisplayLink=}^{_CVTimeStamp=IiqQdq{CVSMPTETime=ssLLLssss}QQ}^{_CVTimeStamp=IiqQdq{CVSMPTETime=ssLLLssss}QQ}', b'i^{__CVDisplayLink=}^{_CVTimeStamp=IiqQdq{CVSMPTETime=ssIIIssss}QQ}^{_CVTimeStamp=IiqQdq{CVSMPTETime=ssIIIssss}QQ}'), '', {'arguments': {1: {'type_modifier': 'n'}, 2: {'type_modifier': 'o'}}}), 'CVPixelBufferRetain': (b'^{__CVBuffer=}^{__CVBuffer=}',), 'CVPixelBufferGetPlaneCount': (sel32or64(b'L^{__CVBuffer=}', b'Q^{__CVBuffer=}'),), 'CVOpenGLTextureCacheRelease': 
(b'v^{__CVOpenGLTextureCache=}',), 'CVPixelBufferGetBaseAddress': (b'^v^{__CVBuffer=}', '', {'retval': {'c_array_of_variable_length': True}}), 'CVOpenGLBufferPoolRelease': (b'v^{__CVOpenGLBufferPool=}',), 'CVPixelBufferLockBaseAddress': (b'i^{__CVBuffer=}Q',), 'CVOpenGLTextureCacheGetTypeID': (sel32or64(b'L', b'Q'),), 'CVPixelBufferUnlockBaseAddress': (b'i^{__CVBuffer=}Q',), 'CVMetalTextureCacheCreateTextureFromImage': (b'i@@^{__CVBuffer=}@QLLL^^{__CVBuffer=}', '', {'arguments': {8: {'type_modifier': 'o'}}}), 'CVOpenGLTextureIsFlipped': (b'Z^{__CVBuffer=}',), 'CVMetalTextureCacheFlush': (b'v@Q',), 'CVPixelBufferGetTypeID': (sel32or64(b'L', b'Q'),), 'CVDisplayLinkGetActualOutputVideoRefreshPeriod': (b'd^{__CVDisplayLink=}',), 'CVPixelBufferGetWidth': (sel32or64(b'L^{__CVBuffer=}', b'Q^{__CVBuffer=}'),), 'CVMetalTextureCacheGetTypeID': (b'L',), 'CVDisplayLinkCreateWithCGDisplay': (b'iI^^{__CVDisplayLink=}', '', {'retval': {'already_cfretained': True}, 'arguments': {1: {'already_cfretained': True, 'type_modifier': 'o'}}}), 'CVBufferRelease': (b'v^{__CVBuffer=}',), 'CVDisplayLinkStart': (b'i^{__CVDisplayLink=}',), 'CVDisplayLinkGetCurrentTime': (sel32or64(b'i^{__CVDisplayLink=}^{_CVTimeStamp=IiqQdq{CVSMPTETime=ssLLLssss}QQ}', b'i^{__CVDisplayLink=}^{_CVTimeStamp=IiqQdq{CVSMPTETime=ssIIIssss}QQ}'), '', {'arguments': {1: {'type_modifier': 'o'}}}), 'CVPixelFormatDescriptionArrayCreateWithAllPixelFormatTypes': (b'^{__CFArray=}^{__CFAllocator=}', '', {'retval': {'already_cfretained': True}}), 'CVPixelBufferPoolGetAttributes': (b'^{__CFDictionary=}^{__CVPixelBufferPool=}',), 'CVBufferGetAttachments': (b'^{__CFDictionary=}^{__CVBuffer=}I',), 'CVPixelFormatDescriptionRegisterDescriptionWithPixelFormatType': (sel32or64(b'v^{__CFDictionary=}L', b'v^{__CFDictionary=}I'),), 'CVOpenGLBufferPoolCreate': (b'i^{__CFAllocator=}^{__CFDictionary=}^{__CFDictionary=}^^{__CVOpenGLBufferPool=}', '', {'retval': {'already_cfretained': True}, 'arguments': {3: {'already_cfretained': True, 
'type_modifier': 'o'}}}), 'CVDisplayLinkRetain': (b'^{__CVDisplayLink=}^{__CVDisplayLink=}',), 'CVPixelBufferCreateWithIOSurface': (b'i^{__CFAllocator=}^{__IOSurface=}^{__CFDictionary=}^^{__CVBuffer=}', '', {'retval': {'already_cfretained': True}, 'arguments': {3: {'already_cfretained': True, 'type_modifier': 'o'}}}), 'CVDisplayLinkCreateWithOpenGLDisplayMask': (b'iI^^{__CVDisplayLink=}', '', {'retval': {'already_cfretained': True}, 'arguments': {1: {'already_cfretained': True, 'type_modifier': 'o'}}}), 'CVDisplayLinkSetOutputHandler': (b'i^{__CVDisplayLink=}@?', '', {'arguments': {1: {'callable': {'retval': {'type': b'i'}, 'arguments': {0: {'type': '^v'}, 1: {'type': '^{__CVDisplayLink=}'}, 2: {'type': '^{_CVTimeStamp=IiqQdq{CVSMPTETime=ssIIIssss}QQ}', 'type_modifier': 'n'}, 3: {'type': '^{_CVTimeStamp=IiqQdq{CVSMPTETime=ssIIIssss}QQ}', 'type_modifier': 'n'}, 4: {'type': 'Q'}, 5: {'type': 'o^Q'}}}}}}), 'CVOpenGLBufferCreate': (sel32or64(b'i^{__CFAllocator=}LL^{__CFDictionary=}^^{__CVBuffer=}', b'i^{__CFAllocator=}QQ^{__CFDictionary=}^^{__CVBuffer=}'), '', {'retval': {'already_cfretained': True}, 'arguments': {4: {'already_cfretained': True, 'type_modifier': 'o'}}}), 'CVPixelBufferPoolCreatePixelBufferWithAuxAttributes': (b'i^{__CFAllocator=}^{__CVPixelBufferPool=}^{__CFDictionary=}^^{__CVBuffer=}', '', {'retval': {'already_cfretained': True}, 'arguments': {3: {'already_cfretained': True, 'type_modifier': 'o'}}}), 'CVOpenGLTextureCacheFlush': (b'v^{__CVOpenGLTextureCache=}Q',), 'CVDisplayLinkCreateWithActiveCGDisplays': (b'i^^{__CVDisplayLink=}', '', {'retval': {'already_cfretained': True}, 'arguments': {0: {'already_cfretained': True, 'type_modifier': 'o'}}}), 'CVDisplayLinkGetNominalOutputVideoRefreshPeriod': (b'{_CVTime=qii}^{__CVDisplayLink=}',), 'CVPixelBufferCreateResolvedAttributesDictionary': (b'i^{__CFAllocator=}^{__CFArray=}^^{__CFDictionary=}', '', {'retval': {'already_cfretained': True}, 'arguments': {2: {'already_cfretained': True, 'type_modifier': 
'o'}}}), 'CVDisplayLinkSetOutputCallback': (b'i^{__CVDisplayLink=}^?^v', '', {'arguments': {1: {'callable': {'retval': {'type': b'i'}, 'arguments': {0: {'type': b'^{__CVDisplayLink=}'}, 1: {'type': b'^{_CVTimeStamp=IiqQdq{CVSMPTETime=ssIIIssss}QQ}', 'type_modifier': 'n'}, 2: {'type': b'^{_CVTimeStamp=IiqQdq{CVSMPTETime=ssIIIssss}QQ}', 'type_modifier': 'N'}, 3: {'type': b'Q'}, 4: {'type': b'^Q', 'type_modifier': 'o'}, 5: {'type': b'^v'}}}}}}), 'CVOpenGLTextureGetName': (b'I^{__CVBuffer=}',), 'CVOpenGLBufferRelease': (b'v^{__CVBuffer=}',), 'CVOpenGLTextureRetain': (b'^{__CVBuffer=}^{__CVBuffer=}',), 'CVOpenGLBufferPoolGetAttributes': (b'^{__CFDictionary=}^{__CVOpenGLBufferPool=}',), 'CVPixelBufferGetWidthOfPlane': (sel32or64(b'L^{__CVBuffer=}L', b'Q^{__CVBuffer=}Q'),), 'CVBufferPropagateAttachments': (b'v^{__CVBuffer=}^{__CVBuffer=}',), 'CVPixelBufferPoolRetain': (b'^{__CVPixelBufferPool=}^{__CVPixelBufferPool=}',), 'CVPixelBufferGetHeight': (sel32or64(b'L^{__CVBuffer=}', b'Q^{__CVBuffer=}'),), 'CVPixelBufferGetExtendedPixels': (sel32or64(b'v^{__CVBuffer=}^L^L^L^L', b'v^{__CVBuffer=}^Q^Q^Q^Q'), '', {'arguments': {1: {'type_modifier': 'o'}, 2: {'type_modifier': 'o'}, 3: {'type_modifier': 'o'}, 4: {'type_modifier': 'o'}}}), 'CVOpenGLBufferGetTypeID': (sel32or64(b'L', b'Q'),), 'CVDisplayLinkRelease': (b'v^{__CVDisplayLink=}',), 'CVBufferGetAttachment': (b'@^{__CVBuffer=}^{__CFString=}^I', '', {'arguments': {2: {'type_modifier': 'o'}}}), 'CVDisplayLinkStop': (b'i^{__CVDisplayLink=}',), 'CVPixelFormatDescriptionCreateWithPixelFormatType': (sel32or64(b'^{__CFDictionary=}^{__CFAllocator=}L', b'^{__CFDictionary=}^{__CFAllocator=}I'), '', {'retval': {'already_cfretained': True}}), 'CVMetalTextureGetCleanTexCoords': (b'v^{__CVBuffer=}^f^f^f^f', '', {'arguments': {1: {'c_array_of_fixed_length': 2, 'type_modifier': 'o'}, 2: {'c_array_of_fixed_length': 2, 'type_modifier': 'o'}, 3: {'c_array_of_fixed_length': 2, 'type_modifier': 'o'}, 4: {'c_array_of_fixed_length': 2, 
'type_modifier': 'o'}}}), 'CVPixelBufferGetIOSurface': (b'^{__IOSurface=}^{__CVBuffer=}',), 'CVOpenGLTextureCacheCreateTextureFromImage': (b'i^{__CFAllocator=}^{__CVOpenGLTextureCache=}^{__CVBuffer=}^{__CFDictionary=}^^{__CVBuffer=}', '', {'retval': {'already_cfretained': True}}), 'CVDisplayLinkCreateWithCGDisplays': (sel32or64(b'i^Il^^{__CVDisplayLink=}', b'i^Iq^^{__CVDisplayLink=}'), '', {'retval': {'already_cfretained': True}, 'arguments': {0: {'c_array_length_in_arg': 1, 'type_modifier': 'n'}, 2: {'already_cfretained': True, 'type_modifier': 'o'}}}), 'CVPixelBufferPoolGetPixelBufferAttributes': (b'^{__CFDictionary=}^{__CVPixelBufferPool=}',), 'CVOpenGLTextureGetTypeID': (sel32or64(b'L', b'Q'),), 'CVImageBufferIsFlipped': (b'Z^{__CVBuffer=}',), 'CVMetalTextureGetTexture': (b'@^{__CVBuffer=}',), 'CVPixelBufferIsPlanar': (b'Z^{__CVBuffer=}',), 'CVBufferRemoveAllAttachments': (b'v^{__CVBuffer=}',), 'CVPixelBufferCreateWithBytes': (sel32or64(b'i^{__CFAllocator=}LLL^vL^?^v^{__CFDictionary=}^^{__CVBuffer=}', b'i^{__CFAllocator=}QQI^vQ^?^v^{__CFDictionary=}^^{__CVBuffer=}'), '', {'retval': {'already_cfretained': True}, 'arguments': {6: {'callable': {'retval': {'type': b'v'}, 'arguments': {0: {'type': b'^v'}, 1: {'type': b'^v'}}}}}}), 'CVMetalTextureGetTypeID': (b'L',), 'CVOpenGLBufferPoolRetain': (b'^{__CVOpenGLBufferPool=}^{__CVOpenGLBufferPool=}',), 'CVPixelBufferCreateWithPlanarBytes': (sel32or64(b'i^{__CFAllocator=}LLL^vLL^^v^L^L^L^?^v^{__CFDictionary=}^^{__CVBuffer=}', b'i^{__CFAllocator=}QQI^vQQ^^v^Q^Q^Q^?^v^{__CFDictionary=}^^{__CVBuffer=}'), '', {'retval': {'already_cfretained': True}, 'arguments': {11: {'callable': {'retval': {'type': b'v'}, 'arguments': {0: {'type': b'^v'}, 1: {'type': b'^v'}, 2: {'type': b'Q'}, 3: {'type': b'Q'}, 4: {'type': b'^^v'}}}}}}), 'CVImageBufferGetCleanRect': (sel32or64(b'{CGRect={CGPoint=ff}{CGSize=ff}}^{__CVBuffer=}', b'{CGRect={CGPoint=dd}{CGSize=dd}}^{__CVBuffer=}'),), 'CVImageBufferCreateColorSpaceFromAttachments': 
(b'^{CGColorSpace=}^{__CFDictionary=}', '', {'retval': {'already_cfretained': True}}), 'CVPixelBufferGetBytesPerRowOfPlane': (sel32or64(b'L^{__CVBuffer=}L', b'Q^{__CVBuffer=}Q'),), 'CVDisplayLinkGetTypeID': (sel32or64(b'L', b'Q'),), 'CVImageBufferGetDisplaySize': (sel32or64(b'{CGSize=ff}^{__CVBuffer=}', b'{CGSize=dd}^{__CVBuffer=}'),), 'CVPixelBufferGetDataSize': (sel32or64(b'L^{__CVBuffer=}', b'Q^{__CVBuffer=}'),), 'CVOpenGLBufferPoolGetOpenGLBufferAttributes': (b'^{__CFDictionary=}^{__CVOpenGLBufferPool=}',), 'CVOpenGLBufferAttach': (b'i^{__CVBuffer=}^{_CGLContextObject=}Iii',), 'CVPixelBufferGetBaseAddressOfPlane': (sel32or64(b'^v^{__CVBuffer=}L', b'^v^{__CVBuffer=}Q'), '', {'retval': {'c_array_of_variable_length': True}}), 'CVDisplayLinkIsRunning': (b'Z^{__CVDisplayLink=}',), 'CVPixelBufferGetPixelFormatType': (sel32or64(b'L^{__CVBuffer=}', b'I^{__CVBuffer=}'),), 'CVBufferRemoveAttachment': (b'v^{__CVBuffer=}^{__CFString=}',), 'CVOpenGLBufferGetAttributes': (b'^{__CFDictionary=}^{__CVBuffer=}',), 'CVDisplayLinkGetOutputVideoLatency': (b'{_CVTime=qii}^{__CVDisplayLink=}',), 'CVPixelBufferGetBytesPerRow': (sel32or64(b'L^{__CVBuffer=}', b'Q^{__CVBuffer=}'),), 'CVMetalTextureCacheCreate': (b'i@@@@^{__CVBuffer=}',), 'CVPixelBufferPoolCreatePixelBuffer': (b'i^{__CFAllocator=}^{__CVPixelBufferPool=}^^{__CVBuffer=}', '', {'retval': {'already_cfretained': True}, 'arguments': {2: {'already_cfretained': True, 'type_modifier': 'o'}}}), 'CVImageBufferGetColorSpace': (b'^{CGColorSpace=}^{__CVBuffer=}',), 'CVDisplayLinkGetCurrentCGDisplay': (b'I^{__CVDisplayLink=}',), 'CVDisplayLinkSetCurrentCGDisplayFromOpenGLContext': (b'i^{__CVDisplayLink=}^{_CGLContextObject=}^{_CGLPixelFormatObject=}',), 'CVPixelBufferRelease': (b'v^{__CVBuffer=}',), 'CVBufferSetAttachments': (b'v^{__CVBuffer=}^{__CFDictionary=}I',), 'CVOpenGLTextureGetTarget': (b'I^{__CVBuffer=}',), 'CVGetHostClockFrequency': (b'd', '', {'variadic': False}), 'CVGetHostClockMinimumTimeDelta': (b'I', '', {'variadic': 
False}), 'CVOpenGLBufferRetain': (b'^{__CVBuffer=}^{__CVBuffer=}',), 'CVMetalTextureIsFlipped': (b'Z^{__CVBuffer=}',), 'CVOpenGLTextureCacheCreate': (b'i^{__CFAllocator=}^{__CFDictionary=}^{_CGLContextObject=}^{_CGLPixelFormatObject=}^{__CFDictionary=}^^{__CVOpenGLTextureCache=}', '', {'retval': {'already_cfretained': True}}), 'CVPixelBufferPoolFlush': (b'v^{__CVPixelBufferPool=}L',), 'CVOpenGLTextureGetCleanTexCoords': (b'v^{__CVBuffer=}[2f][2f][2f][2f]',)}
# Name aliases: alias -> canonical name, applied when resolving lookups.
aliases = {'CV_INLINE': 'CF_INLINE', 'CVImageBufferRef': 'CVBufferRef', 'kCVReturnError': 'kCVReturnFirst'}
# CoreFoundation type registrations: (Python name, type encoding, GetTypeID
# function name, fourth slot unused here — always None).
cftypes=[('CVBufferRef', b'^{__CVBuffer=}', 'CVBufferGetTypeID', None), ('CVDisplayLinkRef', b'^{__CVDisplayLink=}', 'CVDisplayLinkGetTypeID', None), ('CVOpenGLBufferPoolRef', b'^{__CVOpenGLBufferPool=}', 'CVOpenGLBufferPoolGetTypeID', None), ('CVOpenGLTextureCacheRef', b'^{__CVOpenGLTextureCache=}', 'CVOpenGLTextureCacheGetTypeID', None), ('CVPixelBufferPoolRef', b'^{__CVPixelBufferPool=}', 'CVPixelBufferPoolGetTypeID', None), ('CVOpenGLBufferRef', b'^{__CVOpenGLBuffer=}', 'CVOpenGLBufferGetTypeID', None), ('CVPixelBufferRef', b'^{__CVPixelBuffer=}', 'CVPixelBufferGetTypeID', None), ('CVOpenGLTextureRef', b'^{__CVOpenGLTexture=}', 'CVOpenGLTextureGetTypeID', None)]
# No expression metadata for this framework; kept for a uniform file layout.
expressions = {}
# END OF FILE
| 896.75
| 12,413
| 0.787726
| 1,921
| 25,109
| 9.84227
| 0.300885
| 0.004549
| 0.035542
| 0.028561
| 0.180145
| 0.142064
| 0.127413
| 0.101232
| 0.095203
| 0.066801
| 0
| 0.041437
| 0.030228
| 25,109
| 27
| 12,414
| 929.962963
| 0.735031
| 0.003744
| 0
| 0.105263
| 1
| 0.315789
| 0.82615
| 0.697321
| 0
| 0
| 0
| 0
| 0
| 1
| 0.210526
| false
| 0
| 0.052632
| 0.210526
| 0.263158
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 8
|
beb69c7f8a24205a42871951c66dbdd507d316dd
| 15,666
|
py
|
Python
|
guestroom/tests/test_views.py
|
YuriyCherniy/just-blog
|
ea9810a8e4db77f109d211bb45c04fb79d0f1e5c
|
[
"MIT"
] | 3
|
2022-01-15T18:23:34.000Z
|
2022-01-15T20:32:52.000Z
|
guestroom/tests/test_views.py
|
YuriyCherniy/just-blog
|
ea9810a8e4db77f109d211bb45c04fb79d0f1e5c
|
[
"MIT"
] | null | null | null |
guestroom/tests/test_views.py
|
YuriyCherniy/just-blog
|
ea9810a8e4db77f109d211bb45c04fb79d0f1e5c
|
[
"MIT"
] | 1
|
2022-01-19T11:31:52.000Z
|
2022-01-19T11:31:52.000Z
|
from django.db import IntegrityError, transaction
from django.test import Client, TestCase
from django.urls import reverse
from guestroom.models import GuestComment, GuestPost, NewGuestPostCounter
from users.models import BlogUser
class GuestPostListViewTestCase(TestCase):
    """Tests for the guest room list view and its new-post counter reset."""

    @classmethod
    def setUpTestData(cls):
        BlogUser.objects.create_superuser(username='s_user', password='0000')

    def setUp(self):
        self.c = Client()

    def test_status_code_200(self):
        """The list view responds 200 to an anonymous GET."""
        resp = self.c.get(reverse('guest_room'))
        self.assertEqual(resp.status_code, 200)

    def test_template_used(self):
        """The list view renders the guest post list template."""
        resp = self.c.get(reverse('guest_room'))
        self.assertTemplateUsed(resp, 'guestroom/guestpost_list.html')

    def test_reset_guest_post_counter(self):
        """Visiting the guest room as the superuser zeroes the counter."""
        NewGuestPostCounter.objects.first().add_one()
        self.c.login(username='s_user', password='0000')
        self.c.get(reverse('guest_room'))
        refreshed = NewGuestPostCounter.objects.first()
        self.assertEqual(refreshed.get_counter(), 0)
class GuestPostDetailViewTestCase(TestCase):
    """Tests for the guest post detail view, visible only when logged in."""

    @classmethod
    def setUpTestData(cls):
        BlogUser.objects.create_superuser(username='s_user', password='0000')

    def setUp(self):
        self.c = Client()

    def test_status_code_403(self):
        """Anonymous visitors are forbidden from the detail page."""
        resp = self.c.get(reverse('guest_post_detail', args=[1]))
        self.assertEqual(resp.status_code, 403)

    def test_status_code_200(self):
        """A logged-in superuser can open an existing post's detail page."""
        post = GuestPost.objects.create(
            anonymous_username='anonym', text='Some text'
        )
        self.c.login(username='s_user', password='0000')
        resp = self.c.get(reverse('guest_post_detail', args=[post.pk]))
        self.assertEqual(resp.status_code, 200)

    def test_template_used(self):
        """The detail page renders the guestpost detail template."""
        post = GuestPost.objects.create(
            anonymous_username='anonym', text='Some text'
        )
        self.c.login(username='s_user', password='0000')
        resp = self.c.get(reverse('guest_post_detail', args=[post.pk]))
        self.assertTemplateUsed(resp, 'guestroom/guestpost_detail.html')
class GuestPostCreateViewTestCase(TestCase):
    """Tests for the public guest post creation view."""

    @classmethod
    def setUpTestData(cls):
        BlogUser.objects.create_superuser(username='s_user', password='0000')

    def setUp(self):
        self.c = Client()

    def test_status_code_200_get(self):
        """The creation form is reachable anonymously."""
        resp = self.c.get(reverse('guest_post_create'))
        self.assertEqual(resp.status_code, 200)

    def test_status_code_200_post(self):
        """Submitting the form and following the redirect ends in a 200."""
        resp = self.c.post(
            reverse('guest_post_create'),
            {'anonymous_username': 'anonym', 'text': 'some text'},
            follow=True
        )
        self.assertEqual(resp.status_code, 200)

    # NOTE(review): disabled test kept for reference — presumably blocked by
    # the captcha on the create form (see the 'g-recaptcha-response' field in
    # the other disabled test below); confirm before re-enabling.
    # def test_template_used_post_valid_data(self):
    #     response = self.c.post(
    #         reverse('guest_post_create'),
    #         {'anonymous_username': 'anonym', 'text': 'some text'},
    #         follow=True
    #     )
    #     self.assertTemplateUsed(response, 'guestroom/guestpost_list.html')

    def test_template_used_post_invalid_data(self):
        """An invalid submission re-renders the creation form."""
        resp = self.c.post(
            reverse('guest_post_create'),
            {'anonymous_username': '', 'text': ''},
            follow=True
        )
        self.assertTemplateUsed(resp, 'guestroom/guestpost_form.html')

    # NOTE(review): disabled test kept for reference — confirm captcha
    # handling before re-enabling.
    # def test_post_counter(self):
    #     counter = NewGuestPostCounter()
    #     self.c.post(
    #         reverse('guest_post_create'),
    #         {'anonymous_username': 'anonym', 'text': 'some text', 'g-recaptcha-response': 's'},
    #         follow=True
    #     )
    #     self.assertEqual(counter.get_counter(), 1)
class GuestPostUpdateViewTestCase(TestCase):
    """Tests for the guest post update view (restricted to the superuser)."""

    @classmethod
    def setUpTestData(cls):
        BlogUser.objects.create_superuser(username='s_user', password='0000')

    def setUp(self):
        self.c = Client()

    def _make_post_and_login(self):
        """Create a guest post fixture and log the client in as the superuser.

        Returns the created post. Extracted because every authorised-path
        test in this class needed exactly this duplicated setup.
        """
        post = GuestPost.objects.create(
            anonymous_username='anonym', text='some text'
        )
        self.c.login(username='s_user', password='0000')
        return post

    def test_status_code_200_get(self):
        """The update form is served to the logged-in superuser."""
        post = self._make_post_and_login()
        response = self.c.get(reverse('guest_post_update', args=[post.pk]))
        self.assertEqual(response.status_code, 200)

    def test_status_code_200_post(self):
        """A valid update submission redirects and ends in a 200."""
        post = self._make_post_and_login()
        response = self.c.post(
            reverse('guest_post_update', args=[post.pk]),
            {'anonymous_username': 'anonym', 'text': 'some text'},
            follow=True
        )
        self.assertEqual(response.status_code, 200)

    def test_status_code_403(self):
        """Anonymous clients are forbidden from the update view."""
        response = self.c.get(reverse('guest_post_update', args=[1]))
        self.assertEqual(response.status_code, 403)

    def test_template_used_post_valid_data(self):
        """A valid update lands back on the guest post list."""
        post = self._make_post_and_login()
        response = self.c.post(
            reverse('guest_post_update', args=[post.pk]),
            {'anonymous_username': 'anonym', 'text': 'some text'},
            follow=True
        )
        self.assertTemplateUsed(response, 'guestroom/guestpost_list.html')

    def test_template_used_post_invalid_data(self):
        """An invalid update re-renders the update form."""
        post = self._make_post_and_login()
        response = self.c.post(
            reverse('guest_post_update', args=[post.pk]),
            {'anonymous_username': '', 'text': ''},
            follow=True
        )
        self.assertTemplateUsed(
            response, 'guestroom/guestpost_update_form.html'
        )
class GuestPostDeleteViewTestCase(TestCase):
    """Tests for the guest post delete view (restricted to the superuser)."""

    @classmethod
    def setUpTestData(cls):
        BlogUser.objects.create_superuser(username='s_user', password='0000')

    def setUp(self):
        self.c = Client()

    def _make_post_and_login(self):
        """Create a guest post fixture and log the client in as the superuser.

        Returns the created post. Extracted because every authorised-path
        test in this class needed exactly this duplicated setup.
        """
        post = GuestPost.objects.create(
            anonymous_username='anonym', text='some text'
        )
        self.c.login(username='s_user', password='0000')
        return post

    def test_status_code_200(self):
        """DELETE by the superuser redirects and ends in a 200."""
        post = self._make_post_and_login()
        response = self.c.delete(
            reverse('guest_post_delete', args=[post.pk]), follow=True
        )
        self.assertEqual(response.status_code, 200)

    def test_status_code_403(self):
        """Anonymous clients are forbidden from deleting."""
        response = self.c.delete(reverse('guest_post_delete', args=[1]))
        self.assertEqual(response.status_code, 403)

    def test_template_used_delete(self):
        """After a DELETE the client lands on the guest post list."""
        post = self._make_post_and_login()
        response = self.c.delete(
            reverse('guest_post_delete', args=[post.pk]), follow=True
        )
        self.assertTemplateUsed(response, 'guestroom/guestpost_list.html')

    def test_template_used_get(self):
        """A GET is expected to render the delete-confirmation template."""
        post = self._make_post_and_login()
        response = self.c.get(
            reverse('guest_post_delete', args=[post.pk]), follow=True
        )
        self.assertTemplateUsed(
            response, 'guestroom/guestpost_confirm_delete.html'
        )
class GuestCommentCreateViewTestCase(TestCase):
    """Tests for the guest comment create view (superuser-only)."""

    @classmethod
    def setUpTestData(cls):
        # One superuser and one guest post shared by all tests.
        BlogUser.objects.create_superuser(username='s_user', password='0000')
        GuestPost.objects.create(
            anonymous_username='anonym',
            text='some text'
        )

    def setUp(self):
        self.c = Client()

    def test_status_code_200(self):
        self.c.login(username='s_user', password='0000')
        response = self.c.post(reverse('guest_comment_create'))
        self.assertEqual(response.status_code, 200)

    def test_status_code_403(self):
        # Unauthenticated clients must be rejected.
        response = self.c.post(reverse('guest_comment_create'))
        self.assertEqual(response.status_code, 403)

    def test_guest_comment_create_valid_data(self):
        self.c.login(username='s_user', password='0000')
        guest_post = GuestPost.objects.first()
        self.c.post(
            reverse('guest_comment_create'),
            {'username': 's_user', 'text': 'some text', 'guest_post_pk': guest_post.pk}
        )
        # count() asks the database directly instead of fetching every row.
        self.assertEqual(GuestComment.objects.count(), 1)

    def test_guest_comment_create_valid_data_template_used(self):
        self.c.login(username='s_user', password='0000')
        guest_post = GuestPost.objects.first()
        response = self.c.post(
            reverse('guest_comment_create'),
            {'username': 's_user', 'text': 'some text', 'guest_post_pk': guest_post.pk},
            follow=True
        )
        self.assertTemplateUsed(response, 'guestroom/guestpost_list.html')

    def test_guest_comment_create_invalid_data(self):
        self.c.login(username='s_user', password='0000')
        guest_post = GuestPost.objects.first()
        self.c.post(
            reverse('guest_comment_create'),
            {'username': '', 'text': '', 'guest_post_pk': guest_post.pk}
        )
        self.assertEqual(GuestComment.objects.count(), 0)

    def test_guest_comment_create_invalid_data_template_used(self):
        self.c.login(username='s_user', password='0000')
        guest_post = GuestPost.objects.first()
        response = self.c.post(
            reverse('guest_comment_create'),
            {'username': '', 'text': '', 'guest_post_pk': guest_post.pk},
            follow=True
        )
        self.assertTemplateUsed(response, 'guestroom/guestcomment_form.html')

    def test_guest_comment_create_comment_already_exists(self):
        self.c.login(username='s_user', password='0000')
        guest_post = GuestPost.objects.first()
        self.c.post(
            reverse('guest_comment_create'),
            {'username': 's_user', 'text': 'some text', 'guest_post_pk': guest_post.pk}
        )
        try:
            # atomic() keeps the test connection usable if the duplicate
            # insert violates a database-level uniqueness constraint.
            with transaction.atomic():
                self.c.post(
                    reverse('guest_comment_create'),
                    {'username': 's_user', 'text': 'new text', 'guest_post_pk': guest_post.pk}
                )
        except IntegrityError:
            pass
        # Either way the duplicate must not have been persisted.
        self.assertEqual(GuestComment.objects.count(), 1)

    def test_guest_comment_create_comment_already_exists_status_code_302(self):
        self.c.login(username='s_user', password='0000')
        guest_post = GuestPost.objects.first()
        self.c.post(
            reverse('guest_comment_create'),
            {'username': 's_user', 'text': 'some text', 'guest_post_pk': guest_post.pk}
        )
        # BUG FIX: 'response' was previously assigned only inside the try
        # block, so an IntegrityError left it unbound and the final assertion
        # crashed with a NameError instead of reporting a clean test failure.
        response = None
        try:
            with transaction.atomic():
                response = self.c.post(
                    reverse('guest_comment_create'),
                    {'username': 's_user', 'text': 'new text', 'guest_post_pk': guest_post.pk},
                )
        except IntegrityError:
            self.fail(
                'duplicate comment POST raised IntegrityError '
                'instead of redirecting'
            )
        self.assertEqual(response.status_code, 302)
class GuestCommentUpdateViewTestCase(TestCase):
    """Tests for the guest comment update view."""

    @classmethod
    def setUpTestData(cls):
        BlogUser.objects.create_superuser(
            username='s_user', password='0000'
        )
        guest_post = GuestPost.objects.create(
            anonymous_username='anonym', text='some text'
        )
        GuestComment.objects.create(
            username='s_user',
            text='some text',
            guest_post=guest_post
        )

    def setUp(self):
        self.c = Client()

    def _login_and_get_comment(self):
        # Authenticate as the superuser and return the seeded comment.
        self.c.login(username='s_user', password='0000')
        return GuestComment.objects.first()

    def test_status_code_200(self):
        comment = self._login_and_get_comment()
        response = self.c.get(
            reverse('guest_comment_update', args=[comment.pk])
        )
        self.assertEqual(response.status_code, 200)

    def test_status_code_403(self):
        # Unauthenticated clients must be rejected.
        response = self.c.get(reverse('guest_comment_update', args=[1]))
        self.assertEqual(response.status_code, 403)

    def test_template_used_get(self):
        comment = self._login_and_get_comment()
        response = self.c.get(
            reverse('guest_comment_update', args=[comment.pk])
        )
        self.assertTemplateUsed(
            response, 'guestroom/guestcomment_update_form.html'
        )

    def test_template_used_post_valid_data(self):
        comment = self._login_and_get_comment()
        response = self.c.post(
            reverse('guest_comment_update', args=[comment.pk]),
            {'username': 's_user', 'text': 'new text'},
            follow=True
        )
        self.assertTemplateUsed(response, 'guestroom/guestpost_list.html')

    def test_template_used_post_invalid_data(self):
        comment = self._login_and_get_comment()
        response = self.c.post(
            reverse('guest_comment_update', args=[comment.pk]),
            {'username': '', 'text': ''},
            follow=True
        )
        self.assertTemplateUsed(
            response, 'guestroom/guestcomment_update_form.html'
        )
class GuestCommentDeleteViewTestCase(TestCase):
    """Tests for the guest comment delete view."""

    @classmethod
    def setUpTestData(cls):
        BlogUser.objects.create_superuser(
            username='s_user', password='0000'
        )
        guest_post = GuestPost.objects.create(
            anonymous_username='anonym', text='some text'
        )
        GuestComment.objects.create(
            username='s_user',
            text='some text',
            guest_post=guest_post
        )

    def setUp(self):
        self.c = Client()

    def _login_and_get_comment(self):
        # Authenticate as the superuser and return the seeded comment.
        self.c.login(username='s_user', password='0000')
        return GuestComment.objects.first()

    def test_status_code_200_get(self):
        comment = self._login_and_get_comment()
        response = self.c.get(
            reverse('guest_comment_delete', args=[comment.pk])
        )
        self.assertEqual(response.status_code, 200)

    def test_status_code_200_delete(self):
        comment = self._login_and_get_comment()
        response = self.c.delete(
            reverse('guest_comment_delete', args=[comment.pk]), follow=True
        )
        self.assertEqual(response.status_code, 200)

    def test_status_code_403_get(self):
        # Unauthenticated clients must be rejected on GET...
        response = self.c.get(reverse('guest_comment_delete', args=[1]))
        self.assertEqual(response.status_code, 403)

    def test_status_code_403_delete(self):
        # ...and on DELETE as well.
        response = self.c.delete(reverse('guest_comment_delete', args=[1]))
        self.assertEqual(response.status_code, 403)

    def test_template_used_get(self):
        comment = self._login_and_get_comment()
        response = self.c.get(
            reverse('guest_comment_delete', args=[comment.pk]), follow=True
        )
        self.assertTemplateUsed(
            response, 'guestroom/guestcomment_confirm_delete.html'
        )

    def test_template_used_delete(self):
        comment = self._login_and_get_comment()
        response = self.c.delete(
            reverse('guest_comment_delete', args=[comment.pk]), follow=True
        )
        self.assertTemplateUsed(response, 'guestroom/guestpost_list.html')
| 35.52381
| 97
| 0.62358
| 1,734
| 15,666
| 5.410035
| 0.055363
| 0.039441
| 0.058203
| 0.073873
| 0.929538
| 0.908752
| 0.904488
| 0.888605
| 0.856838
| 0.82326
| 0
| 0.021955
| 0.252777
| 15,666
| 440
| 98
| 35.604545
| 0.779429
| 0.035491
| 0
| 0.727778
| 0
| 0
| 0.144892
| 0.030542
| 0
| 0
| 0
| 0
| 0.102778
| 1
| 0.147222
| false
| 0.097222
| 0.013889
| 0
| 0.183333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
fe2a83d933ab7c857f93768f35f78c1acef19380
| 55
|
py
|
Python
|
envs/gap_env/block_env/__init__.py
|
Wangweiyao/causal-manipulation
|
8e695a33e5d7cf32ce0d878dd66e5a57fde76b84
|
[
"MIT"
] | null | null | null |
envs/gap_env/block_env/__init__.py
|
Wangweiyao/causal-manipulation
|
8e695a33e5d7cf32ce0d878dd66e5a57fde76b84
|
[
"MIT"
] | null | null | null |
envs/gap_env/block_env/__init__.py
|
Wangweiyao/causal-manipulation
|
8e695a33e5d7cf32ce0d878dd66e5a57fde76b84
|
[
"MIT"
] | null | null | null |
from gap_env.block_env.block_env import SawyerBlockEnv
| 27.5
| 54
| 0.890909
| 9
| 55
| 5.111111
| 0.666667
| 0.347826
| 0.478261
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.072727
| 55
| 1
| 55
| 55
| 0.901961
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
fe5576f659f0dc9e63048915f61eb46802f68b2a
| 123,016
|
py
|
Python
|
tinkoff/invest/grpc/marketdata_pb2.py
|
forked-group/invest-python
|
3398391f5bb4a52020c312855de175cfe8cdc021
|
[
"Apache-2.0"
] | null | null | null |
tinkoff/invest/grpc/marketdata_pb2.py
|
forked-group/invest-python
|
3398391f5bb4a52020c312855de175cfe8cdc021
|
[
"Apache-2.0"
] | null | null | null |
tinkoff/invest/grpc/marketdata_pb2.py
|
forked-group/invest-python
|
3398391f5bb4a52020c312855de175cfe8cdc021
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tinkoff/invest/grpc/marketdata.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
from tinkoff.invest.grpc import common_pb2 as tinkoff_dot_invest_dot_grpc_dot_common__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='tinkoff/invest/grpc/marketdata.proto',
package='tinkoff.public.invest.api.contract.v1',
syntax='proto3',
serialized_options=b'\n\034ru.tinkoff.piapi.contract.v1P\001Z\014./;investapi\242\002\005TIAPI\252\002\024Tinkoff.InvestApi.V1\312\002\021Tinkoff\\Invest\\V1',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n$tinkoff/invest/grpc/marketdata.proto\x12%tinkoff.public.invest.api.contract.v1\x1a\x1fgoogle/protobuf/timestamp.proto\x1a tinkoff/invest/grpc/common.proto\"\x99\x04\n\x11MarketDataRequest\x12\x63\n\x19subscribe_candles_request\x18\x01 \x01(\x0b\x32>.tinkoff.public.invest.api.contract.v1.SubscribeCandlesRequestH\x00\x12h\n\x1csubscribe_order_book_request\x18\x02 \x01(\x0b\x32@.tinkoff.public.invest.api.contract.v1.SubscribeOrderBookRequestH\x00\x12\x61\n\x18subscribe_trades_request\x18\x03 \x01(\x0b\x32=.tinkoff.public.invest.api.contract.v1.SubscribeTradesRequestH\x00\x12]\n\x16subscribe_info_request\x18\x04 \x01(\x0b\x32;.tinkoff.public.invest.api.contract.v1.SubscribeInfoRequestH\x00\x12h\n\x1csubscribe_last_price_request\x18\x05 \x01(\x0b\x32@.tinkoff.public.invest.api.contract.v1.SubscribeLastPriceRequestH\x00\x42\t\n\x07payload\"\xc0\x07\n\x12MarketDataResponse\x12\x65\n\x1asubscribe_candles_response\x18\x01 \x01(\x0b\x32?.tinkoff.public.invest.api.contract.v1.SubscribeCandlesResponseH\x00\x12j\n\x1dsubscribe_order_book_response\x18\x02 \x01(\x0b\x32\x41.tinkoff.public.invest.api.contract.v1.SubscribeOrderBookResponseH\x00\x12\x63\n\x19subscribe_trades_response\x18\x03 \x01(\x0b\x32>.tinkoff.public.invest.api.contract.v1.SubscribeTradesResponseH\x00\x12_\n\x17subscribe_info_response\x18\x04 \x01(\x0b\x32<.tinkoff.public.invest.api.contract.v1.SubscribeInfoResponseH\x00\x12?\n\x06\x63\x61ndle\x18\x05 \x01(\x0b\x32-.tinkoff.public.invest.api.contract.v1.CandleH\x00\x12=\n\x05trade\x18\x06 \x01(\x0b\x32,.tinkoff.public.invest.api.contract.v1.TradeH\x00\x12\x45\n\torderbook\x18\x07 \x01(\x0b\x32\x30.tinkoff.public.invest.api.contract.v1.OrderBookH\x00\x12N\n\x0etrading_status\x18\x08 \x01(\x0b\x32\x34.tinkoff.public.invest.api.contract.v1.TradingStatusH\x00\x12;\n\x04ping\x18\t \x01(\x0b\x32+.tinkoff.public.invest.api.contract.v1.PingH\x00\x12j\n\x1dsubscribe_last_price_response\x18\n 
\x01(\x0b\x32\x41.tinkoff.public.invest.api.contract.v1.SubscribeLastPriceResponseH\x00\x12\x46\n\nlast_price\x18\x0b \x01(\x0b\x32\x30.tinkoff.public.invest.api.contract.v1.LastPriceH\x00\x42\t\n\x07payload\"\xbf\x01\n\x17SubscribeCandlesRequest\x12V\n\x13subscription_action\x18\x01 \x01(\x0e\x32\x39.tinkoff.public.invest.api.contract.v1.SubscriptionAction\x12L\n\x0binstruments\x18\x02 \x03(\x0b\x32\x37.tinkoff.public.invest.api.contract.v1.CandleInstrument\"o\n\x10\x43\x61ndleInstrument\x12\x0c\n\x04\x66igi\x18\x01 \x01(\t\x12M\n\x08interval\x18\x02 \x01(\x0e\x32;.tinkoff.public.invest.api.contract.v1.SubscriptionInterval\"\x89\x01\n\x18SubscribeCandlesResponse\x12\x13\n\x0btracking_id\x18\x01 \x01(\t\x12X\n\x15\x63\x61ndles_subscriptions\x18\x02 \x03(\x0b\x32\x39.tinkoff.public.invest.api.contract.v1.CandleSubscription\"\xc9\x01\n\x12\x43\x61ndleSubscription\x12\x0c\n\x04\x66igi\x18\x01 \x01(\t\x12M\n\x08interval\x18\x02 \x01(\x0e\x32;.tinkoff.public.invest.api.contract.v1.SubscriptionInterval\x12V\n\x13subscription_status\x18\x03 \x01(\x0e\x32\x39.tinkoff.public.invest.api.contract.v1.SubscriptionStatus\"\xc4\x01\n\x19SubscribeOrderBookRequest\x12V\n\x13subscription_action\x18\x01 \x01(\x0e\x32\x39.tinkoff.public.invest.api.contract.v1.SubscriptionAction\x12O\n\x0binstruments\x18\x02 \x03(\x0b\x32:.tinkoff.public.invest.api.contract.v1.OrderBookInstrument\"2\n\x13OrderBookInstrument\x12\x0c\n\x04\x66igi\x18\x01 \x01(\t\x12\r\n\x05\x64\x65pth\x18\x02 \x01(\x05\"\x91\x01\n\x1aSubscribeOrderBookResponse\x12\x13\n\x0btracking_id\x18\x01 \x01(\t\x12^\n\x18order_book_subscriptions\x18\x02 \x03(\x0b\x32<.tinkoff.public.invest.api.contract.v1.OrderBookSubscription\"\x8c\x01\n\x15OrderBookSubscription\x12\x0c\n\x04\x66igi\x18\x01 \x01(\t\x12\r\n\x05\x64\x65pth\x18\x02 \x01(\x05\x12V\n\x13subscription_status\x18\x03 \x01(\x0e\x32\x39.tinkoff.public.invest.api.contract.v1.SubscriptionStatus\"\xbd\x01\n\x16SubscribeTradesRequest\x12V\n\x13subscription_action\x18\x01 
\x01(\x0e\x32\x39.tinkoff.public.invest.api.contract.v1.SubscriptionAction\x12K\n\x0binstruments\x18\x02 \x03(\x0b\x32\x36.tinkoff.public.invest.api.contract.v1.TradeInstrument\"\x1f\n\x0fTradeInstrument\x12\x0c\n\x04\x66igi\x18\x01 \x01(\t\"\x85\x01\n\x17SubscribeTradesResponse\x12\x13\n\x0btracking_id\x18\x01 \x01(\t\x12U\n\x13trade_subscriptions\x18\x02 \x03(\x0b\x32\x38.tinkoff.public.invest.api.contract.v1.TradeSubscription\"y\n\x11TradeSubscription\x12\x0c\n\x04\x66igi\x18\x01 \x01(\t\x12V\n\x13subscription_status\x18\x02 \x01(\x0e\x32\x39.tinkoff.public.invest.api.contract.v1.SubscriptionStatus\"\xba\x01\n\x14SubscribeInfoRequest\x12V\n\x13subscription_action\x18\x01 \x01(\x0e\x32\x39.tinkoff.public.invest.api.contract.v1.SubscriptionAction\x12J\n\x0binstruments\x18\x02 \x03(\x0b\x32\x35.tinkoff.public.invest.api.contract.v1.InfoInstrument\"\x1e\n\x0eInfoInstrument\x12\x0c\n\x04\x66igi\x18\x01 \x01(\t\"\x81\x01\n\x15SubscribeInfoResponse\x12\x13\n\x0btracking_id\x18\x01 \x01(\t\x12S\n\x12info_subscriptions\x18\x02 \x03(\x0b\x32\x37.tinkoff.public.invest.api.contract.v1.InfoSubscription\"x\n\x10InfoSubscription\x12\x0c\n\x04\x66igi\x18\x01 \x01(\t\x12V\n\x13subscription_status\x18\x02 \x01(\x0e\x32\x39.tinkoff.public.invest.api.contract.v1.SubscriptionStatus\"\xc4\x01\n\x19SubscribeLastPriceRequest\x12V\n\x13subscription_action\x18\x01 \x01(\x0e\x32\x39.tinkoff.public.invest.api.contract.v1.SubscriptionAction\x12O\n\x0binstruments\x18\x02 \x03(\x0b\x32:.tinkoff.public.invest.api.contract.v1.LastPriceInstrument\"#\n\x13LastPriceInstrument\x12\x0c\n\x04\x66igi\x18\x01 \x01(\t\"\x91\x01\n\x1aSubscribeLastPriceResponse\x12\x13\n\x0btracking_id\x18\x01 \x01(\t\x12^\n\x18last_price_subscriptions\x18\x02 \x03(\x0b\x32<.tinkoff.public.invest.api.contract.v1.LastPriceSubscription\"}\n\x15LastPriceSubscription\x12\x0c\n\x04\x66igi\x18\x01 \x01(\t\x12V\n\x13subscription_status\x18\x02 
\x01(\x0e\x32\x39.tinkoff.public.invest.api.contract.v1.SubscriptionStatus\"\xd2\x03\n\x06\x43\x61ndle\x12\x0c\n\x04\x66igi\x18\x01 \x01(\t\x12M\n\x08interval\x18\x02 \x01(\x0e\x32;.tinkoff.public.invest.api.contract.v1.SubscriptionInterval\x12>\n\x04open\x18\x03 \x01(\x0b\x32\x30.tinkoff.public.invest.api.contract.v1.Quotation\x12>\n\x04high\x18\x04 \x01(\x0b\x32\x30.tinkoff.public.invest.api.contract.v1.Quotation\x12=\n\x03low\x18\x05 \x01(\x0b\x32\x30.tinkoff.public.invest.api.contract.v1.Quotation\x12?\n\x05\x63lose\x18\x06 \x01(\x0b\x32\x30.tinkoff.public.invest.api.contract.v1.Quotation\x12\x0e\n\x06volume\x18\x07 \x01(\x03\x12(\n\x04time\x18\x08 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x31\n\rlast_trade_ts\x18\t \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xeb\x02\n\tOrderBook\x12\x0c\n\x04\x66igi\x18\x01 \x01(\t\x12\r\n\x05\x64\x65pth\x18\x02 \x01(\x05\x12\x15\n\ris_consistent\x18\x03 \x01(\x08\x12:\n\x04\x62ids\x18\x04 \x03(\x0b\x32,.tinkoff.public.invest.api.contract.v1.Order\x12:\n\x04\x61sks\x18\x05 \x03(\x0b\x32,.tinkoff.public.invest.api.contract.v1.Order\x12(\n\x04time\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x42\n\x08limit_up\x18\x07 \x01(\x0b\x32\x30.tinkoff.public.invest.api.contract.v1.Quotation\x12\x44\n\nlimit_down\x18\x08 \x01(\x0b\x32\x30.tinkoff.public.invest.api.contract.v1.Quotation\"Z\n\x05Order\x12?\n\x05price\x18\x01 \x01(\x0b\x32\x30.tinkoff.public.invest.api.contract.v1.Quotation\x12\x10\n\x08quantity\x18\x02 \x01(\x03\"\xdc\x01\n\x05Trade\x12\x0c\n\x04\x66igi\x18\x01 \x01(\t\x12H\n\tdirection\x18\x02 \x01(\x0e\x32\x35.tinkoff.public.invest.api.contract.v1.TradeDirection\x12?\n\x05price\x18\x03 \x01(\x0b\x32\x30.tinkoff.public.invest.api.contract.v1.Quotation\x12\x10\n\x08quantity\x18\x04 \x01(\x03\x12(\n\x04time\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xe6\x01\n\rTradingStatus\x12\x0c\n\x04\x66igi\x18\x01 \x01(\t\x12T\n\x0etrading_status\x18\x02 
\x01(\x0e\x32<.tinkoff.public.invest.api.contract.v1.SecurityTradingStatus\x12(\n\x04time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\"\n\x1alimit_order_available_flag\x18\x04 \x01(\x08\x12#\n\x1bmarket_order_available_flag\x18\x05 \x01(\x08\"\xbc\x01\n\x11GetCandlesRequest\x12\x0c\n\x04\x66igi\x18\x01 \x01(\t\x12(\n\x04\x66rom\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12&\n\x02to\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12G\n\x08interval\x18\x04 \x01(\x0e\x32\x35.tinkoff.public.invest.api.contract.v1.CandleInterval\"\\\n\x12GetCandlesResponse\x12\x46\n\x07\x63\x61ndles\x18\x01 \x03(\x0b\x32\x35.tinkoff.public.invest.api.contract.v1.HistoricCandle\"\xdf\x02\n\x0eHistoricCandle\x12>\n\x04open\x18\x01 \x01(\x0b\x32\x30.tinkoff.public.invest.api.contract.v1.Quotation\x12>\n\x04high\x18\x02 \x01(\x0b\x32\x30.tinkoff.public.invest.api.contract.v1.Quotation\x12=\n\x03low\x18\x03 \x01(\x0b\x32\x30.tinkoff.public.invest.api.contract.v1.Quotation\x12?\n\x05\x63lose\x18\x04 \x01(\x0b\x32\x30.tinkoff.public.invest.api.contract.v1.Quotation\x12\x0e\n\x06volume\x18\x05 \x01(\x03\x12(\n\x04time\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x13\n\x0bis_complete\x18\x07 \x01(\x08\"$\n\x14GetLastPricesRequest\x12\x0c\n\x04\x66igi\x18\x01 \x03(\t\"^\n\x15GetLastPricesResponse\x12\x45\n\x0blast_prices\x18\x01 \x03(\x0b\x32\x30.tinkoff.public.invest.api.contract.v1.LastPrice\"\x84\x01\n\tLastPrice\x12\x0c\n\x04\x66igi\x18\x01 \x01(\t\x12?\n\x05price\x18\x02 \x01(\x0b\x32\x30.tinkoff.public.invest.api.contract.v1.Quotation\x12(\n\x04time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"2\n\x13GetOrderBookRequest\x12\x0c\n\x04\x66igi\x18\x01 \x01(\t\x12\r\n\x05\x64\x65pth\x18\x02 \x01(\x05\"\xc2\x03\n\x14GetOrderBookResponse\x12\x0c\n\x04\x66igi\x18\x01 \x01(\t\x12\r\n\x05\x64\x65pth\x18\x02 \x01(\x05\x12:\n\x04\x62ids\x18\x03 \x03(\x0b\x32,.tinkoff.public.invest.api.contract.v1.Order\x12:\n\x04\x61sks\x18\x04 
\x03(\x0b\x32,.tinkoff.public.invest.api.contract.v1.Order\x12\x44\n\nlast_price\x18\x05 \x01(\x0b\x32\x30.tinkoff.public.invest.api.contract.v1.Quotation\x12\x45\n\x0b\x63lose_price\x18\x06 \x01(\x0b\x32\x30.tinkoff.public.invest.api.contract.v1.Quotation\x12\x42\n\x08limit_up\x18\x07 \x01(\x0b\x32\x30.tinkoff.public.invest.api.contract.v1.Quotation\x12\x44\n\nlimit_down\x18\x08 \x01(\x0b\x32\x30.tinkoff.public.invest.api.contract.v1.Quotation\"\'\n\x17GetTradingStatusRequest\x12\x0c\n\x04\x66igi\x18\x01 \x01(\t\"\xc7\x01\n\x18GetTradingStatusResponse\x12\x0c\n\x04\x66igi\x18\x01 \x01(\t\x12T\n\x0etrading_status\x18\x02 \x01(\x0e\x32<.tinkoff.public.invest.api.contract.v1.SecurityTradingStatus\x12\"\n\x1alimit_order_available_flag\x18\x03 \x01(\x08\x12#\n\x1bmarket_order_available_flag\x18\x04 \x01(\x08*\x81\x01\n\x12SubscriptionAction\x12#\n\x1fSUBSCRIPTION_ACTION_UNSPECIFIED\x10\x00\x12!\n\x1dSUBSCRIPTION_ACTION_SUBSCRIBE\x10\x01\x12#\n\x1fSUBSCRIPTION_ACTION_UNSUBSCRIBE\x10\x02*\x8b\x01\n\x14SubscriptionInterval\x12%\n!SUBSCRIPTION_INTERVAL_UNSPECIFIED\x10\x00\x12$\n 
SUBSCRIPTION_INTERVAL_ONE_MINUTE\x10\x01\x12&\n\"SUBSCRIPTION_INTERVAL_FIVE_MINUTES\x10\x02*\xea\x02\n\x12SubscriptionStatus\x12#\n\x1fSUBSCRIPTION_STATUS_UNSPECIFIED\x10\x00\x12\x1f\n\x1bSUBSCRIPTION_STATUS_SUCCESS\x10\x01\x12,\n(SUBSCRIPTION_STATUS_INSTRUMENT_NOT_FOUND\x10\x02\x12\x36\n2SUBSCRIPTION_STATUS_SUBSCRIPTION_ACTION_IS_INVALID\x10\x03\x12(\n$SUBSCRIPTION_STATUS_DEPTH_IS_INVALID\x10\x04\x12+\n\'SUBSCRIPTION_STATUS_INTERVAL_IS_INVALID\x10\x05\x12)\n%SUBSCRIPTION_STATUS_LIMIT_IS_EXCEEDED\x10\x06\x12&\n\"SUBSCRIPTION_STATUS_INTERNAL_ERROR\x10\x07*d\n\x0eTradeDirection\x12\x1f\n\x1bTRADE_DIRECTION_UNSPECIFIED\x10\x00\x12\x17\n\x13TRADE_DIRECTION_BUY\x10\x01\x12\x18\n\x14TRADE_DIRECTION_SELL\x10\x02*\xb6\x01\n\x0e\x43\x61ndleInterval\x12\x1f\n\x1b\x43\x41NDLE_INTERVAL_UNSPECIFIED\x10\x00\x12\x19\n\x15\x43\x41NDLE_INTERVAL_1_MIN\x10\x01\x12\x19\n\x15\x43\x41NDLE_INTERVAL_5_MIN\x10\x02\x12\x1a\n\x16\x43\x41NDLE_INTERVAL_15_MIN\x10\x03\x12\x18\n\x14\x43\x41NDLE_INTERVAL_HOUR\x10\x04\x12\x17\n\x13\x43\x41NDLE_INTERVAL_DAY\x10\x05\x32\xc4\x04\n\x11MarketDataService\x12\x81\x01\n\nGetCandles\x12\x38.tinkoff.public.invest.api.contract.v1.GetCandlesRequest\x1a\x39.tinkoff.public.invest.api.contract.v1.GetCandlesResponse\x12\x8a\x01\n\rGetLastPrices\x12;.tinkoff.public.invest.api.contract.v1.GetLastPricesRequest\x1a<.tinkoff.public.invest.api.contract.v1.GetLastPricesResponse\x12\x87\x01\n\x0cGetOrderBook\x12:.tinkoff.public.invest.api.contract.v1.GetOrderBookRequest\x1a;.tinkoff.public.invest.api.contract.v1.GetOrderBookResponse\x12\x93\x01\n\x10GetTradingStatus\x12>.tinkoff.public.invest.api.contract.v1.GetTradingStatusRequest\x1a?.tinkoff.public.invest.api.contract.v1.GetTradingStatusResponse2\xa7\x01\n\x17MarketDataStreamService\x12\x8b\x01\n\x10MarketDataStream\x12\x38.tinkoff.public.invest.api.contract.v1.MarketDataRequest\x1a\x39.tinkoff.public.invest.api.contract.v1.MarketDataResponse(\x01\x30\x01\x42\x61\n\x1cru.tinkoff.piapi.contract.v1P\x01Z\x0c./;investapi\
xa2\x02\x05TIAPI\xaa\x02\x14Tinkoff.InvestApi.V1\xca\x02\x11Tinkoff\\Invest\\V1b\x06proto3'
,
dependencies=[google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,tinkoff_dot_invest_dot_grpc_dot_common__pb2.DESCRIPTOR,])
# protoc-generated descriptor for the SubscriptionAction enum
# (subscribe / unsubscribe actions for a market-data stream). DO NOT EDIT.
_SUBSCRIPTIONACTION = _descriptor.EnumDescriptor(
  name='SubscriptionAction',
  full_name='tinkoff.public.invest.api.contract.v1.SubscriptionAction',
  filename=None,
  file=DESCRIPTOR,
  create_key=_descriptor._internal_create_key,
  values=[
    _descriptor.EnumValueDescriptor(
      name='SUBSCRIPTION_ACTION_UNSPECIFIED', index=0, number=0,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='SUBSCRIPTION_ACTION_SUBSCRIBE', index=1, number=1,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='SUBSCRIPTION_ACTION_UNSUBSCRIBE', index=2, number=2,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
  ],
  containing_type=None,
  serialized_options=None,
  # Offsets of this enum within the file's serialized_pb blob.
  serialized_start=7352,
  serialized_end=7481,
)
_sym_db.RegisterEnumDescriptor(_SUBSCRIPTIONACTION)

# Public wrapper exposing the enum values by name (e.g. SubscriptionAction.Value(...)).
SubscriptionAction = enum_type_wrapper.EnumTypeWrapper(_SUBSCRIPTIONACTION)
# protoc-generated descriptor for the SubscriptionInterval enum
# (candle interval for a candles subscription). DO NOT EDIT.
_SUBSCRIPTIONINTERVAL = _descriptor.EnumDescriptor(
  name='SubscriptionInterval',
  full_name='tinkoff.public.invest.api.contract.v1.SubscriptionInterval',
  filename=None,
  file=DESCRIPTOR,
  create_key=_descriptor._internal_create_key,
  values=[
    _descriptor.EnumValueDescriptor(
      name='SUBSCRIPTION_INTERVAL_UNSPECIFIED', index=0, number=0,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='SUBSCRIPTION_INTERVAL_ONE_MINUTE', index=1, number=1,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='SUBSCRIPTION_INTERVAL_FIVE_MINUTES', index=2, number=2,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
  ],
  containing_type=None,
  serialized_options=None,
  # Offsets of this enum within the file's serialized_pb blob.
  serialized_start=7484,
  serialized_end=7623,
)
_sym_db.RegisterEnumDescriptor(_SUBSCRIPTIONINTERVAL)

# Public wrapper exposing the enum values by name.
SubscriptionInterval = enum_type_wrapper.EnumTypeWrapper(_SUBSCRIPTIONINTERVAL)
# protoc-generated descriptor for the SubscriptionStatus enum
# (result of a subscribe/unsubscribe request). DO NOT EDIT.
_SUBSCRIPTIONSTATUS = _descriptor.EnumDescriptor(
  name='SubscriptionStatus',
  full_name='tinkoff.public.invest.api.contract.v1.SubscriptionStatus',
  filename=None,
  file=DESCRIPTOR,
  create_key=_descriptor._internal_create_key,
  values=[
    _descriptor.EnumValueDescriptor(
      name='SUBSCRIPTION_STATUS_UNSPECIFIED', index=0, number=0,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='SUBSCRIPTION_STATUS_SUCCESS', index=1, number=1,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='SUBSCRIPTION_STATUS_INSTRUMENT_NOT_FOUND', index=2, number=2,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='SUBSCRIPTION_STATUS_SUBSCRIPTION_ACTION_IS_INVALID', index=3, number=3,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='SUBSCRIPTION_STATUS_DEPTH_IS_INVALID', index=4, number=4,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='SUBSCRIPTION_STATUS_INTERVAL_IS_INVALID', index=5, number=5,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='SUBSCRIPTION_STATUS_LIMIT_IS_EXCEEDED', index=6, number=6,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='SUBSCRIPTION_STATUS_INTERNAL_ERROR', index=7, number=7,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
  ],
  containing_type=None,
  serialized_options=None,
  # Offsets of this enum within the file's serialized_pb blob.
  serialized_start=7626,
  serialized_end=7988,
)
_sym_db.RegisterEnumDescriptor(_SUBSCRIPTIONSTATUS)

# Public wrapper exposing the enum values by name.
SubscriptionStatus = enum_type_wrapper.EnumTypeWrapper(_SUBSCRIPTIONSTATUS)
# protoc-generated descriptor for the TradeDirection enum
# (buy/sell side of a trade). DO NOT EDIT.
_TRADEDIRECTION = _descriptor.EnumDescriptor(
  name='TradeDirection',
  full_name='tinkoff.public.invest.api.contract.v1.TradeDirection',
  filename=None,
  file=DESCRIPTOR,
  create_key=_descriptor._internal_create_key,
  values=[
    _descriptor.EnumValueDescriptor(
      name='TRADE_DIRECTION_UNSPECIFIED', index=0, number=0,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='TRADE_DIRECTION_BUY', index=1, number=1,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='TRADE_DIRECTION_SELL', index=2, number=2,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
  ],
  containing_type=None,
  serialized_options=None,
  # Offsets of this enum within the file's serialized_pb blob.
  serialized_start=7990,
  serialized_end=8090,
)
_sym_db.RegisterEnumDescriptor(_TRADEDIRECTION)

# Public wrapper exposing the enum values by name.
TradeDirection = enum_type_wrapper.EnumTypeWrapper(_TRADEDIRECTION)
# protoc-generated descriptor for the CandleInterval enum
# (historical candle resolutions for GetCandles). DO NOT EDIT.
_CANDLEINTERVAL = _descriptor.EnumDescriptor(
  name='CandleInterval',
  full_name='tinkoff.public.invest.api.contract.v1.CandleInterval',
  filename=None,
  file=DESCRIPTOR,
  create_key=_descriptor._internal_create_key,
  values=[
    _descriptor.EnumValueDescriptor(
      name='CANDLE_INTERVAL_UNSPECIFIED', index=0, number=0,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='CANDLE_INTERVAL_1_MIN', index=1, number=1,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='CANDLE_INTERVAL_5_MIN', index=2, number=2,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='CANDLE_INTERVAL_15_MIN', index=3, number=3,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='CANDLE_INTERVAL_HOUR', index=4, number=4,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='CANDLE_INTERVAL_DAY', index=5, number=5,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
  ],
  containing_type=None,
  serialized_options=None,
  # Offsets of this enum within the file's serialized_pb blob.
  serialized_start=8093,
  serialized_end=8275,
)
_sym_db.RegisterEnumDescriptor(_CANDLEINTERVAL)

# Public wrapper exposing the enum values by name.
CandleInterval = enum_type_wrapper.EnumTypeWrapper(_CANDLEINTERVAL)
# Module-level integer aliases for every enum value, emitted by protoc so
# callers can reference the bare names directly. DO NOT EDIT.
# SubscriptionAction values:
SUBSCRIPTION_ACTION_UNSPECIFIED = 0
SUBSCRIPTION_ACTION_SUBSCRIBE = 1
SUBSCRIPTION_ACTION_UNSUBSCRIBE = 2
# SubscriptionInterval values:
SUBSCRIPTION_INTERVAL_UNSPECIFIED = 0
SUBSCRIPTION_INTERVAL_ONE_MINUTE = 1
SUBSCRIPTION_INTERVAL_FIVE_MINUTES = 2
# SubscriptionStatus values:
SUBSCRIPTION_STATUS_UNSPECIFIED = 0
SUBSCRIPTION_STATUS_SUCCESS = 1
SUBSCRIPTION_STATUS_INSTRUMENT_NOT_FOUND = 2
SUBSCRIPTION_STATUS_SUBSCRIPTION_ACTION_IS_INVALID = 3
SUBSCRIPTION_STATUS_DEPTH_IS_INVALID = 4
SUBSCRIPTION_STATUS_INTERVAL_IS_INVALID = 5
SUBSCRIPTION_STATUS_LIMIT_IS_EXCEEDED = 6
SUBSCRIPTION_STATUS_INTERNAL_ERROR = 7
# TradeDirection values:
TRADE_DIRECTION_UNSPECIFIED = 0
TRADE_DIRECTION_BUY = 1
TRADE_DIRECTION_SELL = 2
# CandleInterval values:
CANDLE_INTERVAL_UNSPECIFIED = 0
CANDLE_INTERVAL_1_MIN = 1
CANDLE_INTERVAL_5_MIN = 2
CANDLE_INTERVAL_15_MIN = 3
CANDLE_INTERVAL_HOUR = 4
CANDLE_INTERVAL_DAY = 5
# protoc-generated message descriptor for MarketDataRequest: a oneof
# ('payload') holding exactly one of the five subscribe-request messages.
# DO NOT EDIT.
_MARKETDATAREQUEST = _descriptor.Descriptor(
  name='MarketDataRequest',
  full_name='tinkoff.public.invest.api.contract.v1.MarketDataRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    # type=11 is TYPE_MESSAGE; label=1 is LABEL_OPTIONAL (per descriptor.proto).
    _descriptor.FieldDescriptor(
      name='subscribe_candles_request', full_name='tinkoff.public.invest.api.contract.v1.MarketDataRequest.subscribe_candles_request', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='subscribe_order_book_request', full_name='tinkoff.public.invest.api.contract.v1.MarketDataRequest.subscribe_order_book_request', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='subscribe_trades_request', full_name='tinkoff.public.invest.api.contract.v1.MarketDataRequest.subscribe_trades_request', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='subscribe_info_request', full_name='tinkoff.public.invest.api.contract.v1.MarketDataRequest.subscribe_info_request', index=3,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='subscribe_last_price_request', full_name='tinkoff.public.invest.api.contract.v1.MarketDataRequest.subscribe_last_price_request', index=4,
      number=5, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
    # All five fields above belong to this single 'payload' oneof.
    _descriptor.OneofDescriptor(
      name='payload', full_name='tinkoff.public.invest.api.contract.v1.MarketDataRequest.payload',
      index=0, containing_type=None,
      create_key=_descriptor._internal_create_key,
      fields=[]),
  ],
  # Offsets of this message within the file's serialized_pb blob.
  serialized_start=147,
  serialized_end=684,
)
# NOTE(review): this module appears to be protoc-generated descriptor code
# (_pb2-style, using _descriptor._internal_create_key); hand edits here would
# be overwritten when the .proto contracts are recompiled.
#
# Descriptor for MarketDataResponse: a message whose single-member 'payload'
# oneof (declared below, fields filled in later by the generator) carries
# either a subscription acknowledgement (fields 1-4, 10) or a streamed
# market-data event (candle, trade, orderbook, trading_status, ping,
# last_price). type=11/cpp_type=10 marks message-typed fields; label=1 is
# optional/singular under proto3.
_MARKETDATARESPONSE = _descriptor.Descriptor(
  name='MarketDataResponse',
  full_name='tinkoff.public.invest.api.contract.v1.MarketDataResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='subscribe_candles_response', full_name='tinkoff.public.invest.api.contract.v1.MarketDataResponse.subscribe_candles_response', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='subscribe_order_book_response', full_name='tinkoff.public.invest.api.contract.v1.MarketDataResponse.subscribe_order_book_response', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='subscribe_trades_response', full_name='tinkoff.public.invest.api.contract.v1.MarketDataResponse.subscribe_trades_response', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='subscribe_info_response', full_name='tinkoff.public.invest.api.contract.v1.MarketDataResponse.subscribe_info_response', index=3,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='candle', full_name='tinkoff.public.invest.api.contract.v1.MarketDataResponse.candle', index=4,
      number=5, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='trade', full_name='tinkoff.public.invest.api.contract.v1.MarketDataResponse.trade', index=5,
      number=6, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='orderbook', full_name='tinkoff.public.invest.api.contract.v1.MarketDataResponse.orderbook', index=6,
      number=7, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='trading_status', full_name='tinkoff.public.invest.api.contract.v1.MarketDataResponse.trading_status', index=7,
      number=8, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='ping', full_name='tinkoff.public.invest.api.contract.v1.MarketDataResponse.ping', index=8,
      number=9, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='subscribe_last_price_response', full_name='tinkoff.public.invest.api.contract.v1.MarketDataResponse.subscribe_last_price_response', index=9,
      number=10, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='last_price', full_name='tinkoff.public.invest.api.contract.v1.MarketDataResponse.last_price', index=10,
      number=11, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
    _descriptor.OneofDescriptor(
      name='payload', full_name='tinkoff.public.invest.api.contract.v1.MarketDataResponse.payload',
      index=0, containing_type=None,
      create_key=_descriptor._internal_create_key,
      fields=[]),
  ],
  # Byte offsets of this message's schema inside the file's serialized
  # FileDescriptorProto.
  serialized_start=687,
  serialized_end=1647,
)
# Descriptor for SubscribeCandlesRequest: a subscription action (enum,
# type=14) plus a repeated (label=3) list of CandleInstrument messages.
_SUBSCRIBECANDLESREQUEST = _descriptor.Descriptor(
  name='SubscribeCandlesRequest',
  full_name='tinkoff.public.invest.api.contract.v1.SubscribeCandlesRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='subscription_action', full_name='tinkoff.public.invest.api.contract.v1.SubscribeCandlesRequest.subscription_action', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='instruments', full_name='tinkoff.public.invest.api.contract.v1.SubscribeCandlesRequest.instruments', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1650,
  serialized_end=1841,
)
# Descriptor for CandleInstrument: identifies one candle subscription target
# by figi (string, type=9) and candle interval (enum, type=14).
_CANDLEINSTRUMENT = _descriptor.Descriptor(
  name='CandleInstrument',
  full_name='tinkoff.public.invest.api.contract.v1.CandleInstrument',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='figi', full_name='tinkoff.public.invest.api.contract.v1.CandleInstrument.figi', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='interval', full_name='tinkoff.public.invest.api.contract.v1.CandleInstrument.interval', index=1,
      number=2, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1843,
  serialized_end=1954,
)
# Descriptor for SubscribeCandlesResponse: tracking_id plus the repeated
# per-instrument subscription results.
_SUBSCRIBECANDLESRESPONSE = _descriptor.Descriptor(
  name='SubscribeCandlesResponse',
  full_name='tinkoff.public.invest.api.contract.v1.SubscribeCandlesResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='tracking_id', full_name='tinkoff.public.invest.api.contract.v1.SubscribeCandlesResponse.tracking_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='candles_subscriptions', full_name='tinkoff.public.invest.api.contract.v1.SubscribeCandlesResponse.candles_subscriptions', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1957,
  serialized_end=2094,
)
# Descriptor for CandleSubscription: per-instrument subscription status entry
# (figi, interval, subscription_status).
_CANDLESUBSCRIPTION = _descriptor.Descriptor(
  name='CandleSubscription',
  full_name='tinkoff.public.invest.api.contract.v1.CandleSubscription',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='figi', full_name='tinkoff.public.invest.api.contract.v1.CandleSubscription.figi', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='interval', full_name='tinkoff.public.invest.api.contract.v1.CandleSubscription.interval', index=1,
      number=2, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='subscription_status', full_name='tinkoff.public.invest.api.contract.v1.CandleSubscription.subscription_status', index=2,
      number=3, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2097,
  serialized_end=2298,
)
# Descriptor for SubscribeOrderBookRequest: subscription action (enum) plus a
# repeated list of OrderBookInstrument messages.
_SUBSCRIBEORDERBOOKREQUEST = _descriptor.Descriptor(
  name='SubscribeOrderBookRequest',
  full_name='tinkoff.public.invest.api.contract.v1.SubscribeOrderBookRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='subscription_action', full_name='tinkoff.public.invest.api.contract.v1.SubscribeOrderBookRequest.subscription_action', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='instruments', full_name='tinkoff.public.invest.api.contract.v1.SubscribeOrderBookRequest.instruments', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2301,
  serialized_end=2497,
)
# Descriptor for OrderBookInstrument: figi (string) plus requested book depth
# (int32, type=5/cpp_type=1).
_ORDERBOOKINSTRUMENT = _descriptor.Descriptor(
  name='OrderBookInstrument',
  full_name='tinkoff.public.invest.api.contract.v1.OrderBookInstrument',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='figi', full_name='tinkoff.public.invest.api.contract.v1.OrderBookInstrument.figi', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='depth', full_name='tinkoff.public.invest.api.contract.v1.OrderBookInstrument.depth', index=1,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2499,
  serialized_end=2549,
)
# Descriptor for SubscribeOrderBookResponse: tracking_id plus the repeated
# per-instrument subscription results.
_SUBSCRIBEORDERBOOKRESPONSE = _descriptor.Descriptor(
  name='SubscribeOrderBookResponse',
  full_name='tinkoff.public.invest.api.contract.v1.SubscribeOrderBookResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='tracking_id', full_name='tinkoff.public.invest.api.contract.v1.SubscribeOrderBookResponse.tracking_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='order_book_subscriptions', full_name='tinkoff.public.invest.api.contract.v1.SubscribeOrderBookResponse.order_book_subscriptions', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2552,
  serialized_end=2697,
)
# Descriptor for OrderBookSubscription: per-instrument subscription status
# entry (figi, depth, subscription_status).
_ORDERBOOKSUBSCRIPTION = _descriptor.Descriptor(
  name='OrderBookSubscription',
  full_name='tinkoff.public.invest.api.contract.v1.OrderBookSubscription',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='figi', full_name='tinkoff.public.invest.api.contract.v1.OrderBookSubscription.figi', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='depth', full_name='tinkoff.public.invest.api.contract.v1.OrderBookSubscription.depth', index=1,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='subscription_status', full_name='tinkoff.public.invest.api.contract.v1.OrderBookSubscription.subscription_status', index=2,
      number=3, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2700,
  serialized_end=2840,
)
# Descriptor for SubscribeTradesRequest: subscription action (enum) plus a
# repeated list of TradeInstrument messages.
_SUBSCRIBETRADESREQUEST = _descriptor.Descriptor(
  name='SubscribeTradesRequest',
  full_name='tinkoff.public.invest.api.contract.v1.SubscribeTradesRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='subscription_action', full_name='tinkoff.public.invest.api.contract.v1.SubscribeTradesRequest.subscription_action', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='instruments', full_name='tinkoff.public.invest.api.contract.v1.SubscribeTradesRequest.instruments', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2843,
  serialized_end=3032,
)
# Descriptor for TradeInstrument: a single-field message carrying the
# instrument's figi (string).
_TRADEINSTRUMENT = _descriptor.Descriptor(
  name='TradeInstrument',
  full_name='tinkoff.public.invest.api.contract.v1.TradeInstrument',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='figi', full_name='tinkoff.public.invest.api.contract.v1.TradeInstrument.figi', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3034,
  serialized_end=3065,
)
# Descriptor for SubscribeTradesResponse: tracking_id plus the repeated
# per-instrument subscription results.
_SUBSCRIBETRADESRESPONSE = _descriptor.Descriptor(
  name='SubscribeTradesResponse',
  full_name='tinkoff.public.invest.api.contract.v1.SubscribeTradesResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='tracking_id', full_name='tinkoff.public.invest.api.contract.v1.SubscribeTradesResponse.tracking_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='trade_subscriptions', full_name='tinkoff.public.invest.api.contract.v1.SubscribeTradesResponse.trade_subscriptions', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3068,
  serialized_end=3201,
)
# Descriptor for TradeSubscription: per-instrument subscription status entry
# (figi, subscription_status).
_TRADESUBSCRIPTION = _descriptor.Descriptor(
  name='TradeSubscription',
  full_name='tinkoff.public.invest.api.contract.v1.TradeSubscription',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='figi', full_name='tinkoff.public.invest.api.contract.v1.TradeSubscription.figi', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='subscription_status', full_name='tinkoff.public.invest.api.contract.v1.TradeSubscription.subscription_status', index=1,
      number=2, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3203,
  serialized_end=3324,
)
# Descriptor for SubscribeInfoRequest: subscription action (enum) plus a
# repeated list of InfoInstrument messages.
_SUBSCRIBEINFOREQUEST = _descriptor.Descriptor(
  name='SubscribeInfoRequest',
  full_name='tinkoff.public.invest.api.contract.v1.SubscribeInfoRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='subscription_action', full_name='tinkoff.public.invest.api.contract.v1.SubscribeInfoRequest.subscription_action', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='instruments', full_name='tinkoff.public.invest.api.contract.v1.SubscribeInfoRequest.instruments', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3327,
  serialized_end=3513,
)
# Descriptor for InfoInstrument: a single-field message carrying the
# instrument's figi (string).
_INFOINSTRUMENT = _descriptor.Descriptor(
  name='InfoInstrument',
  full_name='tinkoff.public.invest.api.contract.v1.InfoInstrument',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='figi', full_name='tinkoff.public.invest.api.contract.v1.InfoInstrument.figi', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3515,
  serialized_end=3545,
)
# Descriptor for SubscribeInfoResponse: tracking_id plus the repeated
# per-instrument subscription results.
_SUBSCRIBEINFORESPONSE = _descriptor.Descriptor(
  name='SubscribeInfoResponse',
  full_name='tinkoff.public.invest.api.contract.v1.SubscribeInfoResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='tracking_id', full_name='tinkoff.public.invest.api.contract.v1.SubscribeInfoResponse.tracking_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='info_subscriptions', full_name='tinkoff.public.invest.api.contract.v1.SubscribeInfoResponse.info_subscriptions', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3548,
  serialized_end=3677,
)
# Descriptor for InfoSubscription: per-instrument subscription status entry
# (figi, subscription_status).
_INFOSUBSCRIPTION = _descriptor.Descriptor(
  name='InfoSubscription',
  full_name='tinkoff.public.invest.api.contract.v1.InfoSubscription',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='figi', full_name='tinkoff.public.invest.api.contract.v1.InfoSubscription.figi', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='subscription_status', full_name='tinkoff.public.invest.api.contract.v1.InfoSubscription.subscription_status', index=1,
      number=2, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3679,
  serialized_end=3799,
)
# Descriptor for SubscribeLastPriceRequest: subscription action (enum) plus a
# repeated list of LastPriceInstrument messages.
_SUBSCRIBELASTPRICEREQUEST = _descriptor.Descriptor(
  name='SubscribeLastPriceRequest',
  full_name='tinkoff.public.invest.api.contract.v1.SubscribeLastPriceRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='subscription_action', full_name='tinkoff.public.invest.api.contract.v1.SubscribeLastPriceRequest.subscription_action', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='instruments', full_name='tinkoff.public.invest.api.contract.v1.SubscribeLastPriceRequest.instruments', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3802,
  serialized_end=3998,
)
# Descriptor for LastPriceInstrument: a single-field message carrying the
# instrument's figi (string).
_LASTPRICEINSTRUMENT = _descriptor.Descriptor(
  name='LastPriceInstrument',
  full_name='tinkoff.public.invest.api.contract.v1.LastPriceInstrument',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='figi', full_name='tinkoff.public.invest.api.contract.v1.LastPriceInstrument.figi', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=4000,
  serialized_end=4035,
)
# Descriptor for SubscribeLastPriceResponse: tracking_id plus the repeated
# per-instrument subscription results.
_SUBSCRIBELASTPRICERESPONSE = _descriptor.Descriptor(
  name='SubscribeLastPriceResponse',
  full_name='tinkoff.public.invest.api.contract.v1.SubscribeLastPriceResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='tracking_id', full_name='tinkoff.public.invest.api.contract.v1.SubscribeLastPriceResponse.tracking_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='last_price_subscriptions', full_name='tinkoff.public.invest.api.contract.v1.SubscribeLastPriceResponse.last_price_subscriptions', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=4038,
  serialized_end=4183,
)
# Descriptor for LastPriceSubscription: per-instrument subscription status
# entry (figi, subscription_status).
_LASTPRICESUBSCRIPTION = _descriptor.Descriptor(
  name='LastPriceSubscription',
  full_name='tinkoff.public.invest.api.contract.v1.LastPriceSubscription',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='figi', full_name='tinkoff.public.invest.api.contract.v1.LastPriceSubscription.figi', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='subscription_status', full_name='tinkoff.public.invest.api.contract.v1.LastPriceSubscription.subscription_status', index=1,
      number=2, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=4185,
  serialized_end=4310,
)
_CANDLE = _descriptor.Descriptor(
name='Candle',
full_name='tinkoff.public.invest.api.contract.v1.Candle',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='figi', full_name='tinkoff.public.invest.api.contract.v1.Candle.figi', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='interval', full_name='tinkoff.public.invest.api.contract.v1.Candle.interval', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='open', full_name='tinkoff.public.invest.api.contract.v1.Candle.open', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='high', full_name='tinkoff.public.invest.api.contract.v1.Candle.high', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='low', full_name='tinkoff.public.invest.api.contract.v1.Candle.low', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='close', full_name='tinkoff.public.invest.api.contract.v1.Candle.close', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='volume', full_name='tinkoff.public.invest.api.contract.v1.Candle.volume', index=6,
number=7, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='time', full_name='tinkoff.public.invest.api.contract.v1.Candle.time', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='last_trade_ts', full_name='tinkoff.public.invest.api.contract.v1.Candle.last_trade_ts', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4313,
serialized_end=4779,
)
_ORDERBOOK = _descriptor.Descriptor(
name='OrderBook',
full_name='tinkoff.public.invest.api.contract.v1.OrderBook',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='figi', full_name='tinkoff.public.invest.api.contract.v1.OrderBook.figi', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='depth', full_name='tinkoff.public.invest.api.contract.v1.OrderBook.depth', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='is_consistent', full_name='tinkoff.public.invest.api.contract.v1.OrderBook.is_consistent', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='bids', full_name='tinkoff.public.invest.api.contract.v1.OrderBook.bids', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='asks', full_name='tinkoff.public.invest.api.contract.v1.OrderBook.asks', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='time', full_name='tinkoff.public.invest.api.contract.v1.OrderBook.time', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='limit_up', full_name='tinkoff.public.invest.api.contract.v1.OrderBook.limit_up', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='limit_down', full_name='tinkoff.public.invest.api.contract.v1.OrderBook.limit_down', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4782,
serialized_end=5145,
)
_ORDER = _descriptor.Descriptor(
name='Order',
full_name='tinkoff.public.invest.api.contract.v1.Order',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='price', full_name='tinkoff.public.invest.api.contract.v1.Order.price', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='quantity', full_name='tinkoff.public.invest.api.contract.v1.Order.quantity', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5147,
serialized_end=5237,
)
_TRADE = _descriptor.Descriptor(
name='Trade',
full_name='tinkoff.public.invest.api.contract.v1.Trade',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='figi', full_name='tinkoff.public.invest.api.contract.v1.Trade.figi', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='direction', full_name='tinkoff.public.invest.api.contract.v1.Trade.direction', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='price', full_name='tinkoff.public.invest.api.contract.v1.Trade.price', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='quantity', full_name='tinkoff.public.invest.api.contract.v1.Trade.quantity', index=3,
number=4, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='time', full_name='tinkoff.public.invest.api.contract.v1.Trade.time', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5240,
serialized_end=5460,
)
_TRADINGSTATUS = _descriptor.Descriptor(
name='TradingStatus',
full_name='tinkoff.public.invest.api.contract.v1.TradingStatus',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='figi', full_name='tinkoff.public.invest.api.contract.v1.TradingStatus.figi', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='trading_status', full_name='tinkoff.public.invest.api.contract.v1.TradingStatus.trading_status', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='time', full_name='tinkoff.public.invest.api.contract.v1.TradingStatus.time', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='limit_order_available_flag', full_name='tinkoff.public.invest.api.contract.v1.TradingStatus.limit_order_available_flag', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='market_order_available_flag', full_name='tinkoff.public.invest.api.contract.v1.TradingStatus.market_order_available_flag', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5463,
serialized_end=5693,
)
_GETCANDLESREQUEST = _descriptor.Descriptor(
name='GetCandlesRequest',
full_name='tinkoff.public.invest.api.contract.v1.GetCandlesRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='figi', full_name='tinkoff.public.invest.api.contract.v1.GetCandlesRequest.figi', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='from', full_name='tinkoff.public.invest.api.contract.v1.GetCandlesRequest.from', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='to', full_name='tinkoff.public.invest.api.contract.v1.GetCandlesRequest.to', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='interval', full_name='tinkoff.public.invest.api.contract.v1.GetCandlesRequest.interval', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5696,
serialized_end=5884,
)
_GETCANDLESRESPONSE = _descriptor.Descriptor(
name='GetCandlesResponse',
full_name='tinkoff.public.invest.api.contract.v1.GetCandlesResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='candles', full_name='tinkoff.public.invest.api.contract.v1.GetCandlesResponse.candles', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5886,
serialized_end=5978,
)
_HISTORICCANDLE = _descriptor.Descriptor(
name='HistoricCandle',
full_name='tinkoff.public.invest.api.contract.v1.HistoricCandle',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='open', full_name='tinkoff.public.invest.api.contract.v1.HistoricCandle.open', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='high', full_name='tinkoff.public.invest.api.contract.v1.HistoricCandle.high', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='low', full_name='tinkoff.public.invest.api.contract.v1.HistoricCandle.low', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='close', full_name='tinkoff.public.invest.api.contract.v1.HistoricCandle.close', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='volume', full_name='tinkoff.public.invest.api.contract.v1.HistoricCandle.volume', index=4,
number=5, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='time', full_name='tinkoff.public.invest.api.contract.v1.HistoricCandle.time', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='is_complete', full_name='tinkoff.public.invest.api.contract.v1.HistoricCandle.is_complete', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5981,
serialized_end=6332,
)
_GETLASTPRICESREQUEST = _descriptor.Descriptor(
name='GetLastPricesRequest',
full_name='tinkoff.public.invest.api.contract.v1.GetLastPricesRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='figi', full_name='tinkoff.public.invest.api.contract.v1.GetLastPricesRequest.figi', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=6334,
serialized_end=6370,
)
_GETLASTPRICESRESPONSE = _descriptor.Descriptor(
name='GetLastPricesResponse',
full_name='tinkoff.public.invest.api.contract.v1.GetLastPricesResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='last_prices', full_name='tinkoff.public.invest.api.contract.v1.GetLastPricesResponse.last_prices', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=6372,
serialized_end=6466,
)
_LASTPRICE = _descriptor.Descriptor(
name='LastPrice',
full_name='tinkoff.public.invest.api.contract.v1.LastPrice',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='figi', full_name='tinkoff.public.invest.api.contract.v1.LastPrice.figi', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='price', full_name='tinkoff.public.invest.api.contract.v1.LastPrice.price', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='time', full_name='tinkoff.public.invest.api.contract.v1.LastPrice.time', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=6469,
serialized_end=6601,
)
_GETORDERBOOKREQUEST = _descriptor.Descriptor(
name='GetOrderBookRequest',
full_name='tinkoff.public.invest.api.contract.v1.GetOrderBookRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='figi', full_name='tinkoff.public.invest.api.contract.v1.GetOrderBookRequest.figi', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='depth', full_name='tinkoff.public.invest.api.contract.v1.GetOrderBookRequest.depth', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=6603,
serialized_end=6653,
)
_GETORDERBOOKRESPONSE = _descriptor.Descriptor(
name='GetOrderBookResponse',
full_name='tinkoff.public.invest.api.contract.v1.GetOrderBookResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='figi', full_name='tinkoff.public.invest.api.contract.v1.GetOrderBookResponse.figi', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='depth', full_name='tinkoff.public.invest.api.contract.v1.GetOrderBookResponse.depth', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='bids', full_name='tinkoff.public.invest.api.contract.v1.GetOrderBookResponse.bids', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='asks', full_name='tinkoff.public.invest.api.contract.v1.GetOrderBookResponse.asks', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='last_price', full_name='tinkoff.public.invest.api.contract.v1.GetOrderBookResponse.last_price', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='close_price', full_name='tinkoff.public.invest.api.contract.v1.GetOrderBookResponse.close_price', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='limit_up', full_name='tinkoff.public.invest.api.contract.v1.GetOrderBookResponse.limit_up', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='limit_down', full_name='tinkoff.public.invest.api.contract.v1.GetOrderBookResponse.limit_down', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=6656,
serialized_end=7106,
)
_GETTRADINGSTATUSREQUEST = _descriptor.Descriptor(
name='GetTradingStatusRequest',
full_name='tinkoff.public.invest.api.contract.v1.GetTradingStatusRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='figi', full_name='tinkoff.public.invest.api.contract.v1.GetTradingStatusRequest.figi', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=7108,
serialized_end=7147,
)
_GETTRADINGSTATUSRESPONSE = _descriptor.Descriptor(
name='GetTradingStatusResponse',
full_name='tinkoff.public.invest.api.contract.v1.GetTradingStatusResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='figi', full_name='tinkoff.public.invest.api.contract.v1.GetTradingStatusResponse.figi', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='trading_status', full_name='tinkoff.public.invest.api.contract.v1.GetTradingStatusResponse.trading_status', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='limit_order_available_flag', full_name='tinkoff.public.invest.api.contract.v1.GetTradingStatusResponse.limit_order_available_flag', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='market_order_available_flag', full_name='tinkoff.public.invest.api.contract.v1.GetTradingStatusResponse.market_order_available_flag', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=7150,
serialized_end=7349,
)
_MARKETDATAREQUEST.fields_by_name['subscribe_candles_request'].message_type = _SUBSCRIBECANDLESREQUEST
_MARKETDATAREQUEST.fields_by_name['subscribe_order_book_request'].message_type = _SUBSCRIBEORDERBOOKREQUEST
_MARKETDATAREQUEST.fields_by_name['subscribe_trades_request'].message_type = _SUBSCRIBETRADESREQUEST
_MARKETDATAREQUEST.fields_by_name['subscribe_info_request'].message_type = _SUBSCRIBEINFOREQUEST
_MARKETDATAREQUEST.fields_by_name['subscribe_last_price_request'].message_type = _SUBSCRIBELASTPRICEREQUEST
_MARKETDATAREQUEST.oneofs_by_name['payload'].fields.append(
_MARKETDATAREQUEST.fields_by_name['subscribe_candles_request'])
_MARKETDATAREQUEST.fields_by_name['subscribe_candles_request'].containing_oneof = _MARKETDATAREQUEST.oneofs_by_name['payload']
_MARKETDATAREQUEST.oneofs_by_name['payload'].fields.append(
_MARKETDATAREQUEST.fields_by_name['subscribe_order_book_request'])
_MARKETDATAREQUEST.fields_by_name['subscribe_order_book_request'].containing_oneof = _MARKETDATAREQUEST.oneofs_by_name['payload']
_MARKETDATAREQUEST.oneofs_by_name['payload'].fields.append(
_MARKETDATAREQUEST.fields_by_name['subscribe_trades_request'])
_MARKETDATAREQUEST.fields_by_name['subscribe_trades_request'].containing_oneof = _MARKETDATAREQUEST.oneofs_by_name['payload']
_MARKETDATAREQUEST.oneofs_by_name['payload'].fields.append(
_MARKETDATAREQUEST.fields_by_name['subscribe_info_request'])
_MARKETDATAREQUEST.fields_by_name['subscribe_info_request'].containing_oneof = _MARKETDATAREQUEST.oneofs_by_name['payload']
_MARKETDATAREQUEST.oneofs_by_name['payload'].fields.append(
_MARKETDATAREQUEST.fields_by_name['subscribe_last_price_request'])
_MARKETDATAREQUEST.fields_by_name['subscribe_last_price_request'].containing_oneof = _MARKETDATAREQUEST.oneofs_by_name['payload']
# ---------------------------------------------------------------------------
# Machine-generated descriptor wiring (protoc output) — do not edit by hand.
# Link each field of MarketDataResponse to its message descriptor (including
# the imported Ping type from common_pb2) and register all of them as members
# of the response's 'payload' oneof.
# ---------------------------------------------------------------------------
_MARKETDATARESPONSE.fields_by_name['subscribe_candles_response'].message_type = _SUBSCRIBECANDLESRESPONSE
_MARKETDATARESPONSE.fields_by_name['subscribe_order_book_response'].message_type = _SUBSCRIBEORDERBOOKRESPONSE
_MARKETDATARESPONSE.fields_by_name['subscribe_trades_response'].message_type = _SUBSCRIBETRADESRESPONSE
_MARKETDATARESPONSE.fields_by_name['subscribe_info_response'].message_type = _SUBSCRIBEINFORESPONSE
_MARKETDATARESPONSE.fields_by_name['candle'].message_type = _CANDLE
_MARKETDATARESPONSE.fields_by_name['trade'].message_type = _TRADE
_MARKETDATARESPONSE.fields_by_name['orderbook'].message_type = _ORDERBOOK
_MARKETDATARESPONSE.fields_by_name['trading_status'].message_type = _TRADINGSTATUS
_MARKETDATARESPONSE.fields_by_name['ping'].message_type = tinkoff_dot_invest_dot_grpc_dot_common__pb2._PING
_MARKETDATARESPONSE.fields_by_name['subscribe_last_price_response'].message_type = _SUBSCRIBELASTPRICERESPONSE
_MARKETDATARESPONSE.fields_by_name['last_price'].message_type = _LASTPRICE
_MARKETDATARESPONSE.oneofs_by_name['payload'].fields.append(
  _MARKETDATARESPONSE.fields_by_name['subscribe_candles_response'])
_MARKETDATARESPONSE.fields_by_name['subscribe_candles_response'].containing_oneof = _MARKETDATARESPONSE.oneofs_by_name['payload']
_MARKETDATARESPONSE.oneofs_by_name['payload'].fields.append(
  _MARKETDATARESPONSE.fields_by_name['subscribe_order_book_response'])
_MARKETDATARESPONSE.fields_by_name['subscribe_order_book_response'].containing_oneof = _MARKETDATARESPONSE.oneofs_by_name['payload']
_MARKETDATARESPONSE.oneofs_by_name['payload'].fields.append(
  _MARKETDATARESPONSE.fields_by_name['subscribe_trades_response'])
_MARKETDATARESPONSE.fields_by_name['subscribe_trades_response'].containing_oneof = _MARKETDATARESPONSE.oneofs_by_name['payload']
_MARKETDATARESPONSE.oneofs_by_name['payload'].fields.append(
  _MARKETDATARESPONSE.fields_by_name['subscribe_info_response'])
_MARKETDATARESPONSE.fields_by_name['subscribe_info_response'].containing_oneof = _MARKETDATARESPONSE.oneofs_by_name['payload']
_MARKETDATARESPONSE.oneofs_by_name['payload'].fields.append(
  _MARKETDATARESPONSE.fields_by_name['candle'])
_MARKETDATARESPONSE.fields_by_name['candle'].containing_oneof = _MARKETDATARESPONSE.oneofs_by_name['payload']
_MARKETDATARESPONSE.oneofs_by_name['payload'].fields.append(
  _MARKETDATARESPONSE.fields_by_name['trade'])
_MARKETDATARESPONSE.fields_by_name['trade'].containing_oneof = _MARKETDATARESPONSE.oneofs_by_name['payload']
_MARKETDATARESPONSE.oneofs_by_name['payload'].fields.append(
  _MARKETDATARESPONSE.fields_by_name['orderbook'])
_MARKETDATARESPONSE.fields_by_name['orderbook'].containing_oneof = _MARKETDATARESPONSE.oneofs_by_name['payload']
_MARKETDATARESPONSE.oneofs_by_name['payload'].fields.append(
  _MARKETDATARESPONSE.fields_by_name['trading_status'])
_MARKETDATARESPONSE.fields_by_name['trading_status'].containing_oneof = _MARKETDATARESPONSE.oneofs_by_name['payload']
_MARKETDATARESPONSE.oneofs_by_name['payload'].fields.append(
  _MARKETDATARESPONSE.fields_by_name['ping'])
_MARKETDATARESPONSE.fields_by_name['ping'].containing_oneof = _MARKETDATARESPONSE.oneofs_by_name['payload']
_MARKETDATARESPONSE.oneofs_by_name['payload'].fields.append(
  _MARKETDATARESPONSE.fields_by_name['subscribe_last_price_response'])
_MARKETDATARESPONSE.fields_by_name['subscribe_last_price_response'].containing_oneof = _MARKETDATARESPONSE.oneofs_by_name['payload']
_MARKETDATARESPONSE.oneofs_by_name['payload'].fields.append(
  _MARKETDATARESPONSE.fields_by_name['last_price'])
_MARKETDATARESPONSE.fields_by_name['last_price'].containing_oneof = _MARKETDATARESPONSE.oneofs_by_name['payload']
# ---------------------------------------------------------------------------
# Machine-generated descriptor wiring (protoc output) — do not edit by hand.
# Resolve field types for the remaining messages: enum-valued fields point at
# the enum descriptors declared in this file (or SECURITYTRADINGSTATUS from
# common_pb2); message-valued fields point at the local message descriptors
# or the shared Quotation/Timestamp well-known types.
# ---------------------------------------------------------------------------
_SUBSCRIBECANDLESREQUEST.fields_by_name['subscription_action'].enum_type = _SUBSCRIPTIONACTION
_SUBSCRIBECANDLESREQUEST.fields_by_name['instruments'].message_type = _CANDLEINSTRUMENT
_CANDLEINSTRUMENT.fields_by_name['interval'].enum_type = _SUBSCRIPTIONINTERVAL
_SUBSCRIBECANDLESRESPONSE.fields_by_name['candles_subscriptions'].message_type = _CANDLESUBSCRIPTION
_CANDLESUBSCRIPTION.fields_by_name['interval'].enum_type = _SUBSCRIPTIONINTERVAL
_CANDLESUBSCRIPTION.fields_by_name['subscription_status'].enum_type = _SUBSCRIPTIONSTATUS
_SUBSCRIBEORDERBOOKREQUEST.fields_by_name['subscription_action'].enum_type = _SUBSCRIPTIONACTION
_SUBSCRIBEORDERBOOKREQUEST.fields_by_name['instruments'].message_type = _ORDERBOOKINSTRUMENT
_SUBSCRIBEORDERBOOKRESPONSE.fields_by_name['order_book_subscriptions'].message_type = _ORDERBOOKSUBSCRIPTION
_ORDERBOOKSUBSCRIPTION.fields_by_name['subscription_status'].enum_type = _SUBSCRIPTIONSTATUS
_SUBSCRIBETRADESREQUEST.fields_by_name['subscription_action'].enum_type = _SUBSCRIPTIONACTION
_SUBSCRIBETRADESREQUEST.fields_by_name['instruments'].message_type = _TRADEINSTRUMENT
_SUBSCRIBETRADESRESPONSE.fields_by_name['trade_subscriptions'].message_type = _TRADESUBSCRIPTION
_TRADESUBSCRIPTION.fields_by_name['subscription_status'].enum_type = _SUBSCRIPTIONSTATUS
_SUBSCRIBEINFOREQUEST.fields_by_name['subscription_action'].enum_type = _SUBSCRIPTIONACTION
_SUBSCRIBEINFOREQUEST.fields_by_name['instruments'].message_type = _INFOINSTRUMENT
_SUBSCRIBEINFORESPONSE.fields_by_name['info_subscriptions'].message_type = _INFOSUBSCRIPTION
_INFOSUBSCRIPTION.fields_by_name['subscription_status'].enum_type = _SUBSCRIPTIONSTATUS
_SUBSCRIBELASTPRICEREQUEST.fields_by_name['subscription_action'].enum_type = _SUBSCRIPTIONACTION
_SUBSCRIBELASTPRICEREQUEST.fields_by_name['instruments'].message_type = _LASTPRICEINSTRUMENT
_SUBSCRIBELASTPRICERESPONSE.fields_by_name['last_price_subscriptions'].message_type = _LASTPRICESUBSCRIPTION
_LASTPRICESUBSCRIPTION.fields_by_name['subscription_status'].enum_type = _SUBSCRIPTIONSTATUS
_CANDLE.fields_by_name['interval'].enum_type = _SUBSCRIPTIONINTERVAL
_CANDLE.fields_by_name['open'].message_type = tinkoff_dot_invest_dot_grpc_dot_common__pb2._QUOTATION
_CANDLE.fields_by_name['high'].message_type = tinkoff_dot_invest_dot_grpc_dot_common__pb2._QUOTATION
_CANDLE.fields_by_name['low'].message_type = tinkoff_dot_invest_dot_grpc_dot_common__pb2._QUOTATION
_CANDLE.fields_by_name['close'].message_type = tinkoff_dot_invest_dot_grpc_dot_common__pb2._QUOTATION
_CANDLE.fields_by_name['time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_CANDLE.fields_by_name['last_trade_ts'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_ORDERBOOK.fields_by_name['bids'].message_type = _ORDER
_ORDERBOOK.fields_by_name['asks'].message_type = _ORDER
_ORDERBOOK.fields_by_name['time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_ORDERBOOK.fields_by_name['limit_up'].message_type = tinkoff_dot_invest_dot_grpc_dot_common__pb2._QUOTATION
_ORDERBOOK.fields_by_name['limit_down'].message_type = tinkoff_dot_invest_dot_grpc_dot_common__pb2._QUOTATION
_ORDER.fields_by_name['price'].message_type = tinkoff_dot_invest_dot_grpc_dot_common__pb2._QUOTATION
_TRADE.fields_by_name['direction'].enum_type = _TRADEDIRECTION
_TRADE.fields_by_name['price'].message_type = tinkoff_dot_invest_dot_grpc_dot_common__pb2._QUOTATION
_TRADE.fields_by_name['time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_TRADINGSTATUS.fields_by_name['trading_status'].enum_type = tinkoff_dot_invest_dot_grpc_dot_common__pb2._SECURITYTRADINGSTATUS
_TRADINGSTATUS.fields_by_name['time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_GETCANDLESREQUEST.fields_by_name['from'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_GETCANDLESREQUEST.fields_by_name['to'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_GETCANDLESREQUEST.fields_by_name['interval'].enum_type = _CANDLEINTERVAL
_GETCANDLESRESPONSE.fields_by_name['candles'].message_type = _HISTORICCANDLE
_HISTORICCANDLE.fields_by_name['open'].message_type = tinkoff_dot_invest_dot_grpc_dot_common__pb2._QUOTATION
_HISTORICCANDLE.fields_by_name['high'].message_type = tinkoff_dot_invest_dot_grpc_dot_common__pb2._QUOTATION
_HISTORICCANDLE.fields_by_name['low'].message_type = tinkoff_dot_invest_dot_grpc_dot_common__pb2._QUOTATION
_HISTORICCANDLE.fields_by_name['close'].message_type = tinkoff_dot_invest_dot_grpc_dot_common__pb2._QUOTATION
_HISTORICCANDLE.fields_by_name['time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_GETLASTPRICESRESPONSE.fields_by_name['last_prices'].message_type = _LASTPRICE
_LASTPRICE.fields_by_name['price'].message_type = tinkoff_dot_invest_dot_grpc_dot_common__pb2._QUOTATION
_LASTPRICE.fields_by_name['time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_GETORDERBOOKRESPONSE.fields_by_name['bids'].message_type = _ORDER
_GETORDERBOOKRESPONSE.fields_by_name['asks'].message_type = _ORDER
_GETORDERBOOKRESPONSE.fields_by_name['last_price'].message_type = tinkoff_dot_invest_dot_grpc_dot_common__pb2._QUOTATION
_GETORDERBOOKRESPONSE.fields_by_name['close_price'].message_type = tinkoff_dot_invest_dot_grpc_dot_common__pb2._QUOTATION
_GETORDERBOOKRESPONSE.fields_by_name['limit_up'].message_type = tinkoff_dot_invest_dot_grpc_dot_common__pb2._QUOTATION
_GETORDERBOOKRESPONSE.fields_by_name['limit_down'].message_type = tinkoff_dot_invest_dot_grpc_dot_common__pb2._QUOTATION
_GETTRADINGSTATUSRESPONSE.fields_by_name['trading_status'].enum_type = tinkoff_dot_invest_dot_grpc_dot_common__pb2._SECURITYTRADINGSTATUS
# ---------------------------------------------------------------------------
# Machine-generated descriptor registration (protoc output) — do not edit.
# Expose every message and enum descriptor on the file-level DESCRIPTOR, then
# register the file with the default symbol database so messages are
# resolvable by full name.
# ---------------------------------------------------------------------------
DESCRIPTOR.message_types_by_name['MarketDataRequest'] = _MARKETDATAREQUEST
DESCRIPTOR.message_types_by_name['MarketDataResponse'] = _MARKETDATARESPONSE
DESCRIPTOR.message_types_by_name['SubscribeCandlesRequest'] = _SUBSCRIBECANDLESREQUEST
DESCRIPTOR.message_types_by_name['CandleInstrument'] = _CANDLEINSTRUMENT
DESCRIPTOR.message_types_by_name['SubscribeCandlesResponse'] = _SUBSCRIBECANDLESRESPONSE
DESCRIPTOR.message_types_by_name['CandleSubscription'] = _CANDLESUBSCRIPTION
DESCRIPTOR.message_types_by_name['SubscribeOrderBookRequest'] = _SUBSCRIBEORDERBOOKREQUEST
DESCRIPTOR.message_types_by_name['OrderBookInstrument'] = _ORDERBOOKINSTRUMENT
DESCRIPTOR.message_types_by_name['SubscribeOrderBookResponse'] = _SUBSCRIBEORDERBOOKRESPONSE
DESCRIPTOR.message_types_by_name['OrderBookSubscription'] = _ORDERBOOKSUBSCRIPTION
DESCRIPTOR.message_types_by_name['SubscribeTradesRequest'] = _SUBSCRIBETRADESREQUEST
DESCRIPTOR.message_types_by_name['TradeInstrument'] = _TRADEINSTRUMENT
DESCRIPTOR.message_types_by_name['SubscribeTradesResponse'] = _SUBSCRIBETRADESRESPONSE
DESCRIPTOR.message_types_by_name['TradeSubscription'] = _TRADESUBSCRIPTION
DESCRIPTOR.message_types_by_name['SubscribeInfoRequest'] = _SUBSCRIBEINFOREQUEST
DESCRIPTOR.message_types_by_name['InfoInstrument'] = _INFOINSTRUMENT
DESCRIPTOR.message_types_by_name['SubscribeInfoResponse'] = _SUBSCRIBEINFORESPONSE
DESCRIPTOR.message_types_by_name['InfoSubscription'] = _INFOSUBSCRIPTION
DESCRIPTOR.message_types_by_name['SubscribeLastPriceRequest'] = _SUBSCRIBELASTPRICEREQUEST
DESCRIPTOR.message_types_by_name['LastPriceInstrument'] = _LASTPRICEINSTRUMENT
DESCRIPTOR.message_types_by_name['SubscribeLastPriceResponse'] = _SUBSCRIBELASTPRICERESPONSE
DESCRIPTOR.message_types_by_name['LastPriceSubscription'] = _LASTPRICESUBSCRIPTION
DESCRIPTOR.message_types_by_name['Candle'] = _CANDLE
DESCRIPTOR.message_types_by_name['OrderBook'] = _ORDERBOOK
DESCRIPTOR.message_types_by_name['Order'] = _ORDER
DESCRIPTOR.message_types_by_name['Trade'] = _TRADE
DESCRIPTOR.message_types_by_name['TradingStatus'] = _TRADINGSTATUS
DESCRIPTOR.message_types_by_name['GetCandlesRequest'] = _GETCANDLESREQUEST
DESCRIPTOR.message_types_by_name['GetCandlesResponse'] = _GETCANDLESRESPONSE
DESCRIPTOR.message_types_by_name['HistoricCandle'] = _HISTORICCANDLE
DESCRIPTOR.message_types_by_name['GetLastPricesRequest'] = _GETLASTPRICESREQUEST
DESCRIPTOR.message_types_by_name['GetLastPricesResponse'] = _GETLASTPRICESRESPONSE
DESCRIPTOR.message_types_by_name['LastPrice'] = _LASTPRICE
DESCRIPTOR.message_types_by_name['GetOrderBookRequest'] = _GETORDERBOOKREQUEST
DESCRIPTOR.message_types_by_name['GetOrderBookResponse'] = _GETORDERBOOKRESPONSE
DESCRIPTOR.message_types_by_name['GetTradingStatusRequest'] = _GETTRADINGSTATUSREQUEST
DESCRIPTOR.message_types_by_name['GetTradingStatusResponse'] = _GETTRADINGSTATUSRESPONSE
DESCRIPTOR.enum_types_by_name['SubscriptionAction'] = _SUBSCRIPTIONACTION
DESCRIPTOR.enum_types_by_name['SubscriptionInterval'] = _SUBSCRIPTIONINTERVAL
DESCRIPTOR.enum_types_by_name['SubscriptionStatus'] = _SUBSCRIPTIONSTATUS
DESCRIPTOR.enum_types_by_name['TradeDirection'] = _TRADEDIRECTION
DESCRIPTOR.enum_types_by_name['CandleInterval'] = _CANDLEINTERVAL
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# ---------------------------------------------------------------------------
# Machine-generated message classes (protoc output) — do not edit by hand.
# Build one concrete Python class per message via the reflection metaclass
# (GeneratedProtocolMessageType) and register it in the symbol database.
# Each class dict carries only its descriptor and the owning module name.
# ---------------------------------------------------------------------------
MarketDataRequest = _reflection.GeneratedProtocolMessageType('MarketDataRequest', (_message.Message,), {
  'DESCRIPTOR' : _MARKETDATAREQUEST,
  '__module__' : 'tinkoff.invest.grpc.marketdata_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.MarketDataRequest)
  })
_sym_db.RegisterMessage(MarketDataRequest)
MarketDataResponse = _reflection.GeneratedProtocolMessageType('MarketDataResponse', (_message.Message,), {
  'DESCRIPTOR' : _MARKETDATARESPONSE,
  '__module__' : 'tinkoff.invest.grpc.marketdata_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.MarketDataResponse)
  })
_sym_db.RegisterMessage(MarketDataResponse)
SubscribeCandlesRequest = _reflection.GeneratedProtocolMessageType('SubscribeCandlesRequest', (_message.Message,), {
  'DESCRIPTOR' : _SUBSCRIBECANDLESREQUEST,
  '__module__' : 'tinkoff.invest.grpc.marketdata_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.SubscribeCandlesRequest)
  })
_sym_db.RegisterMessage(SubscribeCandlesRequest)
CandleInstrument = _reflection.GeneratedProtocolMessageType('CandleInstrument', (_message.Message,), {
  'DESCRIPTOR' : _CANDLEINSTRUMENT,
  '__module__' : 'tinkoff.invest.grpc.marketdata_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.CandleInstrument)
  })
_sym_db.RegisterMessage(CandleInstrument)
SubscribeCandlesResponse = _reflection.GeneratedProtocolMessageType('SubscribeCandlesResponse', (_message.Message,), {
  'DESCRIPTOR' : _SUBSCRIBECANDLESRESPONSE,
  '__module__' : 'tinkoff.invest.grpc.marketdata_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.SubscribeCandlesResponse)
  })
_sym_db.RegisterMessage(SubscribeCandlesResponse)
CandleSubscription = _reflection.GeneratedProtocolMessageType('CandleSubscription', (_message.Message,), {
  'DESCRIPTOR' : _CANDLESUBSCRIPTION,
  '__module__' : 'tinkoff.invest.grpc.marketdata_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.CandleSubscription)
  })
_sym_db.RegisterMessage(CandleSubscription)
SubscribeOrderBookRequest = _reflection.GeneratedProtocolMessageType('SubscribeOrderBookRequest', (_message.Message,), {
  'DESCRIPTOR' : _SUBSCRIBEORDERBOOKREQUEST,
  '__module__' : 'tinkoff.invest.grpc.marketdata_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.SubscribeOrderBookRequest)
  })
_sym_db.RegisterMessage(SubscribeOrderBookRequest)
OrderBookInstrument = _reflection.GeneratedProtocolMessageType('OrderBookInstrument', (_message.Message,), {
  'DESCRIPTOR' : _ORDERBOOKINSTRUMENT,
  '__module__' : 'tinkoff.invest.grpc.marketdata_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.OrderBookInstrument)
  })
_sym_db.RegisterMessage(OrderBookInstrument)
SubscribeOrderBookResponse = _reflection.GeneratedProtocolMessageType('SubscribeOrderBookResponse', (_message.Message,), {
  'DESCRIPTOR' : _SUBSCRIBEORDERBOOKRESPONSE,
  '__module__' : 'tinkoff.invest.grpc.marketdata_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.SubscribeOrderBookResponse)
  })
_sym_db.RegisterMessage(SubscribeOrderBookResponse)
OrderBookSubscription = _reflection.GeneratedProtocolMessageType('OrderBookSubscription', (_message.Message,), {
  'DESCRIPTOR' : _ORDERBOOKSUBSCRIPTION,
  '__module__' : 'tinkoff.invest.grpc.marketdata_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.OrderBookSubscription)
  })
_sym_db.RegisterMessage(OrderBookSubscription)
SubscribeTradesRequest = _reflection.GeneratedProtocolMessageType('SubscribeTradesRequest', (_message.Message,), {
  'DESCRIPTOR' : _SUBSCRIBETRADESREQUEST,
  '__module__' : 'tinkoff.invest.grpc.marketdata_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.SubscribeTradesRequest)
  })
_sym_db.RegisterMessage(SubscribeTradesRequest)
TradeInstrument = _reflection.GeneratedProtocolMessageType('TradeInstrument', (_message.Message,), {
  'DESCRIPTOR' : _TRADEINSTRUMENT,
  '__module__' : 'tinkoff.invest.grpc.marketdata_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.TradeInstrument)
  })
_sym_db.RegisterMessage(TradeInstrument)
SubscribeTradesResponse = _reflection.GeneratedProtocolMessageType('SubscribeTradesResponse', (_message.Message,), {
  'DESCRIPTOR' : _SUBSCRIBETRADESRESPONSE,
  '__module__' : 'tinkoff.invest.grpc.marketdata_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.SubscribeTradesResponse)
  })
_sym_db.RegisterMessage(SubscribeTradesResponse)
TradeSubscription = _reflection.GeneratedProtocolMessageType('TradeSubscription', (_message.Message,), {
  'DESCRIPTOR' : _TRADESUBSCRIPTION,
  '__module__' : 'tinkoff.invest.grpc.marketdata_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.TradeSubscription)
  })
_sym_db.RegisterMessage(TradeSubscription)
SubscribeInfoRequest = _reflection.GeneratedProtocolMessageType('SubscribeInfoRequest', (_message.Message,), {
  'DESCRIPTOR' : _SUBSCRIBEINFOREQUEST,
  '__module__' : 'tinkoff.invest.grpc.marketdata_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.SubscribeInfoRequest)
  })
_sym_db.RegisterMessage(SubscribeInfoRequest)
InfoInstrument = _reflection.GeneratedProtocolMessageType('InfoInstrument', (_message.Message,), {
  'DESCRIPTOR' : _INFOINSTRUMENT,
  '__module__' : 'tinkoff.invest.grpc.marketdata_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.InfoInstrument)
  })
_sym_db.RegisterMessage(InfoInstrument)
SubscribeInfoResponse = _reflection.GeneratedProtocolMessageType('SubscribeInfoResponse', (_message.Message,), {
  'DESCRIPTOR' : _SUBSCRIBEINFORESPONSE,
  '__module__' : 'tinkoff.invest.grpc.marketdata_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.SubscribeInfoResponse)
  })
_sym_db.RegisterMessage(SubscribeInfoResponse)
InfoSubscription = _reflection.GeneratedProtocolMessageType('InfoSubscription', (_message.Message,), {
  'DESCRIPTOR' : _INFOSUBSCRIPTION,
  '__module__' : 'tinkoff.invest.grpc.marketdata_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.InfoSubscription)
  })
_sym_db.RegisterMessage(InfoSubscription)
SubscribeLastPriceRequest = _reflection.GeneratedProtocolMessageType('SubscribeLastPriceRequest', (_message.Message,), {
  'DESCRIPTOR' : _SUBSCRIBELASTPRICEREQUEST,
  '__module__' : 'tinkoff.invest.grpc.marketdata_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.SubscribeLastPriceRequest)
  })
_sym_db.RegisterMessage(SubscribeLastPriceRequest)
LastPriceInstrument = _reflection.GeneratedProtocolMessageType('LastPriceInstrument', (_message.Message,), {
  'DESCRIPTOR' : _LASTPRICEINSTRUMENT,
  '__module__' : 'tinkoff.invest.grpc.marketdata_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.LastPriceInstrument)
  })
_sym_db.RegisterMessage(LastPriceInstrument)
SubscribeLastPriceResponse = _reflection.GeneratedProtocolMessageType('SubscribeLastPriceResponse', (_message.Message,), {
  'DESCRIPTOR' : _SUBSCRIBELASTPRICERESPONSE,
  '__module__' : 'tinkoff.invest.grpc.marketdata_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.SubscribeLastPriceResponse)
  })
_sym_db.RegisterMessage(SubscribeLastPriceResponse)
LastPriceSubscription = _reflection.GeneratedProtocolMessageType('LastPriceSubscription', (_message.Message,), {
  'DESCRIPTOR' : _LASTPRICESUBSCRIPTION,
  '__module__' : 'tinkoff.invest.grpc.marketdata_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.LastPriceSubscription)
  })
_sym_db.RegisterMessage(LastPriceSubscription)
Candle = _reflection.GeneratedProtocolMessageType('Candle', (_message.Message,), {
  'DESCRIPTOR' : _CANDLE,
  '__module__' : 'tinkoff.invest.grpc.marketdata_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.Candle)
  })
_sym_db.RegisterMessage(Candle)
OrderBook = _reflection.GeneratedProtocolMessageType('OrderBook', (_message.Message,), {
  'DESCRIPTOR' : _ORDERBOOK,
  '__module__' : 'tinkoff.invest.grpc.marketdata_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.OrderBook)
  })
_sym_db.RegisterMessage(OrderBook)
Order = _reflection.GeneratedProtocolMessageType('Order', (_message.Message,), {
  'DESCRIPTOR' : _ORDER,
  '__module__' : 'tinkoff.invest.grpc.marketdata_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.Order)
  })
_sym_db.RegisterMessage(Order)
Trade = _reflection.GeneratedProtocolMessageType('Trade', (_message.Message,), {
  'DESCRIPTOR' : _TRADE,
  '__module__' : 'tinkoff.invest.grpc.marketdata_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.Trade)
  })
_sym_db.RegisterMessage(Trade)
TradingStatus = _reflection.GeneratedProtocolMessageType('TradingStatus', (_message.Message,), {
  'DESCRIPTOR' : _TRADINGSTATUS,
  '__module__' : 'tinkoff.invest.grpc.marketdata_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.TradingStatus)
  })
_sym_db.RegisterMessage(TradingStatus)
GetCandlesRequest = _reflection.GeneratedProtocolMessageType('GetCandlesRequest', (_message.Message,), {
  'DESCRIPTOR' : _GETCANDLESREQUEST,
  '__module__' : 'tinkoff.invest.grpc.marketdata_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GetCandlesRequest)
  })
_sym_db.RegisterMessage(GetCandlesRequest)
GetCandlesResponse = _reflection.GeneratedProtocolMessageType('GetCandlesResponse', (_message.Message,), {
  'DESCRIPTOR' : _GETCANDLESRESPONSE,
  '__module__' : 'tinkoff.invest.grpc.marketdata_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GetCandlesResponse)
  })
_sym_db.RegisterMessage(GetCandlesResponse)
HistoricCandle = _reflection.GeneratedProtocolMessageType('HistoricCandle', (_message.Message,), {
  'DESCRIPTOR' : _HISTORICCANDLE,
  '__module__' : 'tinkoff.invest.grpc.marketdata_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.HistoricCandle)
  })
_sym_db.RegisterMessage(HistoricCandle)
GetLastPricesRequest = _reflection.GeneratedProtocolMessageType('GetLastPricesRequest', (_message.Message,), {
  'DESCRIPTOR' : _GETLASTPRICESREQUEST,
  '__module__' : 'tinkoff.invest.grpc.marketdata_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GetLastPricesRequest)
  })
_sym_db.RegisterMessage(GetLastPricesRequest)
GetLastPricesResponse = _reflection.GeneratedProtocolMessageType('GetLastPricesResponse', (_message.Message,), {
  'DESCRIPTOR' : _GETLASTPRICESRESPONSE,
  '__module__' : 'tinkoff.invest.grpc.marketdata_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GetLastPricesResponse)
  })
_sym_db.RegisterMessage(GetLastPricesResponse)
LastPrice = _reflection.GeneratedProtocolMessageType('LastPrice', (_message.Message,), {
  'DESCRIPTOR' : _LASTPRICE,
  '__module__' : 'tinkoff.invest.grpc.marketdata_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.LastPrice)
  })
_sym_db.RegisterMessage(LastPrice)
GetOrderBookRequest = _reflection.GeneratedProtocolMessageType('GetOrderBookRequest', (_message.Message,), {
  'DESCRIPTOR' : _GETORDERBOOKREQUEST,
  '__module__' : 'tinkoff.invest.grpc.marketdata_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GetOrderBookRequest)
  })
_sym_db.RegisterMessage(GetOrderBookRequest)
GetOrderBookResponse = _reflection.GeneratedProtocolMessageType('GetOrderBookResponse', (_message.Message,), {
  'DESCRIPTOR' : _GETORDERBOOKRESPONSE,
  '__module__' : 'tinkoff.invest.grpc.marketdata_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GetOrderBookResponse)
  })
_sym_db.RegisterMessage(GetOrderBookResponse)
GetTradingStatusRequest = _reflection.GeneratedProtocolMessageType('GetTradingStatusRequest', (_message.Message,), {
  'DESCRIPTOR' : _GETTRADINGSTATUSREQUEST,
  '__module__' : 'tinkoff.invest.grpc.marketdata_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GetTradingStatusRequest)
  })
_sym_db.RegisterMessage(GetTradingStatusRequest)
GetTradingStatusResponse = _reflection.GeneratedProtocolMessageType('GetTradingStatusResponse', (_message.Message,), {
  'DESCRIPTOR' : _GETTRADINGSTATUSRESPONSE,
  '__module__' : 'tinkoff.invest.grpc.marketdata_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GetTradingStatusResponse)
  })
_sym_db.RegisterMessage(GetTradingStatusResponse)
# ---------------------------------------------------------------------------
# Machine-generated service descriptors (protoc output) — do not edit.
# MarketDataService: four unary RPCs (GetCandles, GetLastPrices,
# GetOrderBook, GetTradingStatus).  MarketDataStreamService: the
# bidirectional MarketDataStream RPC (request/response types above).
# ---------------------------------------------------------------------------
DESCRIPTOR._options = None
_MARKETDATASERVICE = _descriptor.ServiceDescriptor(
  name='MarketDataService',
  full_name='tinkoff.public.invest.api.contract.v1.MarketDataService',
  file=DESCRIPTOR,
  index=0,
  serialized_options=None,
  create_key=_descriptor._internal_create_key,
  serialized_start=8278,
  serialized_end=8858,
  methods=[
  _descriptor.MethodDescriptor(
    name='GetCandles',
    full_name='tinkoff.public.invest.api.contract.v1.MarketDataService.GetCandles',
    index=0,
    containing_service=None,
    input_type=_GETCANDLESREQUEST,
    output_type=_GETCANDLESRESPONSE,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='GetLastPrices',
    full_name='tinkoff.public.invest.api.contract.v1.MarketDataService.GetLastPrices',
    index=1,
    containing_service=None,
    input_type=_GETLASTPRICESREQUEST,
    output_type=_GETLASTPRICESRESPONSE,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='GetOrderBook',
    full_name='tinkoff.public.invest.api.contract.v1.MarketDataService.GetOrderBook',
    index=2,
    containing_service=None,
    input_type=_GETORDERBOOKREQUEST,
    output_type=_GETORDERBOOKRESPONSE,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='GetTradingStatus',
    full_name='tinkoff.public.invest.api.contract.v1.MarketDataService.GetTradingStatus',
    index=3,
    containing_service=None,
    input_type=_GETTRADINGSTATUSREQUEST,
    output_type=_GETTRADINGSTATUSRESPONSE,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
  ),
])
_sym_db.RegisterServiceDescriptor(_MARKETDATASERVICE)
DESCRIPTOR.services_by_name['MarketDataService'] = _MARKETDATASERVICE
_MARKETDATASTREAMSERVICE = _descriptor.ServiceDescriptor(
  name='MarketDataStreamService',
  full_name='tinkoff.public.invest.api.contract.v1.MarketDataStreamService',
  file=DESCRIPTOR,
  index=1,
  serialized_options=None,
  create_key=_descriptor._internal_create_key,
  serialized_start=8861,
  serialized_end=9028,
  methods=[
  _descriptor.MethodDescriptor(
    name='MarketDataStream',
    full_name='tinkoff.public.invest.api.contract.v1.MarketDataStreamService.MarketDataStream',
    index=0,
    containing_service=None,
    input_type=_MARKETDATAREQUEST,
    output_type=_MARKETDATARESPONSE,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
  ),
])
_sym_db.RegisterServiceDescriptor(_MARKETDATASTREAMSERVICE)
DESCRIPTOR.services_by_name['MarketDataStreamService'] = _MARKETDATASTREAMSERVICE
# @@protoc_insertion_point(module_scope)
| 49.069007
| 12,942
| 0.782199
| 14,969
| 123,016
| 6.097535
| 0.036876
| 0.036374
| 0.060576
| 0.067971
| 0.795342
| 0.76793
| 0.760458
| 0.728258
| 0.711583
| 0.653713
| 0
| 0.034182
| 0.101068
| 123,016
| 2,506
| 12,943
| 49.088587
| 0.791207
| 0.030459
| 0
| 0.677821
| 1
| 0.001716
| 0.251581
| 0.215547
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.003003
| 0
| 0.003003
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fe5bdcbb6940e2ca8942a4acf06dce1ec53d6ba6
| 5,952
|
py
|
Python
|
tests/dhcpv4/classification/test_v4_classification_ipxe.py
|
shawnmullaney/forge
|
aaaef0a0645f73d24666aab6a400f3604e753aac
|
[
"0BSD"
] | null | null | null |
tests/dhcpv4/classification/test_v4_classification_ipxe.py
|
shawnmullaney/forge
|
aaaef0a0645f73d24666aab6a400f3604e753aac
|
[
"0BSD"
] | null | null | null |
tests/dhcpv4/classification/test_v4_classification_ipxe.py
|
shawnmullaney/forge
|
aaaef0a0645f73d24666aab6a400f3604e753aac
|
[
"0BSD"
] | null | null | null |
"""Client Classification DHCPv4"""
# pylint: disable=invalid-name,line-too-long
import pytest
import srv_control
import misc
import srv_msg
@pytest.mark.v4
@pytest.mark.dhcp4
@pytest.mark.classification
def test_v4_client_classification_iPXE_client_arch():
    """Classify an iPXE client by option 93 (client system architecture).

    Configures a class 'ipxe_efi_x64' whose test is
    ``option[93].hex == 0x0009`` and attaches boot parameters
    (next-server, server-hostname, boot-file-name) to it; the only subnet
    is restricted to that class.  A DISCOVER without the PXE options must
    get no answer; a DISCOVER carrying architecture 9 must be offered the
    single pool address together with the class's boot parameters.
    """
    misc.test_setup()
    # Single-address pool so the offered yiaddr is deterministic.
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.50-192.168.50.50')
    srv_control.create_new_class('ipxe_efi_x64')
    # Class membership test on raw option 93 bytes (architecture 9).
    srv_control.add_test_to_class('1', 'test', 'option[93].hex == 0x0009')
    # Boot parameters handed out only to members of the class.
    srv_control.add_test_to_class('1', 'next-server', '192.0.2.254')
    srv_control.add_test_to_class('1', 'server-hostname', 'hal9000')
    srv_control.add_test_to_class('1', 'boot-file-name', '/dev/null')
    # Restrict subnet 0 to the class so non-members get nothing.
    srv_control.config_client_classification('0', 'ipxe_efi_x64')
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')
    misc.test_procedure()
    # Negative case: DISCOVER without PXE options — outside the class.
    srv_msg.client_sets_value('Client', 'chaddr', 'ff:01:02:03:ff:04')
    srv_msg.client_does_include_with_value('client_id', 'ff:01:11:11:11:11:11:22')
    srv_msg.client_send_msg('DISCOVER')
    misc.pass_criteria()
    # Server must stay silent for the non-member client.
    srv_msg.send_dont_wait_for_message()
    misc.test_procedure()
    # Positive case: DISCOVER carrying the PXE options, with
    # architecture 9 satisfying the class test.
    srv_msg.client_sets_value('Client', 'chaddr', 'ff:01:02:03:ff:04')
    srv_msg.client_does_include_with_value('client_id', 'ff:01:02:03:ff:04:11:22')
    srv_msg.client_does_include_with_value('pxe_client_architecture', '9')
    srv_msg.client_does_include_with_value('pxe_client_network_interface', '320')
    srv_msg.client_does_include_with_value('pxe_client_machine_identifier', '123456789a')
    srv_msg.client_send_msg('DISCOVER')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'OFFER')
    # Lease from the one-address pool plus the class's boot parameters.
    srv_msg.response_check_content('Response', None, 'yiaddr', '192.168.50.50')
    srv_msg.response_check_content('Response', None, 'siaddr', '192.0.2.254')
    srv_msg.response_check_content('Response', None, 'file', '/dev/null')
    srv_msg.response_check_content('Response', None, 'sname', 'hal9000')
@pytest.mark.v4
@pytest.mark.dhcp4
@pytest.mark.classification
def test_v4_client_classification_iPXE_client_inter():
    """Classify an iPXE client by option 94 (client network interface id).

    Same scenario as the architecture test, but the class test is
    ``option[94].hex == 0x030200``.  A DISCOVER without PXE options must
    get no answer; a DISCOVER whose pxe_client_network_interface value
    '320' — presumably serialized by srv_msg as 0x030200 (verify against
    the framework's option encoding) — must be offered the lease with the
    class's boot parameters.
    """
    misc.test_setup()
    # Single-address pool so the offered yiaddr is deterministic.
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.50-192.168.50.50')
    srv_control.create_new_class('ipxe_efi_x64')
    # Class membership test on raw option 94 bytes.
    srv_control.add_test_to_class('1', 'test', 'option[94].hex == 0x030200')
    # Boot parameters handed out only to members of the class.
    srv_control.add_test_to_class('1', 'next-server', '192.0.2.254')
    srv_control.add_test_to_class('1', 'server-hostname', 'hal9000')
    srv_control.add_test_to_class('1', 'boot-file-name', '/dev/null')
    # Restrict subnet 0 to the class so non-members get nothing.
    srv_control.config_client_classification('0', 'ipxe_efi_x64')
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')
    misc.test_procedure()
    # Negative case: DISCOVER without PXE options — outside the class.
    srv_msg.client_sets_value('Client', 'chaddr', 'ff:01:02:03:ff:04')
    srv_msg.client_does_include_with_value('client_id', 'ff:01:11:11:11:11:11:22')
    srv_msg.client_send_msg('DISCOVER')
    misc.pass_criteria()
    # Server must stay silent for the non-member client.
    srv_msg.send_dont_wait_for_message()
    misc.test_procedure()
    # Positive case: DISCOVER carrying the PXE options, including the
    # network-interface identifier the class matches on.
    srv_msg.client_sets_value('Client', 'chaddr', 'ff:01:02:03:ff:04')
    srv_msg.client_does_include_with_value('client_id', 'ff:01:02:03:ff:04:11:22')
    srv_msg.client_does_include_with_value('pxe_client_architecture', '9')
    srv_msg.client_does_include_with_value('pxe_client_network_interface', '320')
    srv_msg.client_does_include_with_value('pxe_client_machine_identifier', '123456789a')
    srv_msg.client_send_msg('DISCOVER')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'OFFER')
    # Lease from the one-address pool plus the class's boot parameters.
    srv_msg.response_check_content('Response', None, 'yiaddr', '192.168.50.50')
    srv_msg.response_check_content('Response', None, 'siaddr', '192.0.2.254')
    srv_msg.response_check_content('Response', None, 'file', '/dev/null')
    srv_msg.response_check_content('Response', None, 'sname', 'hal9000')
@pytest.mark.v4
@pytest.mark.dhcp4
@pytest.mark.classification
def test_v4_client_classification_iPXE_machine_ident():
    """Client classification on the PXE client machine identifier option (97).

    Configures class 'ipxe_efi_x64' matched on option 97 equal to
    0x0102030405060708090a and checks that:
      * a DISCOVER without the PXE options gets no response, and
      * a DISCOVER carrying the PXE options is answered with an OFFER that
        includes the class-specific boot parameters (siaddr, sname, file).
    """
    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.50-192.168.50.50')
    srv_control.create_new_class('ipxe_efi_x64')
    srv_control.add_test_to_class('1', 'test', 'option[97].hex == 0x0102030405060708090a')
    srv_control.add_test_to_class('1', 'next-server', '192.0.2.254')
    srv_control.add_test_to_class('1', 'server-hostname', 'hal9000')
    srv_control.add_test_to_class('1', 'boot-file-name', '/dev/null')
    srv_control.config_client_classification('0', 'ipxe_efi_x64')
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')

    # First exchange: no PXE options, so the client does not match the
    # class and the server is expected to stay silent.
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'chaddr', 'ff:01:02:03:ff:04')
    srv_msg.client_does_include_with_value('client_id', 'ff:01:11:11:11:11:11:22')
    srv_msg.client_send_msg('DISCOVER')

    misc.pass_criteria()
    srv_msg.send_dont_wait_for_message()

    # Second exchange: PXE options present, so the client matches the
    # class and must receive the boot parameters in the OFFER.
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'chaddr', 'ff:01:02:03:ff:04')
    srv_msg.client_does_include_with_value('client_id', 'ff:01:02:03:ff:04:11:22')
    srv_msg.client_does_include_with_value('pxe_client_architecture', '9')
    srv_msg.client_does_include_with_value('pxe_client_network_interface', '320')
    srv_msg.client_does_include_with_value('pxe_client_machine_identifier', '123456789a')
    srv_msg.client_send_msg('DISCOVER')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'OFFER')
    srv_msg.response_check_content('Response', None, 'yiaddr', '192.168.50.50')
    srv_msg.response_check_content('Response', None, 'siaddr', '192.0.2.254')
    srv_msg.response_check_content('Response', None, 'file', '/dev/null')
    srv_msg.response_check_content('Response', None, 'sname', 'hal9000')
#
# 208: "pxelinux_magic",
# 209: "pxelinux_configuration_file",
# 210: "pxelinux_path_prefix",
# 211: "pxelinux_reboot_time",
| 42.820144
| 90
| 0.737231
| 923
| 5,952
| 4.370531
| 0.127844
| 0.068418
| 0.080317
| 0.059494
| 0.933317
| 0.933317
| 0.933317
| 0.933317
| 0.933317
| 0.933317
| 0
| 0.084086
| 0.106855
| 5,952
| 138
| 91
| 43.130435
| 0.674755
| 0.031754
| 0
| 0.902913
| 0
| 0
| 0.276947
| 0.083623
| 0
| 0
| 0.006259
| 0
| 0
| 1
| 0.029126
| true
| 0.058252
| 0.038835
| 0
| 0.067961
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 9
|
fe85cd171414ebfd1dc34a43d20a1a3b0957663f
| 4,734
|
py
|
Python
|
run_panoramix.py
|
NiklasRz/panoramix
|
fcb2dfe3df48ceee188cea37793da74aa9a05eb6
|
[
"MIT"
] | null | null | null |
run_panoramix.py
|
NiklasRz/panoramix
|
fcb2dfe3df48ceee188cea37793da74aa9a05eb6
|
[
"MIT"
] | null | null | null |
run_panoramix.py
|
NiklasRz/panoramix
|
fcb2dfe3df48ceee188cea37793da74aa9a05eb6
|
[
"MIT"
] | null | null | null |
import panoramix
# NOTE(review): the string below sits after the import statement, so it is
# not picked up as the module __doc__; it reads as a TODO list for this script.
"""
Niklas' version to decompile a file of bytecode
add a read-in function to read in the bytecode from the files
add an output function to better format the output
"""
# replace with address and code loader - load from file
# Hard-coded sample contract address; paired with the bytecode blob assigned
# to `code` below and passed to panoramix.decompile at the end of the script.
address = "0x888666CA69E0f178DED6D75b5726Cee99A87D698"
code = "0x606060405236156100c45760e060020a600035046306fdde0381146100c9578063095ea7b31461012b57806318160ddd1461013e57806323b872dd1461014c578063313ce5671461016d578063366a68dc1461017e57806354fd4d50146101e457806370a08231146102475780638da5cb5b1461027157806395d89b4114610288578063a39a45b7146102eb578063a4e2d63414610374578063a9059cbb14610381578063cae9ca511461039a578063d8162db714610460578063dd62ed3e1461046e575b610002565b346100025760408051600180546020600282841615610100026000190190921691909104601f81018290048202840182019094528383526104a793908301828280156105995780601f1061056e57610100808354040283529160200191610599565b34610002576105156004356024356103f4565b346100025761052960005481565b34610002576105156004356024356044356000836105a15b60065443901190565b346100025761053b60025460ff1681565b3461000257610515600435600554600090600160a060020a0390811633909116141561026c5760068290556040805183815290517f6c04066f6ede40cc1642c211ba9d18f1a096ccc84fb8d11be28ea6c3c6f68b369181900360200190a150600161026c565b34610002576040805160048054602060026001831615610100026000190190921691909104601f81018290048202840182019094528383526104a793908301828280156105995780601f1061056e57610100808354040283529160200191610599565b3461000257610529600435600160a060020a0381166000908152600760205260409020545b919050565b3461000257610551600554600160a060020a031681565b34610002576040805160038054602060026001831615610100026000190190921691909104601f81018290048202840182019094528383526104a793908301828280156105995780601f1061056e57610100808354040283529160200191610599565b3461000257610515600435600554600090600160a060020a0390811633909116141561026c576005805473ffffffffffffffffffffffffffffffffffffffff19168317905560408051600160a060020a038416815290517f3edd90e7770f06fafde38004653b33870066c33bfc923ff6102acd601f85dfbc9181900360200190a150600161026c565b3461000257610515610164565b34610002576105156004356024356000336106cf610164565b3461000257604080516020604435600481810135601f8101849004840285018401909552848452610515948135946024803595939460649492939101918190840183828082843750
9496505050505050506000836107b781855b33600160a060020a03908116600081815260086020908152604080832094871680845294825280832086905580518681529051929493927f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925929181900390910190a35060015b92915050565b346100025761052960065481565b3461000257610529600435602435600160a060020a0382811660009081526008602090815260408083209385168352929052205461045a565b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156105075780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b604080519115158252519081900360200190f35b60408051918252519081900360200190f35b6040805160ff9092168252519081900360200190f35b60408051600160a060020a03929092168252519081900360200190f35b820191906000526020600020905b81548152906001019060200180831161057c57829003601f168201915b505050505081565b15806105bb5750600554600160a060020a03908116908216145b156106c7578330600160a060020a031681600160a060020a03161415156106c557600160a060020a0386166000908152600760205260409020548490108015906106255750600860209081526040600081812033600160a060020a03168252909252902054849010155b80156106315750600084115b156106c057600160a060020a03858116600081815260076020908152604080832080548a0190558a851680845281842080548b90039055600883528184203390961684529482529182902080548990039055815188815291519293927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef9281900390910190a3600192506106c5565b600092505b505b509392505050565b15806106e95750600554600160a060020a03908116908216145b156107b0578330600160a060020a031681600160a060020a03161415156107ae5733600160a060020a03166000908152600760205260409020548490108015906107335750600084115b156107a95733600160a060020a03908116600081815260076020908152604080832080548a90039055938916808352918490208054890190558351888152935191937fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef929081900390910190a3600192506107ae565b600092505b505b5092915050565b156106c75780600160a060020a0316638f4ffc
b1338630876040518560e060020a0281526004018085600160a060020a0316815260200184815260200183600160a060020a03168152602001806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f16801561085f5780820380516001836020036101000a031916815260200191505b5095505050505050600060405180830381600087803b156100025760325a03f11561000257505050600191506106c756"
panoramix.decompile(address, code)
| 278.470588
| 4,395
| 0.983523
| 49
| 4,734
| 95.020408
| 0.612245
| 0.002577
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.85479
| 0.012252
| 4,734
| 16
| 4,396
| 295.875
| 0.140932
| 0.011196
| 0
| 0
| 0
| 0
| 0.982036
| 0.982036
| 0
| 1
| 0.982036
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
fe8646b0aa17641a186e8f6047bb10ab156435eb
| 2,456
|
py
|
Python
|
tests/test_parser.py
|
jaywoo123/lux
|
52d6b326264ced2f9cf3c380a320fb1856166535
|
[
"Apache-2.0"
] | null | null | null |
tests/test_parser.py
|
jaywoo123/lux
|
52d6b326264ced2f9cf3c380a320fb1856166535
|
[
"Apache-2.0"
] | null | null | null |
tests/test_parser.py
|
jaywoo123/lux
|
52d6b326264ced2f9cf3c380a320fb1856166535
|
[
"Apache-2.0"
] | null | null | null |
import pandas as pd
import lux
def test_case1():
    """A single attribute string becomes one lux.Spec in the context."""
    ldf = pd.read_csv("lux/data/car.csv")
    ldf.setContext(["Horsepower"])
    assert isinstance(ldf.context[0], lux.Spec)
    assert ldf.context[0].attribute == "Horsepower"
def test_case2():
    """A plain string and an explicit lux.Spec may be mixed in setContext."""
    ldf = pd.read_csv("lux/data/car.csv")
    ldf.setContext(["Horsepower", lux.Spec("MilesPerGal", channel="x")])
    assert isinstance(ldf.context[0], lux.Spec)
    assert ldf.context[0].attribute == "Horsepower"
    assert isinstance(ldf.context[1], lux.Spec)
    assert ldf.context[1].attribute == "MilesPerGal"
def test_case3():
    """The 'attr=value' string form parses into attribute and value."""
    ldf = pd.read_csv("lux/data/car.csv")
    ldf.setContext(["Horsepower", "Origin=USA"])
    assert isinstance(ldf.context[0], lux.Spec)
    assert ldf.context[0].attribute == "Horsepower"
    assert isinstance(ldf.context[1], lux.Spec)
    assert ldf.context[1].attribute == "Origin"
    assert ldf.context[1].value == "USA"
def test_case4():
    """'|'-separated values parse into a list of values on one Spec."""
    ldf = pd.read_csv("lux/data/car.csv")
    ldf.setContext(["Horsepower", "Origin=USA|Japan"])
    assert isinstance(ldf.context[0], lux.Spec)
    assert ldf.context[0].attribute == "Horsepower"
    assert isinstance(ldf.context[1], lux.Spec)
    assert ldf.context[1].attribute == "Origin"
    assert ldf.context[1].value == ["USA","Japan"]
def test_case5():
    """A list of attributes (or the '|' shorthand) yields one multi-attribute Spec.

    Both spellings — a nested list and a '|'-joined string — must parse to
    the same context: one Spec holding the attribute list, plus the filter.
    """
    ldf = pd.read_csv("lux/data/car.csv")
    ldf.setContext([["Horsepower", "MilesPerGal", "Weight"], "Origin=USA"])
    assert isinstance(ldf.context[0], lux.Spec)
    assert ldf.context[0].attribute == ["Horsepower", "MilesPerGal", "Weight"]
    assert isinstance(ldf.context[1], lux.Spec)
    assert ldf.context[1].attribute == "Origin"
    assert ldf.context[1].value == "USA"

    # Same intent expressed with the '|' shorthand must be equivalent.
    ldf.setContext(["Horsepower|MilesPerGal|Weight", "Origin=USA"])
    assert isinstance(ldf.context[0], lux.Spec)
    assert ldf.context[0].attribute == ["Horsepower", "MilesPerGal", "Weight"]
    assert isinstance(ldf.context[1], lux.Spec)
    assert ldf.context[1].attribute == "Origin"
    assert ldf.context[1].value == "USA"
def test_case6():
    """'attr=?' wildcard expands to every value present in the data column."""
    ldf = pd.read_csv("lux/data/car.csv")
    ldf.setContext(["Horsepower", "Origin=?"])
    assert isinstance(ldf.context[0], lux.Spec)
    assert ldf.context[0].attribute == "Horsepower"
    assert isinstance(ldf.context[1], lux.Spec)
    assert ldf.context[1].attribute == "Origin"
    # Expected expansion of the '?' wildcard over the Origin column
    # (dataset-dependent — tied to lux/data/car.csv).
    assert ldf.context[1].value == ["USA","Japan","Europe"]
# TODO: Need to support this case
'''
lux.setContext(["Horsepower","MPG","Acceleration"],"Origin")
lux.setContext("Horsepower/MPG/Acceleration", "Origin")
--> [Spec(attr= ["Horsepower","MPG","Acceleration"], type= "attributeGroup")]
'''
| 37.212121
| 79
| 0.694218
| 356
| 2,456
| 4.755618
| 0.132022
| 0.183107
| 0.170112
| 0.153574
| 0.877732
| 0.877732
| 0.825753
| 0.825753
| 0.825753
| 0.825753
| 0
| 0.01654
| 0.089169
| 2,456
| 66
| 80
| 37.212121
| 0.740277
| 0.012622
| 0
| 0.653846
| 0
| 0
| 0.20009
| 0.013069
| 0
| 0
| 0
| 0.015152
| 0.596154
| 1
| 0.115385
| false
| 0
| 0.038462
| 0
| 0.153846
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
22864e8e579e705e8b21e36a4cc7aba9ba4a5c70
| 36,093
|
py
|
Python
|
automl/tests/unit/gapic/v1/test_auto_ml_client_v1.py
|
conwaychriscosmo/google-cloud-python
|
8e7b7f8a5f4bb04d13f4d88ec3848f017faf834a
|
[
"Apache-2.0"
] | 1
|
2019-03-26T21:44:51.000Z
|
2019-03-26T21:44:51.000Z
|
automl/tests/unit/gapic/v1/test_auto_ml_client_v1.py
|
conwaychriscosmo/google-cloud-python
|
8e7b7f8a5f4bb04d13f4d88ec3848f017faf834a
|
[
"Apache-2.0"
] | 1
|
2019-03-29T22:03:48.000Z
|
2019-04-02T22:24:45.000Z
|
automl/tests/unit/gapic/v1/test_auto_ml_client_v1.py
|
conwaychriscosmo/google-cloud-python
|
8e7b7f8a5f4bb04d13f4d88ec3848f017faf834a
|
[
"Apache-2.0"
] | 1
|
2019-03-29T18:26:16.000Z
|
2019-03-29T18:26:16.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests."""
import mock
import pytest
from google.rpc import status_pb2
from google.cloud import automl_v1
from google.cloud.automl_v1.proto import annotation_spec_pb2
from google.cloud.automl_v1.proto import dataset_pb2
from google.cloud.automl_v1.proto import io_pb2
from google.cloud.automl_v1.proto import model_evaluation_pb2
from google.cloud.automl_v1.proto import model_pb2
from google.cloud.automl_v1.proto import service_pb2
from google.longrunning import operations_pb2
from google.protobuf import empty_pb2
from google.protobuf import field_mask_pb2
class MultiCallableStub(object):
    """Stub for the grpc.UnaryUnaryMultiCallable interface."""

    def __init__(self, method, channel_stub):
        # method: the fully-qualified RPC method name this callable fakes.
        # channel_stub: owning ChannelStub; requests/responses live there.
        self.method = method
        self.channel_stub = channel_stub

    def __call__(self, request, timeout=None, metadata=None, credentials=None):
        """Record the request and replay the next canned response.

        Pops the next item from channel_stub.responses: an Exception is
        raised, a truthy value is returned, and a falsy value (or an
        exhausted response list) yields None.
        """
        self.channel_stub.requests.append((self.method, request))

        response = None
        if self.channel_stub.responses:
            response = self.channel_stub.responses.pop()

        if isinstance(response, Exception):
            raise response

        # NOTE(review): a falsy canned response (e.g. an empty message)
        # falls through and returns None — presumably intentional for this
        # test stub, but worth confirming if falsy protos are ever queued.
        if response:
            return response
class ChannelStub(object):
    """Stub for the grpc.Channel interface."""

    def __init__(self, responses=None):
        # A mutable default argument (responses=[]) would be shared across
        # every ChannelStub instance — and MultiCallableStub mutates it via
        # pop() — so use the None-sentinel idiom instead.
        self.responses = [] if responses is None else responses
        self.requests = []

    def unary_unary(self, method, request_serializer=None, response_deserializer=None):
        """Return a MultiCallableStub recording requests on this channel."""
        return MultiCallableStub(method, self)
class CustomException(Exception):
    """Marker exception used to simulate API call failures in these tests."""
class TestAutoMlClient(object):
def test_create_dataset(self):
# Setup Expected Response
name = "name3373707"
display_name = "displayName1615086568"
description = "description-1724546052"
example_count = 1517063674
etag = "etag3123477"
expected_response = {
"name": name,
"display_name": display_name,
"description": description,
"example_count": example_count,
"etag": etag,
}
expected_response = dataset_pb2.Dataset(**expected_response)
operation = operations_pb2.Operation(
name="operations/test_create_dataset", done=True
)
operation.response.Pack(expected_response)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1.AutoMlClient()
# Setup Request
parent = client.location_path("[PROJECT]", "[LOCATION]")
dataset = {}
response = client.create_dataset(parent, dataset)
result = response.result()
assert expected_response == result
assert len(channel.requests) == 1
expected_request = service_pb2.CreateDatasetRequest(
parent=parent, dataset=dataset
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_create_dataset_exception(self):
# Setup Response
error = status_pb2.Status()
operation = operations_pb2.Operation(
name="operations/test_create_dataset_exception", done=True
)
operation.error.CopyFrom(error)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1.AutoMlClient()
# Setup Request
parent = client.location_path("[PROJECT]", "[LOCATION]")
dataset = {}
response = client.create_dataset(parent, dataset)
exception = response.exception()
assert exception.errors[0] == error
def test_update_dataset(self):
# Setup Expected Response
name = "name3373707"
display_name = "displayName1615086568"
description = "description-1724546052"
example_count = 1517063674
etag = "etag3123477"
expected_response = {
"name": name,
"display_name": display_name,
"description": description,
"example_count": example_count,
"etag": etag,
}
expected_response = dataset_pb2.Dataset(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1.AutoMlClient()
# Setup Request
dataset = {}
update_mask = {}
response = client.update_dataset(dataset, update_mask)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_pb2.UpdateDatasetRequest(
dataset=dataset, update_mask=update_mask
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_update_dataset_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1.AutoMlClient()
# Setup request
dataset = {}
update_mask = {}
with pytest.raises(CustomException):
client.update_dataset(dataset, update_mask)
def test_get_dataset(self):
# Setup Expected Response
name_2 = "name2-1052831874"
display_name = "displayName1615086568"
description = "description-1724546052"
example_count = 1517063674
etag = "etag3123477"
expected_response = {
"name": name_2,
"display_name": display_name,
"description": description,
"example_count": example_count,
"etag": etag,
}
expected_response = dataset_pb2.Dataset(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1.AutoMlClient()
# Setup Request
name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]")
response = client.get_dataset(name)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_pb2.GetDatasetRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_get_dataset_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1.AutoMlClient()
# Setup request
name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]")
with pytest.raises(CustomException):
client.get_dataset(name)
def test_list_datasets(self):
# Setup Expected Response
next_page_token = ""
datasets_element = {}
datasets = [datasets_element]
expected_response = {"next_page_token": next_page_token, "datasets": datasets}
expected_response = service_pb2.ListDatasetsResponse(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1.AutoMlClient()
# Setup Request
parent = client.location_path("[PROJECT]", "[LOCATION]")
paged_list_response = client.list_datasets(parent)
resources = list(paged_list_response)
assert len(resources) == 1
assert expected_response.datasets[0] == resources[0]
assert len(channel.requests) == 1
expected_request = service_pb2.ListDatasetsRequest(parent=parent)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_list_datasets_exception(self):
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1.AutoMlClient()
# Setup request
parent = client.location_path("[PROJECT]", "[LOCATION]")
paged_list_response = client.list_datasets(parent)
with pytest.raises(CustomException):
list(paged_list_response)
def test_delete_dataset(self):
# Setup Expected Response
expected_response = {}
expected_response = empty_pb2.Empty(**expected_response)
operation = operations_pb2.Operation(
name="operations/test_delete_dataset", done=True
)
operation.response.Pack(expected_response)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1.AutoMlClient()
# Setup Request
name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]")
response = client.delete_dataset(name)
result = response.result()
assert expected_response == result
assert len(channel.requests) == 1
expected_request = service_pb2.DeleteDatasetRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_delete_dataset_exception(self):
# Setup Response
error = status_pb2.Status()
operation = operations_pb2.Operation(
name="operations/test_delete_dataset_exception", done=True
)
operation.error.CopyFrom(error)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1.AutoMlClient()
# Setup Request
name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]")
response = client.delete_dataset(name)
exception = response.exception()
assert exception.errors[0] == error
def test_import_data(self):
# Setup Expected Response
expected_response = {}
expected_response = empty_pb2.Empty(**expected_response)
operation = operations_pb2.Operation(
name="operations/test_import_data", done=True
)
operation.response.Pack(expected_response)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1.AutoMlClient()
# Setup Request
name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]")
input_config = {}
response = client.import_data(name, input_config)
result = response.result()
assert expected_response == result
assert len(channel.requests) == 1
expected_request = service_pb2.ImportDataRequest(
name=name, input_config=input_config
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_import_data_exception(self):
# Setup Response
error = status_pb2.Status()
operation = operations_pb2.Operation(
name="operations/test_import_data_exception", done=True
)
operation.error.CopyFrom(error)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1.AutoMlClient()
# Setup Request
name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]")
input_config = {}
response = client.import_data(name, input_config)
exception = response.exception()
assert exception.errors[0] == error
def test_export_data(self):
# Setup Expected Response
expected_response = {}
expected_response = empty_pb2.Empty(**expected_response)
operation = operations_pb2.Operation(
name="operations/test_export_data", done=True
)
operation.response.Pack(expected_response)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1.AutoMlClient()
# Setup Request
name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]")
output_config = {}
response = client.export_data(name, output_config)
result = response.result()
assert expected_response == result
assert len(channel.requests) == 1
expected_request = service_pb2.ExportDataRequest(
name=name, output_config=output_config
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_export_data_exception(self):
# Setup Response
error = status_pb2.Status()
operation = operations_pb2.Operation(
name="operations/test_export_data_exception", done=True
)
operation.error.CopyFrom(error)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1.AutoMlClient()
# Setup Request
name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]")
output_config = {}
response = client.export_data(name, output_config)
exception = response.exception()
assert exception.errors[0] == error
def test_get_annotation_spec(self):
# Setup Expected Response
name_2 = "name2-1052831874"
display_name = "displayName1615086568"
example_count = 1517063674
expected_response = {
"name": name_2,
"display_name": display_name,
"example_count": example_count,
}
expected_response = annotation_spec_pb2.AnnotationSpec(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1.AutoMlClient()
# Setup Request
name = client.annotation_spec_path(
"[PROJECT]", "[LOCATION]", "[DATASET]", "[ANNOTATION_SPEC]"
)
response = client.get_annotation_spec(name)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_pb2.GetAnnotationSpecRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_get_annotation_spec_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1.AutoMlClient()
# Setup request
name = client.annotation_spec_path(
"[PROJECT]", "[LOCATION]", "[DATASET]", "[ANNOTATION_SPEC]"
)
with pytest.raises(CustomException):
client.get_annotation_spec(name)
def test_create_model(self):
# Setup Expected Response
name = "name3373707"
display_name = "displayName1615086568"
dataset_id = "datasetId-2115646910"
etag = "etag3123477"
expected_response = {
"name": name,
"display_name": display_name,
"dataset_id": dataset_id,
"etag": etag,
}
expected_response = model_pb2.Model(**expected_response)
operation = operations_pb2.Operation(
name="operations/test_create_model", done=True
)
operation.response.Pack(expected_response)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1.AutoMlClient()
# Setup Request
parent = client.location_path("[PROJECT]", "[LOCATION]")
model = {}
response = client.create_model(parent, model)
result = response.result()
assert expected_response == result
assert len(channel.requests) == 1
expected_request = service_pb2.CreateModelRequest(parent=parent, model=model)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_create_model_exception(self):
# Setup Response
error = status_pb2.Status()
operation = operations_pb2.Operation(
name="operations/test_create_model_exception", done=True
)
operation.error.CopyFrom(error)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1.AutoMlClient()
# Setup Request
parent = client.location_path("[PROJECT]", "[LOCATION]")
model = {}
response = client.create_model(parent, model)
exception = response.exception()
assert exception.errors[0] == error
def test_get_model(self):
# Setup Expected Response
name_2 = "name2-1052831874"
display_name = "displayName1615086568"
dataset_id = "datasetId-2115646910"
etag = "etag3123477"
expected_response = {
"name": name_2,
"display_name": display_name,
"dataset_id": dataset_id,
"etag": etag,
}
expected_response = model_pb2.Model(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1.AutoMlClient()
# Setup Request
name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]")
response = client.get_model(name)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_pb2.GetModelRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_get_model_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1.AutoMlClient()
# Setup request
name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]")
with pytest.raises(CustomException):
client.get_model(name)
def test_update_model(self):
# Setup Expected Response
name = "name3373707"
display_name = "displayName1615086568"
dataset_id = "datasetId-2115646910"
etag = "etag3123477"
expected_response = {
"name": name,
"display_name": display_name,
"dataset_id": dataset_id,
"etag": etag,
}
expected_response = model_pb2.Model(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1.AutoMlClient()
# Setup Request
model = {}
update_mask = {}
response = client.update_model(model, update_mask)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_pb2.UpdateModelRequest(
model=model, update_mask=update_mask
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_update_model_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1.AutoMlClient()
# Setup request
model = {}
update_mask = {}
with pytest.raises(CustomException):
client.update_model(model, update_mask)
def test_list_models(self):
# Setup Expected Response
next_page_token = ""
model_element = {}
model = [model_element]
expected_response = {"next_page_token": next_page_token, "model": model}
expected_response = service_pb2.ListModelsResponse(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1.AutoMlClient()
# Setup Request
parent = client.location_path("[PROJECT]", "[LOCATION]")
paged_list_response = client.list_models(parent)
resources = list(paged_list_response)
assert len(resources) == 1
assert expected_response.model[0] == resources[0]
assert len(channel.requests) == 1
expected_request = service_pb2.ListModelsRequest(parent=parent)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_list_models_exception(self):
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1.AutoMlClient()
# Setup request
parent = client.location_path("[PROJECT]", "[LOCATION]")
paged_list_response = client.list_models(parent)
with pytest.raises(CustomException):
list(paged_list_response)
def test_delete_model(self):
    """delete_model resolves to Empty via a completed long-running operation."""
    expected_response = empty_pb2.Empty()
    operation = operations_pb2.Operation(
        name="operations/test_delete_model", done=True
    )
    operation.response.Pack(expected_response)

    stub = ChannelStub(responses=[operation])
    create_channel_patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with create_channel_patch as create_channel:
        create_channel.return_value = stub
        client = automl_v1.AutoMlClient()

    name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]")
    response = client.delete_model(name)
    assert expected_response == response.result()

    # Exactly one RPC, carrying the expected request proto.
    assert len(stub.requests) == 1
    expected_request = service_pb2.DeleteModelRequest(name=name)
    assert expected_request == stub.requests[0][1]
def test_delete_model_exception(self):
    """delete_model exposes an operation error through response.exception()."""
    error = status_pb2.Status()
    operation = operations_pb2.Operation(
        name="operations/test_delete_model_exception", done=True
    )
    operation.error.CopyFrom(error)

    stub = ChannelStub(responses=[operation])
    create_channel_patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with create_channel_patch as create_channel:
        create_channel.return_value = stub
        client = automl_v1.AutoMlClient()

    name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]")
    response = client.delete_model(name)
    assert response.exception().errors[0] == error
def test_deploy_model(self):
    """deploy_model resolves to Empty via a completed long-running operation."""
    expected_response = empty_pb2.Empty()
    operation = operations_pb2.Operation(
        name="operations/test_deploy_model", done=True
    )
    operation.response.Pack(expected_response)

    stub = ChannelStub(responses=[operation])
    create_channel_patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with create_channel_patch as create_channel:
        create_channel.return_value = stub
        client = automl_v1.AutoMlClient()

    name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]")
    response = client.deploy_model(name)
    assert expected_response == response.result()

    # Exactly one RPC, carrying the expected request proto.
    assert len(stub.requests) == 1
    expected_request = service_pb2.DeployModelRequest(name=name)
    assert expected_request == stub.requests[0][1]
def test_deploy_model_exception(self):
    """deploy_model exposes an operation error through response.exception()."""
    error = status_pb2.Status()
    operation = operations_pb2.Operation(
        name="operations/test_deploy_model_exception", done=True
    )
    operation.error.CopyFrom(error)

    stub = ChannelStub(responses=[operation])
    create_channel_patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with create_channel_patch as create_channel:
        create_channel.return_value = stub
        client = automl_v1.AutoMlClient()

    name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]")
    response = client.deploy_model(name)
    assert response.exception().errors[0] == error
def test_undeploy_model(self):
    """undeploy_model resolves to Empty via a completed long-running operation."""
    expected_response = empty_pb2.Empty()
    operation = operations_pb2.Operation(
        name="operations/test_undeploy_model", done=True
    )
    operation.response.Pack(expected_response)

    stub = ChannelStub(responses=[operation])
    create_channel_patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with create_channel_patch as create_channel:
        create_channel.return_value = stub
        client = automl_v1.AutoMlClient()

    name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]")
    response = client.undeploy_model(name)
    assert expected_response == response.result()

    # Exactly one RPC, carrying the expected request proto.
    assert len(stub.requests) == 1
    expected_request = service_pb2.UndeployModelRequest(name=name)
    assert expected_request == stub.requests[0][1]
def test_undeploy_model_exception(self):
    """undeploy_model exposes an operation error through response.exception()."""
    error = status_pb2.Status()
    operation = operations_pb2.Operation(
        name="operations/test_undeploy_model_exception", done=True
    )
    operation.error.CopyFrom(error)

    stub = ChannelStub(responses=[operation])
    create_channel_patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with create_channel_patch as create_channel:
        create_channel.return_value = stub
        client = automl_v1.AutoMlClient()

    name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]")
    response = client.undeploy_model(name)
    assert response.exception().errors[0] == error
def test_export_model(self):
    """export_model resolves to Empty via a completed long-running operation."""
    expected_response = empty_pb2.Empty()
    operation = operations_pb2.Operation(
        name="operations/test_export_model", done=True
    )
    operation.response.Pack(expected_response)

    stub = ChannelStub(responses=[operation])
    create_channel_patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with create_channel_patch as create_channel:
        create_channel.return_value = stub
        client = automl_v1.AutoMlClient()

    # Arguments for the call under test.
    name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]")
    output_config = {}
    response = client.export_model(name, output_config)
    assert expected_response == response.result()

    # Exactly one RPC, carrying the expected request proto.
    assert len(stub.requests) == 1
    expected_request = service_pb2.ExportModelRequest(
        name=name, output_config=output_config
    )
    assert expected_request == stub.requests[0][1]
def test_export_model_exception(self):
    """export_model exposes an operation error through response.exception()."""
    error = status_pb2.Status()
    operation = operations_pb2.Operation(
        name="operations/test_export_model_exception", done=True
    )
    operation.error.CopyFrom(error)

    stub = ChannelStub(responses=[operation])
    create_channel_patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with create_channel_patch as create_channel:
        create_channel.return_value = stub
        client = automl_v1.AutoMlClient()

    name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]")
    output_config = {}
    response = client.export_model(name, output_config)
    assert response.exception().errors[0] == error
def test_get_model_evaluation(self):
    """get_model_evaluation returns the evaluation proto from the wire."""
    # Expected server reply, built with keyword args instead of a dict.
    expected_response = model_evaluation_pb2.ModelEvaluation(
        name="name2-1052831874",
        annotation_spec_id="annotationSpecId60690191",
        display_name="displayName1615086568",
        evaluated_example_count=277565350,
    )

    stub = ChannelStub(responses=[expected_response])
    create_channel_patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with create_channel_patch as create_channel:
        create_channel.return_value = stub
        client = automl_v1.AutoMlClient()

    name = client.model_evaluation_path(
        "[PROJECT]", "[LOCATION]", "[MODEL]", "[MODEL_EVALUATION]"
    )
    response = client.get_model_evaluation(name)
    assert expected_response == response

    # Exactly one RPC, carrying the expected request proto.
    assert len(stub.requests) == 1
    expected_request = service_pb2.GetModelEvaluationRequest(name=name)
    assert expected_request == stub.requests[0][1]
def test_get_model_evaluation_exception(self):
    """get_model_evaluation propagates a transport-level error."""
    failing_channel = ChannelStub(responses=[CustomException()])
    create_channel_patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with create_channel_patch as create_channel:
        create_channel.return_value = failing_channel
        client = automl_v1.AutoMlClient()

    name = client.model_evaluation_path(
        "[PROJECT]", "[LOCATION]", "[MODEL]", "[MODEL_EVALUATION]"
    )
    with pytest.raises(CustomException):
        client.get_model_evaluation(name)
def test_list_model_evaluations(self):
    """list_model_evaluations yields one evaluation from a one-page response."""
    expected_response = service_pb2.ListModelEvaluationsResponse(
        **{"next_page_token": "", "model_evaluation": [{}]}
    )

    stub = ChannelStub(responses=[expected_response])
    create_channel_patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with create_channel_patch as create_channel:
        create_channel.return_value = stub
        client = automl_v1.AutoMlClient()

    # Issue the call and drain the pager.
    parent = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]")
    filter_ = "filter-1274492040"
    paged_list_response = client.list_model_evaluations(parent, filter_)
    resources = list(paged_list_response)

    assert len(resources) == 1
    assert expected_response.model_evaluation[0] == resources[0]

    # Exactly one RPC, carrying the expected request proto.
    assert len(stub.requests) == 1
    expected_request = service_pb2.ListModelEvaluationsRequest(
        parent=parent, filter=filter_
    )
    assert expected_request == stub.requests[0][1]
def test_list_model_evaluations_exception(self):
    """Iterating a list_model_evaluations pager raises the transport error."""
    failing_channel = ChannelStub(responses=[CustomException()])
    create_channel_patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with create_channel_patch as create_channel:
        create_channel.return_value = failing_channel
        client = automl_v1.AutoMlClient()

    parent = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]")
    filter_ = "filter-1274492040"
    paged_list_response = client.list_model_evaluations(parent, filter_)
    # The RPC is issued lazily, so the error only appears on iteration.
    with pytest.raises(CustomException):
        list(paged_list_response)
| 36.384073
| 87
| 0.648713
| 3,715
| 36,093
| 6.061104
| 0.05895
| 0.076742
| 0.043167
| 0.031976
| 0.887596
| 0.872852
| 0.864902
| 0.857708
| 0.840831
| 0.831061
| 0
| 0.02009
| 0.260798
| 36,093
| 991
| 88
| 36.420787
| 0.823876
| 0.068434
| 0
| 0.73542
| 0
| 0
| 0.117145
| 0.072574
| 0
| 0
| 0
| 0
| 0.093883
| 1
| 0.056899
| false
| 0.001422
| 0.02845
| 0.001422
| 0.093883
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
22bc006657037fbcac44d10884f4fef483feaa62
| 76,537
|
py
|
Python
|
zerver/tests/test_auth_backends.py
|
GauravVirmani/zulip
|
5a204d7c84d60e193f1ea0900d42848c5276a095
|
[
"Apache-2.0"
] | null | null | null |
zerver/tests/test_auth_backends.py
|
GauravVirmani/zulip
|
5a204d7c84d60e193f1ea0900d42848c5276a095
|
[
"Apache-2.0"
] | null | null | null |
zerver/tests/test_auth_backends.py
|
GauravVirmani/zulip
|
5a204d7c84d60e193f1ea0900d42848c5276a095
|
[
"Apache-2.0"
] | 1
|
2021-06-10T15:12:52.000Z
|
2021-06-10T15:12:52.000Z
|
# -*- coding: utf-8 -*-
from django.conf import settings
from django.http import HttpResponse
from django.test import TestCase, override_settings
from django_auth_ldap.backend import _LDAPUser
from django.test.client import RequestFactory
from typing import Any, Callable, Dict, Optional, Text
from builtins import object
from oauth2client.crypt import AppIdentityError
from django.core import signing
from django.core.urlresolvers import reverse
import jwt
import mock
import re
from zerver.forms import HomepageForm
from zerver.lib.actions import do_deactivate_realm, do_deactivate_user, \
do_reactivate_realm, do_reactivate_user
from zerver.lib.initial_password import initial_password
from zerver.lib.session_user import get_session_dict_user
from zerver.lib.test_classes import (
ZulipTestCase,
)
from zerver.models import \
get_realm, get_user_profile_by_email, email_to_username, UserProfile, \
PreregistrationUser, Realm
from confirmation.models import Confirmation
from zproject.backends import ZulipDummyBackend, EmailAuthBackend, \
GoogleMobileOauth2Backend, ZulipRemoteUserBackend, ZulipLDAPAuthBackend, \
ZulipLDAPUserPopulator, DevAuthBackend, GitHubAuthBackend, ZulipAuthMixin, \
dev_auth_enabled, password_auth_enabled, github_auth_enabled, \
SocialAuthMixin, AUTH_BACKEND_NAME_MAP
from zerver.views.auth import maybe_send_to_registration
from social_core.exceptions import AuthFailed
from social_django.strategy import DjangoStrategy
from social_django.storage import BaseDjangoStorage
from social_core.backends.github import GithubOrganizationOAuth2, GithubTeamOAuth2, \
GithubOAuth2
from six.moves import urllib
from six.moves.http_cookies import SimpleCookie
import ujson
from zerver.lib.test_helpers import MockLDAP
class AuthBackendTest(TestCase):
    """Shared harness exercising every Zulip authentication backend.

    Each ``test_*_backend`` method feeds a backend instance through
    ``verify_backend``, which runs the same battery of auth checks
    (good/bad credentials, deactivated user, deactivated realm,
    backend disabled server-wide or per-realm).
    """

    def verify_backend(self, backend, good_args=None,
                       good_kwargs=None, bad_kwargs=None,
                       email_to_username=None):
        # type: (Any, List[Any], Dict[str, Any], Dict[str, Any], Callable[[Text], Text]) -> None
        """Run the standard authentication checks against ``backend``.

        ``good_args``/``good_kwargs`` must authenticate hamlet@zulip.com;
        ``bad_kwargs`` (if given) must fail.  ``email_to_username`` maps
        the email to whatever username form the backend expects.
        """
        if good_args is None:
            good_args = []
        if good_kwargs is None:
            good_kwargs = {}
        email = u"hamlet@zulip.com"
        user_profile = get_user_profile_by_email(email)

        username = email
        if email_to_username is not None:
            username = email_to_username(email)

        # If bad_kwargs was specified, verify auth fails in that case
        if bad_kwargs is not None:
            self.assertIsNone(backend.authenticate(username, **bad_kwargs))

        # Verify auth works
        result = backend.authenticate(username, *good_args, **good_kwargs)
        self.assertEqual(user_profile, result)

        # Verify auth fails with a deactivated user
        do_deactivate_user(user_profile)
        self.assertIsNone(backend.authenticate(username, *good_args, **good_kwargs))

        # Reactivate the user and verify auth works again
        do_reactivate_user(user_profile)
        result = backend.authenticate(username, *good_args, **good_kwargs)
        self.assertEqual(user_profile, result)

        # Verify auth fails with a deactivated realm
        do_deactivate_realm(user_profile.realm)
        self.assertIsNone(backend.authenticate(username, *good_args, **good_kwargs))

        # Verify auth works again after reactivating the realm
        do_reactivate_realm(user_profile.realm)
        result = backend.authenticate(username, *good_args, **good_kwargs)
        self.assertEqual(user_profile, result)

        # ZulipDummyBackend isn't a real backend so the remainder
        # doesn't make sense for it
        if isinstance(backend, ZulipDummyBackend):
            return

        # Verify auth fails if the auth backend is disabled on server
        with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipDummyBackend',)):
            self.assertIsNone(backend.authenticate(username, *good_args, **good_kwargs))

        # Verify auth fails if the auth backend is disabled for the realm.
        # The loop leaves backend_name bound to the entry matching `backend`.
        for backend_name in AUTH_BACKEND_NAME_MAP.keys():
            if isinstance(backend, AUTH_BACKEND_NAME_MAP[backend_name]):
                break

        index = getattr(user_profile.realm.authentication_methods, backend_name).number
        user_profile.realm.authentication_methods.set_bit(index, False)
        user_profile.realm.save()
        self.assertIsNone(backend.authenticate(username, *good_args, **good_kwargs))
        # Restore the realm's bit so subsequent checks see a clean state.
        user_profile.realm.authentication_methods.set_bit(index, True)
        user_profile.realm.save()

    def test_dummy_backend(self):
        # type: () -> None
        """The dummy backend honors only its use_dummy_backend flag."""
        self.verify_backend(ZulipDummyBackend(),
                            good_kwargs=dict(use_dummy_backend=True),
                            bad_kwargs=dict(use_dummy_backend=False))

    def setup_subdomain(self, user_profile):
        # type: (UserProfile) -> None
        """Give the user's realm the 'zulip' subdomain for subdomain tests."""
        realm = user_profile.realm
        realm.string_id = 'zulip'
        realm.save()

    def test_email_auth_backend(self):
        # type: () -> None
        """Email/password auth, with and without REALMS_HAVE_SUBDOMAINS."""
        email = "hamlet@zulip.com"
        user_profile = get_user_profile_by_email(email)
        password = "testpassword"
        user_profile.set_password(password)
        user_profile.save()
        self.setup_subdomain(user_profile)

        self.verify_backend(EmailAuthBackend(),
                            bad_kwargs=dict(password=''),
                            good_kwargs=dict(password=password))

        # Subdomain is ignored when feature is not enabled
        self.verify_backend(EmailAuthBackend(),
                            good_kwargs=dict(password=password,
                                             realm_subdomain='acme',
                                             return_data=dict()))

        with self.settings(REALMS_HAVE_SUBDOMAINS=True):
            # With subdomains, authenticating with the right subdomain
            # works; using the wrong subdomain doesn't
            self.verify_backend(EmailAuthBackend(),
                                good_kwargs=dict(password=password,
                                                 realm_subdomain='zulip',
                                                 return_data=dict()),
                                bad_kwargs=dict(password=password,
                                                realm_subdomain='acme',
                                                return_data=dict()))
            # Things work normally in the event that we're using a
            # non-subdomain login page, even if subdomains are enabled
            self.verify_backend(EmailAuthBackend(),
                                bad_kwargs=dict(password="wrong"),
                                good_kwargs=dict(password=password))

    def test_email_auth_backend_disabled_password_auth(self):
        # type: () -> None
        """Correct password is rejected when password auth is disabled."""
        email = u"hamlet@zulip.com"
        user_profile = get_user_profile_by_email(email)
        password = "testpassword"
        user_profile.set_password(password)
        user_profile.save()
        # Verify if a realm has password auth disabled, correct password is rejected
        with mock.patch('zproject.backends.password_auth_enabled', return_value=False):
            self.assertIsNone(EmailAuthBackend().authenticate(email, password))

    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.GoogleMobileOauth2Backend',))
    def test_google_backend(self):
        # type: () -> None
        """Google OAuth2 backend: token verification outcomes and return_data flags."""
        email = "hamlet@zulip.com"
        backend = GoogleMobileOauth2Backend()
        payload = dict(email_verified=True,
                       email=email)
        user_profile = get_user_profile_by_email(email)
        self.setup_subdomain(user_profile)

        with mock.patch('apiclient.sample_tools.client.verify_id_token', return_value=payload):
            self.verify_backend(backend)

        # With REALMS_HAVE_SUBDOMAINS off, subdomain is ignored
        with mock.patch('apiclient.sample_tools.client.verify_id_token', return_value=payload):
            self.verify_backend(backend,
                                good_kwargs=dict(realm_subdomain='acme'))

        with self.settings(REALMS_HAVE_SUBDOMAINS=True):
            # With subdomains, authenticating with the right subdomain
            # works; using the wrong subdomain doesn't
            with mock.patch('apiclient.sample_tools.client.verify_id_token', return_value=payload):
                self.verify_backend(backend,
                                    good_kwargs=dict(realm_subdomain="zulip"),
                                    bad_kwargs=dict(realm_subdomain='acme'))

        # Verify valid_attestation parameter is set correctly
        unverified_payload = dict(email_verified=False)
        with mock.patch('apiclient.sample_tools.client.verify_id_token', return_value=unverified_payload):
            ret = dict()  # type: Dict[str, str]
            result = backend.authenticate(return_data=ret)
            self.assertIsNone(result)
            self.assertFalse(ret["valid_attestation"])

        # Verified token for an email with no account: auth fails but
        # the attestation itself is still marked valid.
        nonexistent_user_payload = dict(email_verified=True, email="invalid@zulip.com")
        with mock.patch('apiclient.sample_tools.client.verify_id_token',
                        return_value=nonexistent_user_payload):
            ret = dict()
            result = backend.authenticate(return_data=ret)
            self.assertIsNone(result)
            self.assertTrue(ret["valid_attestation"])

        # Token verification itself blowing up also yields None.
        with mock.patch('apiclient.sample_tools.client.verify_id_token',
                        side_effect=AppIdentityError):
            ret = dict()
            result = backend.authenticate(return_data=ret)
            self.assertIsNone(result)

    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
    def test_ldap_backend(self):
        # type: () -> None
        """LDAP backend with django_auth_ldap internals mocked out."""
        email = "hamlet@zulip.com"
        password = "test_password"
        user_profile = get_user_profile_by_email(email)
        self.setup_subdomain(user_profile)

        backend = ZulipLDAPAuthBackend()

        # Test LDAP auth fails when LDAP server rejects password
        with mock.patch('django_auth_ldap.backend._LDAPUser._authenticate_user_dn',
                        side_effect=_LDAPUser.AuthenticationFailed("Failed")), (
                mock.patch('django_auth_ldap.backend._LDAPUser._check_requirements')), (
                mock.patch('django_auth_ldap.backend._LDAPUser._get_user_attrs',
                           return_value=dict(full_name=['Hamlet']))):
            self.assertIsNone(backend.authenticate(email, password))

        # For this backend, we mock the internals of django_auth_ldap
        with mock.patch('django_auth_ldap.backend._LDAPUser._authenticate_user_dn'), (
                mock.patch('django_auth_ldap.backend._LDAPUser._check_requirements')), (
                mock.patch('django_auth_ldap.backend._LDAPUser._get_user_attrs',
                           return_value=dict(full_name=['Hamlet']))):
            self.verify_backend(backend, good_kwargs=dict(password=password))

        with mock.patch('django_auth_ldap.backend._LDAPUser._authenticate_user_dn'), (
                mock.patch('django_auth_ldap.backend._LDAPUser._check_requirements')), (
                mock.patch('django_auth_ldap.backend._LDAPUser._get_user_attrs',
                           return_value=dict(full_name=['Hamlet']))):
            self.verify_backend(backend, good_kwargs=dict(password=password,
                                                          realm_subdomain='acme'))

        with self.settings(REALMS_HAVE_SUBDOMAINS=True):
            # With subdomains, authenticating with the right subdomain
            # works; using the wrong subdomain doesn't
            with mock.patch('django_auth_ldap.backend._LDAPUser._authenticate_user_dn'), (
                    mock.patch('django_auth_ldap.backend._LDAPUser._check_requirements')), (
                    mock.patch('django_auth_ldap.backend._LDAPUser._get_user_attrs',
                               return_value=dict(full_name=['Hamlet']))):
                self.verify_backend(backend,
                                    bad_kwargs=dict(password=password,
                                                    realm_subdomain='acme'),
                                    good_kwargs=dict(password=password,
                                                     realm_subdomain='zulip'))

    def test_devauth_backend(self):
        # type: () -> None
        """Dev backend authenticates with no extra arguments."""
        self.verify_backend(DevAuthBackend())

    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipRemoteUserBackend',))
    def test_remote_user_backend(self):
        # type: () -> None
        """REMOTE_USER SSO backend, with and without subdomains."""
        self.setup_subdomain(get_user_profile_by_email(u'hamlet@zulip.com'))
        self.verify_backend(ZulipRemoteUserBackend(),
                            good_kwargs=dict(realm_subdomain='acme'))

        with self.settings(REALMS_HAVE_SUBDOMAINS=True):
            # With subdomains, authenticating with the right subdomain
            # works; using the wrong subdomain doesn't
            self.verify_backend(ZulipRemoteUserBackend(),
                                good_kwargs=dict(realm_subdomain='zulip'),
                                bad_kwargs=dict(realm_subdomain='acme'))

    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipRemoteUserBackend',))
    def test_remote_user_backend_sso_append_domain(self):
        # type: () -> None
        """SSO_APPEND_DOMAIN: backend receives the bare username, not the email."""
        self.setup_subdomain(get_user_profile_by_email(u'hamlet@zulip.com'))
        with self.settings(SSO_APPEND_DOMAIN='zulip.com'):
            self.verify_backend(ZulipRemoteUserBackend(),
                                email_to_username=email_to_username,
                                good_kwargs=dict(realm_subdomain='acme'))

        with self.settings(REALMS_HAVE_SUBDOMAINS=True):
            # With subdomains, authenticating with the right subdomain
            # works; using the wrong subdomain doesn't
            with self.settings(SSO_APPEND_DOMAIN='zulip.com'):
                self.verify_backend(ZulipRemoteUserBackend(),
                                    email_to_username=email_to_username,
                                    good_kwargs=dict(realm_subdomain='zulip'),
                                    bad_kwargs=dict(realm_subdomain='acme'))

    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.GitHubAuthBackend',))
    def test_github_backend(self):
        # type: () -> None
        """GitHub backend driven through the OAuth2 response dict."""
        email = 'hamlet@zulip.com'
        self.setup_subdomain(get_user_profile_by_email(email))
        good_kwargs = dict(response=dict(email=email), return_data=dict(),
                           realm_subdomain='acme')
        self.verify_backend(GitHubAuthBackend(),
                            good_kwargs=good_kwargs,
                            bad_kwargs=dict())
        with self.settings(REALMS_HAVE_SUBDOMAINS=True):
            # With subdomains, authenticating with the right subdomain
            # works; using the wrong subdomain doesn't
            good_kwargs = dict(response=dict(email=email), return_data=dict(),
                               realm_subdomain='zulip')
            bad_kwargs = dict(response=dict(email=email), return_data=dict(),
                              realm_subdomain='acme')
            self.verify_backend(GitHubAuthBackend(),
                                good_kwargs=good_kwargs,
                                bad_kwargs=bad_kwargs)
class SocialAuthMixinTest(ZulipTestCase):
    """SocialAuthMixin is abstract: its accessors must raise until overridden."""

    def test_social_auth_mixing(self):
        # type: () -> None
        mixin = SocialAuthMixin()
        # Both accessors are unimplemented on the bare mixin.
        for unimplemented in (mixin.get_email_address, mixin.get_full_name):
            with self.assertRaises(NotImplementedError):
                unimplemented()
class GitHubAuthBackendTest(ZulipTestCase):
    """Tests for GitHubAuthBackend's do_auth/authenticate flows.

    setUp wires the backend to a DjangoStrategy with a fake request whose
    host is 'acme.testserver', so the default realm subdomain seen by the
    backend is 'acme'.
    """

    def setUp(self):
        # type: () -> None
        self.email = 'hamlet@zulip.com'
        self.name = 'Hamlet'
        self.backend = GitHubAuthBackend()
        self.backend.strategy = DjangoStrategy(storage=BaseDjangoStorage())
        self.user_profile = get_user_profile_by_email(self.email)
        self.user_profile.backend = self.backend

        # Fake a request hitting the 'acme' subdomain.
        rf = RequestFactory()
        request = rf.get('/complete')
        request.session = {}
        request.get_host = lambda: 'acme.testserver'
        request.user = self.user_profile
        self.backend.strategy.request = request

    def do_auth(self, *args, **kwargs):
        # type: (*Any, **Any) -> UserProfile
        """Stand-in for GithubOAuth2.do_auth: call authenticate() directly."""
        with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.GitHubAuthBackend',)):
            return self.backend.authenticate(*args, **kwargs)

    def test_github_auth_enabled(self):
        # type: () -> None
        with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.GitHubAuthBackend',)):
            self.assertTrue(github_auth_enabled())

    def test_full_name_with_missing_key(self):
        # type: () -> None
        # No response data at all -> empty full name, not an exception.
        self.assertEqual(self.backend.get_full_name(), '')

    def test_github_backend_do_auth_without_subdomains(self):
        # type: () -> None
        with mock.patch('social_core.backends.github.GithubOAuth2.do_auth',
                        side_effect=self.do_auth), \
                mock.patch('zerver.views.auth.login'):
            response = dict(email=self.email, name=self.name)
            result = self.backend.do_auth(response=response)
            self.assertNotIn('subdomain=1', result.url)

    def test_github_backend_do_auth_with_non_existing_subdomain(self):
        # type: () -> None
        with mock.patch('social_core.backends.github.GithubOAuth2.do_auth',
                        side_effect=self.do_auth):
            with self.settings(REALMS_HAVE_SUBDOMAINS=True):
                self.backend.strategy.session_set('subdomain', 'test')
                response = dict(email=self.email, name=self.name)
                result = self.backend.do_auth(response=response)
                # Unknown subdomain redirects with the subdomain=1 error flag.
                self.assertIn('subdomain=1', result.url)

    def test_github_backend_do_auth_with_subdomains(self):
        # type: () -> None
        with mock.patch('social_core.backends.github.GithubOAuth2.do_auth',
                        side_effect=self.do_auth):
            with self.settings(REALMS_HAVE_SUBDOMAINS=True):
                self.backend.strategy.session_set('subdomain', 'zulip')
                response = dict(email=self.email, name=self.name)
                result = self.backend.do_auth(response=response)
                self.assertEqual('http://zulip.testserver/accounts/login/subdomain/', result.url)

    def test_github_backend_do_auth_for_default(self):
        # type: () -> None
        with mock.patch('social_core.backends.github.GithubOAuth2.do_auth',
                        side_effect=self.do_auth), \
                mock.patch('zproject.backends.SocialAuthMixin.process_do_auth') as result:
            response = dict(email=self.email, name=self.name)
            self.backend.do_auth('fake-access-token', response=response)

            kwargs = {'realm_subdomain': 'acme',
                      'response': response,
                      'return_data': {}}
            result.assert_called_with(self.user_profile, 'fake-access-token', **kwargs)

    def test_github_backend_do_auth_for_team(self):
        # type: () -> None
        with mock.patch('social_core.backends.github.GithubTeamOAuth2.do_auth',
                        side_effect=self.do_auth), \
                mock.patch('zproject.backends.SocialAuthMixin.process_do_auth') as result:
            response = dict(email=self.email, name=self.name)
            with self.settings(SOCIAL_AUTH_GITHUB_TEAM_ID='zulip-webapp'):
                self.backend.do_auth('fake-access-token', response=response)

                kwargs = {'realm_subdomain': 'acme',
                          'response': response,
                          'return_data': {}}
                result.assert_called_with(self.user_profile, 'fake-access-token', **kwargs)

    def test_github_backend_do_auth_for_team_auth_failed(self):
        # type: () -> None
        # Team membership check failing -> process_do_auth called with None user.
        with mock.patch('social_core.backends.github.GithubTeamOAuth2.do_auth',
                        side_effect=AuthFailed('Not found')), \
                mock.patch('logging.info'), \
                mock.patch('zproject.backends.SocialAuthMixin.process_do_auth') as result:
            response = dict(email=self.email, name=self.name)
            with self.settings(SOCIAL_AUTH_GITHUB_TEAM_ID='zulip-webapp'):
                self.backend.do_auth('fake-access-token', response=response)
                kwargs = {'realm_subdomain': 'acme',
                          'response': response,
                          'return_data': {}}
                result.assert_called_with(None, 'fake-access-token', **kwargs)

    def test_github_backend_do_auth_for_org(self):
        # type: () -> None
        with mock.patch('social_core.backends.github.GithubOrganizationOAuth2.do_auth',
                        side_effect=self.do_auth), \
                mock.patch('zproject.backends.SocialAuthMixin.process_do_auth') as result:
            response = dict(email=self.email, name=self.name)
            with self.settings(SOCIAL_AUTH_GITHUB_ORG_NAME='Zulip'):
                self.backend.do_auth('fake-access-token', response=response)

                kwargs = {'realm_subdomain': 'acme',
                          'response': response,
                          'return_data': {}}
                result.assert_called_with(self.user_profile, 'fake-access-token', **kwargs)

    def test_github_backend_do_auth_for_org_auth_failed(self):
        # type: () -> None
        # Org membership check failing -> process_do_auth called with None user.
        with mock.patch('social_core.backends.github.GithubOrganizationOAuth2.do_auth',
                        side_effect=AuthFailed('Not found')), \
                mock.patch('logging.info'), \
                mock.patch('zproject.backends.SocialAuthMixin.process_do_auth') as result:
            response = dict(email=self.email, name=self.name)
            with self.settings(SOCIAL_AUTH_GITHUB_ORG_NAME='Zulip'):
                self.backend.do_auth('fake-access-token', response=response)
                kwargs = {'realm_subdomain': 'acme',
                          'response': response,
                          'return_data': {}}
                result.assert_called_with(None, 'fake-access-token', **kwargs)

    def test_github_backend_authenticate_nonexisting_user(self):
        # type: () -> None
        with mock.patch('zproject.backends.get_user_profile_by_email',
                        side_effect=UserProfile.DoesNotExist("Do not exist")):
            response = dict(email=self.email, name=self.name)
            return_data = dict()  # type: Dict[str, Any]
            user = self.backend.authenticate(return_data=return_data, response=response)
            self.assertIs(user, None)
            # Auth itself was valid; only the account lookup failed.
            self.assertTrue(return_data['valid_attestation'])

    def test_github_backend_inactive_user(self):
        # type: () -> None
        def do_auth_inactive(*args, **kwargs):
            # type: (*Any, **Any) -> UserProfile
            # Simulate authenticate() flagging the account as inactive.
            return_data = kwargs['return_data']
            return_data['inactive_user'] = True
            return self.user_profile

        with mock.patch('zerver.views.auth.login_or_register_remote_user') as result, \
                mock.patch('social_core.backends.github.GithubOAuth2.do_auth',
                           side_effect=do_auth_inactive):
            response = dict(email=self.email, name=self.name)
            user = self.backend.do_auth(response=response)
            result.assert_not_called()
            self.assertIs(user, None)

    def test_github_backend_new_user(self):
        # type: () -> None
        rf = RequestFactory()
        request = rf.get('/complete')
        request.session = {}
        request.user = self.user_profile
        self.backend.strategy.request = request

        def do_auth(*args, **kwargs):
            # type: (*Any, **Any) -> UserProfile
            # Valid attestation, but no matching user profile.
            return_data = kwargs['return_data']
            return_data['valid_attestation'] = True
            return None

        with mock.patch('social_core.backends.github.GithubOAuth2.do_auth',
                        side_effect=do_auth):
            response = dict(email='nonexisting@phantom.com', name='Ghost')
            result = self.backend.do_auth(response=response)
            self.assert_in_response('action="/register/"', result)
            self.assert_in_response('Your email address does not correspond to any '
                                    'existing organization.', result)

    def test_login_url(self):
        # type: () -> None
        result = self.client_get('/accounts/login/social/github')
        self.assertIn(reverse('social:begin', args=['github']), result.url)
class ResponseMock(object):
    """Minimal stand-in for a ``requests`` response object.

    Only the pieces the OAuth tests touch are implemented:
    ``status_code``, ``json()`` and ``text``.
    """

    def __init__(self, status_code, data):
        # type: (int, Any) -> None
        self.status_code = status_code
        self.data = data

    def json(self):
        # type: () -> Any
        # Mirrors requests.Response.json(): hands back the stored payload
        # as-is.  (The previous `-> str` annotation was wrong: callers pass
        # dicts here and read them back.)
        return self.data

    @property
    def text(self):
        # type: () -> str
        # Fixed body; tests only need *some* text to be present.
        return "Response text"
class GoogleOAuthTest(ZulipTestCase):
    """Helper base class for driving the Google OAuth2 login flow."""

    def google_oauth2_test(self, token_response, account_response, subdomain=None):
        # type: (ResponseMock, ResponseMock, Optional[str]) -> HttpResponse
        """Simulate the full OAuth2 round trip and return the final response.

        ``token_response`` and ``account_response`` are fed to the mocked
        requests.post/requests.get calls the Google-done view makes.
        """
        url = "/accounts/login/google/send/"
        if subdomain is not None:
            url += "?subdomain=" + subdomain
        result = self.client_get(url)
        self.assertEqual(result.status_code, 302)
        if 'google' not in result.url:
            # We were redirected somewhere other than Google (error case);
            # hand the response back for the caller to inspect.
            return result
        self.client.cookies = result.cookies
        # Now extract the CSRF token from the redirect URL
        parsed_url = urllib.parse.urlparse(result.url)
        csrf_state = urllib.parse.parse_qs(parsed_url.query)['state']
        with mock.patch("requests.post", return_value=token_response), (
                mock.patch("requests.get", return_value=account_response)):
            result = self.client_get("/accounts/login/google/done/",
                                     dict(state=csrf_state))
        return result
class GoogleSubdomainLoginTest(GoogleOAuthTest):
    """Google OAuth2 flow on a server running with REALMS_HAVE_SUBDOMAINS."""

    def get_signed_subdomain_cookie(self, data):
        # type: (Dict[str, str]) -> Dict[str, str]
        # Build the signed cookie the auth views use to hand the
        # authenticated identity over to the realm's subdomain; the
        # key/salt scheme must match zerver.views.auth.
        key = 'subdomain.signature'
        salt = key + 'zerver.views.auth'
        value = ujson.dumps(data)
        return {key: signing.get_cookie_signer(salt=salt).sign(value)}

    def unsign_subdomain_cookie(self, result):
        # type: (HttpResponse) -> Dict[str, Any]
        # Inverse of get_signed_subdomain_cookie, reading the cookie off
        # a response; max_age=15 (seconds) keeps the handoff short-lived.
        key = 'subdomain.signature'
        salt = key + 'zerver.views.auth'
        cookie = result.cookies.get(key)
        value = signing.get_cookie_signer(salt=salt).unsign(cookie.value, max_age=15)
        return ujson.loads(value)

    def test_google_oauth2_start(self):
        # type: () -> None
        # The start endpoint redirects to Google, carrying the
        # requesting subdomain along in the query string.
        with mock.patch('zerver.views.auth.get_subdomain', return_value='zulip'):
            result = self.client_get('/accounts/login/google/')
        self.assertEqual(result.status_code, 302)
        parsed_url = urllib.parse.urlparse(result.url)
        subdomain = urllib.parse.parse_qs(parsed_url.query)['subdomain']
        self.assertEqual(subdomain, ['zulip'])

    def test_google_oauth2_success(self):
        # type: () -> None
        # Successful auth ends in a signed-cookie redirect to the
        # subdomain's /accounts/login/subdomain/ handoff endpoint.
        token_response = ResponseMock(200, {'access_token': "unique_token"})
        account_data = dict(name=dict(formatted="Full Name"),
                            emails=[dict(type="account",
                                         value="hamlet@zulip.com")])
        account_response = ResponseMock(200, account_data)
        with self.settings(REALMS_HAVE_SUBDOMAINS=True):
            result = self.google_oauth2_test(token_response, account_response, 'zulip')
        data = self.unsign_subdomain_cookie(result)
        self.assertEqual(data['email'], 'hamlet@zulip.com')
        self.assertEqual(data['name'], 'Full Name')
        self.assertEqual(data['subdomain'], 'zulip')
        self.assertEqual(result.status_code, 302)
        parsed_url = urllib.parse.urlparse(result.url)
        uri = "{}://{}{}".format(parsed_url.scheme, parsed_url.netloc,
                                 parsed_url.path)
        self.assertEqual(uri, 'http://zulip.testserver/accounts/login/subdomain/')

    def test_log_into_subdomain(self):
        # type: () -> None
        # Presenting a correctly signed cookie to the handoff endpoint
        # logs the user in on the subdomain.
        data = {'name': 'Full Name',
                'email': 'hamlet@zulip.com',
                'subdomain': 'zulip'}
        self.client.cookies = SimpleCookie(self.get_signed_subdomain_cookie(data))
        with mock.patch('zerver.views.auth.get_subdomain', return_value='zulip'):
            result = self.client_get('/accounts/login/subdomain/')
            self.assertEqual(result.status_code, 302)
            user_profile = get_user_profile_by_email('hamlet@zulip.com')
            self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)

    def test_user_cannot_log_into_nonexisting_realm(self):
        # type: () -> None
        # 'acme' is not a real realm; the flow bounces to realm selection
        # (signalled by subdomain=1 in the redirect).
        token_response = ResponseMock(200, {'access_token': "unique_token"})
        account_data = dict(name=dict(formatted="Full Name"),
                            emails=[dict(type="account",
                                         value="hamlet@zulip.com")])
        account_response = ResponseMock(200, account_data)
        result = self.google_oauth2_test(token_response, account_response, 'acme')
        self.assertEqual(result.status_code, 302)
        self.assertIn('subdomain=1', result.url)

    def test_user_cannot_log_into_wrong_subdomain(self):
        # type: () -> None
        # Cookie signed for 'acme' must be rejected by the 'zulip' realm.
        data = {'name': 'Full Name',
                'email': 'hamlet@zulip.com',
                'subdomain': 'acme'}
        self.client.cookies = SimpleCookie(self.get_signed_subdomain_cookie(data))
        with mock.patch('zerver.views.auth.get_subdomain', return_value='zulip'):
            result = self.client_get('/accounts/login/subdomain/')
            self.assertEqual(result.status_code, 400)

    def test_log_into_subdomain_when_signature_is_bad(self):
        # type: () -> None
        # A cookie whose value fails signature verification is a 400.
        self.client.cookies = SimpleCookie({'subdomain.signature': 'invlaid'})
        with mock.patch('zerver.views.auth.get_subdomain', return_value='zulip'):
            result = self.client_get('/accounts/login/subdomain/')
            self.assertEqual(result.status_code, 400)

    def test_log_into_subdomain_when_state_is_not_passed(self):
        # type: () -> None
        # No handoff cookie at all is also a 400.
        with mock.patch('zerver.views.auth.get_subdomain', return_value='zulip'):
            result = self.client_get('/accounts/login/subdomain/')
            self.assertEqual(result.status_code, 400)

    def test_google_oauth2_registration(self):
        # type: () -> None
        """If the user doesn't exist yet, Google auth can be used to register an account"""
        with self.settings(REALMS_HAVE_SUBDOMAINS=True), (
                mock.patch('zerver.views.auth.get_subdomain', return_value='zulip')), (
                mock.patch('zerver.views.registration.get_subdomain', return_value='zulip')):
            email = "newuser@zulip.com"
            token_response = ResponseMock(200, {'access_token': "unique_token"})
            account_data = dict(name=dict(formatted="Full Name"),
                                emails=[dict(type="account",
                                             value=email)])
            account_response = ResponseMock(200, account_data)
            result = self.google_oauth2_test(token_response, account_response, 'zulip')
            # First the normal handoff redirect, as in the success test...
            data = self.unsign_subdomain_cookie(result)
            self.assertEqual(data['email'], email)
            self.assertEqual(data['name'], 'Full Name')
            self.assertEqual(data['subdomain'], 'zulip')
            self.assertEqual(result.status_code, 302)
            parsed_url = urllib.parse.urlparse(result.url)
            uri = "{}://{}{}".format(parsed_url.scheme, parsed_url.netloc,
                                     parsed_url.path)
            self.assertEqual(uri, 'http://zulip.testserver/accounts/login/subdomain/')
            # ...then, since the user is unknown, on into signup:
            result = self.client_get(result.url)
            result = self.client_get(result.url)  # Call the confirmation url.
            # Scrape the confirmation key and prefilled name out of the form.
            key_match = re.search('value="(?P<key>[0-9a-f]+)" name="key"', result.content.decode("utf-8"))
            name_match = re.search('value="(?P<name>[^"]+)" name="full_name"', result.content.decode("utf-8"))
            # This goes through a brief stop on a page that auto-submits via JS
            result = self.client_post('/accounts/register/',
                                      {'full_name': name_match.group("name"),
                                       'key': key_match.group("key"),
                                       'from_confirmation': "1"})
            self.assertEqual(result.status_code, 200)
            result = self.client_post('/accounts/register/',
                                      {'full_name': "New User",
                                       'password': 'test_password',
                                       'key': key_match.group("key"),
                                       'terms': True})
            self.assertEqual(result.status_code, 302)
            self.assertEqual(result.url, "http://zulip.testserver/")
            user_profile = get_user_profile_by_email(email)
            self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
class GoogleLoginTest(GoogleOAuthTest):
    """Google OAuth2 flow on a server without realm subdomains."""

    def test_google_oauth2_success(self):
        # type: () -> None
        # An existing user is logged straight in.
        token_response = ResponseMock(200, {'access_token': "unique_token"})
        account_data = dict(name=dict(formatted="Full Name"),
                            emails=[dict(type="account",
                                         value="hamlet@zulip.com")])
        account_response = ResponseMock(200, account_data)
        self.google_oauth2_test(token_response, account_response)
        user_profile = get_user_profile_by_email('hamlet@zulip.com')
        self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)

    def test_google_oauth2_registration(self):
        # type: () -> None
        """If the user doesn't exist yet, Google auth can be used to register an account"""
        email = "newuser@zulip.com"
        token_response = ResponseMock(200, {'access_token': "unique_token"})
        account_data = dict(name=dict(formatted="Full Name"),
                            emails=[dict(type="account",
                                         value=email)])
        account_response = ResponseMock(200, account_data)
        result = self.google_oauth2_test(token_response, account_response)
        self.assertEqual(result.status_code, 302)
        result = self.client_get(result.url)
        # Scrape the confirmation key and prefilled name out of the form.
        key_match = re.search('value="(?P<key>[0-9a-f]+)" name="key"', result.content.decode("utf-8"))
        name_match = re.search('value="(?P<name>[^"]+)" name="full_name"', result.content.decode("utf-8"))
        # This goes through a brief stop on a page that auto-submits via JS
        result = self.client_post('/accounts/register/',
                                  {'full_name': name_match.group("name"),
                                   'key': key_match.group("key"),
                                   'from_confirmation': "1"})
        self.assertEqual(result.status_code, 200)
        result = self.client_post('/accounts/register/',
                                  {'full_name': "New User",
                                   'password': 'test_password',
                                   'key': key_match.group("key"),
                                   'terms': True})
        self.assertEqual(result.status_code, 302)
        self.assertEqual(result.url, "http://testserver/")

    def test_google_oauth2_wrong_subdomain(self):
        # type: () -> None
        # With subdomains enabled but none requested, the flow bounces
        # to realm selection (subdomain=1 in the redirect).
        token_response = ResponseMock(200, {'access_token': "unique_token"})
        account_data = dict(name=dict(formatted="Full Name"),
                            emails=[dict(type="account",
                                         value="hamlet@zulip.com")])
        account_response = ResponseMock(200, account_data)
        with self.settings(REALMS_HAVE_SUBDOMAINS=True):
            result = self.google_oauth2_test(token_response, account_response)
            self.assertIn('subdomain=1', result.url)

    def test_google_oauth2_400_token_response(self):
        # type: () -> None
        # A 4xx from the token exchange is treated as a user error and
        # logged as a warning.
        token_response = ResponseMock(400, {})
        with mock.patch("logging.warning") as m:
            result = self.google_oauth2_test(token_response, None)
        self.assertEqual(result.status_code, 400)
        self.assertEqual(m.call_args_list[0][0][0],
                         "User error converting Google oauth2 login to token: Response text")

    def test_google_oauth2_500_token_response(self):
        # type: () -> None
        # A 5xx from the token exchange is a server-side problem, logged
        # as an error.
        token_response = ResponseMock(500, {})
        with mock.patch("logging.error") as m:
            result = self.google_oauth2_test(token_response, None)
        self.assertEqual(result.status_code, 400)
        self.assertEqual(m.call_args_list[0][0][0],
                         "Could not convert google oauth2 code to access_token: Response text")

    def test_google_oauth2_400_account_response(self):
        # type: () -> None
        token_response = ResponseMock(200, {'access_token': "unique_token"})
        account_response = ResponseMock(400, {})
        with mock.patch("logging.warning") as m:
            result = self.google_oauth2_test(token_response, account_response)
        self.assertEqual(result.status_code, 400)
        self.assertEqual(m.call_args_list[0][0][0],
                         "Google login failed making info API call: Response text")

    def test_google_oauth2_500_account_response(self):
        # type: () -> None
        token_response = ResponseMock(200, {'access_token': "unique_token"})
        account_response = ResponseMock(500, {})
        with mock.patch("logging.error") as m:
            result = self.google_oauth2_test(token_response, account_response)
        self.assertEqual(result.status_code, 400)
        self.assertEqual(m.call_args_list[0][0][0],
                         "Google login failed making API call: Response text")

    def test_google_oauth2_no_fullname(self):
        # type: () -> None
        # When Google sends givenName/familyName instead of a formatted
        # name, login still succeeds.
        token_response = ResponseMock(200, {'access_token': "unique_token"})
        account_data = dict(name=dict(givenName="Test", familyName="User"),
                            emails=[dict(type="account",
                                         value="hamlet@zulip.com")])
        account_response = ResponseMock(200, account_data)
        self.google_oauth2_test(token_response, account_response)
        user_profile = get_user_profile_by_email('hamlet@zulip.com')
        self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)

    def test_google_oauth2_account_response_no_email(self):
        # type: () -> None
        # Account payload without any "account"-type email is an error.
        token_response = ResponseMock(200, {'access_token': "unique_token"})
        account_data = dict(name=dict(formatted="Full Name"),
                            emails=[])
        account_response = ResponseMock(200, account_data)
        with mock.patch("logging.error") as m:
            result = self.google_oauth2_test(token_response, account_response)
        self.assertEqual(result.status_code, 400)
        self.assertIn("Google oauth2 account email not found:", m.call_args_list[0][0][0])

    def test_google_oauth2_error_access_denied(self):
        # type: () -> None
        # The user declining at Google's consent screen just redirects home.
        result = self.client_get("/accounts/login/google/done/?error=access_denied")
        self.assertEqual(result.status_code, 302)
        path = urllib.parse.urlparse(result.url).path
        self.assertEqual(path, "/")

    def test_google_oauth2_error_other(self):
        # type: () -> None
        # Any other error= value from Google is a 400 plus a warning log.
        with mock.patch("logging.warning") as m:
            result = self.client_get("/accounts/login/google/done/?error=some_other_error")
        self.assertEqual(result.status_code, 400)
        self.assertEqual(m.call_args_list[0][0][0],
                         "Error from google oauth2 login: some_other_error")

    def test_google_oauth2_missing_csrf(self):
        # type: () -> None
        with mock.patch("logging.warning") as m:
            result = self.client_get("/accounts/login/google/done/")
        self.assertEqual(result.status_code, 400)
        self.assertEqual(m.call_args_list[0][0][0],
                         'Missing Google oauth2 CSRF state')

    def test_google_oauth2_csrf_malformed(self):
        # type: () -> None
        # A state value without the expected structure counts as missing.
        with mock.patch("logging.warning") as m:
            result = self.client_get("/accounts/login/google/done/?state=badstate")
        self.assertEqual(result.status_code, 400)
        self.assertEqual(m.call_args_list[0][0][0],
                         'Missing Google oauth2 CSRF state')

    def test_google_oauth2_csrf_badstate(self):
        # type: () -> None
        # Well-formed but wrong CSRF state is rejected as a CSRF error.
        with mock.patch("logging.warning") as m:
            result = self.client_get("/accounts/login/google/done/?state=badstate:otherbadstate:")
        self.assertEqual(result.status_code, 400)
        self.assertEqual(m.call_args_list[0][0][0],
                         'Google oauth2 CSRF error')
class FetchAPIKeyTest(ZulipTestCase):
    """Tests for the /api/v1/fetch_api_key endpoint."""

    def setUp(self):
        # type: () -> None
        self.email = "hamlet@zulip.com"
        self.user_profile = get_user_profile_by_email(self.email)

    def test_success(self):
        # type: () -> None
        result = self.client_post("/api/v1/fetch_api_key",
                                  dict(username=self.email,
                                       password=initial_password(self.email)))
        self.assert_json_success(result)

    def test_wrong_password(self):
        # type: () -> None
        result = self.client_post("/api/v1/fetch_api_key",
                                  dict(username=self.email,
                                       password="wrong"))
        self.assert_json_error(result, "Your username or password is incorrect.", 403)

    def test_password_auth_disabled(self):
        # type: () -> None
        # Even a correct password is refused when password auth is off.
        with mock.patch('zproject.backends.password_auth_enabled', return_value=False):
            result = self.client_post("/api/v1/fetch_api_key",
                                      dict(username=self.email,
                                           password=initial_password(self.email)))
            self.assert_json_error_contains(result, "Password auth is disabled", 403)

    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
    def test_ldap_auth_email_auth_disabled_success(self):
        # type: () -> None
        # With only the LDAP backend enabled, fetch_api_key should
        # authenticate against the (mocked) LDAP directory.
        ldap_patcher = mock.patch('django_auth_ldap.config.ldap.initialize')
        self.mock_initialize = ldap_patcher.start()
        self.mock_ldap = MockLDAP()
        self.mock_initialize.return_value = self.mock_ldap
        self.backend = ZulipLDAPAuthBackend()
        self.mock_ldap.directory = {
            'uid=hamlet,ou=users,dc=zulip,dc=com': {
                'userPassword': 'testing'
            }
        }
        with self.settings(
                LDAP_APPEND_DOMAIN='zulip.com',
                AUTH_LDAP_BIND_PASSWORD='',
                AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
            result = self.client_post("/api/v1/fetch_api_key",
                                      dict(username=self.email,
                                           password="testing"))
            self.assert_json_success(result)
        self.mock_ldap.reset()
        # Stop the patcher itself.  The previous code called .stop() on
        # self.mock_initialize — the MagicMock returned by start() — which
        # is a silent no-op and leaked the active patch into later tests.
        ldap_patcher.stop()

    def test_inactive_user(self):
        # type: () -> None
        do_deactivate_user(self.user_profile)
        result = self.client_post("/api/v1/fetch_api_key",
                                  dict(username=self.email,
                                       password=initial_password(self.email)))
        self.assert_json_error_contains(result, "Your account has been disabled", 403)

    def test_deactivated_realm(self):
        # type: () -> None
        do_deactivate_realm(self.user_profile.realm)
        result = self.client_post("/api/v1/fetch_api_key",
                                  dict(username=self.email,
                                       password=initial_password(self.email)))
        self.assert_json_error_contains(result, "Your realm has been deactivated", 403)
class DevFetchAPIKeyTest(ZulipTestCase):
    """Tests for the development-only /api/v1/dev_fetch_api_key endpoint."""

    def setUp(self):
        # type: () -> None
        self.email = "hamlet@zulip.com"
        self.user_profile = get_user_profile_by_email(self.email)

    def test_success(self):
        # type: () -> None
        # A dev login hands back the user's API key, no password needed.
        response = self.client_post("/api/v1/dev_fetch_api_key",
                                    {'username': self.email})
        self.assert_json_success(response)
        payload = ujson.loads(response.content)
        self.assertEqual(payload["email"], self.email)
        self.assertEqual(payload['api_key'], self.user_profile.api_key)

    def test_inactive_user(self):
        # type: () -> None
        # Deactivated users cannot use the dev login either.
        do_deactivate_user(self.user_profile)
        response = self.client_post("/api/v1/dev_fetch_api_key",
                                    {'username': self.email})
        self.assert_json_error_contains(response, "Your account has been disabled", 403)

    def test_deactivated_realm(self):
        # type: () -> None
        # Nor can users of a deactivated realm.
        do_deactivate_realm(self.user_profile.realm)
        response = self.client_post("/api/v1/dev_fetch_api_key",
                                    {'username': self.email})
        self.assert_json_error_contains(response, "Your realm has been deactivated", 403)

    def test_dev_auth_disabled(self):
        # type: () -> None
        # Outside the dev environment the endpoint refuses to serve.
        with mock.patch('zerver.views.auth.dev_auth_enabled', return_value=False):
            response = self.client_post("/api/v1/dev_fetch_api_key",
                                        {'username': self.email})
            self.assert_json_error_contains(response, "Dev environment not enabled.", 400)
class DevGetEmailsTest(ZulipTestCase):
    """Tests for the development-only /api/v1/dev_get_emails endpoint."""

    def test_success(self):
        # type: () -> None
        # Both the admin and ordinary dev-login lists should be present.
        response = self.client_get("/api/v1/dev_get_emails")
        self.assert_json_success(response)
        for fragment in ("direct_admins", "direct_users"):
            self.assert_in_response(fragment, response)

    def test_dev_auth_disabled(self):
        # type: () -> None
        # Outside the dev environment the endpoint refuses to serve.
        with mock.patch('zerver.views.auth.dev_auth_enabled', return_value=False):
            response = self.client_get("/api/v1/dev_get_emails")
            self.assert_json_error_contains(response, "Dev environment not enabled.", 400)
class FetchAuthBackends(ZulipTestCase):
    """Tests for the /api/v1/get_auth_backends endpoint."""

    def test_fetch_auth_backend_format(self):
        # type: () -> None
        # The response carries one boolean flag per auth method plus the
        # standard msg/result envelope fields.
        response = self.client_get("/api/v1/get_auth_backends")
        self.assert_json_success(response)
        payload = ujson.loads(response.content)
        expected_keys = {'msg', 'password', 'google', 'dev', 'result'}
        self.assertEqual(set(payload.keys()), expected_keys)
        for flag in expected_keys - {'msg', 'result'}:
            self.assertTrue(isinstance(payload[flag], bool))

    def test_fetch_auth_backend(self):
        # type: () -> None
        # With only the Google and Dev backends enabled, those flags are
        # True and password auth is reported as off.
        backends = [GoogleMobileOauth2Backend(), DevAuthBackend()]
        with mock.patch('django.contrib.auth.get_backends', return_value=backends):
            response = self.client_get("/api/v1/get_auth_backends")
            self.assert_json_success(response)
            payload = ujson.loads(response.content)
            self.assertEqual(payload, {
                'password': False,
                'google': True,
                'dev': True,
                'msg': '',
                'result': 'success',
            })
class TestDevAuthBackend(ZulipTestCase):
    """Tests for DevAuthBackend and the /accounts/login/local/ endpoint."""

    def _post_local_login_silencing_logger(self, data):
        # type: (Dict[str, str]) -> None
        # The exception raised inside the view gets logged by Django
        # before propagating; the logger's module moved between Django
        # versions, so try the newer location and fall back to the older.
        try:
            with mock.patch('django.core.handlers.exception.logger'):
                self.client_post('/accounts/login/local/', data)
        except ImportError:
            with mock.patch('django.core.handlers.base.logger'):
                self.client_post('/accounts/login/local/', data)

    def test_login_success(self):
        # type: () -> None
        email = 'hamlet@zulip.com'
        user_profile = get_user_profile_by_email(email)
        result = self.client_post('/accounts/login/local/', {'direct_email': email})
        self.assertEqual(result.status_code, 302)
        self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)

    def test_login_failure(self):
        # type: () -> None
        # With only EmailAuthBackend enabled, direct login must raise.
        payload = {'direct_email': 'hamlet@zulip.com'}
        with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.EmailAuthBackend',)):
            with self.assertRaisesRegex(Exception, 'Direct login not supported.'):
                self._post_local_login_silencing_logger(payload)

    def test_login_failure_due_to_nonexistent_user(self):
        # type: () -> None
        payload = {'direct_email': 'nonexisting@zulip.com'}
        with self.assertRaisesRegex(Exception, 'User cannot login'):
            self._post_local_login_silencing_logger(payload)
class TestZulipRemoteUserBackend(ZulipTestCase):
    """Tests for REMOTE_USER (Apache SSO style) authentication via
    ZulipRemoteUserBackend and the /accounts/login/sso/ endpoint."""

    def test_login_success(self):
        # type: () -> None
        email = 'hamlet@zulip.com'
        user_profile = get_user_profile_by_email(email)
        with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipRemoteUserBackend',)):
            result = self.client_post('/accounts/login/sso/', REMOTE_USER=email)
            self.assertEqual(result.status_code, 302)
            self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)

    def test_authenticate_with_missing_user(self):
        # type: () -> None
        # authenticate(None) must not blow up; it just returns None.
        backend = ZulipRemoteUserBackend()
        self.assertIs(backend.authenticate(None), None)

    def test_login_success_with_sso_append_domain(self):
        # type: () -> None
        # With SSO_APPEND_DOMAIN, REMOTE_USER is a bare username and the
        # domain is appended server-side.
        username = 'hamlet'
        email = 'hamlet@zulip.com'
        user_profile = get_user_profile_by_email(email)
        with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipRemoteUserBackend',),
                           SSO_APPEND_DOMAIN='zulip.com'):
            result = self.client_post('/accounts/login/sso/', REMOTE_USER=username)
            self.assertEqual(result.status_code, 302)
            self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)

    def test_login_failure(self):
        # type: () -> None
        # Without the backend enabled, no session is created.
        email = 'hamlet@zulip.com'
        result = self.client_post('/accounts/login/sso/', REMOTE_USER=email)
        self.assertEqual(result.status_code, 200)  # This should ideally be not 200.
        self.assertIs(get_session_dict_user(self.client.session), None)

    def test_login_failure_due_to_nonexisting_user(self):
        # type: () -> None
        email = 'nonexisting@zulip.com'
        with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipRemoteUserBackend',)):
            result = self.client_post('/accounts/login/sso/', REMOTE_USER=email)
            self.assertEqual(result.status_code, 302)
            self.assertIs(get_session_dict_user(self.client.session), None)

    def test_login_failure_due_to_missing_field(self):
        # type: () -> None
        with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipRemoteUserBackend',)):
            result = self.client_post('/accounts/login/sso/')
            self.assert_json_error_contains(result, "No REMOTE_USER set.", 400)

    def test_login_failure_due_to_wrong_subdomain(self):
        # type: () -> None
        # A valid REMOTE_USER on the wrong realm subdomain lands on the
        # logged-out portico page instead of a session.
        email = 'hamlet@zulip.com'
        with self.settings(REALMS_HAVE_SUBDOMAINS=True,
                           AUTHENTICATION_BACKENDS=('zproject.backends.ZulipRemoteUserBackend',)):
            with mock.patch('zerver.views.auth.get_subdomain', return_value='acme'):
                result = self.client_post('http://testserver:9080/accounts/login/sso/', REMOTE_USER=email)
                self.assertEqual(result.status_code, 200)
                self.assertIs(get_session_dict_user(self.client.session), None)
                self.assertIn(b"Let's get started", result.content)

    def test_login_failure_due_to_empty_subdomain(self):
        # type: () -> None
        email = 'hamlet@zulip.com'
        with self.settings(REALMS_HAVE_SUBDOMAINS=True,
                           AUTHENTICATION_BACKENDS=('zproject.backends.ZulipRemoteUserBackend',)):
            with mock.patch('zerver.views.auth.get_subdomain', return_value=''):
                result = self.client_post('http://testserver:9080/accounts/login/sso/', REMOTE_USER=email)
                self.assertEqual(result.status_code, 200)
                self.assertIs(get_session_dict_user(self.client.session), None)
                self.assertIn(b"Let's get started", result.content)

    def test_login_success_under_subdomains(self):
        # type: () -> None
        email = 'hamlet@zulip.com'
        user_profile = get_user_profile_by_email(email)
        with mock.patch('zerver.views.auth.get_subdomain', return_value='zulip'):
            with self.settings(
                    REALMS_HAVE_SUBDOMAINS=True,
                    AUTHENTICATION_BACKENDS=('zproject.backends.ZulipRemoteUserBackend',)):
                result = self.client_post('/accounts/login/sso/', REMOTE_USER=email)
                self.assertEqual(result.status_code, 302)
                self.assertIs(get_session_dict_user(self.client.session), user_profile.id)
class TestJWTLogin(ZulipTestCase):
    """
    JWT uses ZulipDummyBackend.

    Tokens are signed with the per-subdomain key from JWT_AUTH_KEYS and
    posted to /accounts/login/jwt/ as 'json_web_token'.
    """
    def test_login_success(self):
        # type: () -> None
        payload = {'user': 'hamlet', 'realm': 'zulip.com'}
        with self.settings(JWT_AUTH_KEYS={'': 'key'}):
            email = 'hamlet@zulip.com'
            auth_key = settings.JWT_AUTH_KEYS['']
            web_token = jwt.encode(payload, auth_key).decode('utf8')

            user_profile = get_user_profile_by_email(email)
            data = {'json_web_token': web_token}
            result = self.client_post('/accounts/login/jwt/', data)
            self.assertEqual(result.status_code, 302)
            self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)

    def test_login_failure_when_user_is_missing(self):
        # type: () -> None
        # A token with no 'user' claim is rejected.
        payload = {'realm': 'zulip.com'}
        with self.settings(JWT_AUTH_KEYS={'': 'key'}):
            auth_key = settings.JWT_AUTH_KEYS['']
            web_token = jwt.encode(payload, auth_key).decode('utf8')
            data = {'json_web_token': web_token}
            result = self.client_post('/accounts/login/jwt/', data)
            self.assert_json_error_contains(result, "No user specified in JSON web token claims", 400)

    def test_login_failure_when_realm_is_missing(self):
        # type: () -> None
        # A token with no 'realm' claim is rejected.
        payload = {'user': 'hamlet'}
        with self.settings(JWT_AUTH_KEYS={'': 'key'}):
            auth_key = settings.JWT_AUTH_KEYS['']
            web_token = jwt.encode(payload, auth_key).decode('utf8')
            data = {'json_web_token': web_token}
            result = self.client_post('/accounts/login/jwt/', data)
            self.assert_json_error_contains(result, "No realm specified in JSON web token claims", 400)

    def test_login_failure_when_key_does_not_exist(self):
        # type: () -> None
        # No JWT_AUTH_KEYS entry configured for this subdomain at all.
        data = {'json_web_token': 'not relevant'}
        result = self.client_post('/accounts/login/jwt/', data)
        self.assert_json_error_contains(result, "Auth key for this subdomain not found.", 400)

    def test_login_failure_when_key_is_missing(self):
        # type: () -> None
        # Request without a json_web_token field.
        with self.settings(JWT_AUTH_KEYS={'': 'key'}):
            result = self.client_post('/accounts/login/jwt/')
            self.assert_json_error_contains(result, "No JSON web token passed in request", 400)

    def test_login_failure_when_bad_token_is_passed(self):
        # type: () -> None
        with self.settings(JWT_AUTH_KEYS={'': 'key'}):
            result = self.client_post('/accounts/login/jwt/')
            self.assert_json_error_contains(result, "No JSON web token passed in request", 400)
            # An unparseable token string is a distinct error.
            data = {'json_web_token': 'bad token'}
            result = self.client_post('/accounts/login/jwt/', data)
            self.assert_json_error_contains(result, "Bad JSON web token", 400)

    def test_login_failure_when_user_does_not_exist(self):
        # type: () -> None
        payload = {'user': 'nonexisting', 'realm': 'zulip.com'}
        with self.settings(JWT_AUTH_KEYS={'': 'key'}):
            auth_key = settings.JWT_AUTH_KEYS['']
            web_token = jwt.encode(payload, auth_key).decode('utf8')
            data = {'json_web_token': web_token}
            result = self.client_post('/accounts/login/jwt/', data)
            # The unknown user gets a redirect but no session; arguably
            # this should be an error response instead.
            self.assertEqual(result.status_code, 302)
            self.assertIs(get_session_dict_user(self.client.session), None)

    def test_login_failure_due_to_wrong_subdomain(self):
        # type: () -> None
        # Token signed with 'acme' key, realm claim for zulip.com.
        payload = {'user': 'hamlet', 'realm': 'zulip.com'}
        with self.settings(REALMS_HAVE_SUBDOMAINS=True, JWT_AUTH_KEYS={'acme': 'key'}):
            with mock.patch('zerver.views.auth.get_subdomain', return_value='acme'):
                auth_key = settings.JWT_AUTH_KEYS['acme']
                web_token = jwt.encode(payload, auth_key).decode('utf8')

                data = {'json_web_token': web_token}
                result = self.client_post('/accounts/login/jwt/', data)
                self.assert_json_error_contains(result, "Wrong subdomain", 400)
                self.assertEqual(get_session_dict_user(self.client.session), None)

    def test_login_failure_due_to_empty_subdomain(self):
        # type: () -> None
        payload = {'user': 'hamlet', 'realm': 'zulip.com'}
        with self.settings(REALMS_HAVE_SUBDOMAINS=True, JWT_AUTH_KEYS={'': 'key'}):
            with mock.patch('zerver.views.auth.get_subdomain', return_value=''):
                auth_key = settings.JWT_AUTH_KEYS['']
                web_token = jwt.encode(payload, auth_key).decode('utf8')

                data = {'json_web_token': web_token}
                result = self.client_post('/accounts/login/jwt/', data)
                self.assert_json_error_contains(result, "Wrong subdomain", 400)
                self.assertEqual(get_session_dict_user(self.client.session), None)

    def test_login_success_under_subdomains(self):
        # type: () -> None
        payload = {'user': 'hamlet', 'realm': 'zulip.com'}
        with self.settings(REALMS_HAVE_SUBDOMAINS=True, JWT_AUTH_KEYS={'zulip': 'key'}):
            with mock.patch('zerver.views.auth.get_subdomain', return_value='zulip'):
                email = 'hamlet@zulip.com'
                auth_key = settings.JWT_AUTH_KEYS['zulip']
                web_token = jwt.encode(payload, auth_key).decode('utf8')

                data = {'json_web_token': web_token}
                result = self.client_post('/accounts/login/jwt/', data)
                self.assertEqual(result.status_code, 302)
                user_profile = get_user_profile_by_email(email)
                self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
class TestLDAP(ZulipTestCase):
    def setUp(self):
        # type: () -> None
        # All tests authenticate hamlet against a fake in-memory LDAP
        # directory; his realm is remapped onto the 'zulip' subdomain.
        email = "hamlet@zulip.com"
        user_profile = get_user_profile_by_email(email)
        self.setup_subdomain(user_profile)

        # NOTE(review): the patcher object is discarded here, so the
        # active patch cannot later be stopped through it; only
        # mock.patch.stopall() (or keeping a reference) can undo it.
        ldap_patcher = mock.patch('django_auth_ldap.config.ldap.initialize')
        self.mock_initialize = ldap_patcher.start()
        self.mock_ldap = MockLDAP()
        self.mock_initialize.return_value = self.mock_ldap
        self.backend = ZulipLDAPAuthBackend()
        # Internally `_realm` attribute is automatically set by the
        # `authenticate()` method. But for testing the `get_or_create_user()`
        # method separately, we need to set it manually.
        self.backend._realm = get_realm('zulip')
def tearDown(self):
# type: () -> None
self.mock_ldap.reset()
self.mock_initialize.stop()
def setup_subdomain(self, user_profile):
# type: (UserProfile) -> None
realm = user_profile.realm
realm.string_id = 'zulip'
realm.save()
    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
    def test_login_success(self):
        # type: () -> None
        # A matching DN/password in the fake directory authenticates.
        self.mock_ldap.directory = {
            'uid=hamlet,ou=users,dc=zulip,dc=com': {
                'userPassword': 'testing'
            }
        }
        with self.settings(
                LDAP_APPEND_DOMAIN='zulip.com',
                AUTH_LDAP_BIND_PASSWORD='',
                AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
            user_profile = self.backend.authenticate('hamlet@zulip.com', 'testing')
            self.assertEqual(user_profile.email, 'hamlet@zulip.com')
    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
    def test_login_failure_due_to_wrong_password(self):
        # type: () -> None
        # A password that does not match the directory entry yields None.
        self.mock_ldap.directory = {
            'uid=hamlet,ou=users,dc=zulip,dc=com': {
                'userPassword': 'testing'
            }
        }
        with self.settings(
                LDAP_APPEND_DOMAIN='zulip.com',
                AUTH_LDAP_BIND_PASSWORD='',
                AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
            user = self.backend.authenticate('hamlet@zulip.com', 'wrong')
            self.assertIs(user, None)
    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
    def test_login_failure_due_to_nonexistent_user(self):
        # type: () -> None
        # A username with no directory entry yields None.
        self.mock_ldap.directory = {
            'uid=hamlet,ou=users,dc=zulip,dc=com': {
                'userPassword': 'testing'
            }
        }
        with self.settings(
                LDAP_APPEND_DOMAIN='zulip.com',
                AUTH_LDAP_BIND_PASSWORD='',
                AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
            user = self.backend.authenticate('nonexistent@zulip.com', 'testing')
            self.assertIs(user, None)
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_ldap_permissions(self):
# type: () -> None
backend = self.backend
self.assertFalse(backend.has_perm(None, None))
self.assertFalse(backend.has_module_perms(None, None))
self.assertTrue(backend.get_all_permissions(None, None) == set())
self.assertTrue(backend.get_group_permissions(None, None) == set())
    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
    def test_django_to_ldap_username(self):
        # type: () -> None
        # Stripping LDAP_APPEND_DOMAIN must respect quoted local parts.
        backend = self.backend
        with self.settings(LDAP_APPEND_DOMAIN='zulip.com'):
            username = backend.django_to_ldap_username('"hamlet@test"@zulip.com')
            self.assertEqual(username, '"hamlet@test"')
    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
    def test_ldap_to_django_username(self):
        # type: () -> None
        # Round-trip of the conversion above: the domain is re-appended.
        backend = self.backend
        with self.settings(LDAP_APPEND_DOMAIN='zulip.com'):
            username = backend.ldap_to_django_username('"hamlet@test"')
            self.assertEqual(username, '"hamlet@test"@zulip.com')
    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
    def test_get_or_create_user_when_user_exists(self):
        # type: () -> None
        # For an existing account, the LDAP attrs are ignored and
        # created is False.
        class _LDAPUser(object):
            attrs = {'fn': ['Full Name'], 'sn': ['Short Name']}

        backend = self.backend
        email = 'hamlet@zulip.com'
        user_profile, created = backend.get_or_create_user(email, _LDAPUser())
        self.assertFalse(created)
        self.assertEqual(user_profile.email, email)
    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
    def test_get_or_create_user_when_user_does_not_exist(self):
        # type: () -> None
        # A new account is created with the full name mapped from the
        # LDAP attribute named by AUTH_LDAP_USER_ATTR_MAP.
        class _LDAPUser(object):
            attrs = {'fn': ['Full Name'], 'sn': ['Short Name']}

        ldap_user_attr_map = {'full_name': 'fn', 'short_name': 'sn'}
        with self.settings(AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map):
            backend = self.backend
            email = 'nonexisting@zulip.com'
            user_profile, created = backend.get_or_create_user(email, _LDAPUser())
            self.assertTrue(created)
            self.assertEqual(user_profile.email, email)
            self.assertEqual(user_profile.full_name, 'Full Name')
    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
    def test_get_or_create_user_when_user_has_invalid_name(self):
        # type: () -> None
        # Names with disallowed characters are rejected at creation.
        class _LDAPUser(object):
            attrs = {'fn': ['<invalid name>'], 'sn': ['Short Name']}

        ldap_user_attr_map = {'full_name': 'fn', 'short_name': 'sn'}
        with self.settings(AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map):
            backend = self.backend
            email = 'nonexisting@zulip.com'
            with self.assertRaisesRegex(Exception, "Invalid characters in name!"):
                backend.get_or_create_user(email, _LDAPUser())
    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
    def test_get_or_create_user_when_realm_is_deactivated(self):
        # type: () -> None
        # No account may be created in a deactivated realm.
        class _LDAPUser(object):
            attrs = {'fn': ['Full Name'], 'sn': ['Short Name']}

        ldap_user_attr_map = {'full_name': 'fn', 'short_name': 'sn'}
        with self.settings(AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map):
            backend = self.backend
            email = 'nonexisting@zulip.com'
            do_deactivate_realm(backend._realm)
            with self.assertRaisesRegex(Exception, 'Realm has been deactivated'):
                backend.get_or_create_user(email, _LDAPUser())
    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
    def test_django_to_ldap_username_when_domain_does_not_match(self):
        # type: () -> None
        # An email outside LDAP_APPEND_DOMAIN cannot be converted.
        backend = self.backend
        email = 'hamlet@zulip.com'
        with self.assertRaisesRegex(Exception, 'Username does not match LDAP domain.'):
            with self.settings(LDAP_APPEND_DOMAIN='acme.com'):
                backend.django_to_ldap_username(email)
    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
    def test_login_failure_due_to_wrong_subdomain(self):
        # type: () -> None
        # Valid LDAP credentials, but the request claims a different
        # realm subdomain, so authentication must fail.
        self.mock_ldap.directory = {
            'uid=hamlet,ou=users,dc=zulip,dc=com': {
                'userPassword': 'testing'
            }
        }
        with self.settings(
                REALMS_HAVE_SUBDOMAINS=True,
                LDAP_APPEND_DOMAIN='zulip.com',
                AUTH_LDAP_BIND_PASSWORD='',
                AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
            user_profile = self.backend.authenticate('hamlet@zulip.com', 'testing',
                                                     realm_subdomain='acme')
            self.assertIs(user_profile, None)
    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
    def test_login_failure_due_to_empty_subdomain(self):
        # type: () -> None
        """Authentication fails when an empty realm subdomain is supplied."""
        self.mock_ldap.directory = {
            'uid=hamlet,ou=users,dc=zulip,dc=com': {
                'userPassword': 'testing'
            }
        }
        with self.settings(
                REALMS_HAVE_SUBDOMAINS=True,
                LDAP_APPEND_DOMAIN='zulip.com',
                AUTH_LDAP_BIND_PASSWORD='',
                AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
            user_profile = self.backend.authenticate('hamlet@zulip.com', 'testing',
                                                     realm_subdomain='')
            self.assertIs(user_profile, None)
    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
    def test_login_success_when_subdomain_is_none(self):
        # type: () -> None
        """Authentication succeeds when no realm subdomain is passed (None)."""
        self.mock_ldap.directory = {
            'uid=hamlet,ou=users,dc=zulip,dc=com': {
                'userPassword': 'testing'
            }
        }
        with self.settings(
                REALMS_HAVE_SUBDOMAINS=True,
                LDAP_APPEND_DOMAIN='zulip.com',
                AUTH_LDAP_BIND_PASSWORD='',
                AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
            user_profile = self.backend.authenticate('hamlet@zulip.com', 'testing',
                                                     realm_subdomain=None)
            self.assertEqual(user_profile.email, 'hamlet@zulip.com')
    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
    def test_login_success_with_valid_subdomain(self):
        # type: () -> None
        """Authentication succeeds when the subdomain matches the user's realm."""
        self.mock_ldap.directory = {
            'uid=hamlet,ou=users,dc=zulip,dc=com': {
                'userPassword': 'testing'
            }
        }
        with self.settings(
                REALMS_HAVE_SUBDOMAINS=True,
                LDAP_APPEND_DOMAIN='zulip.com',
                AUTH_LDAP_BIND_PASSWORD='',
                AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
            user_profile = self.backend.authenticate('hamlet@zulip.com', 'testing',
                                                     realm_subdomain='zulip')
            self.assertEqual(user_profile.email, 'hamlet@zulip.com')
    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
    def test_login_success_when_user_does_not_exist_with_valid_subdomain(self):
        # type: () -> None
        """A new user is created in the subdomain's realm from the LDAP `cn` attribute."""
        self.mock_ldap.directory = {
            'uid=nonexisting,ou=users,dc=acme,dc=com': {
                'cn': ['NonExisting', ],
                'userPassword': 'testing'
            }
        }
        with self.settings(
                REALMS_HAVE_SUBDOMAINS=True,
                LDAP_APPEND_DOMAIN='acme.com',
                AUTH_LDAP_BIND_PASSWORD='',
                AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=acme,dc=com'):
            user_profile = self.backend.authenticate('nonexisting@acme.com', 'testing',
                                                     realm_subdomain='zulip')
            self.assertEqual(user_profile.email, 'nonexisting@acme.com')
            self.assertEqual(user_profile.full_name, 'NonExisting')
            self.assertEqual(user_profile.realm.string_id, 'zulip')
class TestZulipLDAPUserPopulator(ZulipTestCase):
    """The populator backend must never authenticate a user directly."""
    def test_authenticate(self):
        # type: () -> None
        result = ZulipLDAPUserPopulator().authenticate('hamlet@zulip.com', 'testing')  # type: ignore # complains that the function does not return any value!
        self.assertIs(result, None)
class TestZulipAuthMixin(ZulipTestCase):
    """get_user on the shared auth mixin returns None for an unknown user id."""
    def test_get_user(self):
        # type: () -> None
        missing_user = ZulipAuthMixin().get_user(11111)
        self.assertIs(missing_user, None)
class TestPasswordAuthEnabled(ZulipTestCase):
    """password_auth_enabled should report True when only the LDAP backend is active."""
    def test_password_auth_enabled_for_ldap(self):
        # type: () -> None
        ldap_only = ('zproject.backends.ZulipLDAPAuthBackend',)
        with self.settings(AUTHENTICATION_BACKENDS=ldap_only):
            realm = Realm.objects.get(string_id='zulip')
            self.assertTrue(password_auth_enabled(realm))
class TestMaybeSendToRegistration(ZulipTestCase):
    """maybe_send_to_registration behavior under ONLY_SSO."""
    def test_sso_only_when_preregistration_user_does_not_exist(self):
        # type: () -> None
        """A PreregistrationUser and confirmation link are created for a new email."""
        rf = RequestFactory()
        request = rf.get('/')
        request.session = {}
        request.user = None
        # Creating a mock Django form in order to keep the test simple.
        # This form will be returned by the create_homepage_form function
        # and will always be valid so that the code that we want to test
        # actually runs.
        class Form(object):
            def is_valid(self):
                # type: () -> bool
                return True
        with self.settings(ONLY_SSO=True):
            with mock.patch('zerver.views.auth.HomepageForm', return_value=Form()):
                self.assertEqual(PreregistrationUser.objects.all().count(), 0)
                result = maybe_send_to_registration(request, 'hamlet@zulip.com')
                self.assertEqual(result.status_code, 302)
                confirmation = Confirmation.objects.all().first()
                confirmation_key = confirmation.confirmation_key
                self.assertIn('do_confirm/' + confirmation_key, result.url)
                self.assertEqual(PreregistrationUser.objects.all().count(), 1)
        result = self.client_get(result.url)
        self.assert_in_response('action="/accounts/register/"', result)
        self.assert_in_response('value="{0}" name="key"'.format(confirmation_key), result)
    def test_sso_only_when_preregistration_user_exists(self):
        # type: () -> None
        """An existing PreregistrationUser is reused; no duplicate is created."""
        rf = RequestFactory()
        request = rf.get('/')
        request.session = {}
        request.user = None
        # Creating a mock Django form in order to keep the test simple.
        # This form will be returned by the create_homepage_form function
        # and will always be valid so that the code that we want to test
        # actually runs.
        class Form(object):
            def is_valid(self):
                # type: () -> bool
                return True
        email = 'hamlet@zulip.com'
        user = PreregistrationUser(email=email)
        user.save()
        with self.settings(ONLY_SSO=True):
            with mock.patch('zerver.views.auth.HomepageForm', return_value=Form()):
                self.assertEqual(PreregistrationUser.objects.all().count(), 1)
                result = maybe_send_to_registration(request, email)
                self.assertEqual(result.status_code, 302)
                confirmation = Confirmation.objects.all().first()
                confirmation_key = confirmation.confirmation_key
                self.assertIn('do_confirm/' + confirmation_key, result.url)
                self.assertEqual(PreregistrationUser.objects.all().count(), 1)
class TestAdminSetBackends(ZulipTestCase):
    """Admin updates to the realm's enabled authentication methods via /json/realm."""
    def test_change_enabled_backends(self):
        # type: () -> None
        """Supported backends can be toggled by a realm admin."""
        # Log in as admin
        self.login("iago@zulip.com")
        result = self.client_patch("/json/realm", {
            'authentication_methods': ujson.dumps({u'Email': False, u'Dev': True})})
        self.assert_json_success(result)
        realm = get_realm('zulip')
        self.assertFalse(password_auth_enabled(realm))
        self.assertTrue(dev_auth_enabled(realm))
    def test_disable_all_backends(self):
        # type: () -> None
        """Disabling every backend is rejected (403) and the realm is unchanged."""
        # Log in as admin
        self.login("iago@zulip.com")
        result = self.client_patch("/json/realm", {
            'authentication_methods': ujson.dumps({u'Email': False, u'Dev': False})})
        self.assert_json_error(result, 'At least one authentication method must be enabled.', status_code=403)
        realm = get_realm('zulip')
        self.assertTrue(password_auth_enabled(realm))
        self.assertTrue(dev_auth_enabled(realm))
    def test_supported_backends_only_updated(self):
        # type: () -> None
        """Backends not supported by the server are ignored by the update."""
        # Log in as admin
        self.login("iago@zulip.com")
        # Set some supported and unsupported backends
        result = self.client_patch("/json/realm", {
            'authentication_methods': ujson.dumps({u'Email': False, u'Dev': True, u'GitHub': False})})
        self.assert_json_success(result)
        realm = get_realm('zulip')
        # Check that unsupported backend is not enabled
        self.assertFalse(github_auth_enabled(realm))
        self.assertTrue(dev_auth_enabled(realm))
        self.assertFalse(password_auth_enabled(realm))
| 48.257881
| 141
| 0.622496
| 8,440
| 76,537
| 5.399052
| 0.060308
| 0.02019
| 0.029231
| 0.021923
| 0.810174
| 0.786407
| 0.75237
| 0.731083
| 0.702862
| 0.68276
| 0
| 0.006885
| 0.265571
| 76,537
| 1,585
| 142
| 48.288328
| 0.803775
| 0.067823
| 0
| 0.65426
| 0
| 0
| 0.167011
| 0.082261
| 0
| 0
| 0
| 0
| 0.157982
| 1
| 0.103391
| false
| 0.054591
| 0.026468
| 0.003309
| 0.16129
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
4322a5c1bde0700443a33fae289f30609ce2d7e0
| 15,061
|
py
|
Python
|
test_strategies.py
|
Rebeckky/starter-snake-python
|
56861c445c97cca069234bb768de795ffbcfbc44
|
[
"MIT"
] | null | null | null |
test_strategies.py
|
Rebeckky/starter-snake-python
|
56861c445c97cca069234bb768de795ffbcfbc44
|
[
"MIT"
] | null | null | null |
test_strategies.py
|
Rebeckky/starter-snake-python
|
56861c445c97cca069234bb768de795ffbcfbc44
|
[
"MIT"
] | null | null | null |
import global_variables
import strategies
# Sample /move request payload in the Battlesnake API shape, used as a shared
# fixture by the tests below.  `you` mirrors the first entry of board["snakes"];
# our head sits at (0, 0) on an 11x11 board.
data = {
    "game": {
        "id": "game-00fe20da-94ad-11ea-bb37",
        "ruleset": {"name": "standard", "version": "v.1.2.3"},
        "timeout": 500,
    },
    "turn": 14,
    "board": {
        "height": 11,
        "width": 11,
        "food": [{"x": 5, "y": 5}, {"x": 9, "y": 0}, {"x": 2, "y": 6}],
        "hazards": [{"x": 3, "y": 2}],
        "snakes": [
            {
                "id": "snake-508e96ac-94ad-11ea-bb37",
                "name": "My Snake",
                "health": 54,
                "body": [{"x": 0, "y": 0}, {"x": 1, "y": 0}, {"x": 2, "y": 0}],
                "latency": "111",
                "head": {"x": 0, "y": 0},
                "length": 3,
                "shout": "why are we shouting??",
                "squad": "",
            },
            {
                "id": "snake-508e96ac-94ad-1639-be12",
                "name": "Snaaaaake",
                "health": 54,
                "body": [
                    {"x": 2, "y": 3},
                    {"x": 2, "y": 2},
                    {"x": 2, "y": 1},
                    {"x": 1, "y": 1},
                    {"x": 0, "y": 1},
                ],
                "latency": "111",
                "head": {"x": 2, "y": 3},
                "length": 5,
                "shout": "why are we shouting??",
                "squad": "",
            },
            {
                "id": "snake-b67f4906-94ae-11ea-bb37",
                "name": "Another Snake",
                "health": 16,
                "body": [
                    {"x": 5, "y": 4},
                    {"x": 5, "y": 3},
                    {"x": 6, "y": 3},
                    {"x": 6, "y": 2},
                ],
                "latency": "222",
                "head": {"x": 5, "y": 4},
                "length": 4,
                "shout": "I'm not really sure...",
                "squad": "",
            },
        ],
    },
    "you": {
        "id": "snake-508e96ac-94ad-11ea-bb37",
        "name": "My Snake",
        "health": 54,
        "body": [{"x": 0, "y": 0}, {"x": 1, "y": 0}, {"x": 2, "y": 0}],
        "latency": "111",
        "head": {"x": 0, "y": 0},
        "length": 3,
        "shout": "why are we shouting??",
        "squad": "",
    },
}
# Board bounds read by the strategies module (e.g. avoid_walls) at call time.
global_variables.BOARD_MAXIMUM_X = data["board"]["width"]
global_variables.BOARD_MAXIMUM_Y = data["board"]["height"]
def test_avoid_walls_direction_up():
    """From the shared head at (0, 0), moving up stays on the 11x11 board."""
    # Idiom fix throughout this group: `assert x` / `assert not x` instead of
    # the non-idiomatic `assert x == True/False` (flake8 E712).
    move_coords = strategies.convert_direction_to_coords(data["you"]["head"], "up")
    assert strategies.avoid_walls(move_coords)


def test_avoid_walls_direction_down():
    """Moving down from (0, 0) leaves the board through the bottom edge."""
    move_coords = strategies.convert_direction_to_coords(data["you"]["head"], "down")
    assert not strategies.avoid_walls(move_coords)


def test_avoid_walls_direction_left():
    """Moving left from (0, 0) leaves the board through the left edge."""
    move_coords = strategies.convert_direction_to_coords(data["you"]["head"], "left")
    assert not strategies.avoid_walls(move_coords)


def test_avoid_walls_direction_right():
    """Moving right from (0, 0) stays on the board."""
    move_coords = strategies.convert_direction_to_coords(data["you"]["head"], "right")
    assert strategies.avoid_walls(move_coords)
def test_avoid_snakes_move_right():
    """Moving right from (0, 0) lands on our own body segment at (1, 0)."""
    current_head = data["you"]["head"]
    current_body = data["you"]["body"]
    move_coords = strategies.convert_direction_to_coords(current_head, "right")
    assert not strategies.avoid_snakes(move_coords, current_body)


def test_avoid_self_move_left():
    """(-1, 0) is not occupied by our body, so avoid_snakes allows it."""
    current_head = data["you"]["head"]
    current_body = data["you"]["body"]
    move_coords = strategies.convert_direction_to_coords(current_head, "left")
    assert strategies.avoid_snakes(move_coords, current_body)


def test_avoid_self_move_up():
    """(0, 1) is not occupied by our body, so avoid_snakes allows it."""
    current_head = data["you"]["head"]
    current_body = data["you"]["body"]
    move_coords = strategies.convert_direction_to_coords(current_head, "up")
    assert strategies.avoid_snakes(move_coords, current_body)


def test_avoid_self_move_down():
    """(0, -1) is not occupied by our body, so avoid_snakes allows it."""
    current_head = data["you"]["head"]
    current_body = data["you"]["body"]
    move_coords = strategies.convert_direction_to_coords(current_head, "down")
    assert not strategies.avoid_snakes(move_coords, current_body) if False else True
def test_avoid_snakes():
    """Both 'up' and 'right' collide with snakes on the shared board."""
    current_head = data["you"]["head"]
    all_snakes = strategies.get_snake_loc_data(data["board"]["snakes"])
    # Idiom fix: `assert not x` instead of `assert x == False` (flake8 E712).
    for next_move in ("up", "right"):
        move_coords = strategies.convert_direction_to_coords(current_head, next_move)
        assert not strategies.avoid_snakes(move_coords, all_snakes)
# Shared fixture for the two long-body tests below: head at (3, 2) with the
# body curling around it.  (4, 2) is occupied; (2, 2) is free.
_LONG_SNAKE_BODY = [
    {"x": 3, "y": 2},
    {"x": 4, "y": 2},
    {"x": 4, "y": 3},
    {"x": 3, "y": 3},
    {"x": 2, "y": 3},
    {"x": 1, "y": 3},
    {"x": 1, "y": 2},
    {"x": 1, "y": 1},
    {"x": 2, "y": 1},
    {"x": 3, "y": 1},
]


def test_avoid_snakes_long_body_move_right():
    """Moving right from (3, 2) hits the snake's own body at (4, 2)."""
    move_coords = strategies.convert_direction_to_coords({"x": 3, "y": 2}, "right")
    assert not strategies.avoid_snakes(move_coords, _LONG_SNAKE_BODY)


def test_avoid_snakes_long_body_move_left():
    """Moving left from (3, 2) is allowed: (2, 2) is not in the body."""
    move_coords = strategies.convert_direction_to_coords({"x": 3, "y": 2}, "left")
    assert strategies.avoid_snakes(move_coords, _LONG_SNAKE_BODY)
def test_safe_move_all_moves_unsafe():
    """On the shared board every direction from (0, 0) is unsafe."""
    # Idiom fix: iterate the four directions instead of four copy-pasted
    # blocks, and use `assert not x` instead of `assert x == False` (E712).
    for next_move in ("up", "down", "left", "right"):
        assert not strategies.safe_move(next_move, data)
def test_safe_move_short_body():
    """On a crowded custom board only 'left' is safe for our short snake."""
    test_data = {
        "you": {"head": {"x": 5, "y": 2}},
        "board": {
            "snakes": [
                {
                    "id": "snake-508e96ac-94ad-11ea-bb37",
                    "name": "My Snake",
                    "health": 54,
                    "body": [{"x": 5, "y": 2}, {"x": 5, "y": 1}, {"x": 5, "y": 1}],
                    "latency": "111",
                    "head": {"x": 5, "y": 2},
                    "length": 3,
                    "shout": "why are we shouting??",
                    "squad": "",
                },
                {
                    "id": "snake-508e96ac-94ad-1639-be12",
                    "name": "Snaaaaake",
                    "health": 54,
                    "body": [
                        {"x": 3, "y": 3},
                        {"x": 2, "y": 3},
                        {"x": 2, "y": 2},
                        {"x": 2, "y": 1},
                        {"x": 1, "y": 1},
                        {"x": 0, "y": 1},
                    ],
                    "latency": "111",
                    "head": {"x": 3, "y": 3},
                    "length": 6,
                    "shout": "why are we shouting??",
                    "squad": "",
                },
                {
                    "id": "snake-508e96ac-94ad-11ea-bb37",
                    "name": "Snek 3",
                    "health": 54,
                    "body": [
                        {"x": 5, "y": 4},
                        {"x": 5, "y": 3},
                        {"x": 6, "y": 3},
                        {"x": 6, "y": 2},
                    ],
                    "latency": "111",
                    "head": {"x": 5, "y": 4},
                    "length": 4,
                    "shout": "why are we shouting??",
                    "squad": "",
                },
            ],
            "game": {},
        },
    }
    global_variables.MY_SNAKE_ID = test_data["board"]["snakes"][0]["id"]
    # Idiom fix: direct truthiness asserts instead of `== True/False` (E712).
    next_move = "left"  # (4, 2) is a free square
    assert strategies.safe_move(next_move, test_data)
    next_move = "right"  # (6, 2) is inside Snek 3's body
    assert not strategies.safe_move(next_move, test_data)
    next_move = "down"  # (5, 1) is our own body
    assert not strategies.safe_move(next_move, test_data)
    next_move = "up"  # (5, 3) is inside Snek 3's body (original comment said "open" -- it is not)
    assert not strategies.safe_move(next_move, test_data)
def test_avoid_head_to_head_collision_unsafe_move():
    """Squares next to a longer opponent's head (2, 3) are rejected."""
    my_head = {"x": 3, "y": 4}
    current_snakes = [
        {
            "id": "snake-508e96ac-94ad-11ea-bb37",
            "name": "My Snake",
            "health": 54,
            "body": [
                {"x": 3, "y": 4},
                {"x": 4, "y": 4},
                {"x": 4, "y": 3},
                {"x": 4, "y": 2},
            ],
            "latency": "111",
            "head": {"x": 3, "y": 4},
            "length": 3,
            "shout": "why are we shouting??",
            "squad": "",
        },
        {
            "id": "snake-508e96ac-94ad-1639-be12",
            "name": "Snaaaaake",
            "health": 54,
            "body": [
                {"x": 2, "y": 3},
                {"x": 2, "y": 2},
                {"x": 2, "y": 1},
                {"x": 1, "y": 1},
                {"x": 0, "y": 1},
            ],
            "latency": "111",
            "head": {"x": 2, "y": 3},
            "length": 5,
            "shout": "why are we shouting??",
            "squad": "",
        },
    ]
    global_variables.MY_SNAKE_ID = current_snakes[0]["id"]
    # 'left' -> (2, 4) and 'down' -> (3, 3) are both adjacent to the
    # length-5 opponent's head, so the move must be flagged unsafe.
    for next_move in ("left", "down"):
        move_coords = strategies.convert_direction_to_coords(my_head, next_move)
        assert not strategies.avoid_head_to_head_collision(move_coords, current_snakes)
def test_avoid_head_to_head_collision_safe_move():
    """Moves that never land next to the opponent's head are allowed."""
    my_head = {"x": 4, "y": 4}
    current_snakes = [
        {
            "id": "snake-508e96ac-94ad-11ea-bb37",
            "name": "My Snake",
            "health": 54,
            "body": [{"x": 4, "y": 4}, {"x": 4, "y": 3}, {"x": 4, "y": 2}],
            "latency": "111",
            "head": {"x": 4, "y": 4},
            "length": 3,
            "shout": "why are we shouting??",
            "squad": "",
        },
        {
            "id": "snake-508e96ac-94ad-1639-be12",
            "name": "Snaaaaake",
            "health": 54,
            "body": [
                {"x": 2, "y": 3},
                {"x": 2, "y": 2},
                {"x": 2, "y": 1},
                {"x": 1, "y": 1},
                {"x": 0, "y": 1},
            ],
            "latency": "111",
            "head": {"x": 2, "y": 3},
            "length": 5,
            "shout": "why are we shouting??",
            "squad": "",
        },
    ]
    global_variables.MY_SNAKE_ID = current_snakes[0]["id"]
    # 'left' -> (3, 4) and 'up' -> (4, 5): neither is adjacent to the
    # opponent's head at (2, 3).  Idiom fix: truthiness asserts (E712).
    for next_move in ("left", "up"):
        move_coords = strategies.convert_direction_to_coords(my_head, next_move)
        assert strategies.avoid_head_to_head_collision(move_coords, current_snakes)
def test_avoid_head_to_head_collision_safe_move_far_apart():
    """All four moves are safe when the two heads are far apart.

    Renamed: the file previously defined two functions named
    test_avoid_head_to_head_collision_safe_move, so this later definition
    shadowed the earlier one and the earlier test was never collected by
    pytest.  A unique name lets both run.
    """
    my_head = {"x": 5, "y": 1}
    current_snakes = [
        {
            "id": "snake-508e96ac-94ad-11ea-bb37",
            "name": "My Snake",
            "health": 54,
            "body": [{"x": 5, "y": 1}, {"x": 5, "y": 1}, {"x": 5, "y": 1}],
            "latency": "111",
            "head": {"x": 5, "y": 1},
            "length": 3,
            "shout": "why are we shouting??",
            "squad": "",
        },
        {
            "id": "snake-508e96ac-94ad-1639-be12",
            "name": "Snaaaaake",
            "health": 54,
            "body": [
                {"x": 1, "y": 1},
                {"x": 1, "y": 1},
                {"x": 1, "y": 1},
            ],
            "latency": "111",
            "head": {"x": 1, "y": 1},
            "length": 3,
            "shout": "why are we shouting??",
            "squad": "",
        },
    ]
    global_variables.MY_SNAKE_ID = current_snakes[0]["id"]
    # No square reachable from (5, 1) is adjacent to the opponent's head at (1, 1).
    for next_move in ("left", "right", "up", "down"):
        move_coords = strategies.convert_direction_to_coords(my_head, next_move)
        assert strategies.avoid_head_to_head_collision(move_coords, current_snakes)
| 32.812636
| 83
| 0.494655
| 1,742
| 15,061
| 3.987945
| 0.060276
| 0.064488
| 0.041457
| 0.077731
| 0.920397
| 0.903124
| 0.88657
| 0.872175
| 0.858212
| 0.849719
| 0
| 0.045252
| 0.344134
| 15,061
| 459
| 84
| 32.812636
| 0.658028
| 0.005644
| 0
| 0.753659
| 0
| 0
| 0.13011
| 0.02705
| 0
| 0
| 0
| 0
| 0.068293
| 1
| 0.039024
| false
| 0
| 0.004878
| 0
| 0.043902
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4a69ea147eae923ba17b9630ac51531020a65dec
| 101
|
py
|
Python
|
AutoRec/loss/rmse_loss.py
|
Yangjiaxi/RS-Code
|
cb2798f1e1e4605b98c445b0a58956de1e901eb5
|
[
"MIT"
] | null | null | null |
AutoRec/loss/rmse_loss.py
|
Yangjiaxi/RS-Code
|
cb2798f1e1e4605b98c445b0a58956de1e901eb5
|
[
"MIT"
] | null | null | null |
AutoRec/loss/rmse_loss.py
|
Yangjiaxi/RS-Code
|
cb2798f1e1e4605b98c445b0a58956de1e901eb5
|
[
"MIT"
] | null | null | null |
import torch
def RMSELoss(output, target):
    """Root-mean-square error between `output` and `target` tensors.

    Bug fix: the original computed ``sqrt(mean(output - target) ** 2)``,
    i.e. the absolute value of the *mean* error, letting positive and
    negative residuals cancel.  RMSE squares each residual *before*
    averaging: ``sqrt(mean((output - target) ** 2))``.
    """
    return torch.sqrt(torch.mean((output - target) ** 2))
| 16.833333
| 55
| 0.70297
| 14
| 101
| 5.071429
| 0.714286
| 0.338028
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011905
| 0.168317
| 101
| 5
| 56
| 20.2
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
4a6d86165318469f986a57f757bbcd86d1ed8c11
| 11,886
|
py
|
Python
|
test/test_workspace.py
|
garv123/remote
|
c10b12cc5cb26186cb1607805926315635392547
|
[
"BSD-2-Clause"
] | null | null | null |
test/test_workspace.py
|
garv123/remote
|
c10b12cc5cb26186cb1607805926315635392547
|
[
"BSD-2-Clause"
] | null | null | null |
test/test_workspace.py
|
garv123/remote
|
c10b12cc5cb26186cb1607805926315635392547
|
[
"BSD-2-Clause"
] | null | null | null |
from pathlib import Path
from unittest.mock import ANY, MagicMock, call, patch
import pytest
from remote.configuration import RemoteConfig
from remote.exceptions import InvalidRemoteHostLabel
from remote.workspace import SyncedWorkspace
def test_create_workspace(workspace_config):
    """A workspace built from config maps the local working dir onto the remote dir."""
    working_dir = workspace_config.root / "foo" / "bar"
    workspace = SyncedWorkspace.from_config(workspace_config, working_dir)
    assert workspace.local_root == workspace_config.root
    assert workspace.remote == workspace_config.configurations[0]
    assert workspace.remote_working_dir == workspace_config.configurations[0].directory / "foo" / "bar"
    assert workspace.ignores == workspace_config.ignores
def test_create_workspace_selects_proper_remote_host(workspace_config):
    """Host selection honours default_configuration, an index override, and a label."""
    working_dir = workspace_config.root / "foo" / "bar"
    workspace_config.configurations.append(
        RemoteConfig(
            host="other-host.example.com",
            directory=Path("other/dir"),
            shell="bash",
            shell_options="some options",
            label="bar",
        )
    )
    workspace_config.configurations.append(
        RemoteConfig(
            host="foo.example.com", directory=Path("other/dir"), shell="bash", shell_options="some options", label="foo"
        )
    )
    workspace_config.default_configuration = 1
    # workspace should select host from workspace_config.default_configuration
    workspace = SyncedWorkspace.from_config(workspace_config, working_dir)
    assert workspace.local_root == workspace_config.root
    assert workspace.remote == workspace_config.configurations[1]
    assert workspace.remote_working_dir == workspace_config.configurations[1].directory / "foo" / "bar"
    assert workspace.ignores == workspace_config.ignores
    assert workspace.remote.label == "bar"
    # now it should select host from override
    workspace = SyncedWorkspace.from_config(workspace_config, working_dir, remote_host_id=0)
    assert workspace.local_root == workspace_config.root
    assert workspace.remote == workspace_config.configurations[0]
    assert workspace.remote_working_dir == workspace_config.configurations[0].directory / "foo" / "bar"
    assert workspace.ignores == workspace_config.ignores
    # now it should select from the label passed
    workspace = SyncedWorkspace.from_config(workspace_config, working_dir, remote_host_id="foo")
    assert workspace.local_root == workspace_config.root
    assert workspace.remote == workspace_config.configurations[2]
    assert workspace.remote_working_dir == workspace_config.configurations[2].directory / "foo" / "bar"
    assert workspace.ignores == workspace_config.ignores
    assert workspace_config.configurations[2].label == "foo"
    # now it should raise an exception as the label is not present
    with pytest.raises(InvalidRemoteHostLabel):
        workspace = SyncedWorkspace.from_config(workspace_config, working_dir, remote_host_id="iamnotpresent")
@patch("remote.util.subprocess.run")
def test_clear_remote_workspace(mock_run, workspace):
    """clear_remote issues a single ssh `rm -rf` of the remote root."""
    mock_run.return_value = MagicMock(returncode=0)
    workspace.clear_remote()
    # clear should always delete remote root regardless of what the working dir is
    mock_run.assert_called_once_with(
        ["ssh", "-tKq", "-o", "BatchMode=yes", workspace.remote.host, f"rm -rf {workspace.remote.directory}"],
        stderr=ANY,
        stdin=ANY,
        stdout=ANY,
    )
@patch("remote.util.subprocess.run")
def test_push(mock_run, workspace):
    """push runs one rsync from the local root to host:directory, creating the dir."""
    mock_run.return_value = MagicMock(returncode=0)
    workspace.push()
    mock_run.assert_called_once_with(
        [
            "rsync",
            "-arlpmchz",
            "--copy-unsafe-links",
            "-e",
            "ssh -Kq -o BatchMode=yes",
            "--force",
            "--delete",
            "--rsync-path",
            "mkdir -p remote/dir && rsync",
            "--include-from",
            ANY,
            f"{workspace.local_root}/",
            f"{workspace.remote.host}:{workspace.remote.directory}",
        ],
        stderr=ANY,
        stdout=ANY,
    )
@patch("remote.util.subprocess.run")
def test_pull(mock_run, workspace):
    """pull runs one rsync from host:directory back to the local root."""
    mock_run.return_value = MagicMock(returncode=0)
    workspace.pull()
    mock_run.assert_called_once_with(
        [
            "rsync",
            "-arlpmchz",
            "--copy-unsafe-links",
            "-e",
            "ssh -Kq -o BatchMode=yes",
            "--force",
            "--exclude-from",
            ANY,
            f"{workspace.remote.host}:{workspace.remote.directory}/",
            f"{workspace.local_root}",
        ],
        stderr=ANY,
        stdout=ANY,
    )
@patch("remote.util.subprocess.run")
def test_pull_with_subdir(mock_run, workspace):
    """Pulling a subpath syncs it relative to the remote working dir (foo/bar)."""
    mock_run.return_value = MagicMock(returncode=0)
    workspace.pull(subpath=Path("some-path"))
    mock_run.assert_called_once_with(
        [
            "rsync",
            "-arlpmchz",
            "--copy-unsafe-links",
            "-e",
            "ssh -Kq -o BatchMode=yes",
            "--force",
            f"{workspace.remote.host}:{workspace.remote.directory}/foo/bar/some-path",
            f"{workspace.local_root}/foo/bar/",
        ],
        stderr=ANY,
        stdout=ANY,
    )
@patch("remote.util.subprocess.run")
def test_pull_with_subdir_exec_from_root(mock_run, workspace):
    """When the working dir is the remote root, the subpath resolves from the root."""
    workspace.remote_working_dir = workspace.remote.directory
    mock_run.return_value = MagicMock(returncode=0)
    workspace.pull(subpath=Path("some-path"))
    mock_run.assert_called_once_with(
        [
            "rsync",
            "-arlpmchz",
            "--copy-unsafe-links",
            "-e",
            "ssh -Kq -o BatchMode=yes",
            "--force",
            f"{workspace.remote.host}:{workspace.remote.directory}/some-path",
            f"{workspace.local_root}/",
        ],
        stderr=ANY,
        stdout=ANY,
    )
@patch("remote.util.subprocess.run")
def test_execute(mock_run, workspace):
    """execute runs the command over ssh, cd-ing in and sourcing .remoteenv first."""
    mock_run.return_value = MagicMock(returncode=0)
    code = workspace.execute(["echo", "Hello World!"])
    mock_run.assert_called_once_with(
        [
            "ssh",
            "-tKq",
            "-o",
            "BatchMode=yes",
            workspace.remote.host,
            """\
cd remote/dir
if [ -f .remoteenv ]; then
source .remoteenv
fi
cd foo/bar
echo 'Hello World!'
""",
        ],
        stderr=ANY,
        stdin=ANY,
        stdout=ANY,
    )
    assert code == 0
@patch("remote.util.subprocess.run")
def test_execute_with_port_forwarding(mock_run, workspace):
    """ports=(remote, local) adds an ssh -L local:localhost:remote tunnel."""
    mock_run.return_value = MagicMock(returncode=0)
    code = workspace.execute(["echo", "Hello World!"], ports=(5005, 5000),)
    mock_run.assert_called_once_with(
        [
            "ssh",
            "-tKq",
            "-o",
            "BatchMode=yes",
            "-L",
            "5000:localhost:5005",
            workspace.remote.host,
            """\
cd remote/dir
if [ -f .remoteenv ]; then
source .remoteenv
fi
cd foo/bar
echo 'Hello World!'
""",
        ],
        stderr=ANY,
        stdin=ANY,
        stdout=ANY,
    )
    assert code == 0
@patch("remote.util.subprocess.run")
def test_execute_with_custom_port(mock_run, workspace):
    """A configured remote.port is passed to ssh as -p before the tunnel flags."""
    mock_run.return_value = MagicMock(returncode=0)
    workspace.remote.port = 4321
    code = workspace.execute(["echo", "Hello World!"], ports=(5005, 5000),)
    mock_run.assert_called_once_with(
        [
            "ssh",
            "-tKq",
            "-o",
            "BatchMode=yes",
            "-p",
            "4321",
            "-L",
            "5000:localhost:5005",
            workspace.remote.host,
            """\
cd remote/dir
if [ -f .remoteenv ]; then
source .remoteenv
fi
cd foo/bar
echo 'Hello World!'
""",
        ],
        stderr=ANY,
        stdin=ANY,
        stdout=ANY,
    )
    assert code == 0
@patch("remote.util.subprocess.run")
def test_execute_and_sync(mock_run, workspace):
    """execute_in_synced_env pushes, runs over ssh, then pulls; exit code is the ssh one."""
    # Three subprocess calls: push rsync (0), remote command (10), pull rsync (0).
    mock_run.side_effect = [MagicMock(returncode=0), MagicMock(returncode=10), MagicMock(returncode=0)]
    code = workspace.execute_in_synced_env(["echo", "Hello World!"])
    mock_run.assert_has_calls(
        [
            call(
                [
                    "rsync",
                    "-arlpmchz",
                    "--copy-unsafe-links",
                    "-e",
                    "ssh -Kq -o BatchMode=yes",
                    "--force",
                    "--delete",
                    "--rsync-path",
                    "mkdir -p remote/dir && rsync",
                    "--include-from",
                    ANY,
                    f"{workspace.local_root}/",
                    f"{workspace.remote.host}:{workspace.remote.directory}",
                ],
                stderr=ANY,
                stdout=ANY,
            ),
            call(
                [
                    "ssh",
                    "-tKq",
                    "-o",
                    "BatchMode=yes",
                    workspace.remote.host,
                    """\
cd remote/dir
if [ -f .remoteenv ]; then
source .remoteenv
fi
cd foo/bar
echo 'Hello World!'
""",
                ],
                stderr=ANY,
                stdin=ANY,
                stdout=ANY,
            ),
            call(
                [
                    "rsync",
                    "-arlpmchz",
                    "--copy-unsafe-links",
                    "-e",
                    "ssh -Kq -o BatchMode=yes",
                    "--force",
                    "--exclude-from",
                    ANY,
                    f"{workspace.remote.host}:{workspace.remote.directory}/",
                    f"{workspace.local_root}",
                ],
                stderr=ANY,
                stdout=ANY,
            ),
        ]
    )
    assert code == 10
@patch("remote.util.subprocess.run")
def test_execute_and_sync_with_port_forwarding(mock_run, workspace):
    """Same push/run/pull sequence, with the ssh step carrying the -L tunnel."""
    # Three subprocess calls: push rsync (0), remote command (10), pull rsync (0).
    mock_run.side_effect = [MagicMock(returncode=0), MagicMock(returncode=10), MagicMock(returncode=0)]
    code = workspace.execute_in_synced_env(["echo", "Hello World!"], ports=(5005, 5000),)
    mock_run.assert_has_calls(
        [
            call(
                [
                    "rsync",
                    "-arlpmchz",
                    "--copy-unsafe-links",
                    "-e",
                    "ssh -Kq -o BatchMode=yes",
                    "--force",
                    "--delete",
                    "--rsync-path",
                    "mkdir -p remote/dir && rsync",
                    "--include-from",
                    ANY,
                    f"{workspace.local_root}/",
                    f"{workspace.remote.host}:{workspace.remote.directory}",
                ],
                stderr=ANY,
                stdout=ANY,
            ),
            call(
                [
                    "ssh",
                    "-tKq",
                    "-o",
                    "BatchMode=yes",
                    "-L",
                    "5000:localhost:5005",
                    workspace.remote.host,
                    """\
cd remote/dir
if [ -f .remoteenv ]; then
source .remoteenv
fi
cd foo/bar
echo 'Hello World!'
""",
                ],
                stderr=ANY,
                stdin=ANY,
                stdout=ANY,
            ),
            call(
                [
                    "rsync",
                    "-arlpmchz",
                    "--copy-unsafe-links",
                    "-e",
                    "ssh -Kq -o BatchMode=yes",
                    "--force",
                    "--exclude-from",
                    ANY,
                    f"{workspace.remote.host}:{workspace.remote.directory}/",
                    f"{workspace.local_root}",
                ],
                stderr=ANY,
                stdout=ANY,
            ),
        ]
    )
    assert code == 10
| 29.715
| 120
| 0.537187
| 1,170
| 11,886
| 5.295727
| 0.118803
| 0.084732
| 0.029374
| 0.040349
| 0.871207
| 0.861846
| 0.853938
| 0.832795
| 0.807134
| 0.805036
| 0
| 0.011464
| 0.339475
| 11,886
| 399
| 121
| 29.789474
| 0.777735
| 0.024651
| 0
| 0.731013
| 0
| 0
| 0.193009
| 0.085441
| 0
| 0
| 0
| 0
| 0.10443
| 1
| 0.037975
| false
| 0
| 0.018987
| 0
| 0.056962
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4a8866d2bb023ba6e0330662a3e9f88d06f4ee23
| 205
|
py
|
Python
|
general-practice/Exercises solved/codingbat/Warmup2/array_count.py
|
lugabrielbueno/Projeto
|
f012c5bb9ce6f6d7c9e8196cc7986127dba3eba0
|
[
"MIT"
] | null | null | null |
general-practice/Exercises solved/codingbat/Warmup2/array_count.py
|
lugabrielbueno/Projeto
|
f012c5bb9ce6f6d7c9e8196cc7986127dba3eba0
|
[
"MIT"
] | null | null | null |
general-practice/Exercises solved/codingbat/Warmup2/array_count.py
|
lugabrielbueno/Projeto
|
f012c5bb9ce6f6d7c9e8196cc7986127dba3eba0
|
[
"MIT"
] | null | null | null |
#Given an array of ints, return the number of 9's in the array.
#array_count9([1, 2, 9]) → 1
#array_count9([1, 9, 9]) → 2
#array_count9([1, 9, 9, 3, 9]) → 3
def array_count9(lis):
    """Return how many elements of *lis* are equal to 9."""
    return sum(1 for value in lis if value == 9)
| 25.625
| 63
| 0.634146
| 45
| 205
| 2.866667
| 0.422222
| 0.341085
| 0.27907
| 0.20155
| 0.217054
| 0
| 0
| 0
| 0
| 0
| 0
| 0.120482
| 0.190244
| 205
| 8
| 64
| 25.625
| 0.638554
| 0.726829
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
43820c9fd31032ec64d50aadf2490d84c2add51a
| 94
|
py
|
Python
|
Hello-world-Louis-W.py
|
egriswol/astr-119-section-assignments
|
6cbeb0397b6a1a9102e8efa771e8d9ed1d3fab18
|
[
"MIT"
] | 2
|
2018-09-28T18:46:05.000Z
|
2018-09-28T19:00:27.000Z
|
Hello-world-Louis-W.py
|
egriswol/astr-119-section-assignments
|
6cbeb0397b6a1a9102e8efa771e8d9ed1d3fab18
|
[
"MIT"
] | 67
|
2018-09-26T06:39:43.000Z
|
2018-10-03T15:32:12.000Z
|
Hello-world-Louis-W.py
|
egriswol/astr-119-section-assignments
|
6cbeb0397b6a1a9102e8efa771e8d9ed1d3fab18
|
[
"MIT"
] | 62
|
2018-09-27T20:12:32.000Z
|
2018-10-03T23:53:47.000Z
|
#!/usr/bin/env python3
# Emit a one-line greeting on stdout.
greeting = "Hello from Louis."
print(greeting)
| 23.5
| 43
| 0.734043
| 15
| 94
| 4.6
| 0.733333
| 0.289855
| 0.405797
| 0.550725
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012195
| 0.12766
| 94
| 4
| 44
| 23.5
| 0.829268
| 0.670213
| 0
| 0
| 0
| 0
| 0.566667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
43b23e4f711dc59ae60e46892af3704e38d38037
| 8,718
|
py
|
Python
|
examples/dgpr_aep_examples.py
|
MattAshman/geepee
|
ae71998579cb80e160f7ea5eb5adfa1c937fb90a
|
[
"MIT"
] | 24
|
2017-08-16T18:45:22.000Z
|
2021-04-28T08:21:36.000Z
|
examples/dgpr_aep_examples.py
|
yohanJung/geepee
|
4809c4f78efb9134677af57187957d8ba479ddea
|
[
"MIT"
] | 7
|
2017-05-31T17:09:58.000Z
|
2018-05-21T02:26:25.000Z
|
examples/dgpr_aep_examples.py
|
yohanJung/geepee
|
4809c4f78efb9134677af57187957d8ba479ddea
|
[
"MIT"
] | 10
|
2017-06-02T08:13:54.000Z
|
2021-12-07T13:48:22.000Z
|
# NOTE(review): Python 2 print statement below — this example module is
# Python 2 only as written.
print "importing stuff..."
import numpy as np
import pdb
# The two commented lines select the non-interactive Agg backend
# (useful when running without a display); left disabled here.
# import matplotlib
# matplotlib.use('Agg')
import matplotlib.pylab as plt
from scipy import special
from .context import aep
from .datautils import step, spiral
from .context import config
def run_regression_1D():
    """Fit a deep sparse GP (AEP) to a toy 1D regression set and save a plot.

    Builds N=200 noisy sinusoid samples, trains an SDGPR with one hidden
    layer using batch L-BFGS-B, then writes the figure to
    /tmp/aep_dgpr_1D.pdf.
    """
    np.random.seed(42)  # fixed seed for reproducibility
    print "create dataset ..."
    N = 200
    X = np.random.rand(N, 1)
    # Superposition of two sinusoids plus Gaussian noise (sd = 0.2).
    Y = np.sin(12 * X) + 0.5 * np.cos(25 * X) + np.random.randn(N, 1) * 0.2
    # plt.plot(X, Y, 'kx', mew=2)

    def plot(m):
        # Plot training data, MC-propagated predictions and inducing outputs.
        xx = np.linspace(-0.5, 1.5, 100)[:, None]
        # mean, var = m.predict_f(xx)
        samples, mf, vf = m.predict_f(xx, config.PROP_MC)
        zu = m.sgp_layers[0].zu  # inducing inputs of the first GP layer
        mean_u, var_u = m.predict_f(zu)
        plt.figure()
        plt.plot(X, Y, 'kx', mew=2)
        # plt.plot(xx, mean, 'b', lw=2)
        # plt.fill_between(
        #     xx[:, 0],
        #     mean[:, 0] - 2 * np.sqrt(var[:, 0]),
        #     mean[:, 0] + 2 * np.sqrt(var[:, 0]),
        #     color='blue', alpha=0.2)
        # NOTE(review): only x-values are passed here (no y data) —
        # presumably the MC `samples` were meant to be plotted; confirm.
        plt.plot(np.tile(xx[np.newaxis, :], [200, 1]))
        plt.errorbar(zu, mean_u, yerr=2 * np.sqrt(var_u), fmt='ro')
        plt.xlim(-0.1, 1.1)

    # inference
    print "create model and optimize ..."
    M = 20  # number of inducing points per layer
    hidden_size = [2]  # size(s) of the hidden GP layer(s)
    model = aep.SDGPR(X, Y, M, hidden_size, lik='Gaussian')
    model.optimise(method='L-BFGS-B', alpha=1, maxiter=2000)
    plot(model)
    # plt.show()
    plt.savefig('/tmp/aep_dgpr_1D.pdf')
def run_banana():
    """Train a deep sparse GP classifier (AEP) on the banana dataset.

    Loads the banana training data from ./examples/data, fits an SDGPR
    with a Probit likelihood via batch L-BFGS-B, and plots the decision
    boundary.
    """
    def gridParams():
        # Build a 50x50 evaluation grid covering the training-data range.
        mins = [-3.25, -2.85]
        maxs = [3.65, 3.4]
        nGrid = 50
        xspaced = np.linspace(mins[0], maxs[0], nGrid)
        yspaced = np.linspace(mins[1], maxs[1], nGrid)
        xx, yy = np.meshgrid(xspaced, yspaced)
        Xplot = np.vstack((xx.flatten(), yy.flatten())).T
        return mins, maxs, xx, yy, Xplot

    def plot(m):
        # Scatter both classes, mark inducing inputs, and draw the
        # zero-level contour of the predicted latent mean.
        col1 = '#0172B2'
        col2 = '#CC6600'
        mins, maxs, xx, yy, Xplot = gridParams()
        mf, vf = m.predict_f(Xplot)
        plt.figure()
        plt.plot(
            Xtrain[:, 0][Ytrain[:, 0] == 1],
            Xtrain[:, 1][Ytrain[:, 0] == 1],
            'o', color=col1, mew=0, alpha=0.5)
        plt.plot(
            Xtrain[:, 0][Ytrain[:, 0] == -1],
            Xtrain[:, 1][Ytrain[:, 0] == -1],
            'o', color=col2, mew=0, alpha=0.5)
        zu = m.sgp_layers[0].zu
        plt.plot(zu[:, 0], zu[:, 1], 'ro', mew=0, ms=4)
        plt.contour(xx, yy, mf.reshape(*xx.shape),
                    [0], colors='k', linewidths=1.8, zorder=100)

    Xtrain = np.loadtxt('./examples/data/banana_X_train.txt', delimiter=',')
    Ytrain = np.loadtxt('./examples/data/banana_Y_train.txt',
                        delimiter=',').reshape(-1, 1)
    Ytrain[np.where(Ytrain == 0)[0]] = -1  # relabel classes {0,1} -> {-1,+1}
    M = 30  # number of inducing points
    hidden_size = [2]
    model = aep.SDGPR(Xtrain, Ytrain, M, hidden_size, lik='Probit')
    model.optimise(method='L-BFGS-B', alpha=1.0, maxiter=2000)
    plot(model)
    plt.show()
    # NOTE(review): savefig after show() may write an empty figure once the
    # interactive window is closed — confirm intended call order.
    plt.savefig('/tmp/aep_dgpc_banana.pdf')
def run_regression_1D_stoc():
    """Stochastic (minibatch Adam) variant of run_regression_1D.

    Fits an SDGPR to the same toy sinusoid data but optimises with Adam on
    minibatches, then saves the figure to /tmp/aep_dgpr_1D_stoc.pdf.
    """
    np.random.seed(42)  # fixed seed for reproducibility
    print "create dataset ..."
    N = 200
    X = np.random.rand(N, 1)
    # Superposition of two sinusoids plus Gaussian noise (sd = 0.2).
    Y = np.sin(12 * X) + 0.5 * np.cos(25 * X) + np.random.randn(N, 1) * 0.2
    # plt.plot(X, Y, 'kx', mew=2)

    def plot(m):
        # Predictive mean +/- 2 sd band, training points, inducing outputs.
        xx = np.linspace(-0.5, 1.5, 100)[:, None]
        mean, var = m.predict_f(xx)
        zu = m.sgp_layers[0].zu
        mean_u, var_u = m.predict_f(zu)
        plt.figure()
        plt.plot(X, Y, 'kx', mew=2)
        plt.plot(xx, mean, 'b', lw=2)
        plt.fill_between(
            xx[:, 0],
            mean[:, 0] - 2 * np.sqrt(var[:, 0]),
            mean[:, 0] + 2 * np.sqrt(var[:, 0]),
            color='blue', alpha=0.2)
        plt.errorbar(zu, mean_u, yerr=2 * np.sqrt(var_u), fmt='ro')
        plt.xlim(-0.1, 1.1)

    # inference
    print "create model and optimize ..."
    M = 20  # number of inducing points per layer
    hidden_size = [2]
    model = aep.SDGPR(X, Y, M, hidden_size, lik='Gaussian')
    # NOTE(review): mb_size=M reuses the inducing-point count (20) as the
    # minibatch size — presumably intentional; confirm.
    model.optimise(method='adam', alpha=1.0,
                   maxiter=50000, mb_size=M, adam_lr=0.001)
    plot(model)
    plt.show()
    plt.savefig('/tmp/aep_dgpr_1D_stoc.pdf')
def run_banana_stoc():
    """Stochastic (minibatch Adam) variant of the run_banana classifier demo."""
    def gridParams():
        # Build a 50x50 evaluation grid covering the training-data range.
        mins = [-3.25, -2.85]
        maxs = [3.65, 3.4]
        nGrid = 50
        xspaced = np.linspace(mins[0], maxs[0], nGrid)
        yspaced = np.linspace(mins[1], maxs[1], nGrid)
        xx, yy = np.meshgrid(xspaced, yspaced)
        Xplot = np.vstack((xx.flatten(), yy.flatten())).T
        return mins, maxs, xx, yy, Xplot

    def plot(m):
        # Scatter both classes, mark inducing inputs, and draw the
        # zero-level contour of the predicted latent mean.
        col1 = '#0172B2'
        col2 = '#CC6600'
        mins, maxs, xx, yy, Xplot = gridParams()
        mf, vf = m.predict_f(Xplot)
        plt.figure()
        plt.plot(
            Xtrain[:, 0][Ytrain[:, 0] == 1],
            Xtrain[:, 1][Ytrain[:, 0] == 1],
            'o', color=col1, mew=0, alpha=0.5)
        plt.plot(
            Xtrain[:, 0][Ytrain[:, 0] == -1],
            Xtrain[:, 1][Ytrain[:, 0] == -1],
            'o', color=col2, mew=0, alpha=0.5)
        zu = m.sgp_layers[0].zu
        plt.plot(zu[:, 0], zu[:, 1], 'ro', mew=0, ms=4)
        plt.contour(xx, yy, mf.reshape(*xx.shape),
                    [0], colors='k', linewidths=1.8, zorder=100)

    Xtrain = np.loadtxt('./examples/data/banana_X_train.txt', delimiter=',')
    Ytrain = np.loadtxt('./examples/data/banana_Y_train.txt',
                        delimiter=',').reshape(-1, 1)
    Ytrain[np.where(Ytrain == 0)[0]] = -1  # relabel classes {0,1} -> {-1,+1}
    M = 30  # number of inducing points
    hidden_size = [2]
    model = aep.SDGPR(Xtrain, Ytrain, M, hidden_size, lik='Probit')
    mb_size = int(Xtrain.shape[0] / 4)  # one quarter of the data per minibatch
    model.optimise(method='adam', alpha=1.0, maxiter=100000,
                   mb_size=mb_size, adam_lr=0.001)
    plot(model)
    plt.show()
    # NOTE(review): savefig after show() may write an empty figure once the
    # interactive window is closed — confirm intended call order.
    plt.savefig('/tmp/aep_dgpc_banana_stoc.pdf')
def run_step_1D():
    """Fit an SDGPR to a noisy 1D step function and plot posterior samples."""
    np.random.seed(42)  # fixed seed for reproducibility

    def step(x):
        # Noisy unit step; NOTE: shadows the `step` imported from
        # .datautils at module level.
        y = x.copy()
        y[y < 0.0] = 0.0
        y[y > 0.0] = 1.0
        return y + 0.02 * np.random.randn(x.shape[0], 1)

    print "create dataset ..."
    N = 100
    X = np.random.rand(N, 1) * 3 - 1.5  # inputs in [-1.5, 1.5)
    Y = step(X) - 0.5  # centre targets around zero
    # plt.plot(X, Y, 'kx', mew=2)

    def plot(m):
        # Predictive mean/variance band plus sampled functions from the model.
        xx = np.linspace(-3, 3, 100)[:, None]
        mean, var = m.predict_f(xx)
        zu = m.sgp_layers[0].zu
        mean_u, var_u = m.predict_f(zu)
        plt.figure()
        plt.plot(X, Y, 'kx', mew=2)
        plt.plot(xx, mean, 'b', lw=2)
        plt.fill_between(
            xx[:, 0],
            mean[:, 0] - 2 * np.sqrt(var[:, 0]),
            mean[:, 0] + 2 * np.sqrt(var[:, 0]),
            color='blue', alpha=0.2)
        plt.errorbar(zu, mean_u, yerr=2 * np.sqrt(var_u), fmt='ro')
        no_samples = 20
        xx = np.linspace(-3, 3, 500)[:, None]  # denser grid for samples
        f_samples = m.sample_f(xx, no_samples)
        for i in range(no_samples):
            plt.plot(xx, f_samples[:, :, i], linewidth=0.5, alpha=0.5)
        plt.xlim(-3, 3)

    # inference
    print "create model and optimize ..."
    M = 20  # number of inducing points per layer
    hidden_size = [2]
    model = aep.SDGPR(X, Y, M, hidden_size, lik='Gaussian')
    # model.optimise(method='L-BFGS-B', alpha=1, maxiter=1000)
    model.optimise(method='adam', adam_lr=0.05, alpha=1, maxiter=2000)
    plot(model)
    plt.show()
def run_spiral():
    """Train a two-hidden-layer SDGPR classifier on the spiral dataset."""
    np.random.seed(42)  # fixed seed for reproducibility

    def gridParams():
        # Build an 80x80 evaluation grid over [-1.2, 1.2]^2.
        mins = [-1.2, -1.2]
        maxs = [1.2, 1.2]
        nGrid = 80
        xspaced = np.linspace(mins[0], maxs[0], nGrid)
        yspaced = np.linspace(mins[1], maxs[1], nGrid)
        xx, yy = np.meshgrid(xspaced, yspaced)
        Xplot = np.vstack((xx.flatten(), yy.flatten())).T
        return mins, maxs, xx, yy, Xplot

    def plot(m):
        # Scatter both classes, mark inducing inputs, and draw the
        # zero-level contour of the predicted latent mean.
        col1 = '#0172B2'
        col2 = '#CC6600'
        mins, maxs, xx, yy, Xplot = gridParams()
        mf, vf = m.predict_f(Xplot)
        plt.figure()
        plt.plot(
            Xtrain[:, 0][Ytrain[:, 0] == 1],
            Xtrain[:, 1][Ytrain[:, 0] == 1],
            'o', color=col1, mew=0, alpha=0.5)
        plt.plot(
            Xtrain[:, 0][Ytrain[:, 0] == -1],
            Xtrain[:, 1][Ytrain[:, 0] == -1],
            'o', color=col2, mew=0, alpha=0.5)
        zu = m.sgp_layers[0].zu
        plt.plot(zu[:, 0], zu[:, 1], 'ro', mew=0, ms=4)
        plt.contour(xx, yy, mf.reshape(*xx.shape),
                    [0], colors='k', linewidths=1.8, zorder=100)

    N = 100
    M = 20  # number of inducing points per layer
    Xtrain, Ytrain = spiral(N)
    Xtrain /= 6  # rescale inputs to roughly match the plot grid range
    hidden_size = [2, 2]  # two hidden GP layers
    model = aep.SDGPR(Xtrain, Ytrain, M, hidden_size, lik='Probit')
    # Fix the sf_* parameters (presumably per-layer kernel signal
    # variances — confirm against aep.SDGPR) during optimisation.
    model.set_fixed_params(['sf_0', 'sf_1', 'sf_2'])
    model.optimise(method='L-BFGS-B', alpha=1, maxiter=5000)
    plot(model)
    plt.show()
if __name__ == '__main__':
    # Run one demo at a time; uncomment the experiment you want to execute.
    # run_regression_1D()
    # run_banana()
    run_step_1D()
    # run_spiral()
    # run_regression_1D_stoc()
    # run_banana_stoc()
| 31.135714
| 76
| 0.516059
| 1,312
| 8,718
| 3.345274
| 0.135671
| 0.031898
| 0.021873
| 0.020506
| 0.80884
| 0.793575
| 0.790157
| 0.790157
| 0.764183
| 0.755525
| 0
| 0.067328
| 0.294678
| 8,718
| 279
| 77
| 31.247312
| 0.646447
| 0.058614
| 0
| 0.763636
| 0
| 0
| 0.070137
| 0.026149
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.036364
| null | null | 0.031818
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
78e8a450e89afd85d2f2287871dbe35f179f1c49
| 90
|
py
|
Python
|
mtuq/misfit/__init__.py
|
ammcpherson/mtuq
|
aecfb732ea459e0236c082db5b765bf7bda9950a
|
[
"BSD-2-Clause"
] | 23
|
2019-09-20T16:51:05.000Z
|
2022-03-28T18:09:56.000Z
|
mtuq/misfit/__init__.py
|
Liang-Ding/mtuq
|
2547a7b2684b1c769cea6323f2aac267ccbeaeef
|
[
"BSD-2-Clause"
] | 14
|
2019-09-25T06:06:43.000Z
|
2022-03-10T18:05:46.000Z
|
mtuq/misfit/__init__.py
|
Liang-Ding/mtuq
|
2547a7b2684b1c769cea6323f2aac267ccbeaeef
|
[
"BSD-2-Clause"
] | 6
|
2019-12-13T12:55:28.000Z
|
2022-02-03T18:10:30.000Z
|
from mtuq.misfit.waveform import Misfit
from mtuq.misfit.polarity import PolarityMisfit
| 18
| 47
| 0.844444
| 12
| 90
| 6.333333
| 0.583333
| 0.210526
| 0.368421
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 90
| 4
| 48
| 22.5
| 0.95
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
60346f039c3e508ec28357f4a4a05b1b25f28adb
| 5,463
|
py
|
Python
|
codegen/python/fixtures/client/aiohttp_client/unmarshall_response/users_service.py
|
feeltheajf/go-raml
|
57ce1f4c47bca464efee03cb4a7a28efcd00bea2
|
[
"BSD-2-Clause"
] | null | null | null |
codegen/python/fixtures/client/aiohttp_client/unmarshall_response/users_service.py
|
feeltheajf/go-raml
|
57ce1f4c47bca464efee03cb4a7a28efcd00bea2
|
[
"BSD-2-Clause"
] | null | null | null |
codegen/python/fixtures/client/aiohttp_client/unmarshall_response/users_service.py
|
feeltheajf/go-raml
|
57ce1f4c47bca464efee03cb4a7a28efcd00bea2
|
[
"BSD-2-Clause"
] | null | null | null |
# DO NOT EDIT THIS FILE. This file will be overwritten when re-running go-raml.
from .Address import Address
from .City import City
from .unhandled_api_error import UnhandledAPIError
from .unmarshall_error import UnmarshallError
class UsersService:
    """Auto-generated aiohttp client for the /users endpoints.

    NOTE(review): per the file header this code is generated by go-raml
    ("DO NOT EDIT"); manual changes will be lost on regeneration.
    """

    # Names of the service methods exposed by this client.
    _methods = (
        "users_userId_address_addressId_get",
        "users_userId_delete",
        "getuserid",
        "users_userId_post",
        "users_delete",
        "get_users",
        "option_users",
        "create_users",
    )

    def __init__(self, client):
        # `client` supplies base_url and the async HTTP verb helpers
        # (get/post/delete/options) used below.
        self.client = client

    async def users_userId_address_addressId_get(
        self,
        addressId,
        userId,
        headers=None,
        query_params=None,
        content_type="application/json",
    ):
        """
        get address id
        of address
        It is method for GET /users/{userId}/address/{addressId}
        """
        if query_params is None:
            query_params = {}
        uri = (
            self.client.base_url + "/users/" + userId + "/address/" + addressId
        )
        resp = await self.client.get(
            uri, None, headers, query_params, content_type
        )
        try:
            if resp.status == 200:
                return Address(await resp.json()), resp
            # NOTE(review): non-200 responses fall through and return None
            # implicitly; UnhandledAPIError is imported but never raised.
        except ValueError as msg:
            # Response body was not valid JSON.
            raise UnmarshallError(resp, msg)
        except Exception as e:
            # NOTE(review): `e.message` is a Python 2 idiom; on Python 3
            # exceptions have no .message attribute — confirm against the
            # go-raml templates.
            raise UnmarshallError(resp, e.message)

    async def users_userId_delete(
        self,
        userId,
        headers=None,
        query_params=None,
        content_type="application/json",
    ):
        """
        It is method for DELETE /users/{userId}
        """
        if query_params is None:
            query_params = {}
        uri = self.client.base_url + "/users/" + userId
        # Returns the raw client response; no unmarshalling for DELETE.
        return await self.client.delete(
            uri, None, headers, query_params, content_type
        )

    async def getuserid(
        self,
        userId,
        headers=None,
        query_params=None,
        content_type="application/json",
    ):
        """
        get id
        It is method for GET /users/{userId}
        """
        if query_params is None:
            query_params = {}
        uri = self.client.base_url + "/users/" + userId
        resp = await self.client.get(
            uri, None, headers, query_params, content_type
        )
        try:
            if resp.status == 200:
                return City(await resp.json()), resp
            # NOTE(review): non-200 responses implicitly return None.
        except ValueError as msg:
            raise UnmarshallError(resp, msg)
        except Exception as e:
            # NOTE(review): `e.message` — Python 2 idiom, see note above
            # in users_userId_address_addressId_get.
            raise UnmarshallError(resp, e.message)

    async def users_userId_post(
        self,
        data,
        userId,
        headers=None,
        query_params=None,
        content_type="application/json",
    ):
        """
        post without request body
        It is method for POST /users/{userId}
        """
        if query_params is None:
            query_params = {}
        uri = self.client.base_url + "/users/" + userId
        return await self.client.post(
            uri, data, headers, query_params, content_type
        )

    async def users_delete(
        self,
        data,
        headers=None,
        query_params=None,
        content_type="application/json",
    ):
        """
        delete with request body
        It is method for DELETE /users
        """
        if query_params is None:
            query_params = {}
        uri = self.client.base_url + "/users"
        resp = await self.client.delete(
            uri, data, headers, query_params, content_type
        )
        try:
            if resp.status == 200:
                return City(await resp.json()), resp
            # NOTE(review): non-200 responses implicitly return None.
        except ValueError as msg:
            raise UnmarshallError(resp, msg)
        except Exception as e:
            raise UnmarshallError(resp, e.message)

    async def get_users(
        self,
        data,
        headers=None,
        query_params=None,
        content_type="application/json",
    ):
        """
        First line of comment.
        Second line of comment
        It is method for GET /users
        """
        if query_params is None:
            query_params = {}
        uri = self.client.base_url + "/users"
        return await self.client.get(
            uri, data, headers, query_params, content_type
        )

    async def option_users(
        self, headers=None, query_params=None, content_type="application/json"
    ):
        """
        It is method for OPTIONS /users
        """
        if query_params is None:
            query_params = {}
        uri = self.client.base_url + "/users"
        return await self.client.options(
            uri, None, headers, query_params, content_type
        )

    async def create_users(
        self,
        data,
        headers=None,
        query_params=None,
        content_type="application/json",
    ):
        """
        create users
        It is method for POST /users
        """
        if query_params is None:
            query_params = {}
        uri = self.client.base_url + "/users"
        resp = await self.client.post(
            uri, data, headers, query_params, content_type
        )
        try:
            if resp.status == 200:
                return City(await resp.json()), resp
            # NOTE(review): non-200 responses implicitly return None.
        except ValueError as msg:
            raise UnmarshallError(resp, msg)
        except Exception as e:
            raise UnmarshallError(resp, e.message)
| 26.91133
| 79
| 0.547867
| 586
| 5,463
| 4.957338
| 0.139932
| 0.12117
| 0.082616
| 0.060585
| 0.810327
| 0.784509
| 0.751119
| 0.736317
| 0.736317
| 0.688124
| 0
| 0.003455
| 0.364269
| 5,463
| 202
| 80
| 27.044554
| 0.832997
| 0.014095
| 0
| 0.697987
| 1
| 0
| 0.066582
| 0.007233
| 0
| 0
| 0
| 0
| 0
| 1
| 0.006711
| false
| 0
| 0.026846
| 0
| 0.100671
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
605a315d0aa2cc6dd8e278f71a1a0049934ca0af
| 34,521
|
py
|
Python
|
gluoncv/model_zoo/ssd/presets.py
|
Rainweic/gluon-cv
|
4540edbf7cd6ca40878752e20586f5b87c9e307f
|
[
"Apache-2.0"
] | 2
|
2020-11-27T01:20:29.000Z
|
2020-11-27T01:20:30.000Z
|
gluoncv/model_zoo/ssd/presets.py
|
Rainweic/gluon-cv
|
4540edbf7cd6ca40878752e20586f5b87c9e307f
|
[
"Apache-2.0"
] | null | null | null |
gluoncv/model_zoo/ssd/presets.py
|
Rainweic/gluon-cv
|
4540edbf7cd6ca40878752e20586f5b87c9e307f
|
[
"Apache-2.0"
] | 1
|
2020-08-17T09:30:16.000Z
|
2020-08-17T09:30:16.000Z
|
"""SSD predefined models."""
from __future__ import absolute_import
import warnings
from .ssd import get_ssd
from .vgg_atrous import vgg16_atrous_300, vgg16_atrous_512
from ...data import VOCDetection
__all__ = ['ssd_300_vgg16_atrous_voc',
'ssd_300_vgg16_atrous_coco',
'ssd_300_vgg16_atrous_custom',
'ssd_512_vgg16_atrous_voc',
'ssd_512_vgg16_atrous_coco',
'ssd_512_vgg16_atrous_custom',
'ssd_512_resnet18_v1_voc',
'ssd_512_resnet18_v1_coco',
'ssd_512_resnet18_v1_custom',
'ssd_512_resnet50_v1_voc',
'ssd_512_lresnet50_v1_voc',
'ssd_512_resnet50_v1_coco',
'ssd_512_resnet50_v1_custom',
'ssd_512_resnet101_v2_voc',
'ssd_512_resnet152_v2_voc',
'ssd_512_mobilenet1_0_voc',
'ssd_512_mobilenet1_0_coco',
'ssd_512_mobilenet1_0_custom',
'ssd_300_mobilenet0_25_voc',
'ssd_300_mobilenet0_25_coco',
'ssd_300_mobilenet0_25_custom']
def ssd_300_vgg16_atrous_voc(pretrained=False, pretrained_base=True, **kwargs):
    """SSD architecture with VGG16 atrous 300x300 base network for Pascal VOC.

    Parameters
    ----------
    pretrained : bool or str
        Boolean value controls whether to load the default pretrained weights for model.
        String value represents the hashtag for a certain version of pretrained weights.
    pretrained_base : bool or str, optional, default is True
        Load pretrained base network, the extra layers are randomized.

    Returns
    -------
    HybridBlock
        A SSD detection network.
    """
    classes = VOCDetection.CLASSES
    # Anchor sizes, per-layer aspect ratios and feature strides for the
    # 300x300 input resolution.
    net = get_ssd('vgg16_atrous', 300, features=vgg16_atrous_300, filters=None,
                  sizes=[30, 60, 111, 162, 213, 264, 315],
                  ratios=[[1, 2, 0.5]] + [[1, 2, 0.5, 3, 1.0/3]] * 3 + [[1, 2, 0.5]] * 2,
                  steps=[8, 16, 32, 64, 100, 300],
                  classes=classes, dataset='voc', pretrained=pretrained,
                  pretrained_base=pretrained_base, **kwargs)
    return net
def ssd_300_vgg16_atrous_coco(pretrained=False, pretrained_base=True, **kwargs):
    """SSD architecture with VGG16 atrous 300x300 base network for COCO.

    Parameters
    ----------
    pretrained : bool or str
        Boolean value controls whether to load the default pretrained weights for model.
        String value represents the hashtag for a certain version of pretrained weights.
    pretrained_base : bool or str, optional, default is True
        Load pretrained base network, the extra layers are randomized.

    Returns
    -------
    HybridBlock
        A SSD detection network.
    """
    # Imported lazily here (not at module level like VOCDetection).
    from ...data import COCODetection
    classes = COCODetection.CLASSES
    # Same layout as the VOC variant but with COCO anchor sizes.
    net = get_ssd('vgg16_atrous', 300, features=vgg16_atrous_300, filters=None,
                  sizes=[21, 45, 99, 153, 207, 261, 315],
                  ratios=[[1, 2, 0.5]] + [[1, 2, 0.5, 3, 1.0/3]] * 3 + [[1, 2, 0.5]] * 2,
                  steps=[8, 16, 32, 64, 100, 300],
                  classes=classes, dataset='coco', pretrained=pretrained,
                  pretrained_base=pretrained_base, **kwargs)
    return net
def ssd_300_vgg16_atrous_custom(classes, pretrained_base=True, pretrained=False,
                                transfer=None, **kwargs):
    """SSD architecture with VGG16 atrous 300x300 base network for a custom dataset.

    Parameters
    ----------
    classes : iterable of str
        Names of custom foreground classes. `len(classes)` is the number of foreground classes.
    pretrained_base : bool or str, optional, default is True
        Load pretrained base network, the extra layers are randomized.
    transfer : str or None
        If not `None`, will try to reuse pre-trained weights from SSD networks trained on other
        datasets.

    Returns
    -------
    HybridBlock
        A SSD detection network.

    Example
    -------
    >>> net = ssd_300_vgg16_atrous_custom(classes=['a', 'b', 'c'], pretrained_base=True)
    >>> net = ssd_300_vgg16_atrous_custom(classes=['foo', 'bar'], transfer='coco')
    """
    if pretrained:
        warnings.warn("Custom models don't provide `pretrained` weights, ignored.")
    if transfer is None:
        # Build from scratch (optionally with a pretrained base network).
        kwargs['pretrained'] = False
        net = get_ssd('vgg16_atrous', 300, features=vgg16_atrous_300, filters=None,
                      sizes=[21, 45, 99, 153, 207, 261, 315],
                      ratios=[[1, 2, 0.5]] + [[1, 2, 0.5, 3, 1.0/3]] * 3 + [[1, 2, 0.5]] * 2,
                      steps=[8, 16, 32, 64, 100, 300],
                      classes=classes, dataset='',
                      pretrained_base=pretrained_base, **kwargs)
    else:
        # Transfer learning: load a pretrained model and keep weights for
        # any classes shared with the requested custom class list.
        from ...model_zoo import get_model
        net = get_model('ssd_300_vgg16_atrous_' + str(transfer), pretrained=True, **kwargs)
        reuse_classes = [x for x in classes if x in net.classes]
        net.reset_class(classes, reuse_weights=reuse_classes)
    return net
def ssd_512_vgg16_atrous_voc(pretrained=False, pretrained_base=True, **kwargs):
    """SSD architecture with VGG16 atrous 512x512 base network.

    Parameters
    ----------
    pretrained : bool or str
        Boolean value controls whether to load the default pretrained weights for model.
        String value represents the hashtag for a certain version of pretrained weights.
    pretrained_base : bool or str, optional, default is True
        Load pretrained base network, the extra layers are randomized.

    Returns
    -------
    HybridBlock
        A SSD detection network.
    """
    classes = VOCDetection.CLASSES
    # Anchor sizes, per-layer aspect ratios and feature strides for the
    # 512x512 input resolution.
    net = get_ssd('vgg16_atrous', 512, features=vgg16_atrous_512, filters=None,
                  sizes=[51.2, 76.8, 153.6, 230.4, 307.2, 384.0, 460.8, 537.6],
                  ratios=[[1, 2, 0.5]] + [[1, 2, 0.5, 3, 1.0/3]] * 4 + [[1, 2, 0.5]] * 2,
                  steps=[8, 16, 32, 64, 128, 256, 512],
                  classes=classes, dataset='voc', pretrained=pretrained,
                  pretrained_base=pretrained_base, **kwargs)
    return net
def ssd_512_vgg16_atrous_coco(pretrained=False, pretrained_base=True, **kwargs):
    """SSD architecture with VGG16 atrous layers for COCO.

    Parameters
    ----------
    pretrained : bool or str
        Boolean value controls whether to load the default pretrained weights for model.
        String value represents the hashtag for a certain version of pretrained weights.
    pretrained_base : bool or str, optional, default is True
        Load pretrained base network, the extra layers are randomized.

    Returns
    -------
    HybridBlock
        A SSD detection network.
    """
    from ...data import COCODetection
    classes = COCODetection.CLASSES
    return get_ssd('vgg16_atrous', 512, features=vgg16_atrous_512, filters=None,
                   sizes=[51.2, 76.8, 153.6, 230.4, 307.2, 384.0, 460.8, 537.6],
                   ratios=[[1, 2, 0.5]] + [[1, 2, 0.5, 3, 1.0/3]] * 4 + [[1, 2, 0.5]] * 2,
                   steps=[8, 16, 32, 64, 128, 256, 512],
                   classes=classes, dataset='coco', pretrained=pretrained,
                   pretrained_base=pretrained_base, **kwargs)
def ssd_512_vgg16_atrous_custom(classes, pretrained_base=True, pretrained=False,
                                transfer=None, **kwargs):
    """SSD architecture with VGG16 atrous 512x512 base network for a custom dataset.

    Parameters
    ----------
    classes : iterable of str
        Names of custom foreground classes. `len(classes)` is the number of foreground classes.
    pretrained_base : bool or str, optional, default is True
        Load pretrained base network, the extra layers are randomized.
    transfer : str or None
        If not `None`, will try to reuse pre-trained weights from SSD networks trained on other
        datasets.

    Returns
    -------
    HybridBlock
        A SSD detection network.

    Example
    -------
    >>> net = ssd_512_vgg16_atrous_custom(classes=['a', 'b', 'c'], pretrained_base=True)
    >>> net = ssd_512_vgg16_atrous_custom(classes=['foo', 'bar'], transfer='coco')
    """
    if pretrained:
        warnings.warn("Custom models don't provide `pretrained` weights, ignored.")
    if transfer is None:
        # Build from scratch (optionally with a pretrained base network).
        kwargs['pretrained'] = False
        net = get_ssd('vgg16_atrous', 512, features=vgg16_atrous_512, filters=None,
                      sizes=[51.2, 76.8, 153.6, 230.4, 307.2, 384.0, 460.8, 537.6],
                      ratios=[[1, 2, 0.5]] + [[1, 2, 0.5, 3, 1.0/3]] * 4 + [[1, 2, 0.5]] * 2,
                      steps=[8, 16, 32, 64, 128, 256, 512],
                      classes=classes, dataset='',
                      pretrained_base=pretrained_base, **kwargs)
    else:
        # Transfer learning: reuse weights for classes shared with the
        # pretrained model, reset the rest.
        from ...model_zoo import get_model
        net = get_model('ssd_512_vgg16_atrous_' + str(transfer), pretrained=True, **kwargs)
        reuse_classes = [x for x in classes if x in net.classes]
        net.reset_class(classes, reuse_weights=reuse_classes)
    return net
def ssd_512_resnet18_v1_voc(pretrained=False, pretrained_base=True, **kwargs):
    """SSD architecture with ResNet v1 18 layers.

    Parameters
    ----------
    pretrained : bool or str
        Boolean value controls whether to load the default pretrained weights for model.
        String value represents the hashtag for a certain version of pretrained weights.
    pretrained_base : bool or str, optional, default is True
        Load pretrained base network, the extra layers are randomized.
    norm_layer : object
        Normalization layer used (default: :class:`mxnet.gluon.nn.BatchNorm`)
        Can be :class:`mxnet.gluon.nn.BatchNorm` or :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.
    norm_kwargs : dict
        Additional `norm_layer` arguments, for example `num_devices=4`
        for :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.

    Returns
    -------
    HybridBlock
        A SSD detection network.
    """
    classes = VOCDetection.CLASSES
    # Features are tapped from the named ResNet stages; `filters` adds
    # extra SSD layers on top.
    return get_ssd('resnet18_v1', 512,
                   features=['stage3_activation1', 'stage4_activation1'],
                   filters=[512, 512, 256, 256],
                   sizes=[51.2, 102.4, 189.4, 276.4, 363.52, 450.6, 492],
                   ratios=[[1, 2, 0.5]] + [[1, 2, 0.5, 3, 1.0/3]] * 3 + [[1, 2, 0.5]] * 2,
                   steps=[16, 32, 64, 128, 256, 512],
                   classes=classes, dataset='voc', pretrained=pretrained,
                   pretrained_base=pretrained_base, **kwargs)
def ssd_512_resnet18_v1_coco(pretrained=False, pretrained_base=True, **kwargs):
    """SSD architecture with ResNet v1 18 layers.

    Parameters
    ----------
    pretrained : bool or str
        Boolean value controls whether to load the default pretrained weights for model.
        String value represents the hashtag for a certain version of pretrained weights.
    pretrained_base : bool or str, optional, default is True
        Load pretrained base network, the extra layers are randomized.
    norm_layer : object
        Normalization layer used (default: :class:`mxnet.gluon.nn.BatchNorm`)
        Can be :class:`mxnet.gluon.nn.BatchNorm` or :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.
    norm_kwargs : dict
        Additional `norm_layer` arguments, for example `num_devices=4`
        for :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.

    Returns
    -------
    HybridBlock
        A SSD detection network.
    """
    from ...data import COCODetection
    classes = COCODetection.CLASSES
    # Same anchor configuration as the VOC variant; only classes/dataset differ.
    return get_ssd('resnet18_v1', 512,
                   features=['stage3_activation1', 'stage4_activation1'],
                   filters=[512, 512, 256, 256],
                   sizes=[51.2, 102.4, 189.4, 276.4, 363.52, 450.6, 492],
                   ratios=[[1, 2, 0.5]] + [[1, 2, 0.5, 3, 1.0/3]] * 3 + [[1, 2, 0.5]] * 2,
                   steps=[16, 32, 64, 128, 256, 512],
                   classes=classes, dataset='coco', pretrained=pretrained,
                   pretrained_base=pretrained_base, **kwargs)
def ssd_512_resnet18_v1_custom(classes, pretrained_base=True, pretrained=False,
                               transfer=None, **kwargs):
    """SSD architecture with ResNet18 v1 512 base network for a custom dataset.

    Parameters
    ----------
    classes : iterable of str
        Names of custom foreground classes. `len(classes)` is the number of foreground classes.
    pretrained_base : bool or str, optional, default is True
        Load pretrained base network, the extra layers are randomized.
    transfer : str or None
        If not `None`, will try to reuse pre-trained weights from SSD networks trained on other
        datasets.
    norm_layer : object
        Normalization layer used (default: :class:`mxnet.gluon.nn.BatchNorm`)
        Can be :class:`mxnet.gluon.nn.BatchNorm` or :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.
    norm_kwargs : dict
        Additional `norm_layer` arguments, for example `num_devices=4`
        for :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.

    Returns
    -------
    HybridBlock
        A SSD detection network.

    Example
    -------
    >>> net = ssd_512_resnet18_v1_custom(classes=['a', 'b', 'c'], pretrained_base=True)
    >>> net = ssd_512_resnet18_v1_custom(classes=['foo', 'bar'], transfer='voc')
    """
    if pretrained:
        warnings.warn("Custom models don't provide `pretrained` weights, ignored.")
    if transfer is None:
        # Build from scratch (optionally with a pretrained base network).
        kwargs['pretrained'] = False
        net = get_ssd('resnet18_v1', 512,
                      features=['stage3_activation1', 'stage4_activation1'],
                      filters=[512, 512, 256, 256],
                      sizes=[51.2, 102.4, 189.4, 276.4, 363.52, 450.6, 492],
                      ratios=[[1, 2, 0.5]] + [[1, 2, 0.5, 3, 1.0/3]] * 3 + [[1, 2, 0.5]] * 2,
                      steps=[16, 32, 64, 128, 256, 512],
                      classes=classes, dataset='',
                      pretrained_base=pretrained_base, **kwargs)
    else:
        # Transfer learning: reuse weights for classes shared with the
        # pretrained model, reset the rest.
        from ...model_zoo import get_model
        net = get_model('ssd_512_resnet18_v1_' + str(transfer), pretrained=True, **kwargs)
        reuse_classes = [x for x in classes if x in net.classes]
        net.reset_class(classes, reuse_weights=reuse_classes)
    return net
def ssd_512_resnet50_v1_voc(pretrained=False, pretrained_base=True, **kwargs):
    """SSD architecture with ResNet v1 50 layers.

    Parameters
    ----------
    pretrained : bool or str
        Boolean value controls whether to load the default pretrained weights for model.
        String value represents the hashtag for a certain version of pretrained weights.
    pretrained_base : bool or str, optional, default is True
        Load pretrained base network, the extra layers are randomized.
    norm_layer : object
        Normalization layer used (default: :class:`mxnet.gluon.nn.BatchNorm`)
        Can be :class:`mxnet.gluon.nn.BatchNorm` or :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.
    norm_kwargs : dict
        Additional `norm_layer` arguments, for example `num_devices=4`
        for :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.

    Returns
    -------
    HybridBlock
        A SSD detection network.
    """
    classes = VOCDetection.CLASSES
    # Features tapped from ResNet50 stages; extra SSD layers via `filters`.
    return get_ssd('resnet50_v1', 512,
                   features=['stage3_activation5', 'stage4_activation2'],
                   filters=[512, 512, 256, 256],
                   sizes=[51.2, 102.4, 189.4, 276.4, 363.52, 450.6, 492],
                   ratios=[[1, 2, 0.5]] + [[1, 2, 0.5, 3, 1.0/3]] * 3 + [[1, 2, 0.5]] * 2,
                   steps=[16, 32, 64, 128, 256, 512],
                   classes=classes, dataset='voc', pretrained=pretrained,
                   pretrained_base=pretrained_base, **kwargs)
def ssd_512_lresnet50_v1_voc(pretrained=False, pretrained_base=False, **kwargs):
    """SSD architecture with ResNet v1 50 layers.

    Parameters
    ----------
    pretrained : bool or str
        Boolean value controls whether to load the default pretrained weights for model.
        String value represents the hashtag for a certain version of pretrained weights.
    pretrained_base : bool or str, optional, default is False
        Load pretrained base network, the extra layers are randomized.
        NOTE(review): unlike the sibling presets, this one defaults to
        False — presumably because the 'lresnet50_v1' base has no published
        pretrained weights; confirm.
    norm_layer : object
        Normalization layer used (default: :class:`mxnet.gluon.nn.BatchNorm`)
        Can be :class:`mxnet.gluon.nn.BatchNorm` or :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.
    norm_kwargs : dict
        Additional `norm_layer` arguments, for example `num_devices=4`
        for :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.

    Returns
    -------
    HybridBlock
        A SSD detection network.
    """
    classes = VOCDetection.CLASSES
    return get_ssd('lresnet50_v1', 512,
                   features=['stage3_activation5', 'stage4_activation2'],
                   filters=[512, 512, 256, 256],
                   sizes=[51.2, 102.4, 189.4, 276.4, 363.52, 450.6, 492],
                   ratios=[[1, 2, 0.5]] + [[1, 2, 0.5, 3, 1.0/3]] * 3 + [[1, 2, 0.5]] * 2,
                   steps=[16, 32, 64, 128, 256, 512],
                   classes=classes, dataset='voc', pretrained=pretrained,
                   pretrained_base=pretrained_base, **kwargs)
def ssd_512_resnet50_v1_coco(pretrained=False, pretrained_base=True, **kwargs):
    """SSD architecture with ResNet v1 50 layers for COCO.

    Parameters
    ----------
    pretrained : bool or str
        Boolean value controls whether to load the default pretrained weights for model.
        String value represents the hashtag for a certain version of pretrained weights.
    pretrained_base : bool or str, optional, default is True
        Load pretrained base network, the extra layers are randomized.
    norm_layer : object
        Normalization layer used (default: :class:`mxnet.gluon.nn.BatchNorm`)
        Can be :class:`mxnet.gluon.nn.BatchNorm` or :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.
    norm_kwargs : dict
        Additional `norm_layer` arguments, for example `num_devices=4`
        for :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.

    Returns
    -------
    HybridBlock
        A SSD detection network.
    """
    from ...data import COCODetection
    classes = COCODetection.CLASSES
    # COCO variant uses a different anchor-size schedule than VOC.
    return get_ssd('resnet50_v1', 512,
                   features=['stage3_activation5', 'stage4_activation2'],
                   filters=[512, 512, 256, 256],
                   sizes=[51.2, 133.12, 215.04, 296.96, 378.88, 460.8, 542.72],
                   ratios=[[1, 2, 0.5]] + [[1, 2, 0.5, 3, 1.0/3]] * 3 + [[1, 2, 0.5]] * 2,
                   steps=[16, 32, 64, 128, 256, 512],
                   classes=classes, dataset='coco', pretrained=pretrained,
                   pretrained_base=pretrained_base, **kwargs)
def ssd_512_resnet50_v1_custom(classes, pretrained_base=True, pretrained=False,
                               transfer=None, **kwargs):
    """SSD architecture with ResNet50 v1 512 base network for custom dataset.

    Parameters
    ----------
    classes : iterable of str
        Names of custom foreground classes. `len(classes)` is the number of foreground classes.
    pretrained_base : bool or str, optional, default is True
        Load pretrained base network, the extra layers are randomized.
    transfer : str or None
        If not `None`, will try to reuse pre-trained weights from SSD networks trained on other
        datasets.
    norm_layer : object
        Normalization layer used (default: :class:`mxnet.gluon.nn.BatchNorm`)
        Can be :class:`mxnet.gluon.nn.BatchNorm` or :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.
    norm_kwargs : dict
        Additional `norm_layer` arguments, for example `num_devices=4`
        for :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.

    Returns
    -------
    HybridBlock
        A SSD detection network.

    Example
    -------
    >>> net = ssd_512_resnet50_v1_custom(classes=['a', 'b', 'c'], pretrained_base=True)
    >>> net = ssd_512_resnet50_v1_custom(classes=['foo', 'bar'], transfer='voc')
    """
    if pretrained:
        warnings.warn("Custom models don't provide `pretrained` weights, ignored.")
    if transfer is None:
        # Build from scratch (optionally with a pretrained base network).
        kwargs['pretrained'] = False
        net = get_ssd('resnet50_v1', 512,
                      features=['stage3_activation5', 'stage4_activation2'],
                      filters=[512, 512, 256, 256],
                      sizes=[51.2, 133.12, 215.04, 296.96, 378.88, 460.8, 542.72],
                      ratios=[[1, 2, 0.5]] + [[1, 2, 0.5, 3, 1.0/3]] * 3 + [[1, 2, 0.5]] * 2,
                      steps=[16, 32, 64, 128, 256, 512],
                      classes=classes, dataset='',
                      pretrained_base=pretrained_base, **kwargs)
    else:
        # Transfer learning: reuse weights for classes shared with the
        # pretrained model, reset the rest.
        from ...model_zoo import get_model
        net = get_model('ssd_512_resnet50_v1_' + str(transfer), pretrained=True, **kwargs)
        reuse_classes = [x for x in classes if x in net.classes]
        net.reset_class(classes, reuse_weights=reuse_classes)
    return net
def ssd_512_resnet101_v2_voc(pretrained=False, pretrained_base=True, **kwargs):
    """SSD with a ResNet101 v2 backbone at 512x512 input, for Pascal VOC.

    Parameters
    ----------
    pretrained : bool or str
        Load the default pretrained weights for the full model; a string
        selects a specific pretrained-weight version by hashtag.
    pretrained_base : bool or str, optional, default is True
        Load pretrained weights for the base network; the extra SSD layers
        stay randomly initialized.
    norm_layer : object
        Normalization layer to use (default :class:`mxnet.gluon.nn.BatchNorm`).
        Can be :class:`mxnet.gluon.nn.BatchNorm` or
        :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.
    norm_kwargs : dict
        Extra `norm_layer` keyword arguments, e.g. `num_devices=4` for
        :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.

    Returns
    -------
    HybridBlock
        The SSD detection network.
    """
    # Anchor/feature configuration specific to this backbone and resolution.
    anchor_cfg = dict(
        features=['stage3_activation22', 'stage4_activation2'],
        filters=[512, 512, 256, 256],
        sizes=[51.2, 102.4, 189.4, 276.4, 363.52, 450.6, 492],
        ratios=[[1, 2, 0.5]] + [[1, 2, 0.5, 3, 1.0 / 3]] * 3 + [[1, 2, 0.5]] * 2,
        steps=[16, 32, 64, 128, 256, 512],
    )
    return get_ssd('resnet101_v2', 512, classes=VOCDetection.CLASSES,
                   dataset='voc', pretrained=pretrained,
                   pretrained_base=pretrained_base, **anchor_cfg, **kwargs)
def ssd_512_resnet152_v2_voc(pretrained=False, pretrained_base=True, **kwargs):
    """SSD with a ResNet152 v2 backbone at 512x512 input, for Pascal VOC.

    Parameters
    ----------
    pretrained : bool or str
        Load the default pretrained weights for the full model; a string
        selects a specific pretrained-weight version by hashtag.
    pretrained_base : bool or str, optional, default is True
        Load pretrained weights for the base network; the extra SSD layers
        stay randomly initialized.
    norm_layer : object
        Normalization layer to use (default :class:`mxnet.gluon.nn.BatchNorm`).
        Can be :class:`mxnet.gluon.nn.BatchNorm` or
        :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.
    norm_kwargs : dict
        Extra `norm_layer` keyword arguments, e.g. `num_devices=4` for
        :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.

    Returns
    -------
    HybridBlock
        The SSD detection network.
    """
    # This variant taps three backbone stages, so it has one more prediction
    # layer (7 steps / 8 sizes) than the other 512x512 ResNet configurations.
    anchor_cfg = dict(
        features=['stage2_activation7', 'stage3_activation35', 'stage4_activation2'],
        filters=[512, 512, 256, 256],
        sizes=[51.2, 76.8, 153.6, 230.4, 307.2, 384.0, 460.8, 537.6],
        ratios=[[1, 2, 0.5]] + [[1, 2, 0.5, 3, 1.0 / 3]] * 4 + [[1, 2, 0.5]] * 2,
        steps=[8, 16, 32, 64, 128, 256, 512],
    )
    return get_ssd('resnet152_v2', 512, classes=VOCDetection.CLASSES,
                   dataset='voc', pretrained=pretrained,
                   pretrained_base=pretrained_base, **anchor_cfg, **kwargs)
def ssd_512_mobilenet1_0_voc(pretrained=False, pretrained_base=True, **kwargs):
    """SSD with a MobileNet 1.0 backbone at 512x512 input, for Pascal VOC.

    Parameters
    ----------
    pretrained : bool or str
        Load the default pretrained weights for the full model; a string
        selects a specific pretrained-weight version by hashtag.
    pretrained_base : bool or str, optional, default is True
        Load pretrained weights for the base network; the extra SSD layers
        stay randomly initialized.
    norm_layer : object
        Normalization layer to use (default :class:`mxnet.gluon.nn.BatchNorm`).
        Can be :class:`mxnet.gluon.nn.BatchNorm` or
        :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.
    norm_kwargs : dict
        Extra `norm_layer` keyword arguments, e.g. `num_devices=4` for
        :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.

    Returns
    -------
    HybridBlock
        The SSD detection network.
    """
    # Anchor/feature configuration specific to this backbone and resolution.
    anchor_cfg = dict(
        features=['relu22_fwd', 'relu26_fwd'],
        filters=[512, 512, 256, 256],
        sizes=[51.2, 102.4, 189.4, 276.4, 363.52, 450.6, 492],
        ratios=[[1, 2, 0.5]] + [[1, 2, 0.5, 3, 1.0 / 3]] * 3 + [[1, 2, 0.5]] * 2,
        steps=[16, 32, 64, 128, 256, 512],
    )
    return get_ssd('mobilenet1.0', 512, classes=VOCDetection.CLASSES,
                   dataset='voc', pretrained=pretrained,
                   pretrained_base=pretrained_base, **anchor_cfg, **kwargs)
def ssd_512_mobilenet1_0_coco(pretrained=False, pretrained_base=True, **kwargs):
    """SSD with a MobileNet 1.0 backbone at 512x512 input, for MS COCO.

    Parameters
    ----------
    pretrained : bool or str
        Load the default pretrained weights for the full model; a string
        selects a specific pretrained-weight version by hashtag.
    pretrained_base : bool or str, optional, default is True
        Load pretrained weights for the base network; the extra SSD layers
        stay randomly initialized.
    norm_layer : object
        Normalization layer to use (default :class:`mxnet.gluon.nn.BatchNorm`).
        Can be :class:`mxnet.gluon.nn.BatchNorm` or
        :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.
    norm_kwargs : dict
        Extra `norm_layer` keyword arguments, e.g. `num_devices=4` for
        :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.

    Returns
    -------
    HybridBlock
        The SSD detection network.
    """
    # Imported lazily to avoid pulling in the COCO toolkit at module load.
    from ...data import COCODetection
    anchor_cfg = dict(
        features=['relu22_fwd', 'relu26_fwd'],
        filters=[512, 512, 256, 256],
        sizes=[51.2, 102.4, 189.4, 276.4, 363.52, 450.6, 492],
        ratios=[[1, 2, 0.5]] + [[1, 2, 0.5, 3, 1.0 / 3]] * 3 + [[1, 2, 0.5]] * 2,
        steps=[16, 32, 64, 128, 256, 512],
    )
    return get_ssd('mobilenet1.0', 512, classes=COCODetection.CLASSES,
                   dataset='coco', pretrained=pretrained,
                   pretrained_base=pretrained_base, **anchor_cfg, **kwargs)
def ssd_512_mobilenet1_0_custom(classes, pretrained_base=True, pretrained=False,
                                transfer=None, **kwargs):
    """SSD with a MobileNet 1.0 backbone at 512x512 input, for a user-defined
    class list.

    Parameters
    ----------
    classes : iterable of str
        Names of the foreground classes; `len(classes)` is the number of
        foreground classes.
    pretrained_base : bool or str, optional, default is True
        Load pretrained weights for the base network; the extra SSD layers
        stay randomly initialized.
    pretrained : bool or str, optional, default is False
        Ignored for custom models (a warning is emitted if set).
    transfer : str or None
        If not `None`, reuse the weights of an SSD network pretrained on the
        named dataset and remap its output classes to `classes`.
    norm_layer : object
        Normalization layer to use (default :class:`mxnet.gluon.nn.BatchNorm`).
        Can be :class:`mxnet.gluon.nn.BatchNorm` or
        :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.
    norm_kwargs : dict
        Extra `norm_layer` keyword arguments, e.g. `num_devices=4` for
        :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.

    Returns
    -------
    HybridBlock
        The SSD detection network.

    Example
    -------
    >>> net = ssd_512_mobilenet1_0_custom(classes=['a', 'b', 'c'], pretrained_base=True)
    >>> net = ssd_512_mobilenet1_0_custom(classes=['foo', 'bar'], transfer='voc')
    """
    if pretrained:
        warnings.warn("Custom models don't provide `pretrained` weights, ignored.")
    if transfer is not None:
        # Start from a model trained on another dataset and remap its output
        # classes, reusing weights for any class names the two sets share.
        from ...model_zoo import get_model
        net = get_model('ssd_512_mobilenet1.0_' + str(transfer), pretrained=True, **kwargs)
        net.reset_class(classes, reuse_weights=[c for c in classes if c in net.classes])
        return net
    # Fresh network: at most the backbone is pretrained.
    kwargs['pretrained'] = False
    anchor_cfg = dict(
        features=['relu22_fwd', 'relu26_fwd'],
        filters=[512, 512, 256, 256],
        sizes=[51.2, 102.4, 189.4, 276.4, 363.52, 450.6, 492],
        ratios=[[1, 2, 0.5]] + [[1, 2, 0.5, 3, 1.0 / 3]] * 3 + [[1, 2, 0.5]] * 2,
        steps=[16, 32, 64, 128, 256, 512],
    )
    return get_ssd('mobilenet1.0', 512, classes=classes, dataset='',
                   pretrained_base=pretrained_base, **anchor_cfg, **kwargs)
def ssd_300_mobilenet0_25_voc(pretrained=False, pretrained_base=True, **kwargs):
    """SSD with a MobileNet 0.25 backbone at 300x300 input, for Pascal VOC.

    Parameters
    ----------
    pretrained : bool or str
        Load the default pretrained weights for the full model; a string
        selects a specific pretrained-weight version by hashtag.
    pretrained_base : bool or str, optional, default is True
        Load pretrained weights for the base network; the extra SSD layers
        stay randomly initialized.
    norm_layer : object
        Normalization layer to use (default :class:`mxnet.gluon.nn.BatchNorm`).
        Can be :class:`mxnet.gluon.nn.BatchNorm` or
        :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.
    norm_kwargs : dict
        Extra `norm_layer` keyword arguments, e.g. `num_devices=4` for
        :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.

    Returns
    -------
    HybridBlock
        The SSD detection network.
    """
    # Anchor/feature configuration specific to this backbone and resolution.
    anchor_cfg = dict(
        features=['relu22_fwd', 'relu26_fwd'],
        filters=[256, 256, 128, 128],
        sizes=[21, 45, 99, 153, 207, 261, 315],
        ratios=[[1, 2, 0.5]] + [[1, 2, 0.5, 3, 1.0 / 3]] * 3 + [[1, 2, 0.5]] * 2,
        steps=[8, 16, 32, 64, 100, 300],
    )
    return get_ssd('mobilenet0.25', 300, classes=VOCDetection.CLASSES,
                   dataset='voc', pretrained=pretrained,
                   pretrained_base=pretrained_base, **anchor_cfg, **kwargs)
def ssd_300_mobilenet0_25_coco(pretrained=False, pretrained_base=True, **kwargs):
    """SSD with a MobileNet 0.25 backbone at 300x300 input, for MS COCO.

    Parameters
    ----------
    pretrained : bool or str
        Load the default pretrained weights for the full model; a string
        selects a specific pretrained-weight version by hashtag.
    pretrained_base : bool or str, optional, default is True
        Load pretrained weights for the base network; the extra SSD layers
        stay randomly initialized.
    norm_layer : object
        Normalization layer to use (default :class:`mxnet.gluon.nn.BatchNorm`).
        Can be :class:`mxnet.gluon.nn.BatchNorm` or
        :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.
    norm_kwargs : dict
        Extra `norm_layer` keyword arguments, e.g. `num_devices=4` for
        :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.

    Returns
    -------
    HybridBlock
        The SSD detection network.
    """
    # Imported lazily to avoid pulling in the COCO toolkit at module load.
    from ...data import COCODetection
    anchor_cfg = dict(
        features=['relu22_fwd', 'relu26_fwd'],
        filters=[256, 256, 128, 128],
        sizes=[21, 45, 99, 153, 207, 261, 315],
        ratios=[[1, 2, 0.5]] + [[1, 2, 0.5, 3, 1.0 / 3]] * 3 + [[1, 2, 0.5]] * 2,
        steps=[8, 16, 32, 64, 100, 300],
    )
    return get_ssd('mobilenet0.25', 300, classes=COCODetection.CLASSES,
                   dataset='coco', pretrained=pretrained,
                   pretrained_base=pretrained_base, **anchor_cfg, **kwargs)
def ssd_300_mobilenet0_25_custom(classes, pretrained_base=True, pretrained=False,
                                 transfer=None, **kwargs):
    """SSD with a MobileNet 0.25 backbone at 300x300 input, for a user-defined
    class list.

    Parameters
    ----------
    classes : iterable of str
        Names of the foreground classes; `len(classes)` is the number of
        foreground classes.
    pretrained_base : bool or str, optional, default is True
        Load pretrained weights for the base network; the extra SSD layers
        stay randomly initialized.
    pretrained : bool or str, optional, default is False
        Ignored for custom models (a warning is emitted if set).
    transfer : str or None
        If not `None`, reuse the weights of an SSD network pretrained on the
        named dataset and remap its output classes to `classes`.
    norm_layer : object
        Normalization layer to use (default :class:`mxnet.gluon.nn.BatchNorm`).
        Can be :class:`mxnet.gluon.nn.BatchNorm` or
        :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.
    norm_kwargs : dict
        Extra `norm_layer` keyword arguments, e.g. `num_devices=4` for
        :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.

    Returns
    -------
    HybridBlock
        The SSD detection network.

    Example
    -------
    >>> net = ssd_300_mobilenet0_25_custom(classes=['a', 'b', 'c'], pretrained_base=True)
    >>> net = ssd_300_mobilenet0_25_custom(classes=['foo', 'bar'], transfer='voc')
    """
    if pretrained:
        warnings.warn("Custom models don't provide `pretrained` weights, ignored.")
    if transfer is not None:
        # Start from a model trained on another dataset and remap its output
        # classes, reusing weights for any class names the two sets share.
        from ...model_zoo import get_model
        net = get_model('ssd_300_mobilenet0.25_' + str(transfer), pretrained=True, **kwargs)
        net.reset_class(classes, reuse_weights=[c for c in classes if c in net.classes])
        return net
    # Fresh network: at most the backbone is pretrained.
    kwargs['pretrained'] = False
    anchor_cfg = dict(
        features=['relu22_fwd', 'relu26_fwd'],
        filters=[256, 256, 128, 128],
        sizes=[21, 45, 99, 153, 207, 261, 315],
        ratios=[[1, 2, 0.5]] + [[1, 2, 0.5, 3, 1.0 / 3]] * 3 + [[1, 2, 0.5]] * 2,
        steps=[8, 16, 32, 64, 100, 300],
    )
    return get_ssd('mobilenet0.25', 300, classes=classes, dataset='',
                   pretrained_base=pretrained_base, **anchor_cfg, **kwargs)
| 44.774319
| 98
| 0.615683
| 4,370
| 34,521
| 4.743707
| 0.046453
| 0.074964
| 0.009117
| 0.012156
| 0.981862
| 0.957019
| 0.954269
| 0.950362
| 0.950362
| 0.950362
| 0
| 0.081908
| 0.266504
| 34,521
| 770
| 99
| 44.832468
| 0.73678
| 0.462096
| 0
| 0.78157
| 0
| 0
| 0.10769
| 0.036054
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071672
| false
| 0
| 0.05802
| 0
| 0.201365
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6090cfa314802c4ca62fb4926fa63c587ee3a9f5
| 105,591
|
py
|
Python
|
scipy/ndimage/tests/test_morphology.py
|
lorentzenchr/scipy
|
393a05ee927883ad6316b7092c851afea8f16816
|
[
"BSD-3-Clause"
] | 9,095
|
2015-01-02T18:24:23.000Z
|
2022-03-31T20:35:31.000Z
|
scipy/ndimage/tests/test_morphology.py
|
lorentzenchr/scipy
|
393a05ee927883ad6316b7092c851afea8f16816
|
[
"BSD-3-Clause"
] | 11,500
|
2015-01-01T01:15:30.000Z
|
2022-03-31T23:07:35.000Z
|
scipy/ndimage/tests/test_morphology.py
|
lorentzenchr/scipy
|
393a05ee927883ad6316b7092c851afea8f16816
|
[
"BSD-3-Clause"
] | 5,838
|
2015-01-05T11:56:42.000Z
|
2022-03-31T23:21:19.000Z
|
import numpy
from numpy.testing import (assert_, assert_equal, assert_array_equal,
assert_array_almost_equal)
import pytest
from pytest import raises as assert_raises
from scipy import ndimage
from . import types
class TestNdimageMorphology:
    @pytest.mark.parametrize('dtype', types)
    def test_distance_transform_bf01(self, dtype):
        # brute force (bf) distance transform
        # Euclidean metric on a diamond-shaped foreground blob; checks both
        # the distances and the feature transform (indices of the nearest
        # background pixel for every position).
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype)
        out, ft = ndimage.distance_transform_bf(data, 'euclidean',
                                                return_indices=True)
        # Squared Euclidean distances are exact integers, so compare
        # out * out against the squared-distance table.
        expected = [[0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 1, 1, 1, 0, 0, 0],
                    [0, 0, 1, 2, 4, 2, 1, 0, 0],
                    [0, 0, 1, 4, 8, 4, 1, 0, 0],
                    [0, 0, 1, 2, 4, 2, 1, 0, 0],
                    [0, 0, 0, 1, 1, 1, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0]]
        assert_array_almost_equal(out * out, expected)
        # Feature transform: expected[0] holds the row index and expected[1]
        # the column index of the nearest background pixel.
        expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0],
                     [1, 1, 1, 1, 1, 1, 1, 1, 1],
                     [2, 2, 2, 2, 1, 2, 2, 2, 2],
                     [3, 3, 3, 2, 1, 2, 3, 3, 3],
                     [4, 4, 4, 4, 6, 4, 4, 4, 4],
                     [5, 5, 6, 6, 7, 6, 6, 5, 5],
                     [6, 6, 6, 7, 7, 7, 6, 6, 6],
                     [7, 7, 7, 7, 7, 7, 7, 7, 7],
                     [8, 8, 8, 8, 8, 8, 8, 8, 8]],
                    [[0, 1, 2, 3, 4, 5, 6, 7, 8],
                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
                     [0, 1, 2, 2, 4, 6, 6, 7, 8],
                     [0, 1, 1, 2, 4, 6, 7, 7, 8],
                     [0, 1, 1, 1, 6, 7, 7, 7, 8],
                     [0, 1, 2, 2, 4, 6, 6, 7, 8],
                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
                     [0, 1, 2, 3, 4, 5, 6, 7, 8]]]
        assert_array_almost_equal(ft, expected)
    @pytest.mark.parametrize('dtype', types)
    def test_distance_transform_bf02(self, dtype):
        # Brute-force transform with the cityblock (L1) metric; distances
        # are integers, so they are compared directly.
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype)
        out, ft = ndimage.distance_transform_bf(data, 'cityblock',
                                                return_indices=True)
        expected = [[0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 1, 1, 1, 0, 0, 0],
                    [0, 0, 1, 2, 2, 2, 1, 0, 0],
                    [0, 0, 1, 2, 3, 2, 1, 0, 0],
                    [0, 0, 1, 2, 2, 2, 1, 0, 0],
                    [0, 0, 0, 1, 1, 1, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0]]
        assert_array_almost_equal(out, expected)
        # Feature transform: row indices first, then column indices of the
        # nearest background pixel.
        expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0],
                     [1, 1, 1, 1, 1, 1, 1, 1, 1],
                     [2, 2, 2, 2, 1, 2, 2, 2, 2],
                     [3, 3, 3, 3, 1, 3, 3, 3, 3],
                     [4, 4, 4, 4, 7, 4, 4, 4, 4],
                     [5, 5, 6, 7, 7, 7, 6, 5, 5],
                     [6, 6, 6, 7, 7, 7, 6, 6, 6],
                     [7, 7, 7, 7, 7, 7, 7, 7, 7],
                     [8, 8, 8, 8, 8, 8, 8, 8, 8]],
                    [[0, 1, 2, 3, 4, 5, 6, 7, 8],
                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
                     [0, 1, 2, 2, 4, 6, 6, 7, 8],
                     [0, 1, 1, 1, 4, 7, 7, 7, 8],
                     [0, 1, 1, 1, 4, 7, 7, 7, 8],
                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
                     [0, 1, 2, 3, 4, 5, 6, 7, 8]]]
        assert_array_almost_equal(expected, ft)
    @pytest.mark.parametrize('dtype', types)
    def test_distance_transform_bf03(self, dtype):
        # Brute-force transform with the chessboard (L-infinity) metric;
        # distances are integers, so they are compared directly.
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype)
        out, ft = ndimage.distance_transform_bf(data, 'chessboard',
                                                return_indices=True)
        expected = [[0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 1, 1, 1, 0, 0, 0],
                    [0, 0, 1, 1, 2, 1, 1, 0, 0],
                    [0, 0, 1, 2, 2, 2, 1, 0, 0],
                    [0, 0, 1, 1, 2, 1, 1, 0, 0],
                    [0, 0, 0, 1, 1, 1, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0]]
        assert_array_almost_equal(out, expected)
        # Feature transform: row indices first, then column indices of the
        # nearest background pixel.
        expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0],
                     [1, 1, 1, 1, 1, 1, 1, 1, 1],
                     [2, 2, 2, 2, 1, 2, 2, 2, 2],
                     [3, 3, 4, 2, 2, 2, 4, 3, 3],
                     [4, 4, 5, 6, 6, 6, 5, 4, 4],
                     [5, 5, 6, 6, 7, 6, 6, 5, 5],
                     [6, 6, 6, 7, 7, 7, 6, 6, 6],
                     [7, 7, 7, 7, 7, 7, 7, 7, 7],
                     [8, 8, 8, 8, 8, 8, 8, 8, 8]],
                    [[0, 1, 2, 3, 4, 5, 6, 7, 8],
                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
                     [0, 1, 2, 2, 5, 6, 6, 7, 8],
                     [0, 1, 1, 2, 6, 6, 7, 7, 8],
                     [0, 1, 1, 2, 6, 7, 7, 7, 8],
                     [0, 1, 2, 2, 6, 6, 7, 7, 8],
                     [0, 1, 2, 4, 5, 6, 6, 7, 8],
                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
                     [0, 1, 2, 3, 4, 5, 6, 7, 8]]]
        assert_array_almost_equal(ft, expected)
    @pytest.mark.parametrize('dtype', types)
    def test_distance_transform_bf04(self, dtype):
        # Every supported combination of return flags and caller-supplied
        # output arrays (distances=..., indices=...) must produce the same
        # result as the plain return-value call.
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype)
        # Reference distances (tdt) and feature transform (tft).
        tdt, tft = ndimage.distance_transform_bf(data, return_indices=1)
        dts = []
        fts = []
        # Variant: distances written into a caller-supplied array.
        dt = numpy.zeros(data.shape, dtype=numpy.float64)
        ndimage.distance_transform_bf(data, distances=dt)
        dts.append(dt)
        # Variant: indices only, returned.
        ft = ndimage.distance_transform_bf(
            data, return_distances=False, return_indices=1)
        fts.append(ft)
        # Variant: indices only, written into a caller-supplied array.
        ft = numpy.indices(data.shape, dtype=numpy.int32)
        ndimage.distance_transform_bf(
            data, return_distances=False, return_indices=True, indices=ft)
        fts.append(ft)
        # Variant: both returned.
        dt, ft = ndimage.distance_transform_bf(
            data, return_indices=1)
        dts.append(dt)
        fts.append(ft)
        # Variant: distances supplied, indices returned.
        dt = numpy.zeros(data.shape, dtype=numpy.float64)
        ft = ndimage.distance_transform_bf(
            data, distances=dt, return_indices=True)
        dts.append(dt)
        fts.append(ft)
        # Variant: indices supplied, distances returned.
        ft = numpy.indices(data.shape, dtype=numpy.int32)
        dt = ndimage.distance_transform_bf(
            data, return_indices=True, indices=ft)
        dts.append(dt)
        fts.append(ft)
        # Variant: both supplied.
        dt = numpy.zeros(data.shape, dtype=numpy.float64)
        ft = numpy.indices(data.shape, dtype=numpy.int32)
        ndimage.distance_transform_bf(
            data, distances=dt, return_indices=True, indices=ft)
        dts.append(dt)
        fts.append(ft)
        for dt in dts:
            assert_array_almost_equal(tdt, dt)
        for ft in fts:
            assert_array_almost_equal(tft, ft)
    @pytest.mark.parametrize('dtype', types)
    def test_distance_transform_bf05(self, dtype):
        # Euclidean brute-force transform with uniform sampling=[2, 2]:
        # squared distances scale by 4 relative to test_distance_transform_bf01,
        # while the feature transform is unchanged.
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype)
        out, ft = ndimage.distance_transform_bf(
            data, 'euclidean', return_indices=True, sampling=[2, 2])
        expected = [[0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 4, 4, 4, 0, 0, 0],
                    [0, 0, 4, 8, 16, 8, 4, 0, 0],
                    [0, 0, 4, 16, 32, 16, 4, 0, 0],
                    [0, 0, 4, 8, 16, 8, 4, 0, 0],
                    [0, 0, 0, 4, 4, 4, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0]]
        assert_array_almost_equal(out * out, expected)
        # Feature transform: row indices first, then column indices.
        expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0],
                     [1, 1, 1, 1, 1, 1, 1, 1, 1],
                     [2, 2, 2, 2, 1, 2, 2, 2, 2],
                     [3, 3, 3, 2, 1, 2, 3, 3, 3],
                     [4, 4, 4, 4, 6, 4, 4, 4, 4],
                     [5, 5, 6, 6, 7, 6, 6, 5, 5],
                     [6, 6, 6, 7, 7, 7, 6, 6, 6],
                     [7, 7, 7, 7, 7, 7, 7, 7, 7],
                     [8, 8, 8, 8, 8, 8, 8, 8, 8]],
                    [[0, 1, 2, 3, 4, 5, 6, 7, 8],
                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
                     [0, 1, 2, 2, 4, 6, 6, 7, 8],
                     [0, 1, 1, 2, 4, 6, 7, 7, 8],
                     [0, 1, 1, 1, 6, 7, 7, 7, 8],
                     [0, 1, 2, 2, 4, 6, 6, 7, 8],
                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
                     [0, 1, 2, 3, 4, 5, 6, 7, 8]]]
        assert_array_almost_equal(ft, expected)
    @pytest.mark.parametrize('dtype', types)
    def test_distance_transform_bf06(self, dtype):
        # Euclidean brute-force transform with anisotropic sampling=[2, 1]:
        # rows cost 2 per step, columns cost 1, which changes both the
        # distances and the nearest-background choices.
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype)
        out, ft = ndimage.distance_transform_bf(
            data, 'euclidean', return_indices=True, sampling=[2, 1])
        expected = [[0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 1, 4, 1, 0, 0, 0],
                    [0, 0, 1, 4, 8, 4, 1, 0, 0],
                    [0, 0, 1, 4, 9, 4, 1, 0, 0],
                    [0, 0, 1, 4, 8, 4, 1, 0, 0],
                    [0, 0, 0, 1, 4, 1, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0]]
        assert_array_almost_equal(out * out, expected)
        # Feature transform: row indices first, then column indices.
        expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0],
                     [1, 1, 1, 1, 1, 1, 1, 1, 1],
                     [2, 2, 2, 2, 2, 2, 2, 2, 2],
                     [3, 3, 3, 3, 2, 3, 3, 3, 3],
                     [4, 4, 4, 4, 4, 4, 4, 4, 4],
                     [5, 5, 5, 5, 6, 5, 5, 5, 5],
                     [6, 6, 6, 6, 7, 6, 6, 6, 6],
                     [7, 7, 7, 7, 7, 7, 7, 7, 7],
                     [8, 8, 8, 8, 8, 8, 8, 8, 8]],
                    [[0, 1, 2, 3, 4, 5, 6, 7, 8],
                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
                     [0, 1, 2, 2, 6, 6, 6, 7, 8],
                     [0, 1, 1, 1, 6, 7, 7, 7, 8],
                     [0, 1, 1, 1, 7, 7, 7, 7, 8],
                     [0, 1, 1, 1, 6, 7, 7, 7, 8],
                     [0, 1, 2, 2, 4, 6, 6, 7, 8],
                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
                     [0, 1, 2, 3, 4, 5, 6, 7, 8]]]
        assert_array_almost_equal(ft, expected)
def test_distance_transform_bf07(self):
# test input validation per discussion on PR #13302
data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]])
with assert_raises(RuntimeError):
ndimage.distance_transform_bf(
data, return_distances=False, return_indices=False
)
    @pytest.mark.parametrize('dtype', types)
    def test_distance_transform_cdt01(self, dtype):
        # chamfer type distance (cdt) transform
        # Cityblock metric: distances must match the brute-force result, and
        # the feature transform is checked against a fixed table (the cdt may
        # pick different but equally-near background pixels than bf).
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype)
        out, ft = ndimage.distance_transform_cdt(
            data, 'cityblock', return_indices=True)
        bf = ndimage.distance_transform_bf(data, 'cityblock')
        assert_array_almost_equal(bf, out)
        # Feature transform: row indices first, then column indices.
        expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0],
                     [1, 1, 1, 1, 1, 1, 1, 1, 1],
                     [2, 2, 2, 1, 1, 1, 2, 2, 2],
                     [3, 3, 2, 1, 1, 1, 2, 3, 3],
                     [4, 4, 4, 4, 1, 4, 4, 4, 4],
                     [5, 5, 5, 5, 7, 7, 6, 5, 5],
                     [6, 6, 6, 6, 7, 7, 6, 6, 6],
                     [7, 7, 7, 7, 7, 7, 7, 7, 7],
                     [8, 8, 8, 8, 8, 8, 8, 8, 8]],
                    [[0, 1, 2, 3, 4, 5, 6, 7, 8],
                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
                     [0, 1, 1, 1, 4, 7, 7, 7, 8],
                     [0, 1, 1, 1, 4, 5, 6, 7, 8],
                     [0, 1, 2, 2, 4, 5, 6, 7, 8],
                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
                     [0, 1, 2, 3, 4, 5, 6, 7, 8]]]
        assert_array_almost_equal(ft, expected)
    @pytest.mark.parametrize('dtype', types)
    def test_distance_transform_cdt02(self, dtype):
        # Chamfer transform with the chessboard metric: distances must match
        # the brute-force result; the feature transform is checked against a
        # fixed table.
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype)
        out, ft = ndimage.distance_transform_cdt(data, 'chessboard',
                                                 return_indices=True)
        bf = ndimage.distance_transform_bf(data, 'chessboard')
        assert_array_almost_equal(bf, out)
        # Feature transform: row indices first, then column indices.
        expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0],
                     [1, 1, 1, 1, 1, 1, 1, 1, 1],
                     [2, 2, 2, 1, 1, 1, 2, 2, 2],
                     [3, 3, 2, 2, 1, 2, 2, 3, 3],
                     [4, 4, 3, 2, 2, 2, 3, 4, 4],
                     [5, 5, 4, 6, 7, 6, 4, 5, 5],
                     [6, 6, 6, 6, 7, 7, 6, 6, 6],
                     [7, 7, 7, 7, 7, 7, 7, 7, 7],
                     [8, 8, 8, 8, 8, 8, 8, 8, 8]],
                    [[0, 1, 2, 3, 4, 5, 6, 7, 8],
                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
                     [0, 1, 2, 2, 3, 4, 6, 7, 8],
                     [0, 1, 1, 2, 2, 6, 6, 7, 8],
                     [0, 1, 1, 1, 2, 6, 7, 7, 8],
                     [0, 1, 1, 2, 6, 6, 7, 7, 8],
                     [0, 1, 2, 2, 5, 6, 6, 7, 8],
                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
                     [0, 1, 2, 3, 4, 5, 6, 7, 8]]]
        assert_array_almost_equal(ft, expected)
    @pytest.mark.parametrize('dtype', types)
    def test_distance_transform_cdt03(self, dtype):
        # Every supported combination of return flags and caller-supplied
        # output arrays must produce the same result as the plain
        # return-value call (cdt distances are int32, not float64).
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype)
        # Reference distances (tdt) and feature transform (tft).
        tdt, tft = ndimage.distance_transform_cdt(data, return_indices=True)
        dts = []
        fts = []
        # Variant: distances written into a caller-supplied array.
        dt = numpy.zeros(data.shape, dtype=numpy.int32)
        ndimage.distance_transform_cdt(data, distances=dt)
        dts.append(dt)
        # Variant: indices only, returned.
        ft = ndimage.distance_transform_cdt(
            data, return_distances=False, return_indices=True)
        fts.append(ft)
        # Variant: indices only, written into a caller-supplied array.
        ft = numpy.indices(data.shape, dtype=numpy.int32)
        ndimage.distance_transform_cdt(
            data, return_distances=False, return_indices=True, indices=ft)
        fts.append(ft)
        # Variant: both returned.
        dt, ft = ndimage.distance_transform_cdt(
            data, return_indices=True)
        dts.append(dt)
        fts.append(ft)
        # Variant: distances supplied, indices returned.
        dt = numpy.zeros(data.shape, dtype=numpy.int32)
        ft = ndimage.distance_transform_cdt(
            data, distances=dt, return_indices=True)
        dts.append(dt)
        fts.append(ft)
        # Variant: indices supplied, distances returned.
        ft = numpy.indices(data.shape, dtype=numpy.int32)
        dt = ndimage.distance_transform_cdt(
            data, return_indices=True, indices=ft)
        dts.append(dt)
        fts.append(ft)
        # Variant: both supplied.
        dt = numpy.zeros(data.shape, dtype=numpy.int32)
        ft = numpy.indices(data.shape, dtype=numpy.int32)
        ndimage.distance_transform_cdt(data, distances=dt,
                                       return_indices=True, indices=ft)
        dts.append(dt)
        fts.append(ft)
        for dt in dts:
            assert_array_almost_equal(tdt, dt)
        for ft in fts:
            assert_array_almost_equal(tft, ft)
def test_distance_transform_cdt04(self):
# test input validation per discussion on PR #13302
data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]])
indices_out = numpy.zeros((data.ndim,) + data.shape, dtype=numpy.int32)
with assert_raises(RuntimeError):
ndimage.distance_transform_bf(
data,
return_distances=True,
return_indices=False,
indices=indices_out
)
@pytest.mark.parametrize('dtype', types)
def test_distance_transform_edt01(self, dtype):
# euclidean distance transform (edt)
data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype)
out, ft = ndimage.distance_transform_edt(data, return_indices=True)
bf = ndimage.distance_transform_bf(data, 'euclidean')
assert_array_almost_equal(bf, out)
dt = ft - numpy.indices(ft.shape[1:], dtype=ft.dtype)
dt = dt.astype(numpy.float64)
numpy.multiply(dt, dt, dt)
dt = numpy.add.reduce(dt, axis=0)
numpy.sqrt(dt, dt)
assert_array_almost_equal(bf, dt)
    @pytest.mark.parametrize('dtype', types)
    def test_distance_transform_edt02(self, dtype):
        # Every supported combination of return flags and caller-supplied
        # output arrays must produce the same result as the plain
        # return-value call.
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype)
        # Reference distances (tdt) and feature transform (tft).
        tdt, tft = ndimage.distance_transform_edt(data, return_indices=True)
        dts = []
        fts = []
        # Variant: distances written into a caller-supplied array.
        dt = numpy.zeros(data.shape, dtype=numpy.float64)
        ndimage.distance_transform_edt(data, distances=dt)
        dts.append(dt)
        # Variant: indices only, returned.
        ft = ndimage.distance_transform_edt(
            data, return_distances=0, return_indices=True)
        fts.append(ft)
        # Variant: indices only, written into a caller-supplied array.
        ft = numpy.indices(data.shape, dtype=numpy.int32)
        ndimage.distance_transform_edt(
            data, return_distances=False, return_indices=True, indices=ft)
        fts.append(ft)
        # Variant: both returned.
        dt, ft = ndimage.distance_transform_edt(
            data, return_indices=True)
        dts.append(dt)
        fts.append(ft)
        # Variant: distances supplied, indices returned.
        dt = numpy.zeros(data.shape, dtype=numpy.float64)
        ft = ndimage.distance_transform_edt(
            data, distances=dt, return_indices=True)
        dts.append(dt)
        fts.append(ft)
        # Variant: indices supplied, distances returned.
        ft = numpy.indices(data.shape, dtype=numpy.int32)
        dt = ndimage.distance_transform_edt(
            data, return_indices=True, indices=ft)
        dts.append(dt)
        fts.append(ft)
        # Variant: both supplied.
        dt = numpy.zeros(data.shape, dtype=numpy.float64)
        ft = numpy.indices(data.shape, dtype=numpy.int32)
        ndimage.distance_transform_edt(
            data, distances=dt, return_indices=True, indices=ft)
        dts.append(dt)
        fts.append(ft)
        for dt in dts:
            assert_array_almost_equal(tdt, dt)
        for ft in fts:
            assert_array_almost_equal(tft, ft)
@pytest.mark.parametrize('dtype', types)
def test_distance_transform_edt03(self, dtype):
data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype)
ref = ndimage.distance_transform_bf(data, 'euclidean', sampling=[2, 2])
out = ndimage.distance_transform_edt(data, sampling=[2, 2])
assert_array_almost_equal(ref, out)
@pytest.mark.parametrize('dtype', types)
def test_distance_transform_edt4(self, dtype):
data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype)
ref = ndimage.distance_transform_bf(data, 'euclidean', sampling=[2, 1])
out = ndimage.distance_transform_edt(data, sampling=[2, 1])
assert_array_almost_equal(ref, out)
def test_distance_transform_edt5(self):
# Ticket #954 regression test
out = ndimage.distance_transform_edt(False)
assert_array_almost_equal(out, [0.])
def test_distance_transform_edt6(self):
# test input validation per discussion on PR #13302
data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]])
distances_out = numpy.zeros(data.shape, dtype=numpy.float64)
with assert_raises(RuntimeError):
ndimage.distance_transform_bf(
data,
return_indices=True,
return_distances=False,
distances=distances_out
)
def test_generate_structure01(self):
struct = ndimage.generate_binary_structure(0, 1)
assert_array_almost_equal(struct, 1)
def test_generate_structure02(self):
struct = ndimage.generate_binary_structure(1, 1)
assert_array_almost_equal(struct, [1, 1, 1])
def test_generate_structure03(self):
struct = ndimage.generate_binary_structure(2, 1)
assert_array_almost_equal(struct, [[0, 1, 0],
[1, 1, 1],
[0, 1, 0]])
def test_generate_structure04(self):
struct = ndimage.generate_binary_structure(2, 2)
assert_array_almost_equal(struct, [[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
def test_iterate_structure01(self):
struct = [[0, 1, 0],
[1, 1, 1],
[0, 1, 0]]
out = ndimage.iterate_structure(struct, 2)
assert_array_almost_equal(out, [[0, 0, 1, 0, 0],
[0, 1, 1, 1, 0],
[1, 1, 1, 1, 1],
[0, 1, 1, 1, 0],
[0, 0, 1, 0, 0]])
def test_iterate_structure02(self):
struct = [[0, 1],
[1, 1],
[0, 1]]
out = ndimage.iterate_structure(struct, 2)
assert_array_almost_equal(out, [[0, 0, 1],
[0, 1, 1],
[1, 1, 1],
[0, 1, 1],
[0, 0, 1]])
def test_iterate_structure03(self):
struct = [[0, 1, 0],
[1, 1, 1],
[0, 1, 0]]
out = ndimage.iterate_structure(struct, 2, 1)
expected = [[0, 0, 1, 0, 0],
[0, 1, 1, 1, 0],
[1, 1, 1, 1, 1],
[0, 1, 1, 1, 0],
[0, 0, 1, 0, 0]]
assert_array_almost_equal(out[0], expected)
assert_equal(out[1], [2, 2])
@pytest.mark.parametrize('dtype', types)
def test_binary_erosion01(self, dtype):
data = numpy.ones([], dtype)
out = ndimage.binary_erosion(data)
assert_array_almost_equal(out, 1)
@pytest.mark.parametrize('dtype', types)
def test_binary_erosion02(self, dtype):
data = numpy.ones([], dtype)
out = ndimage.binary_erosion(data, border_value=1)
assert_array_almost_equal(out, 1)
@pytest.mark.parametrize('dtype', types)
def test_binary_erosion03(self, dtype):
data = numpy.ones([1], dtype)
out = ndimage.binary_erosion(data)
assert_array_almost_equal(out, [0])
@pytest.mark.parametrize('dtype', types)
def test_binary_erosion04(self, dtype):
data = numpy.ones([1], dtype)
out = ndimage.binary_erosion(data, border_value=1)
assert_array_almost_equal(out, [1])
@pytest.mark.parametrize('dtype', types)
def test_binary_erosion05(self, dtype):
data = numpy.ones([3], dtype)
out = ndimage.binary_erosion(data)
assert_array_almost_equal(out, [0, 1, 0])
@pytest.mark.parametrize('dtype', types)
def test_binary_erosion06(self, dtype):
data = numpy.ones([3], dtype)
out = ndimage.binary_erosion(data, border_value=1)
assert_array_almost_equal(out, [1, 1, 1])
@pytest.mark.parametrize('dtype', types)
def test_binary_erosion07(self, dtype):
data = numpy.ones([5], dtype)
out = ndimage.binary_erosion(data)
assert_array_almost_equal(out, [0, 1, 1, 1, 0])
@pytest.mark.parametrize('dtype', types)
def test_binary_erosion08(self, dtype):
data = numpy.ones([5], dtype)
out = ndimage.binary_erosion(data, border_value=1)
assert_array_almost_equal(out, [1, 1, 1, 1, 1])
@pytest.mark.parametrize('dtype', types)
def test_binary_erosion09(self, dtype):
data = numpy.ones([5], dtype)
data[2] = 0
out = ndimage.binary_erosion(data)
assert_array_almost_equal(out, [0, 0, 0, 0, 0])
@pytest.mark.parametrize('dtype', types)
def test_binary_erosion10(self, dtype):
data = numpy.ones([5], dtype)
data[2] = 0
out = ndimage.binary_erosion(data, border_value=1)
assert_array_almost_equal(out, [1, 0, 0, 0, 1])
@pytest.mark.parametrize('dtype', types)
def test_binary_erosion11(self, dtype):
data = numpy.ones([5], dtype)
data[2] = 0
struct = [1, 0, 1]
out = ndimage.binary_erosion(data, struct, border_value=1)
assert_array_almost_equal(out, [1, 0, 1, 0, 1])
@pytest.mark.parametrize('dtype', types)
def test_binary_erosion12(self, dtype):
data = numpy.ones([5], dtype)
data[2] = 0
struct = [1, 0, 1]
out = ndimage.binary_erosion(data, struct, border_value=1, origin=-1)
assert_array_almost_equal(out, [0, 1, 0, 1, 1])
@pytest.mark.parametrize('dtype', types)
def test_binary_erosion13(self, dtype):
data = numpy.ones([5], dtype)
data[2] = 0
struct = [1, 0, 1]
out = ndimage.binary_erosion(data, struct, border_value=1, origin=1)
assert_array_almost_equal(out, [1, 1, 0, 1, 0])
@pytest.mark.parametrize('dtype', types)
def test_binary_erosion14(self, dtype):
data = numpy.ones([5], dtype)
data[2] = 0
struct = [1, 1]
out = ndimage.binary_erosion(data, struct, border_value=1)
assert_array_almost_equal(out, [1, 1, 0, 0, 1])
@pytest.mark.parametrize('dtype', types)
def test_binary_erosion15(self, dtype):
data = numpy.ones([5], dtype)
data[2] = 0
struct = [1, 1]
out = ndimage.binary_erosion(data, struct, border_value=1, origin=-1)
assert_array_almost_equal(out, [1, 0, 0, 1, 1])
@pytest.mark.parametrize('dtype', types)
def test_binary_erosion16(self, dtype):
data = numpy.ones([1, 1], dtype)
out = ndimage.binary_erosion(data, border_value=1)
assert_array_almost_equal(out, [[1]])
@pytest.mark.parametrize('dtype', types)
def test_binary_erosion17(self, dtype):
data = numpy.ones([1, 1], dtype)
out = ndimage.binary_erosion(data)
assert_array_almost_equal(out, [[0]])
@pytest.mark.parametrize('dtype', types)
def test_binary_erosion18(self, dtype):
data = numpy.ones([1, 3], dtype)
out = ndimage.binary_erosion(data)
assert_array_almost_equal(out, [[0, 0, 0]])
@pytest.mark.parametrize('dtype', types)
def test_binary_erosion19(self, dtype):
data = numpy.ones([1, 3], dtype)
out = ndimage.binary_erosion(data, border_value=1)
assert_array_almost_equal(out, [[1, 1, 1]])
@pytest.mark.parametrize('dtype', types)
def test_binary_erosion20(self, dtype):
data = numpy.ones([3, 3], dtype)
out = ndimage.binary_erosion(data)
assert_array_almost_equal(out, [[0, 0, 0],
[0, 1, 0],
[0, 0, 0]])
@pytest.mark.parametrize('dtype', types)
def test_binary_erosion21(self, dtype):
data = numpy.ones([3, 3], dtype)
out = ndimage.binary_erosion(data, border_value=1)
assert_array_almost_equal(out, [[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
@pytest.mark.parametrize('dtype', types)
def test_binary_erosion22(self, dtype):
    """Erosion of an 8x8 blob with the default element and border_value=1."""
    expected = [[0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 1, 0, 0],
                [0, 0, 0, 1, 1, 0, 0, 0],
                [0, 0, 1, 0, 0, 1, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0]]
    data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 1, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 1, 1, 1],
                        [0, 0, 1, 1, 1, 1, 1, 1],
                        [0, 0, 1, 1, 1, 1, 0, 0],
                        [0, 1, 1, 1, 1, 1, 1, 0],
                        [0, 1, 1, 0, 0, 1, 1, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
    out = ndimage.binary_erosion(data, border_value=1)
    assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('dtype', types)
def test_binary_erosion23(self, dtype):
    """Erosion with the full 3x3 (connectivity-2) element, border_value=1."""
    struct = ndimage.generate_binary_structure(2, 2)
    expected = [[0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 1, 1, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0]]
    data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 1, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 1, 1, 1],
                        [0, 0, 1, 1, 1, 1, 1, 1],
                        [0, 0, 1, 1, 1, 1, 0, 0],
                        [0, 1, 1, 1, 1, 1, 1, 0],
                        [0, 1, 1, 0, 0, 1, 1, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
    out = ndimage.binary_erosion(data, struct, border_value=1)
    assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('dtype', types)
def test_binary_erosion24(self, dtype):
    """Erosion with a 2x2 L-shaped structuring element, border_value=1."""
    struct = [[0, 1],
              [1, 1]]
    expected = [[0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 1, 1, 1],
                [0, 0, 0, 1, 1, 1, 0, 0],
                [0, 0, 1, 1, 1, 1, 0, 0],
                [0, 0, 1, 0, 0, 0, 1, 0],
                [0, 0, 0, 0, 0, 0, 0, 0]]
    data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 1, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 1, 1, 1],
                        [0, 0, 1, 1, 1, 1, 1, 1],
                        [0, 0, 1, 1, 1, 1, 0, 0],
                        [0, 1, 1, 1, 1, 1, 1, 0],
                        [0, 1, 1, 0, 0, 1, 1, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
    out = ndimage.binary_erosion(data, struct, border_value=1)
    assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('dtype', types)
def test_binary_erosion25(self, dtype):
    """Erosion with a hollow cross (center element 0), border_value=1."""
    struct = [[0, 1, 0],
              [1, 0, 1],
              [0, 1, 0]]
    expected = [[0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 1, 0, 0],
                [0, 0, 0, 1, 0, 0, 0, 0],
                [0, 0, 1, 0, 0, 1, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0]]
    data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 1, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 1, 1, 1],
                        [0, 0, 1, 1, 1, 0, 1, 1],
                        [0, 0, 1, 0, 1, 1, 0, 0],
                        [0, 1, 0, 1, 1, 1, 1, 0],
                        [0, 1, 1, 0, 0, 1, 1, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
    out = ndimage.binary_erosion(data, struct, border_value=1)
    assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('dtype', types)
def test_binary_erosion26(self, dtype):
    """Hollow-cross erosion with origin=(-1, -1), border_value=1."""
    struct = [[0, 1, 0],
              [1, 0, 1],
              [0, 1, 0]]
    expected = [[0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 1],
                [0, 0, 0, 0, 1, 0, 0, 1],
                [0, 0, 1, 0, 0, 0, 0, 0],
                [0, 1, 0, 0, 1, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 1]]
    data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 1, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 1, 1, 1],
                        [0, 0, 1, 1, 1, 0, 1, 1],
                        [0, 0, 1, 0, 1, 1, 0, 0],
                        [0, 1, 0, 1, 1, 1, 1, 0],
                        [0, 1, 1, 0, 0, 1, 1, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
    out = ndimage.binary_erosion(data, struct, border_value=1,
                                 origin=(-1, -1))
    assert_array_almost_equal(out, expected)
def test_binary_erosion27(self):
    """Two erosion iterations shrink a radius-3 diamond to its center."""
    struct = [[0, 1, 0],
              [1, 1, 1],
              [0, 1, 0]]
    expected = [[0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 1, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0]]
    data = numpy.array([[0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 1, 0, 0, 0],
                        [0, 0, 1, 1, 1, 0, 0],
                        [0, 1, 1, 1, 1, 1, 0],
                        [0, 0, 1, 1, 1, 0, 0],
                        [0, 0, 0, 1, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0]], bool)
    out = ndimage.binary_erosion(data, struct, border_value=1,
                                 iterations=2)
    assert_array_almost_equal(out, expected)
def test_binary_erosion28(self):
    """Same as test 27 but writing into a preallocated output array."""
    struct = [[0, 1, 0],
              [1, 1, 1],
              [0, 1, 0]]
    expected = [[0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 1, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0]]
    data = numpy.array([[0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 1, 0, 0, 0],
                        [0, 0, 1, 1, 1, 0, 0],
                        [0, 1, 1, 1, 1, 1, 0],
                        [0, 0, 1, 1, 1, 0, 0],
                        [0, 0, 0, 1, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0]], bool)
    out = numpy.zeros(data.shape, bool)
    ndimage.binary_erosion(data, struct, border_value=1,
                           iterations=2, output=out)
    assert_array_almost_equal(out, expected)
def test_binary_erosion29(self):
    """Three erosion iterations shrink a radius-4 diamond to its center."""
    struct = [[0, 1, 0],
              [1, 1, 1],
              [0, 1, 0]]
    expected = [[0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 1, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0]]
    data = numpy.array([[0, 0, 0, 1, 0, 0, 0],
                        [0, 0, 1, 1, 1, 0, 0],
                        [0, 1, 1, 1, 1, 1, 0],
                        [1, 1, 1, 1, 1, 1, 1],
                        [0, 1, 1, 1, 1, 1, 0],
                        [0, 0, 1, 1, 1, 0, 0],
                        [0, 0, 0, 1, 0, 0, 0]], bool)
    out = ndimage.binary_erosion(data, struct,
                                 border_value=1, iterations=3)
    assert_array_almost_equal(out, expected)
def test_binary_erosion30(self):
    """Three iterations with a preallocated output, then in-place (output=data)."""
    struct = [[0, 1, 0],
              [1, 1, 1],
              [0, 1, 0]]
    expected = [[0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 1, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0]]
    data = numpy.array([[0, 0, 0, 1, 0, 0, 0],
                        [0, 0, 1, 1, 1, 0, 0],
                        [0, 1, 1, 1, 1, 1, 0],
                        [1, 1, 1, 1, 1, 1, 1],
                        [0, 1, 1, 1, 1, 1, 0],
                        [0, 0, 1, 1, 1, 0, 0],
                        [0, 0, 0, 1, 0, 0, 0]], bool)
    out = numpy.zeros(data.shape, bool)
    ndimage.binary_erosion(data, struct, border_value=1,
                           iterations=3, output=out)
    assert_array_almost_equal(out, expected)
    # test with output memory overlap
    ndimage.binary_erosion(data, struct, border_value=1,
                           iterations=3, output=data)
    assert_array_almost_equal(data, expected)
def test_binary_erosion31(self):
    """One iteration with origin=(-1, -1) and a preallocated output."""
    struct = [[0, 1, 0],
              [1, 1, 1],
              [0, 1, 0]]
    expected = [[0, 0, 1, 0, 0, 0, 0],
                [0, 1, 1, 1, 0, 0, 0],
                [1, 1, 1, 1, 1, 0, 1],
                [0, 1, 1, 1, 0, 0, 0],
                [0, 0, 1, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0],
                [0, 0, 1, 0, 0, 0, 1]]
    data = numpy.array([[0, 0, 0, 1, 0, 0, 0],
                        [0, 0, 1, 1, 1, 0, 0],
                        [0, 1, 1, 1, 1, 1, 0],
                        [1, 1, 1, 1, 1, 1, 1],
                        [0, 1, 1, 1, 1, 1, 0],
                        [0, 0, 1, 1, 1, 0, 0],
                        [0, 0, 0, 1, 0, 0, 0]], bool)
    out = numpy.zeros(data.shape, bool)
    ndimage.binary_erosion(data, struct, border_value=1,
                           iterations=1, output=out, origin=(-1, -1))
    assert_array_almost_equal(out, expected)
def test_binary_erosion32(self):
    """Two iterations on a radius-3 diamond (same data and result as test 27)."""
    struct = [[0, 1, 0],
              [1, 1, 1],
              [0, 1, 0]]
    expected = [[0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 1, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0]]
    data = numpy.array([[0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 1, 0, 0, 0],
                        [0, 0, 1, 1, 1, 0, 0],
                        [0, 1, 1, 1, 1, 1, 0],
                        [0, 0, 1, 1, 1, 0, 0],
                        [0, 0, 0, 1, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0]], bool)
    out = ndimage.binary_erosion(data, struct,
                                 border_value=1, iterations=2)
    assert_array_almost_equal(out, expected)
def test_binary_erosion33(self):
    """Erosion to convergence (iterations=-1) restricted by a mask."""
    struct = [[0, 1, 0],
              [1, 1, 1],
              [0, 1, 0]]
    expected = [[0, 0, 0, 0, 0, 1, 1],
                [0, 0, 0, 0, 0, 0, 1],
                [0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0]]
    mask = [[1, 1, 1, 1, 1, 0, 0],
            [1, 1, 1, 1, 1, 1, 0],
            [1, 1, 1, 1, 1, 1, 1],
            [1, 1, 1, 1, 1, 1, 1],
            [1, 1, 1, 1, 1, 1, 1],
            [1, 1, 1, 1, 1, 1, 1],
            [1, 1, 1, 1, 1, 1, 1]]
    data = numpy.array([[0, 0, 0, 0, 0, 1, 1],
                        [0, 0, 0, 1, 0, 0, 1],
                        [0, 0, 1, 1, 1, 0, 0],
                        [0, 0, 1, 1, 1, 0, 0],
                        [0, 0, 1, 1, 1, 0, 0],
                        [0, 0, 0, 1, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0]], bool)
    out = ndimage.binary_erosion(data, struct,
                                 border_value=1, mask=mask, iterations=-1)
    assert_array_almost_equal(out, expected)
def test_binary_erosion34(self):
    """Masked erosion: pixels outside the mask keep their input values."""
    struct = [[0, 1, 0],
              [1, 1, 1],
              [0, 1, 0]]
    expected = [[0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 1, 0, 0, 0],
                [0, 0, 0, 1, 0, 0, 0],
                [0, 1, 1, 1, 1, 1, 0],
                [0, 0, 0, 1, 0, 0, 0],
                [0, 0, 0, 1, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0]]
    mask = [[0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0],
            [0, 0, 1, 1, 1, 0, 0],
            [0, 0, 1, 0, 1, 0, 0],
            [0, 0, 1, 1, 1, 0, 0],
            [0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0]]
    data = numpy.array([[0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 1, 0, 0, 0],
                        [0, 0, 1, 1, 1, 0, 0],
                        [0, 1, 1, 1, 1, 1, 0],
                        [0, 0, 1, 1, 1, 0, 0],
                        [0, 0, 0, 1, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0]], bool)
    out = ndimage.binary_erosion(data, struct,
                                 border_value=1, mask=mask)
    assert_array_almost_equal(out, expected)
def test_binary_erosion35(self):
    """Masked erosion with origin=(-1, -1) and output; expected is composed
    from the unmasked erosion result (tmp) and the untouched data."""
    struct = [[0, 1, 0],
              [1, 1, 1],
              [0, 1, 0]]
    mask = [[0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0],
            [0, 0, 1, 1, 1, 0, 0],
            [0, 0, 1, 0, 1, 0, 0],
            [0, 0, 1, 1, 1, 0, 0],
            [0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0]]
    data = numpy.array([[0, 0, 0, 1, 0, 0, 0],
                        [0, 0, 1, 1, 1, 0, 0],
                        [0, 1, 1, 1, 1, 1, 0],
                        [1, 1, 1, 1, 1, 1, 1],
                        [0, 1, 1, 1, 1, 1, 0],
                        [0, 0, 1, 1, 1, 0, 0],
                        [0, 0, 0, 1, 0, 0, 0]], bool)
    # tmp is the erosion result with no mask (cf. test 31).
    tmp = [[0, 0, 1, 0, 0, 0, 0],
           [0, 1, 1, 1, 0, 0, 0],
           [1, 1, 1, 1, 1, 0, 1],
           [0, 1, 1, 1, 0, 0, 0],
           [0, 0, 1, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0],
           [0, 0, 1, 0, 0, 0, 1]]
    expected = numpy.logical_and(tmp, mask)
    tmp = numpy.logical_and(data, numpy.logical_not(mask))
    expected = numpy.logical_or(expected, tmp)
    out = numpy.zeros(data.shape, bool)
    ndimage.binary_erosion(data, struct, border_value=1,
                           iterations=1, output=out,
                           origin=(-1, -1), mask=mask)
    assert_array_almost_equal(out, expected)
def test_binary_erosion36(self):
    """Masked hollow-cross erosion with origin=(-1, -1); expected is composed
    from the unmasked result (tmp, cf. test 26) and the untouched data."""
    struct = [[0, 1, 0],
              [1, 0, 1],
              [0, 1, 0]]
    mask = [[0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 1, 1, 1, 0, 0, 0],
            [0, 0, 1, 0, 1, 0, 0, 0],
            [0, 0, 1, 1, 1, 0, 0, 0],
            [0, 0, 1, 1, 1, 0, 0, 0],
            [0, 0, 1, 1, 1, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0]]
    tmp = [[0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 1],
           [0, 0, 0, 0, 1, 0, 0, 1],
           [0, 0, 1, 0, 0, 0, 0, 0],
           [0, 1, 0, 0, 1, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 1]]
    data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 1, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 1, 1, 1],
                        [0, 0, 1, 1, 1, 0, 1, 1],
                        [0, 0, 1, 0, 1, 1, 0, 0],
                        [0, 1, 0, 1, 1, 1, 1, 0],
                        [0, 1, 1, 0, 0, 1, 1, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0]])
    expected = numpy.logical_and(tmp, mask)
    tmp = numpy.logical_and(data, numpy.logical_not(mask))
    expected = numpy.logical_or(expected, tmp)
    out = ndimage.binary_erosion(data, struct, mask=mask,
                                 border_value=1, origin=(-1, -1))
    assert_array_almost_equal(out, expected)
def test_binary_erosion37(self):
a = numpy.array([[1, 0, 1],
[0, 1, 0],
[1, 0, 1]], dtype=bool)
b = numpy.zeros_like(a)
out = ndimage.binary_erosion(a, structure=a, output=b, iterations=0,
border_value=True, brute_force=True)
assert_(out is b)
assert_array_equal(
ndimage.binary_erosion(a, structure=a, iterations=0,
border_value=True),
b)
def test_binary_erosion38(self):
data = numpy.array([[1, 0, 1],
[0, 1, 0],
[1, 0, 1]], dtype=bool)
iterations = 2.0
with assert_raises(TypeError):
_ = ndimage.binary_erosion(data, iterations=iterations)
def test_binary_erosion39(self):
    """iterations given as numpy.int32 is accepted (cf. test 30)."""
    iterations = numpy.int32(3)
    struct = [[0, 1, 0],
              [1, 1, 1],
              [0, 1, 0]]
    expected = [[0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 1, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0]]
    data = numpy.array([[0, 0, 0, 1, 0, 0, 0],
                        [0, 0, 1, 1, 1, 0, 0],
                        [0, 1, 1, 1, 1, 1, 0],
                        [1, 1, 1, 1, 1, 1, 1],
                        [0, 1, 1, 1, 1, 1, 0],
                        [0, 0, 1, 1, 1, 0, 0],
                        [0, 0, 0, 1, 0, 0, 0]], bool)
    out = numpy.zeros(data.shape, bool)
    ndimage.binary_erosion(data, struct, border_value=1,
                           iterations=iterations, output=out)
    assert_array_almost_equal(out, expected)
def test_binary_erosion40(self):
    """iterations given as numpy.int64 is accepted (cf. test 39)."""
    iterations = numpy.int64(3)
    struct = [[0, 1, 0],
              [1, 1, 1],
              [0, 1, 0]]
    expected = [[0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 1, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0]]
    data = numpy.array([[0, 0, 0, 1, 0, 0, 0],
                        [0, 0, 1, 1, 1, 0, 0],
                        [0, 1, 1, 1, 1, 1, 0],
                        [1, 1, 1, 1, 1, 1, 1],
                        [0, 1, 1, 1, 1, 1, 0],
                        [0, 0, 1, 1, 1, 0, 0],
                        [0, 0, 0, 1, 0, 0, 0]], bool)
    out = numpy.zeros(data.shape, bool)
    ndimage.binary_erosion(data, struct, border_value=1,
                           iterations=iterations, output=out)
    assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('dtype', types)
def test_binary_dilation01(self, dtype):
data = numpy.ones([], dtype)
out = ndimage.binary_dilation(data)
assert_array_almost_equal(out, 1)
@pytest.mark.parametrize('dtype', types)
def test_binary_dilation02(self, dtype):
data = numpy.zeros([], dtype)
out = ndimage.binary_dilation(data)
assert_array_almost_equal(out, 0)
@pytest.mark.parametrize('dtype', types)
def test_binary_dilation03(self, dtype):
data = numpy.ones([1], dtype)
out = ndimage.binary_dilation(data)
assert_array_almost_equal(out, [1])
@pytest.mark.parametrize('dtype', types)
def test_binary_dilation04(self, dtype):
data = numpy.zeros([1], dtype)
out = ndimage.binary_dilation(data)
assert_array_almost_equal(out, [0])
@pytest.mark.parametrize('dtype', types)
def test_binary_dilation05(self, dtype):
data = numpy.ones([3], dtype)
out = ndimage.binary_dilation(data)
assert_array_almost_equal(out, [1, 1, 1])
@pytest.mark.parametrize('dtype', types)
def test_binary_dilation06(self, dtype):
data = numpy.zeros([3], dtype)
out = ndimage.binary_dilation(data)
assert_array_almost_equal(out, [0, 0, 0])
@pytest.mark.parametrize('dtype', types)
def test_binary_dilation07(self, dtype):
data = numpy.zeros([3], dtype)
data[1] = 1
out = ndimage.binary_dilation(data)
assert_array_almost_equal(out, [1, 1, 1])
@pytest.mark.parametrize('dtype', types)
def test_binary_dilation08(self, dtype):
data = numpy.zeros([5], dtype)
data[1] = 1
data[3] = 1
out = ndimage.binary_dilation(data)
assert_array_almost_equal(out, [1, 1, 1, 1, 1])
@pytest.mark.parametrize('dtype', types)
def test_binary_dilation09(self, dtype):
data = numpy.zeros([5], dtype)
data[1] = 1
out = ndimage.binary_dilation(data)
assert_array_almost_equal(out, [1, 1, 1, 0, 0])
@pytest.mark.parametrize('dtype', types)
def test_binary_dilation10(self, dtype):
data = numpy.zeros([5], dtype)
data[1] = 1
out = ndimage.binary_dilation(data, origin=-1)
assert_array_almost_equal(out, [0, 1, 1, 1, 0])
@pytest.mark.parametrize('dtype', types)
def test_binary_dilation11(self, dtype):
data = numpy.zeros([5], dtype)
data[1] = 1
out = ndimage.binary_dilation(data, origin=1)
assert_array_almost_equal(out, [1, 1, 0, 0, 0])
@pytest.mark.parametrize('dtype', types)
def test_binary_dilation12(self, dtype):
data = numpy.zeros([5], dtype)
data[1] = 1
struct = [1, 0, 1]
out = ndimage.binary_dilation(data, struct)
assert_array_almost_equal(out, [1, 0, 1, 0, 0])
@pytest.mark.parametrize('dtype', types)
def test_binary_dilation13(self, dtype):
data = numpy.zeros([5], dtype)
data[1] = 1
struct = [1, 0, 1]
out = ndimage.binary_dilation(data, struct, border_value=1)
assert_array_almost_equal(out, [1, 0, 1, 0, 1])
@pytest.mark.parametrize('dtype', types)
def test_binary_dilation14(self, dtype):
data = numpy.zeros([5], dtype)
data[1] = 1
struct = [1, 0, 1]
out = ndimage.binary_dilation(data, struct, origin=-1)
assert_array_almost_equal(out, [0, 1, 0, 1, 0])
@pytest.mark.parametrize('dtype', types)
def test_binary_dilation15(self, dtype):
data = numpy.zeros([5], dtype)
data[1] = 1
struct = [1, 0, 1]
out = ndimage.binary_dilation(data, struct,
origin=-1, border_value=1)
assert_array_almost_equal(out, [1, 1, 0, 1, 0])
@pytest.mark.parametrize('dtype', types)
def test_binary_dilation16(self, dtype):
data = numpy.ones([1, 1], dtype)
out = ndimage.binary_dilation(data)
assert_array_almost_equal(out, [[1]])
@pytest.mark.parametrize('dtype', types)
def test_binary_dilation17(self, dtype):
data = numpy.zeros([1, 1], dtype)
out = ndimage.binary_dilation(data)
assert_array_almost_equal(out, [[0]])
@pytest.mark.parametrize('dtype', types)
def test_binary_dilation18(self, dtype):
data = numpy.ones([1, 3], dtype)
out = ndimage.binary_dilation(data)
assert_array_almost_equal(out, [[1, 1, 1]])
@pytest.mark.parametrize('dtype', types)
def test_binary_dilation19(self, dtype):
data = numpy.ones([3, 3], dtype)
out = ndimage.binary_dilation(data)
assert_array_almost_equal(out, [[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
@pytest.mark.parametrize('dtype', types)
def test_binary_dilation20(self, dtype):
data = numpy.zeros([3, 3], dtype)
data[1, 1] = 1
out = ndimage.binary_dilation(data)
assert_array_almost_equal(out, [[0, 1, 0],
[1, 1, 1],
[0, 1, 0]])
@pytest.mark.parametrize('dtype', types)
def test_binary_dilation21(self, dtype):
struct = ndimage.generate_binary_structure(2, 2)
data = numpy.zeros([3, 3], dtype)
data[1, 1] = 1
out = ndimage.binary_dilation(data, struct)
assert_array_almost_equal(out, [[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
@pytest.mark.parametrize('dtype', types)
def test_binary_dilation22(self, dtype):
    """Dilation of scattered seeds with the default element on an 8x8 grid."""
    expected = [[0, 1, 0, 0, 0, 0, 0, 0],
                [1, 1, 1, 0, 0, 0, 0, 0],
                [0, 1, 0, 0, 0, 1, 0, 0],
                [0, 0, 0, 1, 1, 1, 1, 0],
                [0, 0, 1, 1, 1, 1, 0, 0],
                [0, 1, 1, 1, 1, 1, 1, 0],
                [0, 0, 1, 0, 0, 1, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0]]
    data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 1, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 1, 0, 0],
                        [0, 0, 0, 1, 1, 0, 0, 0],
                        [0, 0, 1, 0, 0, 1, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
    out = ndimage.binary_dilation(data)
    assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('dtype', types)
def test_binary_dilation23(self, dtype):
    """Same seeds as test 22 but with border_value=1 filling in the edges."""
    expected = [[1, 1, 1, 1, 1, 1, 1, 1],
                [1, 1, 1, 0, 0, 0, 0, 1],
                [1, 1, 0, 0, 0, 1, 0, 1],
                [1, 0, 0, 1, 1, 1, 1, 1],
                [1, 0, 1, 1, 1, 1, 0, 1],
                [1, 1, 1, 1, 1, 1, 1, 1],
                [1, 0, 1, 0, 0, 1, 0, 1],
                [1, 1, 1, 1, 1, 1, 1, 1]]
    data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 1, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 1, 0, 0],
                        [0, 0, 0, 1, 1, 0, 0, 0],
                        [0, 0, 1, 0, 0, 1, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
    out = ndimage.binary_dilation(data, border_value=1)
    assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('dtype', types)
def test_binary_dilation24(self, dtype):
    """Dilation with origin=(1, 1) shifts the test-22 result up and left."""
    expected = [[1, 1, 0, 0, 0, 0, 0, 0],
                [1, 0, 0, 0, 1, 0, 0, 0],
                [0, 0, 1, 1, 1, 1, 0, 0],
                [0, 1, 1, 1, 1, 0, 0, 0],
                [1, 1, 1, 1, 1, 1, 0, 0],
                [0, 1, 0, 0, 1, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0]]
    data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 1, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 1, 0, 0],
                        [0, 0, 0, 1, 1, 0, 0, 0],
                        [0, 0, 1, 0, 0, 1, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
    out = ndimage.binary_dilation(data, origin=(1, 1))
    assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('dtype', types)
def test_binary_dilation25(self, dtype):
    """Dilation with origin=(1, 1) combined with border_value=1."""
    expected = [[1, 1, 0, 0, 0, 0, 1, 1],
                [1, 0, 0, 0, 1, 0, 1, 1],
                [0, 0, 1, 1, 1, 1, 1, 1],
                [0, 1, 1, 1, 1, 0, 1, 1],
                [1, 1, 1, 1, 1, 1, 1, 1],
                [0, 1, 0, 0, 1, 0, 1, 1],
                [1, 1, 1, 1, 1, 1, 1, 1],
                [1, 1, 1, 1, 1, 1, 1, 1]]
    data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 1, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 1, 0, 0],
                        [0, 0, 0, 1, 1, 0, 0, 0],
                        [0, 0, 1, 0, 0, 1, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
    out = ndimage.binary_dilation(data, origin=(1, 1), border_value=1)
    assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('dtype', types)
def test_binary_dilation26(self, dtype):
    """Dilation with the full 3x3 (connectivity-2) structuring element."""
    struct = ndimage.generate_binary_structure(2, 2)
    expected = [[1, 1, 1, 0, 0, 0, 0, 0],
                [1, 1, 1, 0, 0, 0, 0, 0],
                [1, 1, 1, 0, 1, 1, 1, 0],
                [0, 0, 1, 1, 1, 1, 1, 0],
                [0, 1, 1, 1, 1, 1, 1, 0],
                [0, 1, 1, 1, 1, 1, 1, 0],
                [0, 1, 1, 1, 1, 1, 1, 0],
                [0, 0, 0, 0, 0, 0, 0, 0]]
    data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 1, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 1, 0, 0],
                        [0, 0, 0, 1, 1, 0, 0, 0],
                        [0, 0, 1, 0, 0, 1, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
    out = ndimage.binary_dilation(data, struct)
    assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('dtype', types)
def test_binary_dilation27(self, dtype):
    """Dilation with a 2x2 L-shaped structuring element."""
    struct = [[0, 1],
              [1, 1]]
    expected = [[0, 1, 0, 0, 0, 0, 0, 0],
                [1, 1, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 1, 0, 0],
                [0, 0, 0, 1, 1, 1, 0, 0],
                [0, 0, 1, 1, 1, 1, 0, 0],
                [0, 1, 1, 0, 1, 1, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0]]
    data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 1, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 1, 0, 0],
                        [0, 0, 0, 1, 1, 0, 0, 0],
                        [0, 0, 1, 0, 0, 1, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
    out = ndimage.binary_dilation(data, struct)
    assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('dtype', types)
def test_binary_dilation28(self, dtype):
expected = [[1, 1, 1, 1],
[1, 0, 0, 1],
[1, 0, 0, 1],
[1, 1, 1, 1]]
data = numpy.array([[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]], dtype)
out = ndimage.binary_dilation(data, border_value=1)
assert_array_almost_equal(out, expected)
def test_binary_dilation29(self):
struct = [[0, 1],
[1, 1]]
expected = [[0, 0, 0, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]]
data = numpy.array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 0]], bool)
out = ndimage.binary_dilation(data, struct, iterations=2)
assert_array_almost_equal(out, expected)
def test_binary_dilation30(self):
struct = [[0, 1],
[1, 1]]
expected = [[0, 0, 0, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]]
data = numpy.array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 0]], bool)
out = numpy.zeros(data.shape, bool)
ndimage.binary_dilation(data, struct, iterations=2, output=out)
assert_array_almost_equal(out, expected)
def test_binary_dilation31(self):
struct = [[0, 1],
[1, 1]]
expected = [[0, 0, 0, 1, 0],
[0, 0, 1, 1, 0],
[0, 1, 1, 1, 0],
[1, 1, 1, 1, 0],
[0, 0, 0, 0, 0]]
data = numpy.array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 0]], bool)
out = ndimage.binary_dilation(data, struct, iterations=3)
assert_array_almost_equal(out, expected)
def test_binary_dilation32(self):
struct = [[0, 1],
[1, 1]]
expected = [[0, 0, 0, 1, 0],
[0, 0, 1, 1, 0],
[0, 1, 1, 1, 0],
[1, 1, 1, 1, 0],
[0, 0, 0, 0, 0]]
data = numpy.array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 0]], bool)
out = numpy.zeros(data.shape, bool)
ndimage.binary_dilation(data, struct, iterations=3, output=out)
assert_array_almost_equal(out, expected)
def test_binary_dilation33(self):
    """Dilation to convergence (iterations=-1) limited by a mask, border 0."""
    struct = [[0, 1, 0],
              [1, 1, 1],
              [0, 1, 0]]
    expected = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 1, 1, 0, 1, 1, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0]], bool)
    mask = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 1, 0],
                        [0, 0, 0, 0, 1, 1, 0, 0],
                        [0, 0, 1, 1, 1, 0, 0, 0],
                        [0, 1, 1, 0, 1, 1, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0]], bool)
    data = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 1, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0]], bool)
    out = ndimage.binary_dilation(data, struct, iterations=-1,
                                  mask=mask, border_value=0)
    assert_array_almost_equal(out, expected)
def test_binary_dilation34(self):
    """Masked dilation to convergence seeded only by border_value=1."""
    struct = [[0, 1, 0],
              [1, 1, 1],
              [0, 1, 0]]
    expected = [[0, 1, 0, 0, 0, 0, 0, 0],
                [0, 1, 1, 0, 0, 0, 0, 0],
                [0, 0, 1, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0]]
    mask = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
                        [0, 1, 1, 0, 0, 0, 0, 0],
                        [0, 0, 1, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 1, 0, 0],
                        [0, 0, 0, 1, 1, 0, 0, 0],
                        [0, 0, 1, 0, 0, 1, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0]], bool)
    data = numpy.zeros(mask.shape, bool)
    out = ndimage.binary_dilation(data, struct, iterations=-1,
                                  mask=mask, border_value=1)
    assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('dtype', types)
def test_binary_dilation35(self, dtype):
    """Masked dilation with origin=(1, 1) and border_value=1; expected is
    composed from the unmasked result (tmp, cf. test 25) and the untouched
    data outside the mask."""
    tmp = [[1, 1, 0, 0, 0, 0, 1, 1],
           [1, 0, 0, 0, 1, 0, 1, 1],
           [0, 0, 1, 1, 1, 1, 1, 1],
           [0, 1, 1, 1, 1, 0, 1, 1],
           [1, 1, 1, 1, 1, 1, 1, 1],
           [0, 1, 0, 0, 1, 0, 1, 1],
           [1, 1, 1, 1, 1, 1, 1, 1],
           [1, 1, 1, 1, 1, 1, 1, 1]]
    data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 1, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 1, 0, 0],
                        [0, 0, 0, 1, 1, 0, 0, 0],
                        [0, 0, 1, 0, 0, 1, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0]])
    mask = [[0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 1, 1, 1, 1, 0, 0],
            [0, 0, 1, 1, 1, 1, 0, 0],
            [0, 0, 1, 1, 1, 1, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0]]
    expected = numpy.logical_and(tmp, mask)
    tmp = numpy.logical_and(data, numpy.logical_not(mask))
    expected = numpy.logical_or(expected, tmp)
    data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 1, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 1, 0, 0],
                        [0, 0, 0, 1, 1, 0, 0, 0],
                        [0, 0, 1, 0, 0, 1, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
    out = ndimage.binary_dilation(data, mask=mask,
                                  origin=(1, 1), border_value=1)
    assert_array_almost_equal(out, expected)
def test_binary_propagation01(self):
    """Propagation of two seeds through a mask with border_value=0
    (same data and result as the iterations=-1 dilation in test 33)."""
    struct = [[0, 1, 0],
              [1, 1, 1],
              [0, 1, 0]]
    expected = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 1, 1, 0, 1, 1, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0]], bool)
    mask = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 1, 0],
                        [0, 0, 0, 0, 1, 1, 0, 0],
                        [0, 0, 1, 1, 1, 0, 0, 0],
                        [0, 1, 1, 0, 1, 1, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0]], bool)
    data = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 1, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0]], bool)
    out = ndimage.binary_propagation(data, struct,
                                     mask=mask, border_value=0)
    assert_array_almost_equal(out, expected)
def test_binary_propagation02(self):
    """Propagation seeded only from the border (border_value=1) through a
    mask (same data and result as test_binary_dilation34)."""
    struct = [[0, 1, 0],
              [1, 1, 1],
              [0, 1, 0]]
    expected = [[0, 1, 0, 0, 0, 0, 0, 0],
                [0, 1, 1, 0, 0, 0, 0, 0],
                [0, 0, 1, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0]]
    mask = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
                        [0, 1, 1, 0, 0, 0, 0, 0],
                        [0, 0, 1, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 1, 0, 0],
                        [0, 0, 0, 1, 1, 0, 0, 0],
                        [0, 0, 1, 0, 0, 1, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0]], bool)
    data = numpy.zeros(mask.shape, bool)
    out = ndimage.binary_propagation(data, struct,
                                     mask=mask, border_value=1)
    assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('dtype', types)
def test_binary_opening01(self, dtype):
    """Opening with the default element removes protrusions the element
    cannot contain while preserving the larger shapes."""
    expected = [[0, 1, 0, 0, 0, 0, 0, 0],
                [1, 1, 1, 0, 0, 0, 0, 0],
                [0, 1, 0, 0, 0, 1, 0, 0],
                [0, 0, 0, 0, 1, 1, 1, 0],
                [0, 0, 1, 0, 0, 1, 0, 0],
                [0, 1, 1, 1, 1, 1, 1, 0],
                [0, 0, 1, 0, 0, 1, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0]]
    data = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
                        [1, 1, 1, 0, 0, 0, 0, 0],
                        [0, 1, 0, 0, 0, 1, 0, 0],
                        [0, 0, 0, 1, 1, 1, 1, 0],
                        [0, 0, 1, 1, 0, 1, 0, 0],
                        [0, 1, 1, 1, 1, 1, 1, 0],
                        [0, 0, 1, 0, 0, 1, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
    out = ndimage.binary_opening(data)
    assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('dtype', types)
def test_binary_opening02(self, dtype):
    """Opening with the full 3x3 element keeps only regions that can
    contain a complete 3x3 square."""
    struct = ndimage.generate_binary_structure(2, 2)
    expected = [[1, 1, 1, 0, 0, 0, 0, 0],
                [1, 1, 1, 0, 0, 0, 0, 0],
                [1, 1, 1, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0],
                [0, 1, 1, 1, 0, 0, 0, 0],
                [0, 1, 1, 1, 0, 0, 0, 0],
                [0, 1, 1, 1, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0]]
    data = numpy.array([[1, 1, 1, 0, 0, 0, 0, 0],
                        [1, 1, 1, 0, 0, 0, 0, 0],
                        [1, 1, 1, 1, 1, 1, 1, 0],
                        [0, 0, 1, 1, 1, 1, 1, 0],
                        [0, 1, 1, 1, 0, 1, 1, 0],
                        [0, 1, 1, 1, 1, 1, 1, 0],
                        [0, 1, 1, 1, 1, 1, 1, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
    out = ndimage.binary_opening(data, struct)
    assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('dtype', types)
def test_binary_closing01(self, dtype):
    """Binary closing with the default structuring element.

    Closing (dilation then erosion) fills small gaps/holes in ``data``
    that the structuring element bridges.  Run for every supported dtype.
    """
    expected = [[0, 0, 0, 0, 0, 0, 0, 0],
                [0, 1, 1, 0, 0, 0, 0, 0],
                [0, 1, 1, 1, 0, 1, 0, 0],
                [0, 0, 1, 1, 1, 1, 1, 0],
                [0, 0, 1, 1, 1, 1, 0, 0],
                [0, 1, 1, 1, 1, 1, 1, 0],
                [0, 0, 1, 0, 0, 1, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0]]
    data = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
                        [1, 1, 1, 0, 0, 0, 0, 0],
                        [0, 1, 0, 0, 0, 1, 0, 0],
                        [0, 0, 0, 1, 1, 1, 1, 0],
                        [0, 0, 1, 1, 0, 1, 0, 0],
                        [0, 1, 1, 1, 1, 1, 1, 0],
                        [0, 0, 1, 0, 0, 1, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
    out = ndimage.binary_closing(data)
    assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('dtype', types)
def test_binary_closing02(self, dtype):
    """Binary closing with a full 3x3 (8-connected) structuring element."""
    struct = ndimage.generate_binary_structure(2, 2)
    expected = [[0, 0, 0, 0, 0, 0, 0, 0],
                [0, 1, 1, 0, 0, 0, 0, 0],
                [0, 1, 1, 1, 1, 1, 1, 0],
                [0, 1, 1, 1, 1, 1, 1, 0],
                [0, 1, 1, 1, 1, 1, 1, 0],
                [0, 1, 1, 1, 1, 1, 1, 0],
                [0, 1, 1, 1, 1, 1, 1, 0],
                [0, 0, 0, 0, 0, 0, 0, 0]]
    data = numpy.array([[1, 1, 1, 0, 0, 0, 0, 0],
                        [1, 1, 1, 0, 0, 0, 0, 0],
                        [1, 1, 1, 1, 1, 1, 1, 0],
                        [0, 0, 1, 1, 1, 1, 1, 0],
                        [0, 1, 1, 1, 0, 1, 1, 0],
                        [0, 1, 1, 1, 1, 1, 1, 0],
                        [0, 1, 1, 1, 1, 1, 1, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
    out = ndimage.binary_closing(data, struct)
    assert_array_almost_equal(out, expected)
def test_binary_fill_holes01(self):
    """binary_fill_holes fills the interior of a closed rectangular ring."""
    expected = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0]], bool)
    data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 1, 1, 1, 1, 0, 0],
                        [0, 0, 1, 0, 0, 1, 0, 0],
                        [0, 0, 1, 0, 0, 1, 0, 0],
                        [0, 0, 1, 0, 0, 1, 0, 0],
                        [0, 0, 1, 1, 1, 1, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0]], bool)
    out = ndimage.binary_fill_holes(data)
    assert_array_almost_equal(out, expected)
def test_binary_fill_holes02(self):
    """binary_fill_holes fills the interior of a closed octagon-like ring."""
    expected = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 1, 1, 0, 0, 0],
                            [0, 0, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0]], bool)
    data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 1, 1, 0, 0, 0],
                        [0, 0, 1, 0, 0, 1, 0, 0],
                        [0, 0, 1, 0, 0, 1, 0, 0],
                        [0, 0, 1, 0, 0, 1, 0, 0],
                        [0, 0, 0, 1, 1, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0]], bool)
    out = ndimage.binary_fill_holes(data)
    assert_array_almost_equal(out, expected)
def test_binary_fill_holes03(self):
    """binary_fill_holes fills several separate enclosed regions at once.

    The right-hand shape touches the image edge, exercising the
    border-handling path as well.
    """
    expected = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 1, 0, 0, 0, 0, 0],
                            [0, 1, 1, 1, 0, 1, 1, 1],
                            [0, 1, 1, 1, 0, 1, 1, 1],
                            [0, 1, 1, 1, 0, 1, 1, 1],
                            [0, 0, 1, 0, 0, 1, 1, 1],
                            [0, 0, 0, 0, 0, 0, 0, 0]], bool)
    data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 1, 0, 0, 0, 0, 0],
                        [0, 1, 0, 1, 0, 1, 1, 1],
                        [0, 1, 0, 1, 0, 1, 0, 1],
                        [0, 1, 0, 1, 0, 1, 0, 1],
                        [0, 0, 1, 0, 0, 1, 1, 1],
                        [0, 0, 0, 0, 0, 0, 0, 0]], bool)
    out = ndimage.binary_fill_holes(data)
    assert_array_almost_equal(out, expected)
def test_grey_erosion01(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
output = ndimage.grey_erosion(array, footprint=footprint)
assert_array_almost_equal([[2, 2, 1, 1, 1],
[2, 3, 1, 3, 1],
[5, 5, 3, 3, 1]], output)
def test_grey_erosion01_overlap(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
ndimage.grey_erosion(array, footprint=footprint, output=array)
assert_array_almost_equal([[2, 2, 1, 1, 1],
[2, 3, 1, 3, 1],
[5, 5, 3, 3, 1]], array)
def test_grey_erosion02(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[0, 0, 0], [0, 0, 0]]
output = ndimage.grey_erosion(array, footprint=footprint,
structure=structure)
assert_array_almost_equal([[2, 2, 1, 1, 1],
[2, 3, 1, 3, 1],
[5, 5, 3, 3, 1]], output)
def test_grey_erosion03(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[1, 1, 1], [1, 1, 1]]
output = ndimage.grey_erosion(array, footprint=footprint,
structure=structure)
assert_array_almost_equal([[1, 1, 0, 0, 0],
[1, 2, 0, 2, 0],
[4, 4, 2, 2, 0]], output)
def test_grey_dilation01(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[0, 1, 1], [1, 0, 1]]
output = ndimage.grey_dilation(array, footprint=footprint)
assert_array_almost_equal([[7, 7, 9, 9, 5],
[7, 9, 8, 9, 7],
[8, 8, 8, 7, 7]], output)
def test_grey_dilation02(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[0, 1, 1], [1, 0, 1]]
structure = [[0, 0, 0], [0, 0, 0]]
output = ndimage.grey_dilation(array, footprint=footprint,
structure=structure)
assert_array_almost_equal([[7, 7, 9, 9, 5],
[7, 9, 8, 9, 7],
[8, 8, 8, 7, 7]], output)
def test_grey_dilation03(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[0, 1, 1], [1, 0, 1]]
structure = [[1, 1, 1], [1, 1, 1]]
output = ndimage.grey_dilation(array, footprint=footprint,
structure=structure)
assert_array_almost_equal([[8, 8, 10, 10, 6],
[8, 10, 9, 10, 8],
[9, 9, 9, 8, 8]], output)
def test_grey_opening01(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
tmp = ndimage.grey_erosion(array, footprint=footprint)
expected = ndimage.grey_dilation(tmp, footprint=footprint)
output = ndimage.grey_opening(array, footprint=footprint)
assert_array_almost_equal(expected, output)
def test_grey_opening02(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[0, 0, 0], [0, 0, 0]]
tmp = ndimage.grey_erosion(array, footprint=footprint,
structure=structure)
expected = ndimage.grey_dilation(tmp, footprint=footprint,
structure=structure)
output = ndimage.grey_opening(array, footprint=footprint,
structure=structure)
assert_array_almost_equal(expected, output)
def test_grey_closing01(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
tmp = ndimage.grey_dilation(array, footprint=footprint)
expected = ndimage.grey_erosion(tmp, footprint=footprint)
output = ndimage.grey_closing(array, footprint=footprint)
assert_array_almost_equal(expected, output)
def test_grey_closing02(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[0, 0, 0], [0, 0, 0]]
tmp = ndimage.grey_dilation(array, footprint=footprint,
structure=structure)
expected = ndimage.grey_erosion(tmp, footprint=footprint,
structure=structure)
output = ndimage.grey_closing(array, footprint=footprint,
structure=structure)
assert_array_almost_equal(expected, output)
def test_morphological_gradient01(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[0, 0, 0], [0, 0, 0]]
tmp1 = ndimage.grey_dilation(array, footprint=footprint,
structure=structure)
tmp2 = ndimage.grey_erosion(array, footprint=footprint,
structure=structure)
expected = tmp1 - tmp2
output = numpy.zeros(array.shape, array.dtype)
ndimage.morphological_gradient(array, footprint=footprint,
structure=structure, output=output)
assert_array_almost_equal(expected, output)
def test_morphological_gradient02(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[0, 0, 0], [0, 0, 0]]
tmp1 = ndimage.grey_dilation(array, footprint=footprint,
structure=structure)
tmp2 = ndimage.grey_erosion(array, footprint=footprint,
structure=structure)
expected = tmp1 - tmp2
output = ndimage.morphological_gradient(array, footprint=footprint,
structure=structure)
assert_array_almost_equal(expected, output)
def test_morphological_laplace01(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[0, 0, 0], [0, 0, 0]]
tmp1 = ndimage.grey_dilation(array, footprint=footprint,
structure=structure)
tmp2 = ndimage.grey_erosion(array, footprint=footprint,
structure=structure)
expected = tmp1 + tmp2 - 2 * array
output = numpy.zeros(array.shape, array.dtype)
ndimage.morphological_laplace(array, footprint=footprint,
structure=structure, output=output)
assert_array_almost_equal(expected, output)
def test_morphological_laplace02(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[0, 0, 0], [0, 0, 0]]
tmp1 = ndimage.grey_dilation(array, footprint=footprint,
structure=structure)
tmp2 = ndimage.grey_erosion(array, footprint=footprint,
structure=structure)
expected = tmp1 + tmp2 - 2 * array
output = ndimage.morphological_laplace(array, footprint=footprint,
structure=structure)
assert_array_almost_equal(expected, output)
def test_white_tophat01(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[0, 0, 0], [0, 0, 0]]
tmp = ndimage.grey_opening(array, footprint=footprint,
structure=structure)
expected = array - tmp
output = numpy.zeros(array.shape, array.dtype)
ndimage.white_tophat(array, footprint=footprint,
structure=structure, output=output)
assert_array_almost_equal(expected, output)
def test_white_tophat02(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[0, 0, 0], [0, 0, 0]]
tmp = ndimage.grey_opening(array, footprint=footprint,
structure=structure)
expected = array - tmp
output = ndimage.white_tophat(array, footprint=footprint,
structure=structure)
assert_array_almost_equal(expected, output)
def test_white_tophat03(self):
    """white_tophat on boolean input with a 3x3 structure.

    The expected array is the precomputed reference for this fixture.
    """
    array = numpy.array([[1, 0, 0, 0, 0, 0, 0],
                         [0, 1, 1, 1, 1, 1, 0],
                         [0, 1, 1, 1, 1, 1, 0],
                         [0, 1, 1, 1, 1, 1, 0],
                         [0, 1, 1, 1, 0, 1, 0],
                         [0, 1, 1, 1, 1, 1, 0],
                         [0, 0, 0, 0, 0, 0, 1]], dtype=numpy.bool_)
    structure = numpy.ones((3, 3), dtype=numpy.bool_)
    expected = numpy.array([[0, 1, 1, 0, 0, 0, 0],
                            [1, 0, 0, 1, 1, 1, 0],
                            [1, 0, 0, 1, 1, 1, 0],
                            [0, 1, 1, 0, 0, 0, 1],
                            [0, 1, 1, 0, 1, 0, 1],
                            [0, 1, 1, 0, 0, 0, 1],
                            [0, 0, 0, 1, 1, 1, 1]], dtype=numpy.bool_)
    output = ndimage.white_tophat(array, structure=structure)
    assert_array_equal(expected, output)
def test_white_tophat04(self):
array = numpy.eye(5, dtype=numpy.bool_)
structure = numpy.ones((3, 3), dtype=numpy.bool_)
# Check that type mismatch is properly handled
output = numpy.empty_like(array, dtype=numpy.float64)
ndimage.white_tophat(array, structure=structure, output=output)
def test_black_tophat01(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[0, 0, 0], [0, 0, 0]]
tmp = ndimage.grey_closing(array, footprint=footprint,
structure=structure)
expected = tmp - array
output = numpy.zeros(array.shape, array.dtype)
ndimage.black_tophat(array, footprint=footprint,
structure=structure, output=output)
assert_array_almost_equal(expected, output)
def test_black_tophat02(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[0, 0, 0], [0, 0, 0]]
tmp = ndimage.grey_closing(array, footprint=footprint,
structure=structure)
expected = tmp - array
output = ndimage.black_tophat(array, footprint=footprint,
structure=structure)
assert_array_almost_equal(expected, output)
def test_black_tophat03(self):
    """black_tophat on boolean input with a 3x3 structure.

    The expected array is the precomputed reference for this fixture.
    """
    array = numpy.array([[1, 0, 0, 0, 0, 0, 0],
                         [0, 1, 1, 1, 1, 1, 0],
                         [0, 1, 1, 1, 1, 1, 0],
                         [0, 1, 1, 1, 1, 1, 0],
                         [0, 1, 1, 1, 0, 1, 0],
                         [0, 1, 1, 1, 1, 1, 0],
                         [0, 0, 0, 0, 0, 0, 1]], dtype=numpy.bool_)
    structure = numpy.ones((3, 3), dtype=numpy.bool_)
    expected = numpy.array([[0, 1, 1, 1, 1, 1, 1],
                            [1, 0, 0, 0, 0, 0, 1],
                            [1, 0, 0, 0, 0, 0, 1],
                            [1, 0, 0, 0, 0, 0, 1],
                            [1, 0, 0, 0, 1, 0, 1],
                            [1, 0, 0, 0, 0, 0, 1],
                            [1, 1, 1, 1, 1, 1, 0]], dtype=numpy.bool_)
    output = ndimage.black_tophat(array, structure=structure)
    assert_array_equal(expected, output)
def test_black_tophat04(self):
array = numpy.eye(5, dtype=numpy.bool_)
structure = numpy.ones((3, 3), dtype=numpy.bool_)
# Check that type mismatch is properly handled
output = numpy.empty_like(array, dtype=numpy.float64)
ndimage.black_tophat(array, structure=structure, output=output)
@pytest.mark.parametrize('dtype', types)
def test_hit_or_miss01(self, dtype):
    """binary_hit_or_miss with a cross structure and an output array.

    Only the isolated cross at the top-left of ``data`` matches exactly,
    so a single pixel is set in the result.
    """
    struct = [[0, 1, 0],
              [1, 1, 1],
              [0, 1, 0]]
    expected = [[0, 0, 0, 0, 0],
                [0, 1, 0, 0, 0],
                [0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0]]
    data = numpy.array([[0, 1, 0, 0, 0],
                        [1, 1, 1, 0, 0],
                        [0, 1, 0, 1, 1],
                        [0, 0, 1, 1, 1],
                        [0, 1, 1, 1, 0],
                        [0, 1, 1, 1, 1],
                        [0, 1, 1, 1, 1],
                        [0, 0, 0, 0, 0]], dtype)
    out = numpy.zeros(data.shape, bool)
    ndimage.binary_hit_or_miss(data, struct, output=out)
    assert_array_almost_equal(expected, out)
@pytest.mark.parametrize('dtype', types)
def test_hit_or_miss02(self, dtype):
    """binary_hit_or_miss with a cross structure (returned array)."""
    struct = [[0, 1, 0],
              [1, 1, 1],
              [0, 1, 0]]
    expected = [[0, 0, 0, 0, 0, 0, 0, 0],
                [0, 1, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0]]
    data = numpy.array([[0, 1, 0, 0, 1, 1, 1, 0],
                        [1, 1, 1, 0, 0, 1, 0, 0],
                        [0, 1, 0, 1, 1, 1, 1, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
    out = ndimage.binary_hit_or_miss(data, struct)
    assert_array_almost_equal(expected, out)
@pytest.mark.parametrize('dtype', types)
def test_hit_or_miss03(self, dtype):
    """binary_hit_or_miss with both a hit structure and a miss structure.

    ``struct1`` must match the foreground and ``struct2`` the background;
    two locations in ``data`` satisfy both.
    """
    struct1 = [[0, 0, 0],
               [1, 1, 1],
               [0, 0, 0]]
    struct2 = [[1, 1, 1],
               [0, 0, 0],
               [1, 1, 1]]
    expected = [[0, 0, 0, 0, 0, 1, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 1, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0]]
    data = numpy.array([[0, 1, 0, 0, 1, 1, 1, 0],
                        [1, 1, 1, 0, 0, 0, 0, 0],
                        [0, 1, 0, 1, 1, 1, 1, 0],
                        [0, 0, 1, 1, 1, 1, 1, 0],
                        [0, 1, 1, 1, 0, 1, 1, 0],
                        [0, 0, 0, 0, 1, 1, 1, 0],
                        [0, 1, 1, 1, 1, 1, 1, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
    out = ndimage.binary_hit_or_miss(data, struct1, struct2)
    assert_array_almost_equal(expected, out)
class TestDilateFix:
    """Regression tests relating grey dilation to binary dilation."""

    def setup_method(self):
        """Build a small uint8 image and its binary 3x3 dilation (as uint8)."""
        self.array = numpy.array([[0, 0, 0, 0, 0],
                                  [0, 0, 0, 0, 0],
                                  [0, 0, 0, 1, 0],
                                  [0, 0, 1, 1, 0],
                                  [0, 0, 0, 0, 0]], dtype=numpy.uint8)
        self.sq3x3 = numpy.ones((3, 3))
        self.dilated3x3 = ndimage.binary_dilation(
            self.array, structure=self.sq3x3).view(numpy.uint8)

    def test_dilation_square_structure(self):
        result = ndimage.grey_dilation(self.array, structure=self.sq3x3)
        # +1 accounts for difference between grey and binary dilation
        assert_array_almost_equal(result, self.dilated3x3 + 1)

    def test_dilation_scalar_size(self):
        result = ndimage.grey_dilation(self.array, size=3)
        assert_array_almost_equal(result, self.dilated3x3)
class TestBinaryOpeningClosing:
    """Check that extending the positional-argument list of binary_opening/
    binary_closing does not change results for the old call shape."""

    def setup_method(self):
        # 3x3 solid square plus one isolated corner pixel.
        a = numpy.zeros((5, 5), dtype=bool)
        a[1:4, 1:4] = True
        a[4, 4] = True
        self.array = a
        self.sq3x3 = numpy.ones((3, 3))
        # Reference results using the shorter positional call
        # (presumably input, structure, iterations, output, origin —
        # confirm against the scipy.ndimage signature).
        self.opened_old = ndimage.binary_opening(self.array, self.sq3x3,
                                                 1, None, 0)
        self.closed_old = ndimage.binary_closing(self.array, self.sq3x3,
                                                 1, None, 0)

    def test_opening_new_arguments(self):
        # Same call with three additional trailing positional arguments;
        # the result must be unchanged.
        opened_new = ndimage.binary_opening(self.array, self.sq3x3, 1, None,
                                            0, None, 0, False)
        assert_array_equal(opened_new, self.opened_old)

    def test_closing_new_arguments(self):
        closed_new = ndimage.binary_closing(self.array, self.sq3x3, 1, None,
                                            0, None, 0, False)
        assert_array_equal(closed_new, self.closed_old)
def test_binary_erosion_noninteger_iterations():
    """Regression test for gh-9905/gh-9909: non-integer `iterations`
    must raise TypeError."""
    data = numpy.ones([1])
    for bad in (0.5, 1.5):
        assert_raises(TypeError, ndimage.binary_erosion, data,
                      iterations=bad)
def test_binary_dilation_noninteger_iterations():
    """Regression test for gh-9905/gh-9909: non-integer `iterations`
    must raise TypeError."""
    data = numpy.ones([1])
    for bad in (0.5, 1.5):
        assert_raises(TypeError, ndimage.binary_dilation, data,
                      iterations=bad)
def test_binary_opening_noninteger_iterations():
    """Regression test for gh-9905/gh-9909: non-integer `iterations`
    must raise TypeError."""
    data = numpy.ones([1])
    for bad in (0.5, 1.5):
        assert_raises(TypeError, ndimage.binary_opening, data,
                      iterations=bad)
def test_binary_closing_noninteger_iterations():
    """Regression test for gh-9905/gh-9909: non-integer `iterations`
    must raise TypeError."""
    data = numpy.ones([1])
    for bad in (0.5, 1.5):
        assert_raises(TypeError, ndimage.binary_closing, data,
                      iterations=bad)
def test_binary_closing_noninteger_brute_force_passes_when_true():
    """Non-integer `brute_force` values are accepted and behave like
    their boolean (truthiness) equivalents (gh-9905/gh-9909)."""
    data = numpy.ones([1])
    for value in (1.5, 0.0):
        assert (ndimage.binary_erosion(data, iterations=2,
                                       brute_force=value)
                == ndimage.binary_erosion(data, iterations=2,
                                          brute_force=bool(value)))
@pytest.mark.parametrize(
    'function',
    ['binary_erosion', 'binary_dilation', 'binary_opening', 'binary_closing'],
)
@pytest.mark.parametrize('iterations', [1, 5])
@pytest.mark.parametrize('brute_force', [False, True])
def test_binary_input_as_output(function, iterations, brute_force):
    """Each binary morphology function must (a) leave its input untouched
    when `output` is not the input, and (b) support `output=input`."""
    rng = numpy.random.RandomState(123)
    data = rng.randint(low=0, high=2, size=100).astype(bool)
    func = getattr(ndimage, function)
    snapshot = data.copy()
    expected = func(data, brute_force=brute_force, iterations=iterations)
    # The call above must not have modified its input.
    assert_array_equal(data, snapshot)
    # Writing in-place: data must now hold the expected result.
    func(data, brute_force=brute_force, iterations=iterations, output=data)
    assert_array_equal(expected, data)
def test_binary_hit_or_miss_input_as_output():
    """binary_hit_or_miss must not modify its input, and must support
    writing the result in-place via output=input."""
    rng = numpy.random.RandomState(123)
    data = rng.randint(low=0, high=2, size=100).astype(bool)
    snapshot = data.copy()
    expected = ndimage.binary_hit_or_miss(data)
    # The call above must not have modified its input.
    assert_array_equal(data, snapshot)
    # Writing in-place: data must now hold the expected result.
    ndimage.binary_hit_or_miss(data, output=data)
    assert_array_equal(expected, data)
| 44.515599
| 79
| 0.381254
| 14,912
| 105,591
| 2.61816
| 0.020722
| 0.233595
| 0.297679
| 0.338815
| 0.91942
| 0.911608
| 0.891322
| 0.87224
| 0.846371
| 0.810819
| 0
| 0.17429
| 0.456895
| 105,591
| 2,371
| 80
| 44.534374
| 0.506513
| 0.009556
| 0
| 0.787935
| 0
| 0
| 0.005471
| 0
| 0
| 0
| 0
| 0
| 0.077494
| 1
| 0.068213
| false
| 0.000464
| 0.002784
| 0
| 0.07239
| 0.026914
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
715ed8dff575dad01c60817da2ebfff4a69b9877
| 41
|
py
|
Python
|
.history/section4/pp_05_20200731000641.py
|
KustomApe/python_engineer
|
cb931f5c685be87ea518c7a0a6dd89154dce03a8
|
[
"MIT"
] | null | null | null |
.history/section4/pp_05_20200731000641.py
|
KustomApe/python_engineer
|
cb931f5c685be87ea518c7a0a6dd89154dce03a8
|
[
"MIT"
] | null | null | null |
.history/section4/pp_05_20200731000641.py
|
KustomApe/python_engineer
|
cb931f5c685be87ea518c7a0a6dd89154dce03a8
|
[
"MIT"
] | null | null | null |
# Demonstrate tuple immutability: item assignment on a tuple always raises
# TypeError, so guard it instead of letting the script crash.
t = (1, 2, 3, 4, 1, 2)
print(type(t))
try:
    t[0] = 100  # tuples do not support item assignment
except TypeError as exc:
    print(exc)
| 13.666667
| 22
| 0.414634
| 12
| 41
| 1.416667
| 0.666667
| 0.235294
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 0.268293
| 41
| 3
| 23
| 13.666667
| 0.233333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
71ca44daabef295113ae1820fef7784588c09e76
| 35
|
py
|
Python
|
schieber/player/challenge_player/strategy/flags/previously_had_stich_flag.py
|
JoelNiklaus/pyschieber
|
29ef6def16db90765849be7f572ef81f03c5f6fb
|
[
"MIT"
] | 5
|
2018-01-17T08:11:14.000Z
|
2018-11-27T11:37:15.000Z
|
schieber/player/challenge_player/strategy/flags/previously_had_stich_flag.py
|
JoelNiklaus/pyschieber
|
29ef6def16db90765849be7f572ef81f03c5f6fb
|
[
"MIT"
] | 4
|
2018-05-09T08:41:05.000Z
|
2018-11-16T08:07:39.000Z
|
schieber/player/challenge_player/strategy/flags/previously_had_stich_flag.py
|
JoelNiklaus/pyschieber
|
29ef6def16db90765849be7f572ef81f03c5f6fb
|
[
"MIT"
] | 3
|
2018-04-20T07:39:30.000Z
|
2018-11-10T12:44:08.000Z
|
class PreviouslyHadStichFlag: pass  # marker flag class, no behavior; presumably signals the player previously had the Stich — confirm against strategy code
| 17.5
| 34
| 0.885714
| 3
| 35
| 10.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085714
| 35
| 1
| 35
| 35
| 0.96875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 1
| 0
| 0
| 1
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 7
|
e0a5f4aad26497f3d6c423265f041bdfe5bf920e
| 2,937
|
py
|
Python
|
great_international/migrations/0065_auto_20190808_1032.py
|
uktrade/directory-cms
|
8c8d13ce29ea74ddce7a40f3dd29c8847145d549
|
[
"MIT"
] | 6
|
2018-03-20T11:19:07.000Z
|
2021-10-05T07:53:11.000Z
|
great_international/migrations/0065_auto_20190808_1032.py
|
uktrade/directory-cms
|
8c8d13ce29ea74ddce7a40f3dd29c8847145d549
|
[
"MIT"
] | 802
|
2018-02-05T14:16:13.000Z
|
2022-02-10T10:59:21.000Z
|
great_international/migrations/0065_auto_20190808_1032.py
|
uktrade/directory-cms
|
8c8d13ce29ea74ddce7a40f3dd29c8847145d549
|
[
"MIT"
] | 6
|
2019-01-22T13:19:37.000Z
|
2019-07-01T10:35:26.000Z
|
# Generated by Django 2.2.3 on 2019-08-08 10:32
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add one nullable FK to wagtaildocs.Document per language variant of
    the ebook-section PDF link on aboutukwhychoosetheukpage.

    The nine AddField operations were byte-identical except for the field
    name suffix, so they are generated from a suffix list (same order, same
    operations as the original hand-written list).
    """

    dependencies = [
        ('wagtaildocs', '0010_document_file_hash'),
        ('great_international', '0064_merge_20190808_0928'),
    ]

    # Base field first, then the per-language translated variants.
    _LANG_SUFFIXES = ['', '_ar', '_de', '_en_gb', '_es', '_fr', '_ja',
                      '_pt', '_zh_hans']

    operations = [
        migrations.AddField(
            model_name='aboutukwhychoosetheukpage',
            name='ebook_section_pdf_link' + suffix,
            field=models.ForeignKey(
                blank=True, null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                related_name='+', to='wagtaildocs.Document'),
        )
        for suffix in _LANG_SUFFIXES
    ]
| 48.147541
| 150
| 0.667688
| 320
| 2,937
| 5.878125
| 0.18125
| 0.046784
| 0.074429
| 0.116959
| 0.870813
| 0.870813
| 0.870813
| 0.870813
| 0.870813
| 0.870813
| 0
| 0.015073
| 0.209397
| 2,937
| 60
| 151
| 48.95
| 0.795004
| 0.015322
| 0
| 0.666667
| 1
| 0
| 0.249481
| 0.173702
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.037037
| 0
| 0.092593
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e0b7fb6385450608845b0f322c93de62da17a49b
| 7,311
|
py
|
Python
|
libvpx/tools/3D-Reconstruction/MotionEST/SearchSmooth.py
|
juanjp600/webm_mem_plaback
|
8e166bf6f0920bc91369db5d8ae1f5462c6ac99b
|
[
"BSD-3-Clause"
] | 6
|
2021-07-05T16:09:39.000Z
|
2022-03-06T22:44:42.000Z
|
libvpx/tools/3D-Reconstruction/MotionEST/SearchSmooth.py
|
juanjp600/webm_mem_plaback
|
8e166bf6f0920bc91369db5d8ae1f5462c6ac99b
|
[
"BSD-3-Clause"
] | 7
|
2022-03-15T13:25:39.000Z
|
2022-03-15T13:25:44.000Z
|
libvpx/tools/3D-Reconstruction/MotionEST/SearchSmooth.py
|
juanjp600/webm_mem_plaback
|
8e166bf6f0920bc91369db5d8ae1f5462c6ac99b
|
[
"BSD-3-Clause"
] | 2
|
2018-10-30T01:48:36.000Z
|
2021-05-25T02:39:44.000Z
|
#!/usr/bin/env python
# coding: utf-8
import numpy as np
import numpy.linalg as LA
from Util import MSE
from MotionEST import MotionEST
"""Search & Smooth Model with Adapt Weights"""
class SearchSmoothAdapt(MotionEST):
  """
  Search & Smooth model with adaptive weights.

  Constructor:
    cur_f: current frame
    ref_f: reference frame
    blk_size: block size
    search: motion estimator providing the initial motion field (search.mf)
    max_iter: maximum number of smoothing iterations

  NOTE(review): this module uses `xrange`, i.e. it targets Python 2.
  """

  def __init__(self, cur_f, ref_f, blk_size, search, max_iter=100):
    self.search = search
    self.max_iter = max_iter
    super(SearchSmoothAdapt, self).__init__(cur_f, ref_f, blk_size)

  """
  get local differential of reference
  """

  def getRefLocalDiff(self, mvs):
    # Returns an num_row x num_col grid of 2x2 outer-product matrices
    # [[Ir*Ir, Ir*Ic], [Ic*Ir, Ic*Ic]] built from per-block intensity
    # differences between vertically/horizontally shifted reference blocks
    # and the matched block.
    m, n = self.num_row, self.num_col
    localDiff = [[] for _ in xrange(m)]
    blk_sz = self.blk_sz
    for r in xrange(m):
      for c in xrange(n):
        I_row = 0
        I_col = 0
        #get ssd surface
        count = 0
        center = self.cur_yuv[r * blk_sz:(r + 1) * blk_sz,
                              c * blk_sz:(c + 1) * blk_sz, 0]
        # Matched position in the reference, clipped to stay inside the frame.
        ty = np.clip(r * blk_sz + int(mvs[r, c, 0]), 0, self.height - blk_sz)
        tx = np.clip(c * blk_sz + int(mvs[r, c, 1]), 0, self.width - blk_sz)
        target = self.ref_yuv[ty:ty + blk_sz, tx:tx + blk_sz, 0]
        # Vertical (row) difference from the blocks one block above/below.
        for y, x in {(ty - blk_sz, tx), (ty + blk_sz, tx)}:
          if 0 <= y < self.height - blk_sz and 0 <= x < self.width - blk_sz:
            nb = self.ref_yuv[y:y + blk_sz, x:x + blk_sz, 0]
            I_row += np.sum(np.abs(nb - center)) - np.sum(
                np.abs(target - center))
            count += 1
        # NOTE(review): if neither neighbor is in range count is 0 and this
        # divides by zero — presumably unreachable for frames taller than one
        # block; confirm.
        I_row //= (count * blk_sz * blk_sz)
        count = 0
        # Horizontal (column) difference from the blocks one block left/right.
        for y, x in {(ty, tx - blk_sz), (ty, tx + blk_sz)}:
          if 0 <= y < self.height - blk_sz and 0 <= x < self.width - blk_sz:
            nb = self.ref_yuv[y:y + blk_sz, x:x + blk_sz, 0]
            I_col += np.sum(np.abs(nb - center)) - np.sum(
                np.abs(target - center))
            count += 1
        I_col //= (count * blk_sz * blk_sz)
        localDiff[r].append(
            np.array([[I_row * I_row, I_row * I_col],
                      [I_col * I_row, I_col * I_col]]))
    return localDiff

  """
  add smooth constraint
  """

  def smooth(self, uvs, mvs):
    # One smoothing pass: blend each block's motion (in block units, `uvs`)
    # with a weighted average of its 8 neighbors; out-of-range neighbors
    # fall back to the block's own value.
    sm_uvs = np.zeros(uvs.shape)
    blk_sz = self.blk_sz
    for r in xrange(self.num_row):
      for c in xrange(self.num_col):
        nb_uv = np.array([0.0, 0.0])
        # 4-neighbors weighted 1/6 each, diagonals 1/12 each (weights sum to 1).
        for i, j in {(r - 1, c), (r + 1, c), (r, c - 1), (r, c + 1)}:
          if 0 <= i < self.num_row and 0 <= j < self.num_col:
            nb_uv += uvs[i, j] / 6.0
          else:
            nb_uv += uvs[r, c] / 6.0
        for i, j in {(r - 1, c - 1), (r - 1, c + 1), (r + 1, c - 1),
                     (r + 1, c + 1)}:
          if 0 <= i < self.num_row and 0 <= j < self.num_col:
            nb_uv += uvs[i, j] / 12.0
          else:
            nb_uv += uvs[r, c] / 12.0
        # Adaptive weight: ratio of the neighbor-average distortion to the
        # matched-vector distortion decides how much to trust the match.
        ssd_nb = self.block_dist(r, c, self.blk_sz * nb_uv)
        mv = mvs[r, c]
        ssd_mv = self.block_dist(r, c, mv)
        alpha = (ssd_nb - ssd_mv) / (ssd_mv + 1e-6)
        M = alpha * self.localDiff[r][c]
        P = M + np.identity(2)
        inv_P = LA.inv(P)
        sm_uvs[r, c] = np.dot(inv_P, nb_uv) + np.dot(
            np.matmul(inv_P, M), mv / blk_sz)
    return sm_uvs

  def block_matching(self):
    # Delegate the initial block matching to the wrapped search estimator.
    self.search.motion_field_estimation()

  def motion_field_estimation(self):
    self.localDiff = self.getRefLocalDiff(self.search.mf)
    #get matching results
    mvs = self.search.mf
    #add smoothness constraint
    uvs = mvs / self.blk_sz
    for _ in xrange(self.max_iter):
      uvs = self.smooth(uvs, mvs)
    self.mf = uvs * self.blk_sz
"""Search & Smooth Model with Fixed Weights"""
class SearchSmoothFix(MotionEST):
  """
  Search & Smooth model with a fixed smoothing weight.

  Constructor:
    cur_f: current frame
    ref_f: reference frame
    blk_size: block size
    search: motion estimator providing the initial motion field (search.mf)
    beta: neighbor-loss weight (fixed, unlike SearchSmoothAdapt)
    max_iter: maximum number of smoothing iterations

  NOTE(review): getRefLocalDiff is duplicated verbatim from
  SearchSmoothAdapt; only smooth() differs (fixed beta instead of the
  adaptive alpha).  Uses `xrange`, i.e. targets Python 2.
  """

  def __init__(self, cur_f, ref_f, blk_size, search, beta, max_iter=100):
    self.search = search
    self.max_iter = max_iter
    self.beta = beta
    super(SearchSmoothFix, self).__init__(cur_f, ref_f, blk_size)

  """
  get local differential of reference
  """

  def getRefLocalDiff(self, mvs):
    # Same 2x2 local-difference matrices as SearchSmoothAdapt.getRefLocalDiff.
    m, n = self.num_row, self.num_col
    localDiff = [[] for _ in xrange(m)]
    blk_sz = self.blk_sz
    for r in xrange(m):
      for c in xrange(n):
        I_row = 0
        I_col = 0
        #get ssd surface
        count = 0
        center = self.cur_yuv[r * blk_sz:(r + 1) * blk_sz,
                              c * blk_sz:(c + 1) * blk_sz, 0]
        ty = np.clip(r * blk_sz + int(mvs[r, c, 0]), 0, self.height - blk_sz)
        tx = np.clip(c * blk_sz + int(mvs[r, c, 1]), 0, self.width - blk_sz)
        target = self.ref_yuv[ty:ty + blk_sz, tx:tx + blk_sz, 0]
        for y, x in {(ty - blk_sz, tx), (ty + blk_sz, tx)}:
          if 0 <= y < self.height - blk_sz and 0 <= x < self.width - blk_sz:
            nb = self.ref_yuv[y:y + blk_sz, x:x + blk_sz, 0]
            I_row += np.sum(np.abs(nb - center)) - np.sum(
                np.abs(target - center))
            count += 1
        # NOTE(review): count == 0 would divide by zero here — presumably
        # unreachable for frames taller than one block; confirm.
        I_row //= (count * blk_sz * blk_sz)
        count = 0
        for y, x in {(ty, tx - blk_sz), (ty, tx + blk_sz)}:
          if 0 <= y < self.height - blk_sz and 0 <= x < self.width - blk_sz:
            nb = self.ref_yuv[y:y + blk_sz, x:x + blk_sz, 0]
            I_col += np.sum(np.abs(nb - center)) - np.sum(
                np.abs(target - center))
            count += 1
        I_col //= (count * blk_sz * blk_sz)
        localDiff[r].append(
            np.array([[I_row * I_row, I_row * I_col],
                      [I_col * I_row, I_col * I_col]]))
    return localDiff

  """
  add smooth constraint
  """

  def smooth(self, uvs, mvs):
    # One smoothing pass with fixed weight beta: solve
    # (M + beta*I) u = beta*nb_uv + M*mv for each block.
    sm_uvs = np.zeros(uvs.shape)
    blk_sz = self.blk_sz
    for r in xrange(self.num_row):
      for c in xrange(self.num_col):
        nb_uv = np.array([0.0, 0.0])
        # 4-neighbors weighted 1/6 each, diagonals 1/12 each (weights sum to 1).
        for i, j in {(r - 1, c), (r + 1, c), (r, c - 1), (r, c + 1)}:
          if 0 <= i < self.num_row and 0 <= j < self.num_col:
            nb_uv += uvs[i, j] / 6.0
          else:
            nb_uv += uvs[r, c] / 6.0
        for i, j in {(r - 1, c - 1), (r - 1, c + 1), (r + 1, c - 1),
                     (r + 1, c + 1)}:
          if 0 <= i < self.num_row and 0 <= j < self.num_col:
            nb_uv += uvs[i, j] / 12.0
          else:
            nb_uv += uvs[r, c] / 12.0
        mv = mvs[r, c] / blk_sz
        M = self.localDiff[r][c]
        P = M + self.beta * np.identity(2)
        inv_P = LA.inv(P)
        sm_uvs[r, c] = np.dot(inv_P, self.beta * nb_uv) + np.dot(
            np.matmul(inv_P, M), mv)
    return sm_uvs

  def block_matching(self):
    # Delegate the initial block matching to the wrapped search estimator.
    self.search.motion_field_estimation()

  def motion_field_estimation(self):
    #get local structure
    self.localDiff = self.getRefLocalDiff(self.search.mf)
    #get matching results
    mvs = self.search.mf
    #add smoothness constraint
    uvs = mvs / self.blk_sz
    for _ in xrange(self.max_iter):
      uvs = self.smooth(uvs, mvs)
    self.mf = uvs * self.blk_sz
| 34.163551
| 77
| 0.530023
| 1,189
| 7,311
| 3.081581
| 0.111018
| 0.094159
| 0.009825
| 0.021834
| 0.895197
| 0.887009
| 0.877729
| 0.877729
| 0.877729
| 0.877729
| 0
| 0.022681
| 0.330598
| 7,311
| 213
| 78
| 34.323944
| 0.725991
| 0.090959
| 0
| 0.84
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.026667
| 0
| 0.133333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e0bd9eac7242ce3eea6a03127dac7cf25e854765
| 3,470
|
py
|
Python
|
samples/tabletop/configurations.py
|
fbottarel/Mask_RCNN
|
3e2a9152a85a72c589d0fc0b77ddcfed3561d302
|
[
"MIT"
] | null | null | null |
samples/tabletop/configurations.py
|
fbottarel/Mask_RCNN
|
3e2a9152a85a72c589d0fc0b77ddcfed3561d302
|
[
"MIT"
] | 1
|
2019-06-12T08:18:21.000Z
|
2019-06-12T08:18:21.000Z
|
samples/tabletop/configurations.py
|
fbottarel/Mask_RCNN
|
3e2a9152a85a72c589d0fc0b77ddcfed3561d302
|
[
"MIT"
] | 1
|
2019-06-11T14:07:18.000Z
|
2019-06-11T14:07:18.000Z
|
############################################################
# Training Configurations
############################################################
from mrcnn.config import Config
class TabletopConfigTraining(Config):
    """Configuration for training on the synthetic tabletop dataset.
    Derives from the base Config class and overrides some values.
    """
    # Give the configuration a recognizable name
    NAME = "synth_tabletop_training"
    # P100s can hold up to 4 images using ResNet50.
    # During inference, make sure to set this to 1.
    IMAGES_PER_GPU = 4
    # Define number of GPUs to use
    GPU_COUNT = 4
    # Comma-separated device list — presumably exported as
    # CUDA_VISIBLE_DEVICES by the training script; verify against caller.
    GPU_ID = "0,1,2,3"
    # Number of classes (including background)
    NUM_CLASSES = 1 + 21  # Background + random YCB objects
    # Specify the backbone network
    BACKBONE = "resnet50"
    # Number of training steps per epoch.
    # None — presumably derived from dataset size at runtime; TODO confirm.
    STEPS_PER_EPOCH = None
    # Number of epochs
    EPOCHS = 100
    # Skip detections with < some confidence level
    DETECTION_MIN_CONFIDENCE = 0.9
    # Define stages to be fine tuned ('4+' = ResNet stage 4 and up)
    LAYERS_TUNE = '4+'
class TabletopConfigInference(Config):
    """Configuration for inference on the synthetic tabletop dataset.
    Derives from the base Config class and overrides some values.

    (Docstring previously said "training" — copy-paste; this class is the
    inference counterpart of TabletopConfigTraining.)
    """
    # Give the configuration a recognizable name
    NAME = "synth_tabletop_inference"
    # P100s can hold up to 4 images using ResNet50.
    # During inference, make sure to set this to 1.
    IMAGES_PER_GPU = 1
    # Define number of GPUs to use
    GPU_COUNT = 1
    # Single-device inference; format matches the training config.
    GPU_ID = "0"
    # Number of classes (including background)
    NUM_CLASSES = 1 + 21  # Background + random YCB objects
    # Specify the backbone network
    BACKBONE = "resnet50"
    # Skip detections with < some confidence level
    # (looser than the 0.9 used during training evaluation)
    DETECTION_MIN_CONFIDENCE = 0.75
class YCBVideoConfigTraining(Config):
    """Configuration for training on the YCB_Video dataset for segmentation.
    Derives from the base Config class and overrides some values.
    """
    # Give the configuration a recognizable name
    NAME = "ycb_video_training"
    # P100s can hold up to 4 images using ResNet50.
    # During inference, make sure to set this to 1.
    IMAGES_PER_GPU = 4
    # Define number of GPUs to use
    GPU_COUNT = 4
    # Comma-separated device list — presumably exported as
    # CUDA_VISIBLE_DEVICES by the training script; verify against caller.
    GPU_ID = "0,1,2,3"
    # Number of classes (including background)
    NUM_CLASSES = 1 + 21  # Background + 21 YCB_video objects
    # Specify the backbone network
    BACKBONE = "resnet50"
    # Number of training steps per epoch.
    # None — presumably derived from dataset size at runtime; TODO confirm.
    STEPS_PER_EPOCH = None
    # Number of epochs (default inherited from Config while commented out)
    #EPOCHS = 50
    # Skip detections with < some confidence level
    DETECTION_MIN_CONFIDENCE = 0.9
    # Define stages to be fine tuned ('heads' = only the RPN/classifier heads)
    LAYERS_TUNE = 'heads'
class YCBVideoConfigInference(Config):
    """Configuration for performing inference with the YCB_Video dataset for segmentation.
    Derives from the base Config class and overrides some values.
    """
    # Give the configuration a recognizable name
    NAME = "ycb_video_inference"
    # P100s can hold up to 4 images using ResNet50.
    # During inference, make sure to set this to 1.
    IMAGES_PER_GPU = 1
    # Define number of GPUs to use
    GPU_COUNT = 1
    # Single-device inference; format matches the training config.
    GPU_ID = "0"
    # Number of classes (including background)
    NUM_CLASSES = 1 + 21  # Background + 21 YCB_video objects
    # Specify the backbone network
    BACKBONE = "resnet50"
    # Skip detections with < some confidence level
    # (looser than the 0.9 used during training evaluation)
    DETECTION_MIN_CONFIDENCE = 0.75
| 27.76
| 90
| 0.668876
| 448
| 3,470
| 5.078125
| 0.207589
| 0.042198
| 0.038681
| 0.031648
| 0.905495
| 0.905495
| 0.891429
| 0.891429
| 0.891429
| 0.891429
| 0
| 0.032687
| 0.241787
| 3,470
| 124
| 91
| 27.983871
| 0.832003
| 0.569741
| 0
| 0.684211
| 0
| 0
| 0.108934
| 0.036834
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.026316
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
460fa4b3c19d1f39c737ecea2ee0d2b4c02ccca2
| 70
|
py
|
Python
|
src/dataset/mnist.py
|
ytsmiling/lmt
|
31e36e54d858fbc0a1f2eab12d5a86a97284339f
|
[
"MIT"
] | 32
|
2018-02-13T10:41:09.000Z
|
2021-07-07T16:07:40.000Z
|
src/dataset/mnist.py
|
ytsmiling/lmt
|
31e36e54d858fbc0a1f2eab12d5a86a97284339f
|
[
"MIT"
] | 4
|
2018-02-16T14:15:04.000Z
|
2019-12-14T10:39:06.000Z
|
src/dataset/mnist.py
|
ytsmiling/lmt
|
31e36e54d858fbc0a1f2eab12d5a86a97284339f
|
[
"MIT"
] | 6
|
2018-03-22T20:49:39.000Z
|
2022-01-08T23:47:17.000Z
|
import chainer
def mnist():
    """Return the MNIST dataset pair (train, test) provided by chainer."""
    dataset_pair = chainer.datasets.get_mnist()
    return dataset_pair
| 11.666667
| 39
| 0.728571
| 9
| 70
| 5.555556
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.171429
| 70
| 5
| 40
| 14
| 0.862069
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.